[Sqlalchemy-commits] [1411] sqlalchemy/branches/schema/test: more doc fixes, unit tests working
From: <co...@sq...> - 2006-05-06 00:55:06
Revision: 1411
Author:   zzzeek
Date:     2006-05-05 19:54:41 -0500 (Fri, 05 May 2006)

Log Message:
more doc fixes, unit tests working

Modified Paths:
    sqlalchemy/branches/schema/doc/build/components/pydoc.myt
    sqlalchemy/branches/schema/doc/build/content/dbengine.txt
    sqlalchemy/branches/schema/doc/build/content/metadata.txt
    sqlalchemy/branches/schema/doc/build/content/plugins.txt
    sqlalchemy/branches/schema/doc/build/content/pooling.myt
    sqlalchemy/branches/schema/doc/build/content/tutorial.txt
    sqlalchemy/branches/schema/doc/build/content/types.txt
    sqlalchemy/branches/schema/lib/sqlalchemy/exceptions.py
    sqlalchemy/branches/schema/lib/sqlalchemy/mods/threadlocal.py
    sqlalchemy/branches/schema/lib/sqlalchemy/orm/__init__.py
    sqlalchemy/branches/schema/lib/sqlalchemy/orm/dependency.py
    sqlalchemy/branches/schema/lib/sqlalchemy/orm/mapper.py
    sqlalchemy/branches/schema/lib/sqlalchemy/orm/query.py
    sqlalchemy/branches/schema/lib/sqlalchemy/orm/unitofwork.py
    sqlalchemy/branches/schema/lib/sqlalchemy/schema.py
    sqlalchemy/branches/schema/lib/sqlalchemy/sql.py
    sqlalchemy/branches/schema/lib/sqlalchemy/types.py
    sqlalchemy/branches/schema/test/manytomany.py
    sqlalchemy/branches/schema/test/objectstore.py
href="#sqlalchemybranchesschemalibsqlalchemyormquerypy">sqlalchemy/branches/schema/lib/sqlalchemy/orm/query.py</a></li> <li><a href="#sqlalchemybranchesschemalibsqlalchemyormunitofworkpy">sqlalchemy/branches/schema/lib/sqlalchemy/orm/unitofwork.py</a></li> <li><a href="#sqlalchemybranchesschemalibsqlalchemyschemapy">sqlalchemy/branches/schema/lib/sqlalchemy/schema.py</a></li> <li><a href="#sqlalchemybranchesschemalibsqlalchemysqlpy">sqlalchemy/branches/schema/lib/sqlalchemy/sql.py</a></li> <li><a href="#sqlalchemybranchesschemalibsqlalchemytypespy">sqlalchemy/branches/schema/lib/sqlalchemy/types.py</a></li> <li><a href="#sqlalchemybranchesschematestmanytomanypy">sqlalchemy/branches/schema/test/manytomany.py</a></li> <li><a href="#sqlalchemybranchesschematestobjectstorepy">sqlalchemy/branches/schema/test/objectstore.py</a></li> </ul> </div> <div id="patch"> <h3>Diff</h3> <a id="sqlalchemybranchesschemadocbuildcomponentspydocmyt"></a> <div class="modfile"><h4>Modified: sqlalchemy/branches/schema/doc/build/components/pydoc.myt (1410 => 1411)</h4> <pre class="diff"><span> <span class="info">--- sqlalchemy/branches/schema/doc/build/components/pydoc.myt 2006-05-05 17:52:32 UTC (rev 1410) +++ sqlalchemy/branches/schema/doc/build/components/pydoc.myt 2006-05-06 00:54:41 UTC (rev 1411) </span><span class="lines">@@ -1,7 +1,7 @@ </span><span class="cx"> <%global> </span><span class="cx"> import re, types, string </span><span class="cx"> def format_paragraphs(text): </span><del>- return re.sub(r'([\w ])\n([\w ])', r'\1 \2', text or '', re.S) </del><ins>+ return re.sub(r'([\w])\n([\w])', r'\1 \2', text or '', re.S) </ins><span class="cx"> </%global> </span><span class="cx"> </span><span class="cx"> <%method obj_doc> </span></span></pre></div> <a id="sqlalchemybranchesschemadocbuildcontentdbenginetxt"></a> <div class="modfile"><h4>Modified: sqlalchemy/branches/schema/doc/build/content/dbengine.txt (1410 => 1411)</h4> <pre class="diff"><span> <span class="info">--- sqlalchemy/branches/schema/doc/build/content/dbengine.txt 2006-05-05 17:52:32 UTC (rev 1410) +++ sqlalchemy/branches/schema/doc/build/content/dbengine.txt 2006-05-06 00:54:41 UTC (rev 1411) </span><span class="lines">@@ -176,6 +176,20 @@ </span><span class="cx"> </span><span class="cx"> While the `close()` method is still available with the "threadlocal" strategy, it should be used carefully. Above, if we issued a `close()` call on `r1`, and then tried to further work with results from `r2`, `r2` would be in an invalid state since its connection was already returned to the pool. By relying on `__del__()` to automatically clean up resources, this condition will never occur. </span><span class="cx"> </span><ins>+To get at the actual `Connection` object which is used by implicit executions, call the `contextual_connection()` method on `Engine`: + + {python title="Contextual Connection"} + # threadlocal strategy + db = create_engine('mysql://localhost/test', strategy='threadlocal') + + conn1 = db.contextual_connection() + conn2 = db.contextual_connection() + + >>> assert conn1 is conn2 + True + +When the `plain` strategy is used, the `contextual_connection()` method is synonymous with the `connect()` method; both return a distinct connection from the pool. + </ins><span class="cx"> At this point, you're probably saying, "wow, why would anyone *ever* want to use the [insert name here] strategy ??" 
Modified: sqlalchemy/branches/schema/doc/build/content/metadata.txt (1410 => 1411)
--- sqlalchemy/branches/schema/doc/build/content/metadata.txt	2006-05-05 17:52:32 UTC (rev 1410)
+++ sqlalchemy/branches/schema/doc/build/content/metadata.txt	2006-05-06 00:54:41 UTC (rev 1411)
@@ -29,7 +29,7 @@
     Column('pref_value', String(100))
 )

-The specific datatypes for each Column, such as Integer, String, etc. are defined in [types](rel:types) and are part of the `sqlalchemy` module namespace.
+The specific datatypes for each Column, such as Integer, String, etc. are described in [types](rel:types), and exist within the module `sqlalchemy.types` as well as the global `sqlalchemy` namespace.

 The `MetaData` object supports some handy methods, such as getting a list of Tables in the order (or reverse) of their dependency:

Modified: sqlalchemy/branches/schema/doc/build/content/plugins.txt (1410 => 1411)
--- sqlalchemy/branches/schema/doc/build/content/plugins.txt	2006-05-05 17:52:32 UTC (rev 1410)
+++ sqlalchemy/branches/schema/doc/build/content/plugins.txt	2006-05-06 00:54:41 UTC (rev 1411)
@@ -7,6 +7,8 @@

 ### threadlocal

+Establishes 'threadlocal' as the default strategy for new `Engine`s, and installs an implicitly-accessible threadlocal `Session` context for the `current_session()` function. Usually this is used in combination with `Tables` that are associated with `BoundMetaData` or `DynamicMetaData`, so that the `Session` does not need to be bound to any `Engine` explicitly.
+
     {python}
     import sqlalchemy.mods.threadlocal
     from sqlalchemy import *
@@ -22,20 +24,68 @@
     mapper(User, user_table)

     # thread local session
-    session = get_session()
+    session = current_session()

     # "user" object is added to the session automatically
     user = User()

     session.flush()

-Establishes 'threadlocal' as the default strategy for new `Engine`s, and installs a thread local Session context for the `get_session()` function. Usually this is used in combination with `BoundMetaData` or `DynamicMetaData` for `Table` objects, so that the `Session` does not need to be bound to any `Engine` explicitly.

-#### get_session() Method {@name=getsession}
+#### current_session() Method {@name=currentsession}
+
+`current_session()` is a method that always exists in the `sqlalchemy.orm.session` module; however, by default it returns `None`. When `threadlocal` is installed, `current_session()` returns the `Session` that is associated with the current thread. `current_session()` also takes an object instance as an optional argument, which is to allow objects or classes that are associated with a specific session context; this feature is not used by the `threadlocal` mod.
+
+This method is called when new, transient objects are created, in order to locate a `Session` to which the new object can be attached. This occurs because when a `Mapper` is first constructed for a class, it decorates the class's `__init__()` method in a manner like the following:
+
+    {python}
+    oldinit = class_.__init__  # the previous init method
+    def __init__(self):
+        session = current_session(self)
+        if session is not None:
+            session.save(self)  # attach to the current session
+        oldinit(self)  # call previous init method
+
+Since the `threadlocal` module provides an implementation for `current_session()` which returns the thread-associated `Session`, the result is that the `__init__()` method of all mapped classes will automatically add the new instance to the current thread's `Session`.
+
+An instance can be redirected to a different `Session` at construction time by specifying the keyword parameter `_sa_session`:
+
+    {python}
+    session = create_session()  # create a new session distinct from the thread-local session
+    myuser = User(_sa_session=session)  # make a new User that is saved to this session
+
+Similarly, the **entity_name** parameter, which specifies an alternate `Mapper` to be used for persisting an instance, can be specified via `_sa_entity_name`:
+
+    {python}
+    myuser = User(_sa_session=session, _sa_entity_name='altentity')
+
+#### Default Query Objects
+
+`current_session()` is also used by the `Query` object to locate a `Session` with which to store newly loaded instances, if the `Query` is not already associated with a specific `Session`. As a result, the `Query` can be constructed standalone from a mapper or class:
+
+    {python}
+    # create a Query from a class
+    query = Query(User)
+
+    # specify entity name
+    query = Query(User, entity_name='foo')
+
+    # create a Query from a mapper
+    query = Query(mapper)
+
 #### objectstore Namespace {@name=objectstore}
+
+The `objectstore` is an object added to the `sqlalchemy` namespace which provides a global proxy to the `Session` returned by `current_session()`. `objectstore` can be treated just like the `Session` itself:
+
+    {python}
+    objectstore.save(instance)
+    objectstore.flush()
+
+    objectstore.clear()
+
 #### Attaching Mappers to their Class {@name=attaching}

-A full-blown "monkeypatch" function that creates a primary mapper, attaches the mapper to the class, and also the methods `get, get_by, select, select_by, selectone, selectfirst, commit, expire, refresh, expunge` and `delete`:
+With `current_session()` handling the details of providing a `Session` in all cases, the `assign_mapper` function provides some of the functionality of `Query` and `Session` directly off the mapped instances themselves. This is a "monkeypatch" function that creates a primary mapper, attaches the mapper to the class, and also the methods `get, get_by, select, select_by, selectone, selectfirst, commit, expire, refresh, expunge` and `delete`:

     {python}
     # "assign" a mapper to the User class/users table
@@ -51,7 +101,20 @@

     # flush the changes on a specific object
     myotheruser.flush()
+
+#### Engine Strategy Set to threadlocal By Default {@name=engine}
+
+The `threadlocal` mod also establishes `threadlocal` as the default *strategy* when calling the `create_engine()` function. This strategy is specified by the `strategy` keyword argument to `create_engine()` and can still be overridden to be "`plain`" or "`threadlocal`" explicitly.
+
+An `Engine` created with the `threadlocal` strategy will use a thread-locally managed connection object for all **implicit** statement executions and schema operations. Recall from [dbengine](rel:dbengine) that an implicit execution is an execution where the `Connection` object is opened and closed internally and the `connect()` method on `Engine` is not used, such as:
+
+    {python}
+    result = table.select().execute()
+
+Above, the `result` variable holds onto a `ResultProxy` which is still referencing a connection returned by the connection pool. The `threadlocal` strategy means that a second `execute()` statement in the same thread will use the same connection as the one referenced by `result`, assuming `result` is still referenced in memory.
+
+The `Mapper`, `Session`, and `Query` implementations have no dependency upon the strategy used by the underlying `Engine`; `Session` provides explicit connections for all internal executions, which are properly managed. Additionally, when the `Session` uses a transaction, it internally ensures that all operations are performed with the single `Connection` corresponding to the transaction. However, when the `threadlocal` strategy is used, orm operations will make use of the same `contextual_connection()` method, which will return the same connection used by other implicit executions.
+
 ### SessionContext

 This plugin solves many of the problems that `threadlocal` solves, but does it in a more class-localized way.
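A minimal sketch tying together the threadlocal pieces documented in the plugins.txt changes above. It assumes, as the example at the top of that section suggests, that simply importing `sqlalchemy.mods.threadlocal` installs the plugin, and that `objectstore` then lives in the `sqlalchemy` namespace as described:

    import sqlalchemy.mods.threadlocal  # importing the mod installs the thread-local session context
    from sqlalchemy import objectstore
    from sqlalchemy.orm import current_session

    # objectstore proxies the same thread-local Session that current_session() returns
    assert objectstore.get_session() is current_session()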
+ </ins><span class="cx"> ### SessionContext </span><span class="cx"> </span><span class="cx"> This plugin solves many of the problems that `threadlocal` solves, but does it in a more class-localized way. </span></span></pre></div> <a id="sqlalchemybranchesschemadocbuildcontentpoolingmyt"></a> <div class="modfile"><h4>Modified: sqlalchemy/branches/schema/doc/build/content/pooling.myt (1410 => 1411)</h4> <pre class="diff"><span> <span class="info">--- sqlalchemy/branches/schema/doc/build/content/pooling.myt 2006-05-05 17:52:32 UTC (rev 1410) +++ sqlalchemy/branches/schema/doc/build/content/pooling.myt 2006-05-06 00:54:41 UTC (rev 1411) </span><span class="lines">@@ -1,66 +1,87 @@ </span><span class="cx"> <%flags>inherit='document_base.myt'</%flags> </span><span class="cx"> <%attr>title='Connection Pooling'</%attr> </span><del>-<&|doclib.myt:item, name="pooling", description="Connection Pooling" &> - <P><b>Note:</b>This section describes the connection pool module of SQLAlchemy, which is the smallest component of the library that can be used on its own. If you are interested in using SQLAlchemy for query construction or Object Relational Mapping, this module is automatically managed behind the scenes; you can skip ahead to <&formatting.myt:link,path="dbengine"&> in that case.</p> - <p>At the base of any database helper library is a system of efficiently acquiring connections to the database. Since the establishment of a database connection is typically a somewhat expensive operation, an application needs a way to get at database connections repeatedly without incurring the full overhead each time. Particularly for server-side web applications, a connection pool is the standard way to maintain a "pool" of database connections which are used over and over again among many requests. Connection pools typically are configured to maintain a certain "size", which represents how many connections can be used simultaneously without resorting to creating more newly-established connections. - </p> - <p>SQLAlchemy includes a pooling module that can be used completely independently of the rest of the toolset. This section describes how it can be used on its own, as well as the available options. If SQLAlchemy is being used more fully, the connection pooling described below occurs automatically. The options are still available, though, so this core feature is a good place to start. - </p> - <&|doclib.myt:item, name="establishing", description="Establishing a Transparent Connection Pool" &> - Any DBAPI module can be "proxied" through the connection pool using the following technique (note that the usage of 'psycopg2' is <b>just an example</b>; substitute whatever DBAPI module you'd like): - - <&|formatting.myt:code&> - import sqlalchemy.pool as pool - import psycopg2 as psycopg - psycopg = pool.manage(psycopg) - - # then connect normally - connection = psycopg.connect(database='test', username='scott', password='tiger') - </&> - <p>This produces a <span class="codeline">sqlalchemy.pool.DBProxy</span> object which supports the same <span class="codeline">connect()</span> function as the original DBAPI module. Upon connection, a thread-local connection proxy object is returned, which delegates its calls to a real DBAPI connection object. This connection object is stored persistently within a connection pool (an instance of <span class="codeline">sqlalchemy.pool.Pool</span>) that corresponds to the exact connection arguments sent to the <span class="codeline">connect()</span> function. 
The connection proxy also returns a proxied cursor object upon calling <span class="codeline">connection.cursor()</span>. When all cursors as well as the connection proxy are de-referenced, the connection is automatically made available again by the owning pool object.</p> - - <p>Basically, the <span class="codeline">connect()</span> function is used in its usual way, and the pool module transparently returns thread-local pooled connections. Each distinct set of connect arguments corresponds to a brand new connection pool created; in this way, an application can maintain connections to multiple schemas and/or databases, and each unique connect argument set will be managed by a different pool object.</p> - </&> </del><ins>+<!-- WARNING! This file was automatically generated. + Modify .txt file if need you to change the content.--> +<&|doclib.myt:item, name="pooling", description="Connection Pooling"&> </ins><span class="cx"> </span><del>- <&|doclib.myt:item, name="configuration", description="Connection Pool Configuration" &> - <p>When proxying a DBAPI module through the <span class="codeline">pool</span> module, options exist for how the connections should be pooled: - </p> - <ul> - <li>echo=False : if set to True, connections being pulled and retrieved from/to the pool will be logged to the standard output, as well as pool sizing information.</li> - <li>use_threadlocal=True : if set to True, repeated calls to connect() within the same application thread will be guaranteed to return the <b>same</b> connection object, if one has already been retrieved from the pool and has not been returned yet. This allows code to retrieve a connection from the pool, and then while still holding on to that connection, to call other functions which also ask the pool for a connection of the same arguments; those functions will act upon the same connection that the calling method is using. Note that once the connection is returned to the pool, it then may be used by another thread. To guarantee a single unique connection per thread that <b>never</b> changes, use the option <span class="codeline">poolclass=SingletonThreadPool</span>, in which case the use_threadlocal parameter is automatically set to False.</li> - <li>poolclass=QueuePool : the Pool class used by the pool module to provide pooling. QueuePool uses the Python <span class="codeline">Queue.Queue</span> class to maintain a list of available connections. A developer can supply his or her own Pool class to supply a different pooling algorithm. Also included is the ThreadSingletonPool, which provides a single distinct connection per thread and is required with SQLite.</li> - <li>pool_size=5 : used by QueuePool - the size of the pool to be maintained. This is the largest number of connections that will be kept persistently in the pool. Note that the pool begins with no connections; once this number of connections is requested, that number of connections will remain.</li> - <li>max_overflow=10 : used by QueuePool - the maximum overflow size of the pool. When the number of checked-out connections reaches the size set in pool_size, additional connections will be returned up to this limit. When those additional connections are returned to the pool, they are disconnected and discarded. It follows then that the total number of simultaneous connections the pool will allow is pool_size + max_overflow, and the total number of "sleeping" connections the pool will allow is pool_size. 
max_overflow can be set to -1 to indicate no overflow limit; no limit will be placed on the total number of concurrent connections.</li> - </ul> - </&> - - <&|doclib.myt:item, name="custom", description="Custom Pool Construction" &> - <p>One level below using a DBProxy to make transparent pools is creating the pool yourself. The pool module comes with two implementations of connection pools: <span class="codeline">QueuePool</span> and <span class="codeline">SingletonThreadPool</span>. While QueuePool uses Queue.Queue to provide connections, SingletonThreadPool provides a single per-thread connection which SQLite requires.</p> - - <p>Constructing your own pool involves passing a callable used to create a connection. Through this method, custom connection schemes can be made, such as a connection that automatically executes some initialization commands to start. The options from the previous section can be used as they apply to QueuePool or SingletonThreadPool.</p> - <&|formatting.myt:code, title="Plain QueuePool"&> - import sqlalchemy.pool as pool - import psycopg2 - - def getconn(): - c = psycopg2.connect(username='ed', host='127.0.0.1', dbname='test') - # execute an initialization function on the connection before returning - c.cursor.execute("setup_encodings()") - return c - - p = pool.QueuePool(getconn, max_overflow=10, pool_size=5, use_threadlocal=True) - </&> </del><ins>+<p>This section describes the connection pool module of SQLAlchemy. The <code>Pool</code> object it provides is normally embedded within an <code>Engine</code> instance. For most cases, explicit access to the pool module is not required. However, the <code>Pool</code> object can be used on its own, without the rest of SA, to manage DBAPI connections; this section describes that usage. Also, this section will describe in more detail how to customize the pooling strategy used by an <code>Engine</code>. +</p> +<p>At the base of any database helper library is a system of efficiently acquiring connections to the database. Since the establishment of a database connection is typically a somewhat expensive operation, an application needs a way to get at database connections repeatedly without incurring the full overhead each time. Particularly for server-side web applications, a connection pool is the standard way to maintain a "pool" of database connections which are used over and over again among many requests. Connection pools typically are configured to maintain a certain "size", which represents how many connections can be used simultaneously without resorting to creating more newly-established connections. 
+</p> </ins><span class="cx"> </span><del>- <&|formatting.myt:code, title="SingletonThreadPool"&> - import sqlalchemy.pool as pool - import sqlite - - def getconn(): - return sqlite.connect(filename='myfile.db') - - # SQLite connections require the SingletonThreadPool - p = pool.SingletonThreadPool(getconn) - </&> </del><ins>+<&|doclib.myt:item, name="establishing", description="Establishing a Transparent Connection Pool"&> </ins><span class="cx"> </span><del>- </&> -</&> </del><span class="cx">\ No newline at end of file </span><ins>+<p>Any DBAPI module can be "proxied" through the connection pool using the following technique (note that the usage of 'psycopg2' is <strong>just an example</strong>; substitute whatever DBAPI module you'd like): +</p> +<&|formatting.myt:code, use_sliders="True", syntaxtype="python"&> import sqlalchemy.pool as pool + import psycopg2 as psycopg + psycopg = pool.manage(psycopg) + + # then connect normally + connection = psycopg.connect(database='test', username='scott', password='tiger') + </&><p>This produces a <code>sqlalchemy.pool.DBProxy</code> object which supports the same <code>connect()</code> function as the original DBAPI module. Upon connection, a thread-local connection proxy object is returned, which delegates its calls to a real DBAPI connection object. This connection object is stored persistently within a connection pool (an instance of <code>sqlalchemy.pool.Pool</code>) that corresponds to the exact connection arguments sent to the <code>connect()</code> function. The connection proxy also returns a proxied cursor object upon calling <code>connection.cursor()</code>. When all cursors as well as the connection proxy are de-referenced, the connection is automatically made available again by the owning pool object. +</p> +<p>Basically, the <code>connect()</code> function is used in its usual way, and the pool module transparently returns thread-local pooled connections. Each distinct set of connect arguments corresponds to a brand new connection pool created; in this way, an application can maintain connections to multiple schemas and/or databases, and each unique connect argument set will be managed by a different pool object. +</p> + +</&> +<&|doclib.myt:item, name="configuration", description="Connection Pool Configuration"&> + +<p>When proxying a DBAPI module through the <code>pool</code> module, options exist for how the connections should be pooled: +</p> +<ul> + <li> + echo=False : if set to True, connections being pulled and retrieved from/to the pool will be logged to the standard output, as well as pool sizing information. + </li> + + <li> + use_threadlocal=True : if set to True, repeated calls to connect() within the same application thread will be guaranteed to return the <strong>same</strong> connection object, if one has already been retrieved from the pool and has not been returned yet. This allows code to retrieve a connection from the pool, and then while still holding on to that connection, to call other functions which also ask the pool for a connection of the same arguments; those functions will act upon the same connection that the calling method is using. Note that once the connection is returned to the pool, it then may be used by another thread. To guarantee a single unique connection per thread that <strong>never</strong> changes, use the option <code>poolclass=SingletonThreadPool</code>, in which case the use_threadlocal parameter is automatically set to False. 
+ </li> + + <li> + poolclass=QueuePool : the Pool class used by the pool module to provide pooling. QueuePool uses the Python <code>Queue.Queue</code> class to maintain a list of available connections. A developer can supply his or her own Pool class to supply a different pooling algorithm. Also included is the <code>SingletonThreadPool</code>, which provides a single distinct connection per thread and is required with SQLite. + </li> + + <li> + pool_size=5 : used by <code>QueuePool</code> - the size of the pool to be maintained. This is the largest number of connections that will be kept persistently in the pool. Note that the pool begins with no connections; once this number of connections is requested, that number of connections will remain. + </li> + + <li> + max_overflow=10 : used by <code>QueuePool</code> - the maximum overflow size of the pool. When the number of checked-out connections reaches the size set in pool_size, additional connections will be returned up to this limit. When those additional connections are returned to the pool, they are disconnected and discarded. It follows then that the total number of simultaneous connections the pool will allow is <code>pool_size</code> + <code>max_overflow</code>, and the total number of "sleeping" connections the pool will allow is <code>pool_size</code>. <code>max_overflow</code> can be set to -1 to indicate no overflow limit; no limit will be placed on the total number of concurrent connections. + </li> + + <li> + timeout=30 : used by <code>QueuePool</code> - the timeout before giving up on returning a connection, if none are available and the <code>max_overflow</code> has been reached. + </li> +</ul> + +</&> +<&|doclib.myt:item, name="custom", description="Custom Pool Construction"&> + +<p>One level below using a DBProxy to make transparent pools is creating the pool yourself. The pool module comes with two implementations of connection pools: <code>QueuePool</code> and <code>SingletonThreadPool</code>. While <code>QueuePool</code> uses <code>Queue.Queue</code> to provide connections, <code>SingletonThreadPool</code> provides a single per-thread connection which SQLite requires. +</p> +<p>Constructing your own pool involves passing a callable used to create a connection. Through this method, custom connection schemes can be made, such as a connection that automatically executes some initialization commands to start. The options from the previous section can be used as they apply to <code>QueuePool</code> or <code>SingletonThreadPool</code>. 
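The configuration options listed in the regenerated chapter are keyword arguments; a minimal sketch of passing them through `pool.manage()` (the values are arbitrary, `psycopg2` is again just an example DBAPI module, and the assumption is that `manage()` forwards these keywords to the underlying pool):

    import sqlalchemy.pool as pool
    import psycopg2

    # options control how connections handed out by the proxy are pooled
    psycopg = pool.manage(psycopg2, pool_size=10, max_overflow=20, use_threadlocal=False, echo=True)

    # connects exactly as before; only the pooling behavior differs
    connection = psycopg.connect(database='test', username='scott', password='tiger')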
Modified: sqlalchemy/branches/schema/doc/build/content/tutorial.txt (1410 => 1411)
--- sqlalchemy/branches/schema/doc/build/content/tutorial.txt	2006-05-05 17:52:32 UTC (rev 1410)
+++ sqlalchemy/branches/schema/doc/build/content/tutorial.txt	2006-05-06 00:54:41 UTC (rev 1411)
@@ -87,9 +87,9 @@
     {python}
     >>> dynamic = DynamicMetaData() # create a Dynamic metadata object
     >>> dynamic.connect('sqlite:///:memory:') # connect it to SQLite
-    >>> dynamic.connect('postgres:///scott:tiger@localhost/mydb') # connect it to PostGres
+    >>> dynamic.connect('postgres://scott:tiger@localhost/mydb') # connect it to PostGres

-    >>> myengine = create_engine('mysql:///127.0.0.1')
+    >>> myengine = create_engine('mysql://127.0.0.1')
     >>> dynamic.connect(myengine) # connect to an externally-defined engine

 The `DynamicMetaData` object binds to different engines on a thread local basis. This means that one thread of your application can be connected to one database, while another is connected to a different database. The `DynamicMetaData` object also keeps a reference to each bound engine internally, so that each connection string is only initialized once.
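The two corrected URLs above follow the same general form used elsewhere in these docs, roughly driver://username:password@host/database; a small sketch with placeholder credentials and database names:

    # general form: driver://username:password@host/database
    sqlite_engine = create_engine('sqlite:///:memory:')                        # in-memory SQLite
    postgres_engine = create_engine('postgres://scott:tiger@localhost/mydb')   # networked database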
</span></span></pre></div> <a id="sqlalchemybranchesschemadocbuildcontenttypestxt"></a> <div class="modfile"><h4>Modified: sqlalchemy/branches/schema/doc/build/content/types.txt (1410 => 1411)</h4> <pre class="diff"><span> <span class="info">--- sqlalchemy/branches/schema/doc/build/content/types.txt 2006-05-05 17:52:32 UTC (rev 1410) +++ sqlalchemy/branches/schema/doc/build/content/types.txt 2006-05-06 00:54:41 UTC (rev 1411) </span><span class="lines">@@ -23,6 +23,7 @@ </span><span class="cx"> class Float(Numeric): </span><span class="cx"> def __init__(self, precision=10) </span><span class="cx"> </span><ins>+ # DateTime, Date and Time types deal with datetime objects from the Python datetime module </ins><span class="cx"> class DateTime(TypeEngine) </span><span class="cx"> </span><span class="cx"> class Date(TypeEngine) </span><span class="lines">@@ -38,11 +39,13 @@ </span><span class="cx"> # as bind params, raw bytes to unicode as </span><span class="cx"> # rowset values, using the unicode encoding </span><span class="cx"> # setting on the engine (defaults to 'utf-8') </span><del>- class Unicode(String) </del><ins>+ class Unicode(TypeDecorator): + impl = String </ins><span class="cx"> </span><span class="cx"> # uses the pickle protocol to serialize data </span><span class="cx"> # in/out of Binary columns </span><del>- class PickleType(Binary) </del><ins>+ class PickleType(TypeDecorator): + impl = Binary </ins><span class="cx"> </span><span class="cx"> More specific subclasses of these types are available, which various database engines may choose to implement specifically, allowing finer grained control over types: </span><span class="cx"> </span><span class="lines">@@ -76,53 +79,25 @@ </span><span class="cx"> </span><span class="cx"> ### Creating your Own Types {@name=custom} </span><span class="cx"> </span><del>-User-defined types can be created, to support either database-specific types, or customized pre-processing of query parameters as well as post-processing of result set data. You can make your own classes to perform these operations. They are specified by subclassing the desired type class: </del><ins>+User-defined types can be created, to support either database-specific types, or customized pre-processing of query parameters as well as post-processing of result set data. You can make your own classes to perform these operations. To augment the behavior of a `TypeEngine` type, such as `String`, the `TypeDecorator` class is used: </ins><span class="cx"> </span><del>- {python title="Basic Example"} </del><ins>+ {python} </ins><span class="cx"> import sqlalchemy.types as types </span><span class="cx"> </span><del>- class MyType(types.String): </del><ins>+ class MyType(types.TypeDecorator): </ins><span class="cx"> """basic type that decorates String, prefixes values with "PREFIX:" on </span><span class="cx"> the way in and strips it off on the way out.""" </span><ins>+ impl = types.String </ins><span class="cx"> def convert_bind_param(self, value, engine): </span><span class="cx"> return "PREFIX:" + value </span><span class="cx"> def convert_result_value(self, value, engine): </span><span class="cx"> return value[7:] </span><ins>+ +The `Unicode` and `PickleType` classes are instances of `TypeDecorator` already and can be subclassed directly. 
</ins><span class="cx"> </span><del>-A common desire is for a "pickle" type, which overrides a Binary object to provide pickling behavior: </del><ins>+To build a type object from scratch, which will not have a corresponding database-specific implementation, subclass `TypeEngine`: </ins><span class="cx"> </span><del>- {python title="Pickle Type"} - import cPickle - - class PickleType(Binary): - def __init__(self, protocol=pickle.HIGHEST_PROTOCOL): - """allows the pickle protocol to be specified""" - self.protocol = protocol - def convert_result_value(self, value, engine): - if value is None: - return None - buf = Binary.convert_result_value(self, value, engine) - return pickle.loads(str(buf)) - def convert_bind_param(self, value, engine): - if value is None: - return None - return Binary.convert_bind_param(self, pickle.dumps(value, self.protocol), engine) - def get_constructor_args(self): - return {} - -Which can be used like: - </del><span class="cx"> {python} </span><del>- mytable = Table('mytable', engine, - Column('id', Integer, primary_key=True), - Column('data', PickleType())) - - my_object = MyObject() - mytable.insert().execute(data=my_object) - -Another example, which illustrates a fully defined datatype. This just overrides the base type class TypeEngine: - - {python} </del><span class="cx"> import sqlalchemy.types as types </span><span class="cx"> </span><span class="cx"> class MyType(types.TypeEngine): </span><span class="lines">@@ -134,7 +109,4 @@ </span><span class="cx"> return value </span><span class="cx"> def convert_result_value(self, value, engine): </span><span class="cx"> return value </span><del>- def adapt_args(self): - """allows for the adaptation of this TypeEngine object into a new kind of type depending on its arguments.""" - return self </del><span class="cx"> </span></span></pre></div> <a id="sqlalchemybranchesschemalibsqlalchemyexceptionspy"></a> <div class="modfile"><h4>Modified: sqlalchemy/branches/schema/lib/sqlalchemy/exceptions.py (1410 => 1411)</h4> <pre class="diff"><span> <span class="info">--- sqlalchemy/branches/schema/lib/sqlalchemy/exceptions.py 2006-05-05 17:52:32 UTC (rev 1410) +++ sqlalchemy/branches/schema/lib/sqlalchemy/exceptions.py 2006-05-06 00:54:41 UTC (rev 1411) </span><span class="lines">@@ -25,8 +25,8 @@ </span><span class="cx"> objects. 
Modified: sqlalchemy/branches/schema/lib/sqlalchemy/mods/threadlocal.py (1410 => 1411)
--- sqlalchemy/branches/schema/lib/sqlalchemy/mods/threadlocal.py	2006-05-05 17:52:32 UTC (rev 1410)
+++ sqlalchemy/branches/schema/lib/sqlalchemy/mods/threadlocal.py	2006-05-06 00:54:41 UTC (rev 1411)
@@ -1,5 +1,5 @@
 from sqlalchemy import util, engine, mapper
-from sqlalchemy.orm import unitofwork, session
+from sqlalchemy.orm import session, current_session
 import sqlalchemy
 import sys, types

@@ -10,107 +10,44 @@
 from the pool. this greatly helps functions that call multiple statements to be able to easily use just one connection
 without explicit "close" statements on result handles.

-on the Session side, the get_session() method will be modified to return a thread-local Session when no arguments
+on the Session side, the current_session() method will be modified to return a thread-local Session when no arguments
 are sent. It will also install module-level methods within the objectstore module, such as flush(), delete(), etc.
-which call this method on the thread-local session returned by get_session().
+which call this method on the thread-local session returned by current_session().

-Without this plugin in use, all statement.execute() calls must be matched by a corresponding close() statement
-on the returned result (or the result must be consumed completely). Also, all mapper operations must use
-explicit Session objects when creating instances and creating queries.
+
 """

-get_session = session.get_session
-
 class Objectstore(object):
-    def begin(self, obj):
-        return get_session().begin(obj)
-    def commit(self, obj):
-        return get_session().commit(obj)
-    def get_session(self, obj=None):
-        return get_session(obj=obj)
-    def flush(self, obj=None):
-        """flushes the current UnitOfWork transaction.  if a transaction was begun
-        via begin(), flushes only those objects that were created, modified, or deleted
-        since that begin statement.  otherwise flushes all objects that have been
-        changed.
-
-        if individual objects are submitted, then only those objects are committed, and the
-        begin/commit cycle is not affected."""
-        get_session().flush(obj)
-
-    def clear(self):
-        """removes all current UnitOfWorks and IdentityMaps for this thread and
-        establishes a new one.  It is probably a good idea to discard all
-        current mapped object instances, as they are no longer in the Identity Map."""
-        get_session().clear()
-
-    def refresh(self, obj):
-        """reloads the state of this object from the database, and cancels any in-memory
-        changes."""
-        get_session().refresh(obj)
-
-    def expire(self, obj):
-        """invalidates the data in the given objects and sets them to refresh themselves
-        the next time they are requested."""
-        get_session().expire(obj)
-
-    def expunge(self, obj):
-        get_session().expunge(obj)
-
-    def delete(self, obj):
-        """registers the given objects as to be deleted upon the next commit"""
-        s = get_session().delete(obj)
-
-    def has_key(self, key):
-        """returns True if the current thread-local IdentityMap contains the given instance key"""
-        return get_session().has_key(key)
-
-    def has_instance(self, instance):
-        """returns True if the current thread-local IdentityMap contains the given instance"""
-        return get_session().has_instance(instance)
-
-    def is_dirty(self, obj):
-        """returns True if the given object is in the current UnitOfWork's new or dirty list,
-        or if its a modified list attribute on an object."""
-        return get_session().is_dirty(obj)
-
-    def instance_key(self, instance):
-        """returns the IdentityMap key for the given instance"""
-        return get_session().instance_key(instance)
-
-    def import_instance(self, instance):
-        return get_session().import_instance(instance)
-
-def assign_mapper(class_, *args, **params):
-    params.setdefault("is_primary", True)
+    def __getattr__(self, key):
+        return getattr(current_session(), key)
+    def get_session(self):
+        return current_session()
+
+def monkeypatch_query_method(class_, name):
+    def do(self, *args, **kwargs):
+        query = class_.mapper.query()
+        getattr(query, name)(*args, **kwargs)
+    setattr(class_, name, classmethod(do))
+
+def monkeypatch_objectstore_method(class_, name):
+    def do(self, *args, **kwargs):
+        session = current_session()
+        getattr(session, name)(self, *args, **kwargs)
+    setattr(class_, name, do)
+
+def assign_mapper(class_, *args, **kwargs):
+    kwargs.setdefault("is_primary", True)
     if not isinstance(getattr(class_, '__init__'), types.MethodType):
         def __init__(self, **kwargs):
             for key, value in kwargs.items():
                 setattr(self, key, value)
         class_.__init__ = __init__
-    m = mapper(class_, *args, **params)
+    m = mapper(class_, *args, **kwargs)
     class_.mapper = m
-    # TODO: get these outta here, have to go off explicit session
-    class_.get = m.get
-    class_.select = m.select
-    class_.select_by = m.select_by
-    class_.selectone = m.selectone
-    class_.get_by = m.get_by
-    def commit(self):
-        sqlalchemy.objectstore.commit(self)
-    def delete(self):
-        sqlalchemy.objectstore.delete(self)
-    def expire(self):
-        sqlalchemy.objectstore.expire(self)
-    def refresh(self):
-        sqlalchemy.objectstore.refresh(self)
-    def expunge(self):
-        sqlalchemy.objectstore.expunge(self)
-    class_.commit = commit
-    class_.delete = delete
-    class_.expire = expire
-    class_.refresh = refresh
-    class_.expunge = expunge
+
+    for name in ['get', 'select', 'select_by', 'selectone', 'get_by']:
+        monkeypatch_query_method(class_, name)
+    for name in ['flush', 'delete', 'expire', 'refresh', 'expunge', 'merge', 'update', 'save_or_update']:
+        monkeypatch_objectstore_method(class_, name)

 def install_plugin():
     reg = util.ScopedRegistry(session.Session)
Modified: sqlalchemy/branches/schema/lib/sqlalchemy/orm/__init__.py (1410 => 1411)
--- sqlalchemy/branches/schema/lib/sqlalchemy/orm/__init__.py	2006-05-05 17:52:32 UTC (rev 1410)
+++ sqlalchemy/branches/schema/lib/sqlalchemy/orm/__init__.py	2006-05-06 00:54:41 UTC (rev 1411)
@@ -11,14 +11,15 @@
 from sqlalchemy import sql, schema, engine, util, exceptions
 from mapper import *
 from mapper import mapper_registry
+from query import Query
 from util import polymorphic_union
 import properties
 from session import current_session
 from session import Session as create_session

 __all__ = ['relation', 'backref', 'eagerload', 'lazyload', 'noload', 'deferred', 'defer', 'undefer',
-           'mapper', 'clear_mappers', 'sql', 'extension', 'class_mapper', 'object_mapper', 'MapperExtension',
-           'cascade_mappers', 'polymorphic_union', 'current_session', 'create_session', 'class_mapper', 'object_mapper'
+           'mapper', 'clear_mappers', 'sql', 'extension', 'class_mapper', 'object_mapper', 'MapperExtension', 'Query',
+           'cascade_mappers', 'polymorphic_union', 'current_session', 'create_session', 'class_mapper', 'object_mapper'
            ]

 def relation(*args, **kwargs):

Modified: sqlalchemy/branches/schema/lib/sqlalchemy/orm/dependency.py (1410 => 1411)
--- sqlalchemy/branches/schema/lib/sqlalchemy/orm/dependency.py	2006-05-05 17:52:32 UTC (rev 1410)
+++ sqlalchemy/branches/schema/lib/sqlalchemy/orm/dependency.py	2006-05-06 00:54:41 UTC (rev 1411)
@@ -1,3 +1,10 @@
+# orm/dependency.py
+# Copyright (C) 2005,2006 Michael Bayer mi...@zz...
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+
 """bridges the PropertyLoader (i.e. a relation()) and the UOWTransaction
 together to allow processing of scalar- and list-based dependencies at flush time."""

@@ -20,7 +27,7 @@

 class MapperStub(object):
     """poses as a Mapper representing the association table in a many-to-many
-    join, when performing a commit().
+    join, when performing a flush().

     The Task objects in the objectstore module treat it just like
     any other Mapper, but in fact it only serves as a "dependency" placeholder
@@ -97,11 +104,9 @@
         else:
             raise AssertionError(" no foreign key ?")

-    # TODO: this method should be moved to an external object
     def get_object_dependencies(self, obj, uowcommit, passive = True):
         return uowcommit.uow.attributes.get_history(obj, self.key, passive = passive)

-    # TODO: this method should be moved to an external object
     def whose_dependent_on_who(self, obj1, obj2):
         """given an object pair assuming obj2 is a child of obj1, returns a tuple
         with the dependent object second, or None if they are equal.
@@ -114,7 +119,6 @@
         else:
             return (obj2, obj1)

-    # TODO: this method should be moved to an external object
     def process_dependencies(self, task, deplist, uowcommit, delete = False):
         """this method is called during a commit operation to synchronize data between a parent and child object.
         it also can establish child or parent objects within the unit of work as "to be saved" or "deleted"
@@ -248,7 +252,6 @@
         elif childlist.hasparent(child) is False:
             uowcommit.register_object(child, isdelete=True)

-    # TODO: this method should be moved to an external object
     def _synchronize(self, obj, child, associationrow, clearkeys):
         """called during a commit to execute the full list of syncrules on the
         given object/child/optional association row"""

Modified: sqlalchemy/branches/schema/lib/sqlalchemy/orm/mapper.py (1410 => 1411)
--- sqlalchemy/branches/schema/lib/sqlalchemy/orm/mapper.py	2006-05-05 17:52:32 UTC (rev 1410)
+++ sqlalchemy/branches/schema/lib/sqlalchemy/orm/mapper.py	2006-05-06 00:54:41 UTC (rev 1411)
@@ -1,4 +1,4 @@
-# mapper/mapper.py
+# orm/mapper.py
 # Copyright (C) 2005,2006 Michael Bayer mi...@zz...
 #
 # This module is part of SQLAlchemy and is released under
@@ -660,7 +660,7 @@
                     self.extension.after_update(self, connection, obj)
                 rows += c.cursor.rowcount
             if c.supports_sane_rowcount() and rows != len(update):
-                raise CommitError("ConcurrencyError - updated rowcount %d does not match number of objects updated %d" % (rows, len(update)))
+                raise exceptions.FlushError("ConcurrencyError - updated rowcount %d does not match number of objects updated %d" % (rows, len(update)))
         if len(insert):
             statement = table.insert()
             for rec in insert:
@@ -729,10 +729,9 @@
             if self.version_id_col is not None:
                 clause.clauses.append(self.version_id_col == sql.bindparam(self.version_id_col.key, type=self.version_id_col.type))
             statement = table.delete(clause)
-            print "DELETE IS", delete
             c = connection.execute(statement, delete)
             if c.supports_sane_rowcount() and c.rowcount != len(delete):
-                raise CommitError("ConcurrencyError - updated rowcount %d does not match number of objects updated %d" % (c.cursor.rowcount, len(delete)))
+                raise exceptions.FlushError("ConcurrencyError - updated rowcount %d does not match number of objects updated %d" % (c.cursor.rowcount, len(delete)))

     def _has_pks(self, table):
         try:

Modified: sqlalchemy/branches/schema/lib/sqlalchemy/orm/query.py (1410 => 1411)
--- sqlalchemy/branches/schema/lib/sqlalchemy/orm/query.py	2006-05-05 17:52:32 UTC (rev 1410)
+++ sqlalchemy/branches/schema/lib/sqlalchemy/orm/query.py	2006-05-06 00:54:41 UTC (rev 1411)
@@ -1,4 +1,4 @@
-# mapper/query.py
+# orm/query.py
 # Copyright (C) 2005,2006 Michael Bayer mi...@zz...
 #
 # This module is part of SQLAlchemy and is released under
@@ -11,8 +11,11 @@

 class Query(object):
     """encapsulates the object-fetching operations provided by Mappers."""

[truncated message content]