Update of /cvsroot/sqlobject/SQLObject/SQLObject
In directory sc8-pr-cvs1:/tmp/cvs-serv22961
Modified Files:
DBConnection.py SQLObject.py
Added Files:
Cache.py
Log Message:
Fancier caching -- weak references, so garbage-collected objects can drop
out of the cache on their own, in addition to entries being actively
expired from the cache.
New Cache module that helps implement this.
--- NEW FILE: Cache.py ---
import threading
from weakref import ref
from time import time as now
class CacheFactory(object):

    """
    CacheFactory caches object creation.  Each object should be
    referenced by a single hashable ID (note tuples of hashable
    values are also hashable).

    Lock protocol (non-obvious): ``get()`` returning None means a
    miss AND that the internal lock is left ACQUIRED -- the caller is
    expected to create the object and call ``put()``, which stores it
    and releases the lock.  A non-None return means the lock is not
    held.
    """

    def __init__(self, expireFrequency=100, expireFraction=2,
                 cache=True):
        """
        Every expireFrequency times that an item is retrieved from
        this cache, the expire method is called.

        The expire method then expires an arbitrary fraction of
        the cached objects.  The idea is at no time will the cache
        be entirely emptied, placing a potentially high load at that
        moment, but every object will have its time to go
        eventually.  The fraction is given as an integer, and one
        in that many objects are expired (i.e., the default is 1/2
        of objects are expired).

        By setting cache to False, items won't be cached.
        However, in all cases a weak reference is kept to created
        objects, and if the object hasn't been garbage collected
        it will be returned.
        """
        self.expireFrequency = expireFrequency
        # Start at the threshold so the first sweep happens soon
        # after the cache comes into use.
        self.expireCount = expireFrequency
        self.expireOffset = 0
        self.expireFraction = expireFraction
        self.doCache = cache
        # Strong-reference cache (only consulted when doCache is
        # true) and the weak-reference fallback, which is used in
        # both modes.
        self.cache = {}
        self.expiredCache = {}
        self.lock = threading.Lock()

    def get(self, id):
        """
        Return the cached object for `id`, or None on a miss.

        On a miss the lock is deliberately left acquired; the caller
        must follow up with put(id, obj), which releases it.
        """
        if self.doCache:
            if self.expireCount > self.expireFrequency:
                # Two threads could hit the expire in a row, but
                # that's not so bad.  At least by setting expireCount
                # back to zero right away we avoid this.  The expire
                # method has a lock, so it's threadsafe.
                self.expireCount = 0
                self.expire()
            # Count this retrieval toward the next expire() sweep.
            # NOTE(review): no increment existed anywhere in the
            # pasted original, so expire() could never trigger --
            # confirm this matches the intended accounting.
            self.expireCount += 1
            # Optimistic, lock-free fast path:
            try:
                return self.cache[id]
            except KeyError:
                pass
            self.lock.acquire()
            # Re-check under the lock -- another thread may have
            # populated the entry in the meantime.
            try:
                val = self.cache[id]
            except KeyError:
                pass
            else:
                self.lock.release()
                return val
            try:
                val = self.expiredCache[id]()
            except KeyError:
                # True miss: return with the lock HELD (see class
                # docstring); caller must call put().
                return None
            else:
                del self.expiredCache[id]
                if val is None:
                    # The weak reference died; treat as a miss
                    # (lock stays held for the caller's put()).
                    return None
                # Resurrect the weakly-held object into the strong
                # cache.
                self.cache[id] = val
            self.lock.release()
            return val
        else:
            # No strong cache: only the weak-reference table is used.
            try:
                val = self.expiredCache[id]()
                if val is not None:
                    return val
            except KeyError:
                pass
            self.lock.acquire()
            try:
                val = self.expiredCache[id]()
            except KeyError:
                # Miss: lock stays held for the caller's put().
                return None
            else:
                if val is None:
                    # Dead reference: drop it and report a miss
                    # (lock stays held).
                    del self.expiredCache[id]
                    return None
            self.lock.release()
            return val

    def put(self, id, obj):
        """
        Store `obj` under `id` after a get() miss, releasing the
        lock that get() left acquired.
        """
        if self.doCache:
            self.cache[id] = obj
        else:
            self.expiredCache[id] = ref(obj)
        self.lock.release()

    def created(self, id, obj):
        """
        Register a freshly created object.  Unlike put(), this does
        not participate in the get()/put() lock handoff.
        """
        if self.doCache:
            self.cache[id] = obj
        else:
            self.expiredCache[id] = ref(obj)

    def expire(self):
        """
        Demote 1/expireFraction of the strongly-cached objects to
        weak references, so they can be garbage collected.
        """
        if not self.doCache:
            # Nothing is strongly cached, so nothing to expire.
            return
        self.lock.acquire()
        try:
            keys = list(self.cache.keys())
            for i in range(self.expireOffset, len(keys),
                           self.expireFraction):
                id = keys[i]
                self.expiredCache[id] = ref(self.cache[id])
                del self.cache[id]
            # This offset tries to balance out which objects we
            # expire, so no object will just hang out in the cache
            # forever.  (BUG FIX: the original read
            # self.expiredOffset, an attribute that was never set,
            # so expire() raised AttributeError.)
            self.expireOffset = (self.expireOffset + 1) \
                                % self.expireFraction
        finally:
            # Release even if a weakref cannot be created (e.g. an
            # un-weakreferenceable object slipped into the cache).
            self.lock.release()
class CacheSet(object):

    """
    A CacheSet maintains one CacheFactory per class (keyed by class
    name), creating each factory lazily with the constructor
    arguments supplied here.
    """

    def __init__(self, *args, **kw):
        self.caches = {}
        self.args = args
        self.kw = kw

    def _cacheFor(self, cls):
        # Fetch the per-class cache, building it on first use.
        name = cls.__name__
        if name not in self.caches:
            self.caches[name] = CacheFactory(*self.args, **self.kw)
        return self.caches[name]

    def get(self, id, cls):
        return self._cacheFor(cls).get(id)

    def put(self, id, cls, obj):
        # get() is always called first, so the factory must exist;
        # a missing factory is a usage error and raises KeyError.
        self.caches[cls.__name__].put(id, obj)

    def created(self, id, cls, obj):
        self._cacheFor(cls).created(id, obj)
Index: DBConnection.py
===================================================================
RCS file: /cvsroot/sqlobject/SQLObject/SQLObject/DBConnection.py,v
retrieving revision 1.6
retrieving revision 1.7
diff -C2 -d -r1.6 -r1.7
*** DBConnection.py 14 Mar 2003 03:52:01 -0000 1.6
--- DBConnection.py 14 Mar 2003 08:59:24 -0000 1.7
***************
*** 3,6 ****
--- 3,7 ----
import threading
import SQLBuilder
+ from Cache import CacheSet
try:
***************
*** 26,30 ****
class DBConnection:
! def __init__(self, name=None, debug=False):
if name:
assert not _connections.has_key(name), 'A database by the name %s has already been created: %s' % (name, _connections[name])
--- 27,31 ----
class DBConnection:
! def __init__(self, name=None, debug=False, cache=True):
if name:
assert not _connections.has_key(name), 'A database by the name %s has already been created: %s' % (name, _connections[name])
***************
*** 32,37 ****
self.name = name
self.debug = debug
! self.instanceCache = {}
! self.instanceCacheLock = threading.Lock()
--- 33,37 ----
self.name = name
self.debug = debug
! self.cache = CacheSet(cache=cache)
Index: SQLObject.py
===================================================================
RCS file: /cvsroot/sqlobject/SQLObject/SQLObject/SQLObject.py,v
retrieving revision 1.8
retrieving revision 1.9
diff -C2 -d -r1.8 -r1.9
*** SQLObject.py 14 Mar 2003 03:52:01 -0000 1.8
--- SQLObject.py 14 Mar 2003 08:59:24 -0000 1.9
***************
*** 409,447 ****
id = int(id)
- # If there is no specific connection given (so we don't
- # care about transactions), we cache instances for the
- # entire class, and we use the class cache:
if connection is None:
! cache = cls._SO_instanceCache
! lock = cls._SO_instanceCacheLock
! # Otherwise we get the cache that belongs to the
! # connection:
else:
! cache = connection.instanceCache.setdefault(cls.__name__, {})
! lock = connection.instanceCacheLock
! # We're optimistic that we find a cached instance, if
! # so we can return it right away:
! if cache.has_key(id):
! return cache[id]
! else:
! # But if now we need to make a lock to make sure
! # no other threads make an instance while we're
! # making one:
! lock.acquire()
! # And just in case one has already finished in
! # the short time it took us to get here:
! if cache.has_key(id):
! lock.release()
! return cache[id]
! # Create the object, put it in the cache:
! inst = object.__new__(cls, id, connection)
! cache[id] = inst
! # Use the _init function -- __init__ can't be
! # used, since it's called even when we return an
! # already-created instance from the cache:
! inst._init(id, connection)
! lock.release()
! return inst
def _init(self, id, connection=None):
--- 409,423 ----
id = int(id)
if connection is None:
! cache = cls._connection.cache
else:
! cache = connection.cache
! val = cache.get(id, cls)
! if val is None:
! val = object.__new__(cls, id, connection)
! val._init(id, connection)
! cache.put(id, cls, val)
! return val
def _init(self, id, connection=None):
***************
*** 635,655 ****
del self._SO_createValues
del self._SO_creating
! if self._SO_perConnection:
! cache = self._connection.instanceCache.setdefault(self.__class__.__name__, {})
! lock = self._connection.instanceCacheLock
! else:
! cache = self._SO_instanceCache
! lock = self._SO_instanceCacheLock
! # Everyone has to wait while we do the insert, so that a
! # select can't recreate this object during these next
! # moments. But at least we know that this object can't
! # be in the cache yet.
! lock.acquire()
# Do the insert -- most of the SQL in this case is left
# up to DBConnection, since getting a new ID is
# non-standard.
id = self._connection.queryInsertID(self._table, names, values)
! cache[id] = self
! lock.release()
self._init(id)
--- 611,621 ----
del self._SO_createValues
del self._SO_creating
!
# Do the insert -- most of the SQL in this case is left
# up to DBConnection, since getting a new ID is
# non-standard.
id = self._connection.queryInsertID(self._table, names, values)
! cache = self._connection.cache
! cache.created(id, self.__class__, self)
self._init(id)
***************
*** 694,698 ****
cls._connection.createTable(cls)
if createJoinTables:
! self.createJoinTables(ifExists=ifExists)
createTable = classmethod(createTable)
--- 660,664 ----
cls._connection.createTable(cls)
if createJoinTables:
! cls.createJoinTables(ifExists=ifExists)
createTable = classmethod(createTable)
***************
*** 701,705 ****
# but right now it's making tables directly.
result = []
! for join in soClass._joins:
if not join.hasIntermediateTable():
continue
--- 667,671 ----
# but right now it's making tables directly.
result = []
! for join in cls._joins:
if not join.hasIntermediateTable():
continue
***************
*** 711,715 ****
continue
if ifExists and \
! self._connection.tableExists(join.intermediateTable):
continue
self._connection.query(
--- 677,681 ----
continue
if ifExists and \
! cls._connection.tableExists(join.intermediateTable):
continue
self._connection.query(
***************
*** 720,723 ****
--- 686,690 ----
join.otherColumn,
self.joinSQLType(join)))
+ createJoinTables = classmethod(createJoinTables)
def clearTable(cls):
|