Update of /cvsroot/sqlobject/SQLObject/SQLObject
In directory sc8-pr-cvs1:/tmp/cvs-serv12054/SQLObject
Modified Files:
Cache.py DBConnection.py SQLObject.py
Log Message:
Added expiring and syncing. Transactions expire their objects
whenever there's a rollback.
Index: Cache.py
===================================================================
RCS file: /cvsroot/sqlobject/SQLObject/SQLObject/Cache.py,v
retrieving revision 1.8
retrieving revision 1.9
diff -C2 -d -r1.8 -r1.9
*** Cache.py 15 Jul 2003 02:25:31 -0000 1.8
--- Cache.py 7 Sep 2003 07:17:49 -0000 1.9
***************
*** 14,24 ****
"""
! def __init__(self, expireFrequency=100, expireFraction=2,
cache=True):
"""
! Every expireFrequency times that an item is retrieved from
! this cache, the expire method is called.
! The expire method then expires an arbitrary fraction of
the cached objects. The idea is at no time will the cache
be entirely emptied, placing a potentially high load at that
--- 14,24 ----
"""
! def __init__(self, cullFrequency=100, cullFraction=2,
cache=True):
"""
! Every cullFrequency times that an item is retrieved from
! this cache, the cull method is called.
! The cull method then expires an arbitrary fraction of
the cached objects. The idea is at no time will the cache
be entirely emptied, placing a potentially high load at that
***************
*** 35,42 ****
"""
! self.expireFrequency = expireFrequency
! self.expireCount = expireFrequency
! self.expireOffset = 0
! self.expireFraction = expireFraction
self.doCache = cache
--- 35,42 ----
"""
! self.cullFrequency = cullFrequency
! self.cullCount = cullFrequency
! self.cullOffset = 0
! self.cullFraction = cullFraction
self.doCache = cache
***************
*** 46,59 ****
self.lock = threading.Lock()
def get(self, id):
if self.doCache:
! if self.expireCount > self.expireFrequency:
! # Two threads could hit the expire in a row, but
! # that's not so bad. At least by setting expireCount
! # back to zero right away we avoid this. The expire
# method has a lock, so it's threadsafe.
! self.expireCount = 0
! self.expire()
try:
--- 46,62 ----
self.lock = threading.Lock()
+ def tryGet(self, id):
+ return self.cache.get(id)
+
def get(self, id):
if self.doCache:
! if self.cullCount > self.cullFrequency:
! # Two threads could hit the cull in a row, but
! # that's not so bad. At least by setting cullCount
! # back to zero right away we avoid this. The cull
# method has a lock, so it's threadsafe.
! self.cullCount = 0
! self.cull()
try:
***************
*** 115,122 ****
self.expiredCache[id] = ref(obj)
! def expire(self):
self.lock.acquire()
keys = self.cache.keys()
! for i in xrange(self.expireOffset, len(keys), self.expireFraction):
id = keys[i]
self.expiredCache[id] = ref(self.cache[id])
--- 118,125 ----
self.expiredCache[id] = ref(obj)
! def cull(self):
self.lock.acquire()
keys = self.cache.keys()
! for i in xrange(self.cullOffset, len(keys), self.cullFraction):
id = keys[i]
self.expiredCache[id] = ref(self.cache[id])
***************
*** 124,131 ****
# This offset tries to balance out which objects we expire, so
# no object will just hang out in the cache forever.
! self.expireOffset = (self.expiredOffset + 1) % self.expireFraction
self.lock.release()
! def purge(self, id):
self.lock.acquire()
if self.cache.has_key(id):
--- 127,134 ----
# This offset tries to balance out which objects we expire, so
# no object will just hang out in the cache forever.
! self.cullOffset = (self.cullOffset + 1) % self.cullFraction
self.lock.release()
! def expire(self, id):
self.lock.acquire()
if self.cache.has_key(id):
***************
*** 135,139 ****
self.lock.release()
! def clear(self):
self.lock.acquire()
for key, value in self.cache.items():
--- 138,142 ----
self.lock.release()
! def expireAll(self):
self.lock.acquire()
for key, value in self.cache.items():
***************
*** 142,145 ****
--- 145,151 ----
self.lock.release()
+ def allIDs(self):
+ return self.cache.keys()
+
class CacheSet(object):
***************
*** 169,175 ****
self.caches[cls.__name__].created(id, obj)
! def purge(self, id, cls):
try:
! self.caches[cls.__name__].purge(id)
except KeyError:
pass
--- 175,181 ----
self.caches[cls.__name__].created(id, obj)
! def expire(self, id, cls):
try:
! self.caches[cls.__name__].expire(id)
except KeyError:
pass
***************
*** 182,183 ****
--- 188,203 ----
self.caches[cls.__name__].clear()
+ def tryGet(self, id, cls):
+ try:
+ return self.caches[cls.__name__].tryGet(id)
+ except KeyError:
+ return None
+
+ def allIDs(self, cls):
+ try:
+ return self.caches[cls.__name__].allIDs()
+ except KeyError:
+ return []
+
+ def allSubCaches(self):
+ return self.caches.values()
Index: DBConnection.py
===================================================================
RCS file: /cvsroot/sqlobject/SQLObject/SQLObject/DBConnection.py,v
retrieving revision 1.44
retrieving revision 1.45
diff -C2 -d -r1.44 -r1.45
*** DBConnection.py 7 Sep 2003 00:49:03 -0000 1.44
--- DBConnection.py 7 Sep 2003 07:17:50 -0000 1.45
***************
*** 393,397 ****
--- 393,404 ----
if self._dbConnection.debug:
self._dbConnection.printDebug(self._connection, '', 'ROLLBACK')
+ subCaches = [(sub, sub.allIDs()) for sub in self.cache.allSubCaches()]
self._connection.rollback()
+
+ for subCache, ids in subCaches:
+ for id in ids:
+ inst = subCache.tryGet(id)
+ if inst is not None:
+ inst.expire()
def __getattr__(self, attr):
Index: SQLObject.py
===================================================================
RCS file: /cvsroot/sqlobject/SQLObject/SQLObject/SQLObject.py,v
retrieving revision 1.53
retrieving revision 1.54
diff -C2 -d -r1.53 -r1.54
*** SQLObject.py 7 Sep 2003 07:05:26 -0000 1.53
--- SQLObject.py 7 Sep 2003 07:17:50 -0000 1.54
***************
*** 708,711 ****
--- 708,712 ----
delattr(self, instanceName(column.name))
self._expired = True
+ self._connection.cache.expire(self.id, self.__class__)
self._SO_writeLock.release()
***************
*** 972,976 ****
self._SO_obsolete = True
self._connection._SO_delete(self)
! self._connection.cache.purge(self.id, self.__class__)
def delete(cls, id):
--- 973,977 ----
self._SO_obsolete = True
self._connection._SO_delete(self)
! self._connection.cache.expire(self.id, self.__class__)
def delete(cls, id):
|