bases-devel Mailing List for Bases: Enterprise Application Framework
Status: Alpha
Brought to you by: joe_steeve
From: <joe...@us...> - 2009-02-26 08:21:07
Revision: 32
http://bases.svn.sourceforge.net/bases/?rev=32&view=rev
Author: joe_steeve
Date: 2009-02-26 08:21:03 +0000 (Thu, 26 Feb 2009)
Log Message:
-----------
stringified the package version in setup.py
* setup.py: package version should be a string.
Modified Paths:
--------------
trunk/setup.py
Modified: trunk/setup.py
===================================================================
--- trunk/setup.py 2009-02-26 08:19:47 UTC (rev 31)
+++ trunk/setup.py 2009-02-26 08:21:03 UTC (rev 32)
@@ -35,7 +35,7 @@
setup(
name = "bases",
- version = 0.1,
+ version = "0.1",
url = 'https://forge.hipro.co.in/projects/bases/',
author = 'HiPro Team',
author_email = 'bas...@li...',
From: <joe...@us...> - 2009-02-26 08:19:51
Revision: 31
http://bases.svn.sourceforge.net/bases/?rev=31&view=rev
Author: joe_steeve
Date: 2009-02-26 08:19:47 +0000 (Thu, 26 Feb 2009)
Log Message:
-----------
type code collision in serialization code (fixed)
* bases/thirdparty/jecode.py (INT_POS_FIXED_START): This should
start from 32 and not 30. '30' collides with the 'type
characters'.
Modified Paths:
--------------
trunk/bases/thirdparty/jecode.py
Modified: trunk/bases/thirdparty/jecode.py
===================================================================
--- trunk/bases/thirdparty/jecode.py 2009-02-26 08:18:31 UTC (rev 30)
+++ trunk/bases/thirdparty/jecode.py 2009-02-26 08:19:47 UTC (rev 31)
@@ -55,7 +55,7 @@
TYPE_TERM = chr(31)
# Positive integers with value embedded in typecode.
-INT_POS_FIXED_START = 30
+INT_POS_FIXED_START = 32
INT_POS_FIXED_END = 63
INT_POS_FIXED_COUNT = 32
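For reference, a minimal sketch of why the old constant collided: encode_int embeds small positive integers directly in the typecode byte as chr(INT_POS_FIXED_START + x), so with a start of 30 the integer 1 serializes to chr(31), which is exactly the TYPE_TERM marker used to terminate variable-length values.

# Sketch only: mirrors the fixed-positive-integer encoding in jecode.py.
TYPE_TERM = chr(31)

def fixed_int_code(x, start):
    # Small positive integers are packed into the typecode byte itself.
    return chr(start + x)

assert fixed_int_code(1, 30) == TYPE_TERM   # old start: collides with TYPE_TERM
assert fixed_int_code(1, 32) != TYPE_TERM   # new start: 32..63 is clear of the reserved type codes 0..31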
From: <joe...@us...> - 2009-02-26 08:18:36
Revision: 30
http://bases.svn.sourceforge.net/bases/?rev=30&view=rev
Author: joe_steeve
Date: 2009-02-26 08:18:31 +0000 (Thu, 26 Feb 2009)
Log Message:
-----------
Added more debug logs for call tracking
* bases/core/transports/bbtrans.py (_BBTransProtocol): bumped logs
from level 3 to level 7.
* bases/core/broker.py (BasesBroker): added log at level 3 to
track incoming calls and outgoing responses.
Modified Paths:
--------------
trunk/bases/core/broker.py
trunk/bases/core/transports/bbtrans.py
Modified: trunk/bases/core/broker.py
===================================================================
--- trunk/bases/core/broker.py 2009-02-26 08:17:06 UTC (rev 29)
+++ trunk/bases/core/broker.py 2009-02-26 08:18:31 UTC (rev 30)
@@ -8,6 +8,7 @@
from datetime import datetime
import os
+from twisted.python import log
from twisted.internet import defer, error, protocol
from bases.core.transports import transports
from bases.errors import *
@@ -119,7 +120,7 @@
@param trans: a bases-transport object that has called us
"""
-
+
if dmesg[BASES_MSG_MSGTYPE] == BASES_METHOD_CALL:
self._handleInMethodCall(dmesg,trans)
elif dmesg[BASES_MSG_MSGTYPE] == BASES_METHOD_RESPONSE:
@@ -168,6 +169,9 @@
o = self.ObjRepo.getObject(dmesg[BASES_MSG_OBJID])
mctxt = {'brokerMsgID':mid, 'brokerOwnerID':str(id(trans)),
'basesService':trans.factory.basesService}
+ log.msg(3, "incoming call: (%s).%s (%s, %s)" % \
+ (dmesg[BASES_MSG_OBJID], dmesg[BASES_MSG_METHODNAME],
+ dmesg[BASES_MSG_ARGS], dmesg[BASES_MSG_KWARGS]))
d = o.callMethod(mctxt, dmesg[BASES_MSG_METHODNAME],
dmesg[BASES_MSG_ARGS], dmesg[BASES_MSG_KWARGS])
except:
@@ -193,6 +197,7 @@
try:
MsgID, trans, ts = self.in_MethodCalls[mid]
+ log.msg(3, "response: %s" % (response))
dmesg = [BASES_METHOD_RESPONSE, MsgID, response]
try:
trans.queueMsg(dmesg)
@@ -219,8 +224,10 @@
MsgID, trans, ts = self.in_MethodCalls[mid]
dmesg = None
if response.type == BasesAppError:
+ log.msg(3, "app-err-response: %s" % (response.value.message))
dmesg = [BASES_METHOD_AERESPONSE, MsgID, response.value.message]
else:
+ log.msg(3, "err-response: %s" % (response))
dmesg = [BASES_METHOD_ERESPONSE, MsgID, str(response)]
try:
trans.queueMsg(dmesg)
@@ -334,7 +341,7 @@
I should be called when a transport is dying to clear out its
references and do cleanup job.
"""
- print "Transport cleanup", trans
+ log.msg(4,"Transport cleanup %s" % (str(trans)))
if id(trans) in self.in_Transports:
# Should clear up all in-coming related book-keeping
mlist = self.in_Transports[id(trans)]
Modified: trunk/bases/core/transports/bbtrans.py
===================================================================
--- trunk/bases/core/transports/bbtrans.py 2009-02-26 08:17:06 UTC (rev 29)
+++ trunk/bases/core/transports/bbtrans.py 2009-02-26 08:18:31 UTC (rev 30)
@@ -93,22 +93,22 @@
# Parse the stream and break it into messages
while bytes_left != 0:
if self._Msg_TransitState == STATE_GET_HOLA:
- log.msg(3, "_Msg_TransitState == STATE_GET_HOLA")
+ log.msg(7, "_Msg_TransitState == STATE_GET_HOLA")
pos = self._handleHola(data,pos)
elif self._Msg_TransitState == STATE_GET_MSG:
- log.msg(3, "_Msg_TransitState == STATE_GET_MSG")
+ log.msg(7, "_Msg_TransitState == STATE_GET_MSG")
pos = self._handleMsg(data,pos)
elif self._Msg_TransitState == STATE_GET_INIT:
- log.msg(3, "_Msg_TransitState == STATE_GET_INIT")
+ log.msg(7, "_Msg_TransitState == STATE_GET_INIT")
pos = self._handleInit(data,pos)
elif self._Msg_TransitState == STATE_GET_INITRESPONSE:
- log.msg(3, "_Msg_TransitState == STATE_GET_INITRESPONSE")
+ log.msg(7, "_Msg_TransitState == STATE_GET_INITRESPONSE")
pos = self._handleInitR(data,pos)
else:
log.msg(1, "ERROR: Undefined state = %d" % \
(self._Msg_TransitState))
bytes_left = data_len - pos
- log.msg(3,"processed %d bytes, left %d" % (pos, bytes_left))
+ log.msg(7,"processed %d bytes, left %d" % (pos, bytes_left))
# Process the messages that we have recieved.
@@ -119,11 +119,11 @@
self.factory.basesBroker.processInMessage(dmsg, self)
self._ErrTolerance = 0
except Exception, e:
- log.msg(3, "Exception while processing _InMsgQueue")
- log.msg(3, str(e))
+ log.msg(7, "Exception while processing _InMsgQueue")
+ log.msg(7, str(e))
if self.Debug is True:
s = traceback.format_exc()
- log.msg(3, s)
+ log.msg(7, s)
self._ErrTolerance += 1
if self._ErrTolerance > BBTRANS_ERROR_TOLERANCE:
@@ -131,11 +131,11 @@
self.transport.loseConnection()
except Exception, e:
- log.msg(3, "Exception in DataRecieved")
- log.msg(3, str(e))
+ log.msg(7, "Exception in DataRecieved")
+ log.msg(7, str(e))
if self.Debug is True:
s = traceback.format_exc()
- log.msg(3, s)
+ log.msg(7, s)
self.transport.loseConnection()
@@ -162,7 +162,7 @@
@return tuple(int, int): A tuple of (length, type). (1) length
of the incoming message and (2) type of the incoming message"""
- log.msg(3, "Checking for hOla")
+ log.msg(7, "Checking for hOla")
try:
hola,length,msgtype = struct.unpack(BBTRANS_HOLA_FMT,data)
except:
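A minimal sketch of how such numeric levels can be filtered, assuming (as the bases_verbosity attribute used by bases-server below suggests) that the first argument passed to log.msg() is interpreted as the message's verbosity level by a custom observer; the observer class here is hypothetical.

# Hypothetical verbosity-filtering observer for twisted.python.log.
from twisted.python import log

class VerbosityObserver(object):
    def __init__(self, verbosity):
        self.bases_verbosity = verbosity
    def emit(self, event):
        parts = event.get('message', ())
        # Drop messages whose leading integer level exceeds the configured verbosity.
        if parts and isinstance(parts[0], int) and parts[0] > self.bases_verbosity:
            return
        print ' '.join(str(p) for p in parts)

# log.startLoggingWithObserver(VerbosityObserver(3).emit, setStdout=True)
# With verbosity 3, the level-7 transport traces stay quiet while the
# level-3 call/response logs are printed.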
From: <joe...@us...> - 2009-02-26 08:17:10
Revision: 29
http://bases.svn.sourceforge.net/bases/?rev=29&view=rev
Author: joe_steeve
Date: 2009-02-26 08:17:06 +0000 (Thu, 26 Feb 2009)
Log Message:
-----------
cleaned up the usage of .has_key in bases-server
* src/bases-server: changed key checking in dict from .has_key to
the 'in' operator.
Modified Paths:
--------------
trunk/src/bases-server
Modified: trunk/src/bases-server
===================================================================
--- trunk/src/bases-server 2009-02-26 08:15:56 UTC (rev 28)
+++ trunk/src/bases-server 2009-02-26 08:17:06 UTC (rev 29)
@@ -136,7 +136,7 @@
conf['globals.local_locations']=[conf['globals.location']]
conf['services'] = conf['services']
- if conf.has_key('globals.manhole'):
+ if 'globals.manhole' in conf:
conf['globals.manhole'] = bool(conf['globals.manhole'])
conf['globals.manhole_ip'] = conf['globals.manhole_ip']
conf['globals.manhole_port'] = int(conf['globals.manhole_port'])
@@ -158,20 +158,20 @@
else:
conf['globals.db_conn_timeout'] = 60
- if conf.has_key('globals.verbosity'):
+ if 'globals.verbosity' in conf:
conf['globals.verbosity'] = int(conf['globals.verbosity'])
else:
conf['globals.verbosity'] = 0
- if conf.has_key('globals.log_keep_days'):
+ if 'globals.log_keep_days' in conf:
conf['globals.log_keep_days'] = int(conf['globals.log_keep_days'])
else:
conf['globals.log_keep_days'] = 10
- if not conf.has_key("globals.name"):
+ if "globals.name" not in conf:
conf['globals.name'] = 'bases-server'
- if not conf.has_key("globals.syslog_prefix"):
+ if "globals.syslog_prefix" not in conf:
conf['globals.syslog_prefix'] = conf['globals.name']
if len(conf['services']) == 0:
@@ -204,16 +204,16 @@
sys.exit(1)
if sd['protocol'] in strans_list:
- if not sd.has_key('sslkey') or \
- not sd.has_key('sslcert'):
+ if 'sslkey' not in sd or \
+ 'sslcert' not in sd:
print "Service:%s,Protocol=%s requires sslkey and sslcert" % \
(sn, sd['protocol'])
sys.exit(1)
- if sd.has_key('verify_peers'):
+ if 'verify_peers' in sd:
sd['verify_peers'] = bool(sd['verify_peers'])
else:
sd['verify_peers'] = False
- if sd['verify_peers'] is True and not sd.has_key('ca'):
+ if sd['verify_peers'] is True and 'ca' not in sd:
print "Service=%s,Protocol=%s requires CA to verify_peers" % \
(sn, sd['protocol'])
sys.exit(1)
@@ -302,7 +302,7 @@
lo.bases_verbosity = conf['globals.verbosity']
log.startLoggingWithObserver(lo.emit, setStdout=True)
else:
- if conf.has_key("globals.log"):
+ if "globals.log" in conf:
d = os.path.dirname(conf["globals.log"])
b = os.path.basename(conf["globals.log"])
if not os.access(d, os.W_OK):
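For reference, a minimal sketch of the idiom the commit switches to; 'key' in d is equivalent to d.has_key(key) and, unlike has_key(), still exists in Python 3.

conf = {'globals.verbosity': '2'}           # illustrative values only
if 'globals.verbosity' in conf:             # instead of conf.has_key('globals.verbosity')
    conf['globals.verbosity'] = int(conf['globals.verbosity'])
else:
    conf['globals.verbosity'] = 0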
From: <joe...@us...> - 2009-02-26 08:16:04
Revision: 28
http://bases.svn.sourceforge.net/bases/?rev=28&view=rev
Author: joe_steeve
Date: 2009-02-26 08:15:56 +0000 (Thu, 26 Feb 2009)
Log Message:
-----------
Removed ChangeLog from VC. We have logs .. duh!
Removed Paths:
-------------
trunk/ChangeLog
Deleted: trunk/ChangeLog
===================================================================
--- trunk/ChangeLog 2009-02-26 08:14:57 UTC (rev 27)
+++ trunk/ChangeLog 2009-02-26 08:15:56 UTC (rev 28)
@@ -1,108 +0,0 @@
-2008-03-23 Joe Steeve <joe...@gm...>
-
- * bases/util/util.py: refactored loadModule to load only a module
- object. Added a loadKlass function to load a Klass object.
-
- * bases/db/pgsql.py: simplified the code a lot. removed the custom
- cursor and transaction objects. Transactions are connection level
- now. At connection time, the type of transaction can be
- chosen. This is simple and stupid and serves the purpose.
- (_getExceptions): db-api2.0 exception classes from the db-adapter
- are sent back to the caller to trap exceptions.
-
- * bases/db/dbi.py: many db-adapters themselvs do connection
- pooling. So does not make sense in us implementing it again. Just
- adds un-necessary code. Removed it. Kept it simple and stupid.
- (createSIConnection): Added to create 'serializable isolated'
- transaction connection.
- (createACConnection): Added to create 'auto commit' transaction
- connection.
-
- * src/bases-server: refactoring bases.dbi to bases.db
-
- * bases/services/objrepo.py (BasesObjectRepository.addObjectToDB):
- modified to retur the oid after adding object to objdb.
-
-2008-03-20 Joe Steeve <joe...@gm...>
-
- * bases/db/dbi.py: removed connection pooling. db adapters should
- do that. only supporting connection leak handling.
-
- * bases/db/pgsql.py: made the code a lot simpler. Just set the
- default transaction level now.
-
-2008-03-17 Joe Steeve <joe...@gm...>
-
- * bases/dbi/dbi.py (cleanupConnectionLeak): added empty function
- to avoid panic
-
- * bases/util/util.py (loadModule): to load classes dynamically
-
- * bases/dbi/pgsql.py: adapter code for pgsql
-
- * bases/dbi/dbi.py (activeCons_lock): added simple db-specific
- connection pooling support.
-
- * src/bases-server (LoadConfig): Added a section for 'hosted app
- specific information'
-
-2008-02-09 Joe Steeve <joe...@gm...>
-
- * src/bases-server (sanitizeConfigOptions): changed the
- arrangement of the 'conf' structure so that we dont have to
- iterate every time we want a service-description
-
-2008-02-03 Joe Steeve <joe...@gm...>
-
- * bases/synclient/services.py (BasesDirectoryClient): inital code
- to behave as a client to the bases-directory-service.
-
- * bases/synclient/transports/transports.py (getTransport): added
- to create a transport object based on the given protocol name.
-
-2008-01-23 Joe Steeve <joe...@gm...>
-
- * src/bases-server (sanitizeConfigOptions): convert config option
- types, put sane defaults, and basic option verification.
- Removed unnecessary casts and exception handling
- stuff. sanitizeConfigOptions makes them un-necessary
-
- * bases/core/transports/transports.py (makeSSLContext): creation
- of ssl context is needed in the broker. So, makes sense here.
- This now uses twisted.internet.ssl.CertificateOptions to create a
- context.
-
-2008-01-22 Joe Steeve <joe...@gm...>
-
- * bases/core/transports/transports.py (basesTransportsMap): add
- this map to keep track for the protocols that we support
-
- * src/bases-server (start_services): made protocol-factory
- creation generic
-
- * bases/services/directory.py (BasesComponentDirectory.__init__):
- the component dir registers with the obj-repo.
-
- * bases/services/objrepo.py (BasesObjectRepository): added most of
- the needed methods. The garbage collection code should be put here.
-
- * bases/core/broker.py (BasesBroker): converted MsgIDs to be
- sha-hash rather than ints.
- (BasesBroker.__init__): Hard constraints that incoming transports
- are different from outgoing transports. A connection drop will
- cause relevant objects to be destroyed.
-
-2008-01-19 Joe Steeve <joe...@gm...>
-
- * bases/core/transports/bbtrans.py (_BBTransProtocol): cleaned up
- console printing with logging. tracebacks are printed only when in
- debug mode.
-
- * src/bases-server: removed Twisted services/application based
- startup
-
-2008-01-18 Joe Steeve <joe...@gm...>
-
- * src/bases-server: multi-level logging, daemonizing, shedding
- privileges, starting multiple services.
-
From: <joe...@us...> - 2009-02-26 08:15:02
Revision: 27
http://bases.svn.sourceforge.net/bases/?rev=27&view=rev
Author: joe_steeve
Date: 2009-02-26 08:14:57 +0000 (Thu, 26 Feb 2009)
Log Message:
-----------
replaced rencode with jecode. jecode has unicode support.
* bases/synclient/transports/bbtrans.py: modified to use jecode
instead of rencode
* bases/core/transports/bbtrans.py: modified to use jecode instead
of rencode
* bases/thirdparty/jecode.py: complete rewrite of rencode with
unicode support and more safety
Modified Paths:
--------------
trunk/bases/core/transports/bbtrans.py
trunk/bases/synclient/transports/bbtrans.py
Added Paths:
-----------
trunk/bases/thirdparty/jecode.py
Modified: trunk/bases/core/transports/bbtrans.py
===================================================================
--- trunk/bases/core/transports/bbtrans.py 2009-02-26 08:13:06 UTC (rev 26)
+++ trunk/bases/core/transports/bbtrans.py 2009-02-26 08:14:57 UTC (rev 27)
@@ -1,6 +1,6 @@
# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
-from bases.thirdparty import rencode
+from bases.thirdparty import jecode
from twisted.internet import protocol
from twisted.python import log
@@ -115,7 +115,7 @@
while len(self._InMsgQueue) != 0:
emsg = self._InMsgQueue.pop(0)
try:
- dmsg = rencode.loads(emsg)
+ dmsg = jecode.loads(emsg)
self.factory.basesBroker.processInMessage(dmsg, self)
self._ErrTolerance = 0
except Exception, e:
@@ -272,7 +272,7 @@
TBD: I should check whether the remote's accepted message
size.
"""
- emsg = rencode.dumps(m)
+ emsg = jecode.dumps(m)
l = len(emsg)
if l > BBTRANS_MAX_MSG_SIZE:
raise BBTransError\
@@ -283,7 +283,7 @@
def sendInit(self):
- emsg = rencode.dumps(BBTRANS_INIT)
+ emsg = jecode.dumps(BBTRANS_INIT)
l = len(emsg)
if l > BBTRANS_MAX_INIT_SIZE:
raise BBTransError\
@@ -298,7 +298,7 @@
other end of the line. I see whether we are ok with it. When
respond is True, I send back a init-response."""
- init = rencode.loads(einit)
+ init = jecode.loads(einit)
# checking the protocol version
if (init['ver_major'], init['ver_minor']) != \
(BBTRANS_VER_MAJOR, BBTRANS_VER_MINOR):
@@ -313,7 +313,7 @@
self._KeepConnectionAlive = init['keepalive']
if respond is True:
- emsg = rencode.dumps(BBTRANS_INIT)
+ emsg = jecode.dumps(BBTRANS_INIT)
l = len(emsg)
if l > BBTRANS_MAX_INIT_SIZE:
raise BBTransError\
Modified: trunk/bases/synclient/transports/bbtrans.py
===================================================================
--- trunk/bases/synclient/transports/bbtrans.py 2009-02-26 08:13:06 UTC (rev 26)
+++ trunk/bases/synclient/transports/bbtrans.py 2009-02-26 08:14:57 UTC (rev 27)
@@ -3,7 +3,7 @@
from datetime import datetime, timedelta
import struct, traceback
-from bases.thirdparty import rencode
+from bases.thirdparty import jecode
from bases.errors import *
# A limit on the maximum length of a bbtrans message.
@@ -80,7 +80,7 @@
@param m: A serializable python object.
"""
- emsg = rencode.dumps(m)
+ emsg = jecode.dumps(m)
l = len(emsg)
if l > BBTRANS_MAX_MSG_SIZE:
raise BBTransError("Object too long=%d to transmit" % (l))
@@ -100,7 +100,7 @@
self.dataReceived(data)
emsg = self._InMsgQueue.pop(0)
- r = rencode.loads(emsg)
+ r = jecode.loads(emsg)
return r
@@ -268,7 +268,7 @@
other end of the line. I see whether we are ok with it. When
respond is True, I send back a init-response."""
- init = rencode.loads(einit)
+ init = jecode.loads(einit)
# checking the protocol version
if (init['ver_major'], init['ver_minor']) != \
(BBTRANS_VER_MAJOR, BBTRANS_VER_MINOR):
@@ -282,7 +282,7 @@
self._KeepConnectionAlive = init['keepalive']
if respond is True:
- emsg = rencode.dumps(BBTRANS_INIT)
+ emsg = jecode.dumps(BBTRANS_INIT)
l = len(emsg)
if l > BBTRANS_MAX_INIT_SIZE:
raise BBTransError("Object too long=%d to transmit" % (l))
Added: trunk/bases/thirdparty/jecode.py
===================================================================
--- trunk/bases/thirdparty/jecode.py (rev 0)
+++ trunk/bases/thirdparty/jecode.py 2009-02-26 08:14:57 UTC (rev 27)
@@ -0,0 +1,760 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
+
+"""
+jecode -- Web safe object pickling/unpickling.
+"""
+
+__version__ = '1.0.0'
+__all__ = ['dumps', 'loads']
+
+# This module has code derived from Connelly Barnes's rencode. Which
+# is again derived from the bencode module by Petru Paler, et
+# al. However, this code is in no way compatible with both rencode or
+# bencode.
+
+import struct, string
+
+# Number of bits for serialized floats, either 32 or 64.
+FLOAT_BITS = 64
+
+# Maximum length of integer when written as base 10 string. This is
+# used for very large ints (greater than 8 bytes)
+MAX_INT_LENGTH = 64
+
+# Maximum length of a string (1/2/4/8).
+# 1 = 2^8 bytes, 2 = 2^16 bytes, 4 = 2^32 bytes, 8 = 2^64 bytes.
+MAX_STR_LENGTH = 4
+
+# Maximum length of a unicode string encoded as utf-8 (1/2/4/8).
+# 1 = 2^8 bytes, 2 = 2^16 bytes, 4 = 2^32 bytes, 8 = 2^64 bytes.
+MAX_UNI_LENGTH = 4
+
+# Maximum length of a unicode string encoded as utf-8 (1/2/4/8).
+# 1 = 2^8 bytes, 2 = 2^16 bytes, 4 = 2^32 bytes, 8 = 2^64 bytes.
+MAX_LIST_LENGTH = 4
+
+# Maximum length of a unicode string encoded as utf-8 (1/2/4/8).
+# 1 = 2^8 bytes, 2 = 2^16 bytes, 4 = 2^32 bytes, 8 = 2^64 bytes.
+MAX_DICT_LENGTH = 4
+
+
+# The type codes for all supported types. Values 13..30 are reserved.
+TYPE_NONE = chr(0)
+TYPE_INT = chr(1)
+TYPE_INT1 = chr(2)
+TYPE_INT2 = chr(3)
+TYPE_INT4 = chr(4)
+TYPE_INT8 = chr(5)
+TYPE_FLOAT = chr(6)
+TYPE_TRUE = chr(7)
+TYPE_FALSE = chr(8)
+TYPE_STR = chr(9)
+TYPE_UNI = chr(10)
+TYPE_LIST = chr(11)
+TYPE_DICT = chr(12)
+TYPE_TERM = chr(31)
+
+# Positive integers with value embedded in typecode.
+INT_POS_FIXED_START = 30
+INT_POS_FIXED_END = 63
+INT_POS_FIXED_COUNT = 32
+
+# Negative integers with value embedded in typecode.
+INT_NEG_FIXED_START = 64
+INT_NEG_FIXED_END = 95
+INT_NEG_FIXED_COUNT = 32
+
+# Fixed length strings. (plain strings)
+STR_FIXED_START = 96
+STR_FIXED_END = 143
+STR_FIXED_COUNT = 48
+
+# Fixed length unicode strings (the length is the length of the utf-8
+# encoding of the unicode string and not the number of characters in
+# the unicode string)
+UNICODE_FIXED_START = 144
+UNICODE_FIXED_END = 191
+UNICODE_FIXED_COUNT = 48
+
+# Fixed length dictionaries
+DICT_FIXED_START = 192
+DICT_FIXED_END = 223
+DICT_FIXED_COUNT = 32
+
+# Fixed length lists
+LIST_FIXED_START = 224
+LIST_FIXED_END = 255
+LIST_FIXED_COUNT = 32
+
+#### Decode integer types ####
+# <TYPE_INT><integer as base10 string><TYPE_TERM>
+def decode_int(x, f):
+ f += 1
+ newf = x.index(TYPE_TERM, f)
+ if newf - f >= MAX_INT_LENGTH:
+ raise ValueError('overflow')
+ try:
+ n = int(x[f:newf])
+ except (OverflowError, ValueError):
+ n = long(x[f:newf])
+ if x[f] == '-':
+ if x[f + 1] == '0':
+ raise ValueError
+ elif x[f] == '0' and newf != f+1:
+ raise ValueError
+ return (n, newf+1)
+
+# <TYPE_INT1><number packed as 1 byte>
+def decode_intb(x, f):
+ f += 1
+ return (struct.unpack('!b', x[f:f+1])[0], f+1)
+
+# <TYPE_INT2><number packed as 2 bytes>
+def decode_inth(x, f):
+ f += 1
+ return (struct.unpack('!h', x[f:f+2])[0], f+2)
+
+# <TYPE_INT4><number packed as 4 bytes>
+def decode_intl(x, f):
+ f += 1
+ return (struct.unpack('!l', x[f:f+4])[0], f+4)
+
+# <TYPE_INT8><number packed as 8 bytes>
+def decode_intq(x, f):
+ f += 1
+ return (struct.unpack('!q', x[f:f+8])[0], f+8)
+
+
+#### Decode float types ####
+
+# <TYPE_FLOAT><packed float number (4/8 bytes for 32/64 bits respectively)>
+def decode_float(x, f):
+ f += 1
+ if FLOAT_BITS == 32:
+ n = struct.unpack('!f', x[f:f+4])[0]
+ return (n, f+4)
+ elif FLOAT_BITS == 64:
+ n = struct.unpack('!d', x[f:f+8])[0]
+ return (n, f+8)
+ else:
+ raise ValueError
+
+
+#### Decode string types ####
+
+# <TYPE_STR><length of string in 1 byte>
+def decode_string_l1(x, f):
+ e = struct.unpack('!B', x[f+1])[0] + f + 2
+ f += 2
+ return(x[f:e], e)
+
+# <TYPE_STR><length of string in 2 bytes>
+def decode_string_l2(x, f):
+ e = struct.unpack('!H', x[f+1:f+3])[0] + f + 3
+ f += 3
+ return(x[f:e], e)
+
+# <TYPE_STR><length of string in 4 bytes>
+def decode_string_l4(x, f):
+ e = struct.unpack('!L', x[f+1:f+5])[0] + f + 5
+ f += 5
+ return(x[f:e], e)
+
+# <TYPE_STR><length of string in 8 bytes>
+def decode_string_l8(x, f):
+ e = struct.unpack('!Q', x[f+1:f+9])[0] + f + 9
+ f += 9
+ return(x[f:e], e)
+
+
+#### Decode unicode types ####
+
+# <TYPE_UNI><length of utf-8 string in 1 byte>
+def decode_unistring_l1(x, f):
+ e = struct.unpack('!B', x[f+1])[0] + f + 2
+ f += 2
+ return(x[f:e].decode("utf-8"), e)
+
+# <TYPE_UNI><length of utf-8 string in 2 bytes>
+def decode_unistring_l2(x, f):
+ e = struct.unpack('!H', x[f+1:f+3])[0] + f + 3
+ f += 3
+ return(x[f:e].decode("utf-8"), e)
+
+# <TYPE_UNI><length of utf-8 string in 2 bytes>
+def decode_unistring_l4(x, f):
+ e = struct.unpack('!L', x[f+1:f+5])[0] + f + 5
+ f += 5
+ return(x[f:e].decode("utf-8"), e)
+
+# <TYPE_UNI><length of utf-8 string in 2 bytes>
+def decode_unistring_l8(x, f):
+ e = struct.unpack('!Q', x[f+1:f+9])[0] + f + 9
+ f += 9
+ return(x[f:e].decode("utf-8"), e)
+
+
+#### Decode list types ####
+
+# <TYPE_LIST><length as 1 byte><encoded list data>
+def decode_list_l1(x, f):
+ l = struct.unpack('!B', x[f+1])[0]
+ r, f = [], f+2
+ while l > 0:
+ v, f = decode_func[x[f]](x, f)
+ r.append(v)
+ l = l-1
+ if x[f] != TYPE_TERM:
+ raise ValueError
+ return (tuple(r), f+1)
+
+# <TYPE_LIST><length as 2 bytes><encoded list data>
+def decode_list_l2(x, f):
+ l = struct.unpack('!H', x[f+1:f+3])[0]
+ r, f = [], f+3
+ while l > 0:
+ v, f = decode_func[x[f]](x, f)
+ r.append(v)
+ l = l-1
+ if x[f] != TYPE_TERM:
+ raise ValueError
+ return (tuple(r), f+1)
+
+# <TYPE_LIST><length as 4 bytes><encoded list data>
+def decode_list_l4(x, f):
+ l = struct.unpack('!L', x[f+1:f+5])[0]
+ r, f = [], f+5
+ while l > 0:
+ v, f = decode_func[x[f]](x, f)
+ r.append(v)
+ l = l-1
+ if x[f] != TYPE_TERM:
+ raise ValueError
+ return (tuple(r), f+1)
+
+# <TYPE_LIST><length as 8 bytes><encoded list data>
+def decode_list_l8(x, f):
+ l = struct.unpack('!Q', x[f+1:f+9])[0]
+ r, f = [], f+9
+ while l > 0:
+ v, f = decode_func[x[f]](x, f)
+ r.append(v)
+ l = l-1
+ if x[f] != TYPE_TERM:
+ raise ValueError
+ return (tuple(r), f+1)
+
+
+#### Decode dict types ####
+
+# <TYPE_DICT><length as 1 byte><encoded dict data>
+def decode_dict_l1(x, f):
+ l = struct.unpack('!B', x[f+1])[0]
+ r, f = {}, f+2
+ while l > 0:
+ k, f = decode_func[x[f]](x, f)
+ r[k], f = decode_func[x[f]](x, f)
+ l = l-1
+ if x[f] != TYPE_TERM:
+ raise ValueError
+ return (r, f + 1)
+
+# <TYPE_DICT><length as 2 bytes><encoded dict data>
+def decode_dict_l2(x, f):
+ l = struct.unpack('!H', x[f+1:f+3])[0]
+ r, f = {}, f+3
+ while l > 0:
+ k, f = decode_func[x[f]](x, f)
+ r[k], f = decode_func[x[f]](x, f)
+ l = l-1
+ if x[f] != TYPE_TERM:
+ raise ValueError
+ return (r, f + 1)
+
+# <TYPE_DICT><length as 4 bytes><encoded dict data>
+def decode_dict_l4(x, f):
+ l = struct.unpack('!L', x[f+1:f+5])[0]
+ r, f = {}, f+5
+ while l > 0:
+ k, f = decode_func[x[f]](x, f)
+ r[k], f = decode_func[x[f]](x, f)
+ l = l-1
+ if x[f] != TYPE_TERM:
+ raise ValueError
+ return (r, f + 1)
+
+# <TYPE_DICT><length as 8 bytes><encoded dict data>
+def decode_dict_l8(x, f):
+ l = struct.unpack('!Q', x[f+1:f+9])[0]
+ r, f = {}, f+9
+ while l > 0:
+ k, f = decode_func[x[f]](x, f)
+ r[k], f = decode_func[x[f]](x, f)
+ l = l-1
+ if x[f] != TYPE_TERM:
+ raise ValueError
+ return (r, f + 1)
+
+
+#### Decode booleans ####
+
+# <TYPE_TRUE>
+def decode_true(x, f):
+ return (True, f+1)
+
+# <TYPE_FALSE>
+def decode_false(x, f):
+ return (False, f+1)
+
+#### Decode None ####
+
+# <TYPE_NONE>
+def decode_none(x, f):
+ return (None, f+1)
+
+# To catch bad encoding :)
+def decode_badtype(x, f):
+ raise ValueError ("Bad type " + x[f])
+
+decode_func = {}
+decode_func[TYPE_NONE ] = decode_none
+decode_func[TYPE_INT ] = decode_int
+decode_func[TYPE_INT1 ] = decode_intb
+decode_func[TYPE_INT2 ] = decode_inth
+decode_func[TYPE_INT4 ] = decode_intl
+decode_func[TYPE_INT8 ] = decode_intq
+decode_func[TYPE_FLOAT] = decode_float
+decode_func[TYPE_TRUE ] = decode_true
+decode_func[TYPE_FALSE] = decode_false
+
+if MAX_STR_LENGTH == 1:
+ decode_func[TYPE_STR] = decode_string_l1
+elif MAX_STR_LENGTH == 2:
+ decode_func[TYPE_STR] = decode_string_l2
+elif MAX_STR_LENGTH == 4:
+ decode_func[TYPE_STR] = decode_string_l4
+elif MAX_STR_LENGTH == 8:
+ decode_func[TYPE_STR] = decode_string_l8
+
+if MAX_UNI_LENGTH == 1:
+ decode_func[TYPE_UNI] = decode_unistring_l1
+elif MAX_UNI_LENGTH == 2:
+ decode_func[TYPE_UNI] = decode_unistring_l2
+elif MAX_UNI_LENGTH == 4:
+ decode_func[TYPE_UNI] = decode_unistring_l4
+elif MAX_UNI_LENGTH == 8:
+ decode_func[TYPE_UNI] = decode_unistring_l8
+
+if MAX_LIST_LENGTH == 1:
+ decode_func[TYPE_LIST] = decode_list_l1
+elif MAX_LIST_LENGTH == 2:
+ decode_func[TYPE_LIST] = decode_list_l2
+elif MAX_LIST_LENGTH == 4:
+ decode_func[TYPE_LIST] = decode_list_l4
+elif MAX_LIST_LENGTH == 8:
+ decode_func[TYPE_LIST] = decode_list_l8
+
+if MAX_DICT_LENGTH == 1:
+ decode_func[TYPE_DICT] = decode_dict_l1
+elif MAX_DICT_LENGTH == 2:
+ decode_func[TYPE_DICT] = decode_dict_l2
+elif MAX_DICT_LENGTH == 4:
+ decode_func[TYPE_DICT] = decode_dict_l4
+elif MAX_DICT_LENGTH == 8:
+ decode_func[TYPE_DICT] = decode_dict_l8
+
+# fill the decode_func entries for the fixed-length integer (positive
+# and negative) decoders
+def make_fixed_length_int_decoders():
+ def make_decoder(j):
+ def f(x, f):
+ return (j, f+1)
+ return f
+ for i in range(INT_POS_FIXED_COUNT):
+ decode_func[chr(INT_POS_FIXED_START+i)] = make_decoder(i)
+ for i in range(INT_NEG_FIXED_COUNT):
+ decode_func[chr(INT_NEG_FIXED_START+i)] = make_decoder(-1-i)
+
+make_fixed_length_int_decoders()
+
+
+# fill the decode_func entries for the fixed-length string decoders
+def make_fixed_length_string_decoders():
+ def make_decoder(slen):
+ def f(x, f):
+ return (x[f+1:f+1+slen], f+1+slen)
+ return f
+ for i in range(STR_FIXED_COUNT):
+ decode_func[chr(STR_FIXED_START+i)] = make_decoder(i)
+
+make_fixed_length_string_decoders()
+
+
+# fill the decode_func entries for the fixed-length unicode string
+# decoders
+def make_fixed_length_unistring_decoders():
+ def make_decoder(slen):
+ def f(x, f):
+ return (x[f+1:f+1+slen].decode("utf-8"), f+1+slen)
+ return f
+ for i in range(UNICODE_FIXED_COUNT):
+ decode_func[chr(UNICODE_FIXED_START+i)] = make_decoder(i)
+
+make_fixed_length_unistring_decoders()
+
+
+# fill the decode_func entries for the fixed-length list decoders
+def make_fixed_length_list_decoders():
+ def make_decoder(slen):
+ def f(x, f):
+ r, f = [], f+1
+ for i in range(slen):
+ v, f = decode_func[x[f]](x, f)
+ r.append(v)
+ return (tuple(r), f)
+ return f
+ for i in range(LIST_FIXED_COUNT):
+ decode_func[chr(LIST_FIXED_START+i)] = make_decoder(i)
+
+make_fixed_length_list_decoders()
+
+
+# fill the decode_func entries for the fixed-length dict decoders
+def make_fixed_length_dict_decoders():
+ def make_decoder(slen):
+ def f(x, f):
+ r, f = {}, f+1
+ for j in range(slen):
+ k, f = decode_func[x[f]](x, f)
+ r[k], f = decode_func[x[f]](x, f)
+ return (r, f)
+ return f
+ for i in range(DICT_FIXED_COUNT):
+ decode_func[chr(DICT_FIXED_START+i)] = make_decoder(i)
+
+make_fixed_length_dict_decoders()
+
+
+# Entry point to the decoder
+def loads(x):
+ try:
+ r, l = decode_func[x[0]](x, 0)
+ except (IndexError, KeyError):
+ raise ValueError
+ if l != len(x):
+ raise ValueError
+ return r
+
+
+from types import NoneType, IntType, LongType, FloatType, DictType, ListType
+from types import TupleType, StringType, UnicodeType, BooleanType
+
+# Encode integers
+def encode_int(x, r):
+ # Check if x fits in fixed-positive-integer range
+ if 0 <= x < INT_POS_FIXED_COUNT:
+ r.append(chr(INT_POS_FIXED_START+x))
+ # Check if x fits in fixex-negative-integer range
+ elif -INT_NEG_FIXED_COUNT <= x < 0:
+ r.append(chr(INT_NEG_FIXED_START-1-x))
+ # Check if x can fit in 1 byte
+ elif -128 <= x < 128:
+ r.extend((TYPE_INT1, struct.pack('!b', x)))
+ # Check if x can fit in 2 bytes
+ elif -32768 <= x < 32768:
+ r.extend((TYPE_INT2, struct.pack('!h', x)))
+ # Check if x can fit in 4 bytes
+ elif -2147483648 <= x < 2147483648:
+ r.extend((TYPE_INT4, struct.pack('!l', x)))
+ # Check if x can fit in 8 bytes
+ elif -9223372036854775808 <= x < 9223372036854775808:
+ r.extend((TYPE_INT8, struct.pack('!q', x)))
+ else:
+ # x is very long. Should be encoded as a base10 string
+ s = str(x)
+ if len(s) >= MAX_INT_LENGTH:
+ raise ValueError('overflow')
+ r.extend((TYPE_INT, s, TYPE_TERM))
+
+# Encode floats
+def encode_float(x, r):
+ if FLOAT_BITS == 32:
+ r.extend((TYPE_FLOAT, struct.pack('!f', x)))
+ elif FLOAT_BITS == 64:
+ r.extend((TYPE_FLOAT, struct.pack('!d', x)))
+ else:
+ raise ValueError
+
+# Encode booleans
+def encode_bool(x, r):
+ if x is True:
+ r.extend(TYPE_TRUE)
+ else:
+ r.extend(TYPE_FALSE)
+
+# Encode None
+def encode_none(x, r):
+ r.extend(TYPE_NONE)
+
+
+# Encode strings with 1-byte length
+def encode_string_l1(x, r):
+ if len(x) < STR_FIXED_COUNT:
+ r.extend((chr(STR_FIXED_START + len(x)), x))
+ else:
+ r.extend((TYPE_STR, struct.pack('!B', len(x)), x))
+# Encode strings with 2-byte length
+def encode_string_l2(x, r):
+ if len(x) < STR_FIXED_COUNT:
+ r.extend((chr(STR_FIXED_START + len(x)), x))
+ else:
+ r.extend((TYPE_STR, struct.pack('!H', len(x)), x))
+# Encode strings with 4-byte length
+def encode_string_l4(x, r):
+ if len(x) < STR_FIXED_COUNT:
+ r.extend((chr(STR_FIXED_START + len(x)), x))
+ else:
+ r.extend((TYPE_STR, struct.pack('!L', len(x)), x))
+# Encode strings with 8-byte length
+def encode_string_l8(x, r):
+ if len(x) < STR_FIXED_COUNT:
+ r.extend((chr(STR_FIXED_START + len(x)), x))
+ else:
+ r.extend((TYPE_STR, struct.pack('!Q', len(x)), x))
+
+
+# Encode unicode strings with 1-byte length
+def encode_unistring_l1(x, r):
+ u = x.encode("utf-8")
+ if len(u) < UNICODE_FIXED_COUNT:
+ r.extend((chr(UNICODE_FIXED_START + len(u)), u))
+ else:
+ r.extend((TYPE_UNI, struct.pack('!B', len(u)), u))
+# Encode unicode strings with 2-byte length
+def encode_unistring_l2(x, r):
+ u = x.encode("utf-8")
+ if len(u) < UNICODE_FIXED_COUNT:
+ r.extend((chr(UNICODE_FIXED_START + len(u)), u))
+ else:
+ r.extend((TYPE_UNI, struct.pack('!H', len(u)), u))
+# Encode unicode strings with 4-byte length
+def encode_unistring_l4(x, r):
+ u = x.encode("utf-8")
+ if len(u) < UNICODE_FIXED_COUNT:
+ r.extend((chr(UNICODE_FIXED_START + len(u)), u))
+ else:
+ r.extend((TYPE_UNI, struct.pack('!L', len(u)), u))
+# Encode unicode strings with 8-byte length
+def encode_unistring_l8(x, r):
+ u = x.encode("utf-8")
+ if len(u) < UNICODE_FIXED_COUNT:
+ r.extend((chr(UNICODE_FIXED_START + len(u)), u))
+ else:
+ r.extend((TYPE_UNI, struct.pack('!Q', len(u)), u))
+
+
+# Encode lists with 1-byte length
+def encode_list_l1(x, r):
+ if len(x) < LIST_FIXED_COUNT:
+ r.append(chr(LIST_FIXED_START + len(x)))
+ for i in x:
+ encode_func[type(i)](i, r)
+ else:
+ r.extend((TYPE_LIST, struct.pack('!B', len(x))))
+ for i in x:
+ encode_func[type(i)](i, r)
+ r.append(TYPE_TERM)
+# Encode lists with 2-byte length
+def encode_list_l2(x, r):
+ if len(x) < LIST_FIXED_COUNT:
+ r.append(chr(LIST_FIXED_START + len(x)))
+ for i in x:
+ encode_func[type(i)](i, r)
+ else:
+ r.extend((TYPE_LIST, struct.pack('!H', len(x))))
+ for i in x:
+ encode_func[type(i)](i, r)
+ r.append(TYPE_TERM)
+# Encode lists with 4-byte length
+def encode_list_l4(x, r):
+ if len(x) < LIST_FIXED_COUNT:
+ r.append(chr(LIST_FIXED_START + len(x)))
+ for i in x:
+ encode_func[type(i)](i, r)
+ else:
+ r.extend((TYPE_LIST, struct.pack('!L', len(x))))
+ for i in x:
+ encode_func[type(i)](i, r)
+ r.append(TYPE_TERM)
+# Encode lists with 8-byte length
+def encode_list_l8(x, r):
+ if len(x) < LIST_FIXED_COUNT:
+ r.append(chr(LIST_FIXED_START + len(x)))
+ for i in x:
+ encode_func[type(i)](i, r)
+ else:
+ r.extend((TYPE_LIST, struct.pack('!Q', len(x))))
+ for i in x:
+ encode_func[type(i)](i, r)
+ r.append(TYPE_TERM)
+
+
+# Encode dict with 1-byte length
+def encode_dict_l1(x,r):
+ if len(x) < DICT_FIXED_COUNT:
+ r.append(chr(DICT_FIXED_START + len(x)))
+ for k, v in x.items():
+ encode_func[type(k)](k, r)
+ encode_func[type(v)](v, r)
+ else:
+ r.extend((TYPE_DICT, struct.pack('!B', len(x))))
+ for k, v in x.items():
+ encode_func[type(k)](k, r)
+ encode_func[type(v)](v, r)
+ r.append(TYPE_TERM)
+# Encode dict with 2-byte length
+def encode_dict_l2(x,r):
+ if len(x) < DICT_FIXED_COUNT:
+ r.append(chr(DICT_FIXED_START + len(x)))
+ for k, v in x.items():
+ encode_func[type(k)](k, r)
+ encode_func[type(v)](v, r)
+ else:
+ r.extend((TYPE_DICT, struct.pack('!H', len(x))))
+ for k, v in x.items():
+ encode_func[type(k)](k, r)
+ encode_func[type(v)](v, r)
+ r.append(TYPE_TERM)
+# Encode dict with 4-byte length
+def encode_dict_l4(x,r):
+ if len(x) < DICT_FIXED_COUNT:
+ r.append(chr(DICT_FIXED_START + len(x)))
+ for k, v in x.items():
+ encode_func[type(k)](k, r)
+ encode_func[type(v)](v, r)
+ else:
+ r.extend((TYPE_DICT, struct.pack('!L', len(x))))
+ for k, v in x.items():
+ encode_func[type(k)](k, r)
+ encode_func[type(v)](v, r)
+ r.append(TYPE_TERM)
+# Encode dict with 8-byte length
+def encode_dict_l8(x,r):
+ if len(x) < DICT_FIXED_COUNT:
+ r.append(chr(DICT_FIXED_START + len(x)))
+ for k, v in x.items():
+ encode_func[type(k)](k, r)
+ encode_func[type(v)](v, r)
+ else:
+ r.extend((TYPE_DICT, struct.pack('!Q', len(x))))
+ for k, v in x.items():
+ encode_func[type(k)](k, r)
+ encode_func[type(v)](v, r)
+ r.append(TYPE_TERM)
+
+
+encode_func = {}
+encode_func[IntType] = encode_int
+encode_func[LongType] = encode_int
+encode_func[FloatType] = encode_float
+encode_func[NoneType] = encode_none
+encode_func[BooleanType] = encode_bool
+
+if MAX_STR_LENGTH == 1:
+ encode_func[StringType] = encode_string_l1
+elif MAX_STR_LENGTH == 2:
+ encode_func[StringType] = encode_string_l2
+elif MAX_STR_LENGTH == 4:
+ encode_func[StringType] = encode_string_l4
+elif MAX_STR_LENGTH == 8:
+ encode_func[StringType] = encode_string_l8
+
+if MAX_UNI_LENGTH == 1:
+ encode_func[UnicodeType] = encode_unistring_l1
+elif MAX_UNI_LENGTH == 2:
+ encode_func[UnicodeType] = encode_unistring_l2
+elif MAX_UNI_LENGTH == 4:
+ encode_func[UnicodeType] = encode_unistring_l4
+elif MAX_UNI_LENGTH == 8:
+ encode_func[UnicodeType] = encode_unistring_l8
+
+if MAX_LIST_LENGTH == 1:
+ encode_func[ListType] = encode_list_l1
+elif MAX_LIST_LENGTH == 2:
+ encode_func[ListType] = encode_list_l2
+elif MAX_LIST_LENGTH == 4:
+ encode_func[ListType] = encode_list_l4
+elif MAX_LIST_LENGTH == 8:
+ encode_func[ListType] = encode_list_l8
+
+if MAX_LIST_LENGTH == 1:
+ encode_func[TupleType] = encode_list_l1
+elif MAX_LIST_LENGTH == 2:
+ encode_func[TupleType] = encode_list_l2
+elif MAX_LIST_LENGTH == 4:
+ encode_func[TupleType] = encode_list_l4
+elif MAX_LIST_LENGTH == 8:
+ encode_func[TupleType] = encode_list_l8
+
+if MAX_DICT_LENGTH == 1:
+ encode_func[DictType] = encode_dict_l1
+elif MAX_DICT_LENGTH == 2:
+ encode_func[DictType] = encode_dict_l2
+elif MAX_DICT_LENGTH == 4:
+ encode_func[DictType] = encode_dict_l4
+elif MAX_DICT_LENGTH == 8:
+ encode_func[DictType] = encode_dict_l8
+
+
+# Entry point for the encoder
+def dumps(x):
+ r = []
+ encode_func[type(x)](x, r)
+ return ''.join(r)
+
+
+def test():
+ f1 = struct.unpack('!f', struct.pack('!f', 25.5))[0]
+ f2 = struct.unpack('!f', struct.pack('!f', 29.3))[0]
+ f3 = struct.unpack('!f', struct.pack('!f', -0.6))[0]
+ L = (({'a':15, 'bb':f1, 'ccc':f2, '':(f3,(),False,True,'')},('a',10**20),tuple(range(-100000,100000)),'b'*31,'b'*62,'b'*64,2**30,2**33,2**62,2**64,2**30,2**33,2**62,2**64,False,False, True, -1, 2, 0),)
+ assert loads(dumps(L)) == L
+
+ d = dict(zip(range(-100000,100000),range(-100000,100000)))
+ d.update({'a':20, 20:40, 40:41, f1:f2, f2:f3, f3:False, False:True,
+ True:False})
+ L = (d, {}, {5:6}, {7:7,True:8}, {9:10, 22:39, 49:50, 44: ''})
+ assert loads(dumps(L)) == L
+
+ L = ('', 'a'*10, 'a'*100, 'a'*1000, 'a'*10000, 'a'*100000, 'a'*1000000, \
+ 'a'*10000000)
+ assert loads(dumps(L)) == L
+
+ L = (u'', u'a'*10, u'a'*100, u'a'*1000, u'a'*10000, u'a'*100000, \
+ u'a'*1000000, u'a'*10000000)
+ assert loads(dumps(L)) == L
+
+ L = tuple([dict(zip(range(n),range(n))) for n in range(100)]) + ('b',)
+ assert loads(dumps(L)) == L
+ L = tuple([dict(zip(range(n),range(-n,0))) for n in range(100)]) + ('b',)
+ assert loads(dumps(L)) == L
+ L = tuple([tuple(range(n)) for n in range(100)]) + ('b',)
+ assert loads(dumps(L)) == L
+ L = tuple(['a'*n for n in range(1000)]) + ('b',)
+ assert loads(dumps(L)) == L
+ L = tuple(['a'*n for n in range(1000)]) + (None,True,None)
+ assert loads(dumps(L)) == L
+ assert loads(dumps(None)) == None
+ assert loads(dumps({None:None})) == {None:None}
+
+
+try:
+ import psyco
+ psyco.bind(dumps)
+ psyco.bind(loads)
+except ImportError:
+ pass
+
+
+if __name__ == '__main__':
+ test()
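A short usage sketch of the module's public API (values here are illustrative and assume the bases package is importable): dumps() produces a compact byte string suitable for framing on the transport and loads() reverses it, with sequences coming back as tuples, as in the module's own test().

from bases.thirdparty import jecode

msg = {'method': 'getComponentOID', 'args': (1, 2.5, u'caf\xe9'), 'ok': True}
wire = jecode.dumps(msg)            # plain byte string, safe to frame and send
assert jecode.loads(wire) == msg    # unicode survives via its UTF-8 encoding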
From: <joe...@us...> - 2009-02-26 08:13:11
Revision: 26
http://bases.svn.sourceforge.net/bases/?rev=26&view=rev
Author: joe_steeve
Date: 2009-02-26 08:13:06 +0000 (Thu, 26 Feb 2009)
Log Message:
-----------
Fixed author details
Modified Paths:
--------------
trunk/setup.py
Modified: trunk/setup.py
===================================================================
--- trunk/setup.py 2009-02-26 08:12:02 UTC (rev 25)
+++ trunk/setup.py 2009-02-26 08:13:06 UTC (rev 26)
@@ -36,9 +36,9 @@
setup(
name = "bases",
version = 0.1,
- url = 'http://https://forge.hipro.co.in/projects/bases/',
- author = 'HiPro team',
- author_email = 'jo...@hi...',
+ url = 'https://forge.hipro.co.in/projects/bases/',
+ author = 'HiPro Team',
+ author_email = 'bas...@li...',
description = 'Bases framework is a pythonic application development platform for n-tier enterprise applications.',
packages = packages,
data_files = data_files,
From: <joe...@us...> - 2009-02-26 08:12:06
Revision: 25
http://bases.svn.sourceforge.net/bases/?rev=25&view=rev
Author: joe_steeve
Date: 2009-02-26 08:12:02 +0000 (Thu, 26 Feb 2009)
Log Message:
-----------
import elementtree from an alternate location for python2.5
Modified Paths:
--------------
trunk/bases/core/component.py
Modified: trunk/bases/core/component.py
===================================================================
--- trunk/bases/core/component.py 2009-02-26 08:10:43 UTC (rev 24)
+++ trunk/bases/core/component.py 2009-02-26 08:12:02 UTC (rev 25)
@@ -1,6 +1,10 @@
# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
-import elementtree.ElementTree as ET
+try:
+ import elementtree.ElementTree as ET
+except:
+ import xml.etree.ElementTree as ET
+
from bases.errors import *
BASES_STATELESS_COMPONENT = 1
From: <joe...@us...> - 2009-02-26 08:10:47
Revision: 24
http://bases.svn.sourceforge.net/bases/?rev=24&view=rev
Author: joe_steeve
Date: 2009-02-26 08:10:43 +0000 (Thu, 26 Feb 2009)
Log Message:
-----------
Added support to handle hosted app specific exceptions (BasesAppError)
* broker.py, synclient/broker.py: Added a new MsgType,
BASES_METHOD_AERESPONSE. This is to segregate between
exceptions raised explicitly by the hosted application from
other stray exceptions.
* errors.py: Added an exception class 'BasesAppError' to be used
by hosted applications to send error conditions across to the
wire to the other side.
Modified Paths:
--------------
trunk/bases/core/broker.py
trunk/bases/errors.py
trunk/bases/synclient/broker.py
Modified: trunk/bases/core/broker.py
===================================================================
--- trunk/bases/core/broker.py 2009-02-26 08:09:06 UTC (rev 23)
+++ trunk/bases/core/broker.py 2009-02-26 08:10:43 UTC (rev 24)
@@ -34,8 +34,8 @@
BASES_METHOD_CALL = 1
BASES_METHOD_RESPONSE = 2
BASES_METHOD_ERESPONSE = 3
+BASES_METHOD_AERESPONSE = 4
-
class BasesBrokerError(Exception):
pass
@@ -217,7 +217,11 @@
print "ErrorBack: _handleInMethodErrResponse(%s, %s)" % (response,mid)
try:
MsgID, trans, ts = self.in_MethodCalls[mid]
- dmesg = [BASES_METHOD_ERESPONSE, MsgID, str(response)]
+ dmesg = None
+ if response.type == BasesAppError:
+ dmesg = [BASES_METHOD_AERESPONSE, MsgID, response.value.message]
+ else:
+ dmesg = [BASES_METHOD_ERESPONSE, MsgID, str(response)]
try:
trans.queueMsg(dmesg)
finally:
Modified: trunk/bases/errors.py
===================================================================
--- trunk/bases/errors.py 2009-02-26 08:09:06 UTC (rev 23)
+++ trunk/bases/errors.py 2009-02-26 08:10:43 UTC (rev 24)
@@ -31,3 +31,9 @@
class BasesDBITransactionFailure(BasesError):
pass
+
+class BasesAppError(Exception):
+ def __init__(self, message):
+ self.message = message
+ def __str__(self):
+ return repr(self.message)
Modified: trunk/bases/synclient/broker.py
===================================================================
--- trunk/bases/synclient/broker.py 2009-02-26 08:09:06 UTC (rev 23)
+++ trunk/bases/synclient/broker.py 2009-02-26 08:10:43 UTC (rev 24)
@@ -26,6 +26,7 @@
BASES_METHOD_CALL = 1
BASES_METHOD_RESPONSE = 2
BASES_METHOD_ERESPONSE = 3
+BASES_METHOD_AERESPONSE = 4
class BasesBroker:
def __init__(self,Debug=False):
@@ -73,6 +74,8 @@
% (r[BASES_MSG_MSGID]))
if r[BASES_MSG_MSGTYPE] == BASES_METHOD_ERESPONSE:
raise BasesComponentError(str(r[BASES_MSG_RESPONSE]))
+ if r[BASES_MSG_MSGTYPE] == BASES_METHOD_AERESPONSE:
+ raise BasesAppError(r[BASES_MSG_RESPONSE])
except:
t.conn.close()
del self.out_transports[location]
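A hedged sketch of the round trip this enables (the component, method, and proxy names are hypothetical): a hosted application raises BasesAppError on the server, the broker forwards it as a BASES_METHOD_AERESPONSE, and the synchronous client re-raises the same exception type for the caller.

from bases.errors import BasesAppError

class AccountComponent(object):                 # hypothetical hosted component
    def debit(self, mctxt, account, amount):
        if amount <= 0:
            # Application-level failure, distinct from stray exceptions.
            raise BasesAppError("amount must be positive")
        # ... normal processing ...

# Client side (via a synclient proxy, name hypothetical):
# try:
#     proxy.debit(account='A-1', amount=-5)
# except BasesAppError, e:
#     print "application error:", e.message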
From: <joe...@us...> - 2009-02-26 08:09:15
Revision: 23
http://bases.svn.sourceforge.net/bases/?rev=23&view=rev
Author: joe_steeve
Date: 2009-02-26 08:09:06 +0000 (Thu, 26 Feb 2009)
Log Message:
-----------
db.dbi has been made simpler and stupider. db.* fairly working now.
* bases/util/util.py: refactored loadModule to load only a module
object. Added a loadKlass function to load a Klass object.
* bases/db/pgsql.py: simplified the code a lot. removed the custom
cursor and transaction objects. Transactions are connection level
now. At connection time, the type of transaction can be
chosen. This is simple and stupid and serves the
purpose.
(_getExceptions): db-api2.0 exception classes from the db-adapter
are sent back to the caller to trap exceptions.
* bases/db/dbi.py: many db-adapters themselves do connection
pooling. So does not make sense in us implementing it again. Just
adds un-necessary code. Removed it. Kept it simple and
stupid.
(createSIConnection): Added to create 'serializable isolated'
transaction connection.
(createACConnection): Added to create 'auto commit' transaction
connection.
* src/bases-server: refactoring bases.dbi to bases.db
* bases/services/objrepo.py (BasesObjectRepository.addObjectToDB):
modified to return the oid after adding object to objdb.
Modified Paths:
--------------
trunk/ChangeLog
trunk/bases/db/dbi.py
trunk/bases/db/pgsql.py
trunk/bases/services/directory.py
trunk/bases/services/objrepo.py
trunk/bases/synclient/services.py
trunk/bases/util/util.py
trunk/src/bases-server
Modified: trunk/ChangeLog
===================================================================
--- trunk/ChangeLog 2009-02-26 08:06:50 UTC (rev 22)
+++ trunk/ChangeLog 2009-02-26 08:09:06 UTC (rev 23)
@@ -1,3 +1,36 @@
+2008-03-23 Joe Steeve <joe...@gm...>
+
+ * bases/util/util.py: refactored loadModule to load only a module
+ object. Added a loadKlass function to load a Klass object.
+
+ * bases/db/pgsql.py: simplified the code a lot. removed the custom
+ cursor and transaction objects. Transactions are connection level
+ now. At connection time, the type of transaction can be
+ chosen. This is simple and stupid and serves the purpose.
+ (_getExceptions): db-api2.0 exception classes from the db-adapter
+ are sent back to the caller to trap exceptions.
+
+ * bases/db/dbi.py: many db-adapters themselvs do connection
+ pooling. So does not make sense in us implementing it again. Just
+ adds un-necessary code. Removed it. Kept it simple and stupid.
+ (createSIConnection): Added to create 'serializable isolated'
+ transaction connection.
+ (createACConnection): Added to create 'auto commit' transaction
+ connection.
+
+ * src/bases-server: refactoring bases.dbi to bases.db
+
+ * bases/services/objrepo.py (BasesObjectRepository.addObjectToDB):
+ modified to retur the oid after adding object to objdb.
+
+2008-03-20 Joe Steeve <joe...@gm...>
+
+ * bases/db/dbi.py: removed connection pooling. db adapters should
+ do that. only supporting connection leak handling.
+
+ * bases/db/pgsql.py: made the code a lot simpler. Just set the
+ default transaction level now.
+
2008-03-17 Joe Steeve <joe...@gm...>
* bases/dbi/dbi.py (cleanupConnectionLeak): added empty function
Modified: trunk/bases/db/dbi.py
===================================================================
--- trunk/bases/db/dbi.py 2009-02-26 08:06:50 UTC (rev 22)
+++ trunk/bases/db/dbi.py 2009-02-26 08:09:06 UTC (rev 23)
@@ -2,105 +2,66 @@
from twisted.python import log
from datetime import datetime, timedelta
-import thread
+import thread, traceback
from bases.errors import *
from bases.util import util
-shafunc = None
-try:
- import hashlib
- shafunc = hashlib.sha512
-except:
- import sha
- shafunc = sha.sha
-
-
-MAX_USAGE_COUNT = 10
-MAX_PERPOOL_CONNS = 10
-
# A map of DBs we support
-supportedDBs = {'pgsql':"bases.dbi.pgsql.dbi_Connection"}
+supportedDBs = {'pgsql':"bases.db.pgsql"}
-freeCons = {}
-freeCons_lock = thread.allocate_lock()
activeCons = {}
activeCons_lock = thread.allocate_lock()
-def createConnection (dbtype, dbname, user, passwd, host="localhost",
+
+def createSIConnection (dbtype, dbname, user, passwd, host='localhost',
+ port=None, debug=False):
+ dbmod, conn, ex = _createConnection (dbtype, dbname, user, passwd,
+ host='localhost', port=port,
+ debug=debug)
+ dbmod.setTransSerializable(conn)
+ return conn, ex
+
+
+def createACConnection (dbtype, dbname, user, passwd, host='localhost',
+ port=None, debug=False):
+ dbmod, conn, ex = _createConnection (dbtype, dbname, user, passwd,
+ host='localhost', port=port,
+ debug=debug)
+ dbmod.setTransAutoCommit(conn)
+ return conn, ex
+
+
+def _createConnection (dbtype, dbname, user, passwd, host="localhost",
port=None, debug=False):
+ """
+ Create a connection of the requested db, and return it. Also keep
+ track of the connections we are giving out. We may need to check
+ for connection leaks at sometime later.
+ """
if dbtype not in supportedDBs:
raise BasesDBIError("%s not supported" % (dbtype))
- # Create an ID for the connection-type
- s = "%s:%s:%s:%s:%s" % (dbtype, dbname, user, host, str(port))
- conn_id = shafunc(s).digest()
- # Check whether there are free-connections in this group
- freeCons_lock.acquire()
- if conn_id in freeCons:
- c = freeCons[conn_id].pop(0)
- if len(freeCons[conn_id]) == 0:
- del(freeCons[conn_id])
- freeCons_lock.release()
- # Put book-keeping information
- c.bases_BookKeeping['usage_count'] += 1
- c.bases_BookKeeping['timestamp'] = utcnow.timestamp()
- # Add object to active-connections list
- activeCons_lock.acquire()
- if conn_id not in activeCons:
- activeCons[conn_id] = []
- activeCons[conn_id].append(c)
- activeCons_lock.release()
- else:
- freeCons_lock.release()
-
- # No free connection available.
- cKlass = util.loadModule(supportedDBs[dbtype])
- c = cKlass(dbname, user, passwd, host=host, port=port)
- c.bases_BookKeeping = {'usage_count':0,
- 'timestamp':utcnow.timestamp(),
- 'debug':c.traceback.extract_stack(limit=2).pop(0),
- 'conn_id':conn_id}
+ # Create the connection
+ dbmod = util.loadModule(supportedDBs[dbtype])
+ c, ex = dbmod.createConnection(dbname, user, passwd, host=host, port=port,
+ debug=debug)
+ cinfo = {'timestamp':datetime.utcnow(), 'debug':traceback.extract_stack()}
activeCons_lock.acquire()
- if conn_id not in activeCons:
- activeCons[conn_id] = []
- activeCons[conn_id].append(c)
+ activeCons[str(id(c))] = cinfo
activeCons_lock.release()
- return c
+ return dbmod, c, ex
-
-def destroyConnection (conn):
- conn_id = conn.bases_BookKeeping['conn_id']
+def destroyConnection (conn):
+ """
+ Destroy a given connection.
+ """
activeCons_lock.acquire()
- if conn_id not in activeCons:
- activeCons.release()
- raise BasesDBIError("bad connection object (id=%s)" % conn_id)
- activeCons[conn_id].remove(conn)
+ del(activeCons[str(id(conn))])
activeCons_lock.release()
-
- if conn.bases_BookKeeping['usage_count'] > MAX_USAGE_COUNT:
- conn.close
- del(conn)
- return
- conn.flushStale()
-
- freeCons_lock.acquire()
- if conn_id not in freeCons:
- freeCons[conn_id] = [conn,]
- freeCons_lock.release()
- return
- else:
- if len(freeCons[conn_id]) > MAX_PERPOOL_CONNS:
- freeCons_lock.release()
- conn.close
- del(conn)
- return
- else:
- freeCons[conn_id].append(conn)
- freeCons_lock.release()
- return
def cleanupConnectionLeak(timeout):
pass
+
Modified: trunk/bases/db/pgsql.py
===================================================================
--- trunk/bases/db/pgsql.py 2009-02-26 08:06:50 UTC (rev 22)
+++ trunk/bases/db/pgsql.py 2009-02-26 08:09:06 UTC (rev 23)
@@ -1,157 +1,46 @@
# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
import psycopg2 as dbmod
+from psycopg2.extensions import ISOLATION_LEVEL_SERIALIZABLE
+from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
+
from bases.errors import *
-class dbi_Connection:
- def __init__(self, dbname, user, passwd, host='localhost',
- port=None, debug=False):
- if port is None:
- port = 5432
- self._dsn = "dbname='%s' user='%s' host='%s' password='%s' port='%s'"%\
- (dbname, user, passwd, host, passwd, port)
- self._conn = dbmod.connect(self._dsn)
- self._debug = debug
- self._trans_pending = False
- self._trans_list = []
- self._curs_list = []
+def createConnection(dbname, user, passwd, host="localhost",
+ port=None, debug=False):
+ if port is None:
+ port = '5432'
-
- def openCursor(self):
- if self._trans_pending is False:
- c = dbi_Cursor(self, self._conn, debug=self._debug)
- self._curs_list.append(c)
- return c
- else:
- raise BasesDBIError("Transaction in progress")
+ dsn = "dbname='%s' user='%s' host='%s' password='%s' port='%s'" % \
+ (dbname, user, host, passwd, port)
+ conn = dbmod.connect(dsn)
+ ex = _getExceptions(dbmod)
+ return conn, ex
- def closeCursor(self,curs):
- if curs not in self._curs_list:
- raise BasesDBIError("Unknown cursor object")
- curs.close()
- self._curs_list.remove(curs)
+def setTransAutoCommit(conn):
+ conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
+ return conn
- def beginTransaction(self):
- """
- Create a transaction object. A transaction object should do
- the necessary to start and execute transaction reliably.
- """
- if self._trans_pending is False:
- self._trans_pending = True
- t = dbi_Transaction(self._conn, debug=self._debug)
- self._trans_list.append(t)
- return t
- else:
- raise BasesDBIError("Transaction in progress")
+def setTransSerializable(conn):
+ conn.set_isolation_level(ISOLATION_LEVEL_SERIALIZABLE)
+ return conn
- def endTransaction(self, trans):
- if trans not in self._trans_list:
- raise BasesDBIError("Unknown transaction object")
- # TBD: Should catch transaction failures here
- self._conn.commit()
- trans.close()
- self._trans_list.remove(trans)
- self._trans_pending = False
-
- def close(self):
- self._conn.close()
- self._conn = None
+class dbExceptions:
+ pass
- def flushStale(self):
- pass
-
-
-class dbi_Transaction:
- def __init__(self, conn, debug=False):
- self._debug = debug
- self._curs = conn.cursor()
-
- def execute(self, *args, **kwargs):
- """
- Should check here for quoting
- """
- self._curs.execute(*args, **kwargs)
- self.rowcount = self._curs.rowcount
+def _getExceptions(dbmod):
+ ex = dbExceptions()
+ ex.Error = getattr(dbmod, "Error")
+ ex.Warning = getattr(dbmod, "Warning")
+ ex.DataError = getattr(dbmod, "DataError")
+ ex.DatabaseError = getattr(dbmod, "DatabaseError")
+ ex.ProgrammingError = getattr(dbmod, "ProgrammingError")
+ ex.IntegrityError = getattr(dbmod, "IntegrityError")
+ ex.InterfaceError = getattr(dbmod, "InterfaceError")
+ ex.InternalError = getattr(dbmod, "InternalError")
+ ex.NotSupportedError = getattr(dbmod, "NotSupportedError")
+ ex.OperationalError = getattr(dbmod, "OperationalError")
+ return ex
- def fetchone(self):
- r = self._curs.fetchone()
- self.rowcount = self._curs.rowcount
- return r
-
- def fetchall(self):
- r = self._curs.fetchall()
- self.rowcount = self._curs.rowcount
- return r
-
- def close(self):
- self._curs.close()
- self._curs = None
- self.execute = self.deadMethod
- self.fetchone = self.deadMethod
- self.fetchall = self.deadMethod
-
- def deadMethod(self, *args, **kwargs):
- raise BasesDBIError("Cannot use a dead transaction object")
-
- def __del__(self):
- if self._curs is not None:
- self._curs.close()
- self._curs = None
-
-
-class dbi_Cursor:
- """
- Ideally this should do a 'declare cursor' with a cursor
- name. Currently we are using a db-api-2.0 object so, this cannot
- be done. At a future stage we should be using the 'libpq'
- directly.
- """
-
- def __init__(self, dbi_con, conn, debug=False):
- self._dbi_con = dbi_con
- self._curs = conn.cursor()
- self._debug = debug
-
- def execute(self, *args, **kwargs):
- """
- """
- if self._dbi_con._trans_pending is False:
- self._curs.execute(*args, **kwargs)
- self.rowcount = self._curs.rowcount
- else:
- raise BasesDBIError("A transaction is pending")
-
- def fetchone(self):
- if self._dbi_con._trans_pending is False:
- r = self._curs.fetchone()
- self.rowcount = self._curs.rowcount
- return r
- else:
- raise BasesDBIError("A transaction is pending")
-
- def fetchall(self):
- if self._dbi_con._trans_pending is False:
- r = self._curs.fetchall()
- self.rowcount = self._curs.rowcount
- return r
- else:
- raise BasesDBIError("A transaction is pending")
-
- def close(self):
- self._curs.close()
- self._curs = None
- self._dbi_con = None
- self.execute = self.deadMethod
- self.fetchone = self.deadMethod
- self.fetchall = self.deadMethod
-
- def deadMethod(self, *args, **kwargs):
- raise BasesDBIError("Cannot use a dead cursor object")
-
- def __del__(self):
- if self._curs is not None:
- self._curs.close()
- self._curs = None
- self._dbi_con = None
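The hunk above introduces module-level helpers in pgsql.py that switch a psycopg2 connection's isolation level. A minimal usage sketch follows; the autocommit helper's name is cut off in the hunk, so setTransAutocommit below is an assumption, and the DSN is a placeholder.

    import psycopg2
    from psycopg2.extensions import (ISOLATION_LEVEL_AUTOCOMMIT,
                                     ISOLATION_LEVEL_SERIALIZABLE)

    def setTransAutocommit(conn):          # assumed name; its def line is cut off above
        conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        return conn

    def setTransSerializable(conn):        # as added in this revision
        conn.set_isolation_level(ISOLATION_LEVEL_SERIALIZABLE)
        return conn

    conn = psycopg2.connect("dbname=appdb user=appuser host=localhost")
    setTransSerializable(conn)             # serializable isolation for transactional work
    curs = conn.cursor()
    curs.execute("SELECT 1")
    print curs.fetchone()
    conn.commit()
    conn.close()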
Modified: trunk/bases/services/directory.py
===================================================================
--- trunk/bases/services/directory.py 2009-02-26 08:06:50 UTC (rev 22)
+++ trunk/bases/services/directory.py 2009-02-26 08:09:06 UTC (rev 23)
@@ -7,6 +7,7 @@
import thread
from bases import common
from bases.core import component, proxy
+from bases.errors import *
shafunc = None
try:
@@ -186,7 +187,7 @@
# Code to lookup the ComponentName with other bases-servers
# needed here. When we get a suitable information. We should
# give it back and keep a note of it.
- raise BasesComponentError("Component not found")
+ raise BasesComponentError("Component not found %s" % ComponentName)
def getComponentOID(self, mctxt, ComponentName):
Modified: trunk/bases/services/objrepo.py
===================================================================
--- trunk/bases/services/objrepo.py 2009-02-26 08:06:50 UTC (rev 22)
+++ trunk/bases/services/objrepo.py 2009-02-26 08:09:06 UTC (rev 23)
@@ -175,7 +175,8 @@
if stateless is True:
self.ObjDBStateless[compid] = (oid, datetime.utcnow())
-
+ return oid
+
def addStaticObjectToDB (self, obj, ownerid, compid, oid, stateless=True):
"""
Modified: trunk/bases/synclient/services.py
===================================================================
--- trunk/bases/synclient/services.py 2009-02-26 08:06:50 UTC (rev 22)
+++ trunk/bases/synclient/services.py 2009-02-26 08:09:06 UTC (rev 23)
@@ -78,7 +78,7 @@
if oid is None:
raise BasesComponentError("Could not get OID for %s from %s" %\
- (ComponentName, str(location)))
+ (ComponentName, str(ci['location'])))
return oid
Modified: trunk/bases/util/util.py
===================================================================
--- trunk/bases/util/util.py 2009-02-26 08:06:50 UTC (rev 22)
+++ trunk/bases/util/util.py 2009-02-26 08:09:06 UTC (rev 23)
@@ -1,6 +1,6 @@
# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
-def loadModule(ModName):
+def loadKlass(KlassName):
"""
Given a complete path to a 'class', I give back the class object
(not the instance).
@@ -9,7 +9,7 @@
and fetch the class object.
"""
- mlist = ModName.split('.')
+ mlist = KlassName.split('.')
klass = mlist.pop()
if len(mlist)>0:
m = __import__('.'.join(mlist))
@@ -21,3 +21,17 @@
klass = globals()[klass]
return klass
+
+
+def loadModule(ModName):
+ """
+ Given a complete path to a 'module', I give back the module
+ object.
+ """
+ mlist = ModName.split('.')
+ if len(mlist)>0:
+ m = __import__('.'.join(mlist))
+ mlist.pop(0)
+ for mname in mlist:
+ m = getattr(m, mname)
+ return m
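For reference, a small usage sketch of the two loaders after this change (the dotted paths are illustrative, not prescribed by the repository):

    from bases.util import util

    # loadKlass: dotted path ending in a class name -> the class object
    klass = util.loadKlass("bases.core.component.BasesComponent_Stateless")

    # loadModule: dotted path to a module -> the module object itself
    component = util.loadModule("bases.core.component")
    assert component.BasesComponent_Stateless is klass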
Modified: trunk/src/bases-server
===================================================================
--- trunk/src/bases-server 2009-02-26 08:06:50 UTC (rev 22)
+++ trunk/src/bases-server 2009-02-26 08:09:06 UTC (rev 23)
@@ -8,7 +8,7 @@
from bases.core import broker
from bases import common
-from bases.dbi import dbi
+from bases.db import dbi
from bases.errors import *
stdout = sys.stdout
From: <joe...@us...> - 2009-02-26 08:06:55
Revision: 22
http://bases.svn.sourceforge.net/bases/?rev=22&view=rev
Author: joe_steeve
Date: 2009-02-26 08:06:50 +0000 (Thu, 26 Feb 2009)
Log Message:
-----------
Rename the dbi module to db, since the name is ambiguous with the dbi package.
Added Paths:
-----------
trunk/bases/db/
trunk/bases/db/__init__.py
trunk/bases/db/dbi.py
trunk/bases/db/pgsql.py
Removed Paths:
-------------
trunk/bases/dbi/__init__.py
trunk/bases/dbi/dbi.py
trunk/bases/dbi/pgsql.py
Copied: trunk/bases/db/dbi.py (from rev 21, trunk/bases/dbi/dbi.py)
===================================================================
--- trunk/bases/db/dbi.py (rev 0)
+++ trunk/bases/db/dbi.py 2009-02-26 08:06:50 UTC (rev 22)
@@ -0,0 +1,106 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
+
+from twisted.python import log
+from datetime import datetime, timedelta
+import thread
+
+from bases.errors import *
+from bases.util import util
+
+shafunc = None
+try:
+ import hashlib
+ shafunc = hashlib.sha512
+except:
+ import sha
+ shafunc = sha.sha
+
+
+MAX_USAGE_COUNT = 10
+MAX_PERPOOL_CONNS = 10
+
+# A map of DBs we support
+supportedDBs = {'pgsql':"bases.dbi.pgsql.dbi_Connection"}
+
+freeCons = {}
+freeCons_lock = thread.allocate_lock()
+activeCons = {}
+activeCons_lock = thread.allocate_lock()
+
+def createConnection (dbtype, dbname, user, passwd, host="localhost",
+ port=None, debug=False):
+ if dbtype not in supportedDBs:
+ raise BasesDBIError("%s not supported" % (dbtype))
+ # Create an ID for the connection-type
+ s = "%s:%s:%s:%s:%s" % (dbtype, dbname, user, host, str(port))
+ conn_id = shafunc(s).digest()
+
+ # Check whether there are free-connections in this group
+ freeCons_lock.acquire()
+ if conn_id in freeCons:
+ c = freeCons[conn_id].pop(0)
+ if len(freeCons[conn_id]) == 0:
+ del(freeCons[conn_id])
+ freeCons_lock.release()
+ # Put book-keeping information
+ c.bases_BookKeeping['usage_count'] += 1
+ c.bases_BookKeeping['timestamp'] = utcnow.timestamp()
+ # Add object to active-connections list
+ activeCons_lock.acquire()
+ if conn_id not in activeCons:
+ activeCons[conn_id] = []
+ activeCons[conn_id].append(c)
+ activeCons_lock.release()
+ else:
+ freeCons_lock.release()
+
+ # No free connection available.
+ cKlass = util.loadModule(supportedDBs[dbtype])
+ c = cKlass(dbname, user, passwd, host=host, port=port)
+ c.bases_BookKeeping = {'usage_count':0,
+ 'timestamp':utcnow.timestamp(),
+ 'debug':c.traceback.extract_stack(limit=2).pop(0),
+ 'conn_id':conn_id}
+ activeCons_lock.acquire()
+ if conn_id not in activeCons:
+ activeCons[conn_id] = []
+ activeCons[conn_id].append(c)
+ activeCons_lock.release()
+ return c
+
+
+def destroyConnection (conn):
+ conn_id = conn.bases_BookKeeping['conn_id']
+
+ activeCons_lock.acquire()
+ if conn_id not in activeCons:
+ activeCons.release()
+ raise BasesDBIError("bad connection object (id=%s)" % conn_id)
+ activeCons[conn_id].remove(conn)
+ activeCons_lock.release()
+
+ if conn.bases_BookKeeping['usage_count'] > MAX_USAGE_COUNT:
+ conn.close
+ del(conn)
+ return
+
+ conn.flushStale()
+
+ freeCons_lock.acquire()
+ if conn_id not in freeCons:
+ freeCons[conn_id] = [conn,]
+ freeCons_lock.release()
+ return
+ else:
+ if len(freeCons[conn_id]) > MAX_PERPOOL_CONNS:
+ freeCons_lock.release()
+ conn.close
+ del(conn)
+ return
+ else:
+ freeCons[conn_id].append(conn)
+ freeCons_lock.release()
+ return
+
+def cleanupConnectionLeak(timeout):
+ pass
Copied: trunk/bases/db/pgsql.py (from rev 21, trunk/bases/dbi/pgsql.py)
===================================================================
--- trunk/bases/db/pgsql.py (rev 0)
+++ trunk/bases/db/pgsql.py 2009-02-26 08:06:50 UTC (rev 22)
@@ -0,0 +1,157 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
+
+import psycopg2 as dbmod
+from bases.errors import *
+
+class dbi_Connection:
+ def __init__(self, dbname, user, passwd, host='localhost',
+ port=None, debug=False):
+ if port is None:
+ port = 5432
+ self._dsn = "dbname='%s' user='%s' host='%s' password='%s' port='%s'"%\
+ (dbname, user, passwd, host, passwd, port)
+ self._conn = dbmod.connect(self._dsn)
+ self._debug = debug
+ self._trans_pending = False
+ self._trans_list = []
+ self._curs_list = []
+
+
+ def openCursor(self):
+ if self._trans_pending is False:
+ c = dbi_Cursor(self, self._conn, debug=self._debug)
+ self._curs_list.append(c)
+ return c
+ else:
+ raise BasesDBIError("Transaction in progress")
+
+
+ def closeCursor(self,curs):
+ if curs not in self._curs_list:
+ raise BasesDBIError("Unknown cursor object")
+ curs.close()
+ self._curs_list.remove(curs)
+
+
+ def beginTransaction(self):
+ """
+ Create a transaction object. A transaction object should do
+ the necessary to start and execute transaction reliably.
+ """
+ if self._trans_pending is False:
+ self._trans_pending = True
+ t = dbi_Transaction(self._conn, debug=self._debug)
+ self._trans_list.append(t)
+ return t
+ else:
+ raise BasesDBIError("Transaction in progress")
+
+ def endTransaction(self, trans):
+ if trans not in self._trans_list:
+ raise BasesDBIError("Unknown transaction object")
+ # TBD: Should catch transaction failures here
+ self._conn.commit()
+ trans.close()
+ self._trans_list.remove(trans)
+ self._trans_pending = False
+
+ def close(self):
+ self._conn.close()
+ self._conn = None
+
+ def flushStale(self):
+ pass
+
+
+class dbi_Transaction:
+ def __init__(self, conn, debug=False):
+ self._debug = debug
+ self._curs = conn.cursor()
+
+ def execute(self, *args, **kwargs):
+ """
+ Should check here for quoting
+ """
+ self._curs.execute(*args, **kwargs)
+ self.rowcount = self._curs.rowcount
+
+ def fetchone(self):
+ r = self._curs.fetchone()
+ self.rowcount = self._curs.rowcount
+ return r
+
+ def fetchall(self):
+ r = self._curs.fetchall()
+ self.rowcount = self._curs.rowcount
+ return r
+
+ def close(self):
+ self._curs.close()
+ self._curs = None
+ self.execute = self.deadMethod
+ self.fetchone = self.deadMethod
+ self.fetchall = self.deadMethod
+
+ def deadMethod(self, *args, **kwargs):
+ raise BasesDBIError("Cannot use a dead transaction object")
+
+ def __del__(self):
+ if self._curs is not None:
+ self._curs.close()
+ self._curs = None
+
+
+class dbi_Cursor:
+ """
+ Ideally this should do a 'declare cursor' with a cursor
+ name. Currently we are using a db-api-2.0 object so, this cannot
+ be done. At a future stage we should be using the 'libpq'
+ directly.
+ """
+
+ def __init__(self, dbi_con, conn, debug=False):
+ self._dbi_con = dbi_con
+ self._curs = conn.cursor()
+ self._debug = debug
+
+ def execute(self, *args, **kwargs):
+ """
+ """
+ if self._dbi_con._trans_pending is False:
+ self._curs.execute(*args, **kwargs)
+ self.rowcount = self._curs.rowcount
+ else:
+ raise BasesDBIError("A transaction is pending")
+
+ def fetchone(self):
+ if self._dbi_con._trans_pending is False:
+ r = self._curs.fetchone()
+ self.rowcount = self._curs.rowcount
+ return r
+ else:
+ raise BasesDBIError("A transaction is pending")
+
+ def fetchall(self):
+ if self._dbi_con._trans_pending is False:
+ r = self._curs.fetchall()
+ self.rowcount = self._curs.rowcount
+ return r
+ else:
+ raise BasesDBIError("A transaction is pending")
+
+ def close(self):
+ self._curs.close()
+ self._curs = None
+ self._dbi_con = None
+ self.execute = self.deadMethod
+ self.fetchone = self.deadMethod
+ self.fetchall = self.deadMethod
+
+ def deadMethod(self, *args, **kwargs):
+ raise BasesDBIError("Cannot use a dead cursor object")
+
+ def __del__(self):
+ if self._curs is not None:
+ self._curs.close()
+ self._curs = None
+ self._dbi_con = None
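For reference, a short sketch of the cursor/transaction protocol the adapter above implements (the table, credentials and values are illustrative placeholders; an open transaction and an open cursor are mutually exclusive):

    from bases.db.pgsql import dbi_Connection

    conn = dbi_Connection("appdb", "appuser", "secret")

    t = conn.beginTransaction()
    t.execute("UPDATE accounts SET balance = balance - 10 WHERE id = %s", (1,))
    t.execute("UPDATE accounts SET balance = balance + 10 WHERE id = %s", (2,))
    conn.endTransaction(t)          # commits; 't' becomes a dead object afterwards

    curs = conn.openCursor()        # raises BasesDBIError while a transaction is pending
    curs.execute("SELECT balance FROM accounts WHERE id = %s", (1,))
    print curs.fetchone()
    conn.closeCursor(curs)
    conn.close()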
Deleted: trunk/bases/dbi/dbi.py
===================================================================
--- trunk/bases/dbi/dbi.py 2009-02-26 08:05:08 UTC (rev 21)
+++ trunk/bases/dbi/dbi.py 2009-02-26 08:06:50 UTC (rev 22)
@@ -1,106 +0,0 @@
-# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
-
-from twisted.python import log
-from datetime import datetime, timedelta
-import thread
-
-from bases.errors import *
-from bases.util import util
-
-shafunc = None
-try:
- import hashlib
- shafunc = hashlib.sha512
-except:
- import sha
- shafunc = sha.sha
-
-
-MAX_USAGE_COUNT = 10
-MAX_PERPOOL_CONNS = 10
-
-# A map of DBs we support
-supportedDBs = {'pgsql':"bases.dbi.pgsql.dbi_Connection"}
-
-freeCons = {}
-freeCons_lock = thread.allocate_lock()
-activeCons = {}
-activeCons_lock = thread.allocate_lock()
-
-def createConnection (dbtype, dbname, user, passwd, host="localhost",
- port=None, debug=False):
- if dbtype not in supportedDBs:
- raise BasesDBIError("%s not supported" % (dbtype))
- # Create an ID for the connection-type
- s = "%s:%s:%s:%s:%s" % (dbtype, dbname, user, host, str(port))
- conn_id = shafunc(s).digest()
-
- # Check whether there are free-connections in this group
- freeCons_lock.acquire()
- if conn_id in freeCons:
- c = freeCons[conn_id].pop(0)
- if len(freeCons[conn_id]) == 0:
- del(freeCons[conn_id])
- freeCons_lock.release()
- # Put book-keeping information
- c.bases_BookKeeping['usage_count'] += 1
- c.bases_BookKeeping['timestamp'] = utcnow.timestamp()
- # Add object to active-connections list
- activeCons_lock.acquire()
- if conn_id not in activeCons:
- activeCons[conn_id] = []
- activeCons[conn_id].append(c)
- activeCons_lock.release()
- else:
- freeCons_lock.release()
-
- # No free connection available.
- cKlass = util.loadModule(supportedDBs[dbtype])
- c = cKlass(dbname, user, passwd, host=host, port=port)
- c.bases_BookKeeping = {'usage_count':0,
- 'timestamp':utcnow.timestamp(),
- 'debug':c.traceback.extract_stack(limit=2).pop(0),
- 'conn_id':conn_id}
- activeCons_lock.acquire()
- if conn_id not in activeCons:
- activeCons[conn_id] = []
- activeCons[conn_id].append(c)
- activeCons_lock.release()
- return c
-
-
-def destroyConnection (conn):
- conn_id = conn.bases_BookKeeping['conn_id']
-
- activeCons_lock.acquire()
- if conn_id not in activeCons:
- activeCons.release()
- raise BasesDBIError("bad connection object (id=%s)" % conn_id)
- activeCons[conn_id].remove(conn)
- activeCons_lock.release()
-
- if conn.bases_BookKeeping['usage_count'] > MAX_USAGE_COUNT:
- conn.close
- del(conn)
- return
-
- conn.flushStale()
-
- freeCons_lock.acquire()
- if conn_id not in freeCons:
- freeCons[conn_id] = [conn,]
- freeCons_lock.release()
- return
- else:
- if len(freeCons[conn_id]) > MAX_PERPOOL_CONNS:
- freeCons_lock.release()
- conn.close
- del(conn)
- return
- else:
- freeCons[conn_id].append(conn)
- freeCons_lock.release()
- return
-
-def cleanupConnectionLeak(timeout):
- pass
Deleted: trunk/bases/dbi/pgsql.py
===================================================================
--- trunk/bases/dbi/pgsql.py 2009-02-26 08:05:08 UTC (rev 21)
+++ trunk/bases/dbi/pgsql.py 2009-02-26 08:06:50 UTC (rev 22)
@@ -1,157 +0,0 @@
-# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
-
-import psycopg2 as dbmod
-from bases.errors import *
-
-class dbi_Connection:
- def __init__(self, dbname, user, passwd, host='localhost',
- port=None, debug=False):
- if port is None:
- port = 5432
- self._dsn = "dbname='%s' user='%s' host='%s' password='%s' port='%s'"%\
- (dbname, user, passwd, host, passwd, port)
- self._conn = dbmod.connect(self._dsn)
- self._debug = debug
- self._trans_pending = False
- self._trans_list = []
- self._curs_list = []
-
-
- def openCursor(self):
- if self._trans_pending is False:
- c = dbi_Cursor(self, self._conn, debug=self._debug)
- self._curs_list.append(c)
- return c
- else:
- raise BasesDBIError("Transaction in progress")
-
-
- def closeCursor(self,curs):
- if curs not in self._curs_list:
- raise BasesDBIError("Unknown cursor object")
- curs.close()
- self._curs_list.remove(curs)
-
-
- def beginTransaction(self):
- """
- Create a transaction object. A transaction object should do
- the necessary to start and execute transaction reliably.
- """
- if self._trans_pending is False:
- self._trans_pending = True
- t = dbi_Transaction(self._conn, debug=self._debug)
- self._trans_list.append(t)
- return t
- else:
- raise BasesDBIError("Transaction in progress")
-
- def endTransaction(self, trans):
- if trans not in self._trans_list:
- raise BasesDBIError("Unknown transaction object")
- # TBD: Should catch transaction failures here
- self._conn.commit()
- trans.close()
- self._trans_list.remove(trans)
- self._trans_pending = False
-
- def close(self):
- self._conn.close()
- self._conn = None
-
- def flushStale(self):
- pass
-
-
-class dbi_Transaction:
- def __init__(self, conn, debug=False):
- self._debug = debug
- self._curs = conn.cursor()
-
- def execute(self, *args, **kwargs):
- """
- Should check here for quoting
- """
- self._curs.execute(*args, **kwargs)
- self.rowcount = self._curs.rowcount
-
- def fetchone(self):
- r = self._curs.fetchone()
- self.rowcount = self._curs.rowcount
- return r
-
- def fetchall(self):
- r = self._curs.fetchall()
- self.rowcount = self._curs.rowcount
- return r
-
- def close(self):
- self._curs.close()
- self._curs = None
- self.execute = self.deadMethod
- self.fetchone = self.deadMethod
- self.fetchall = self.deadMethod
-
- def deadMethod(self, *args, **kwargs):
- raise BasesDBIError("Cannot use a dead transaction object")
-
- def __del__(self):
- if self._curs is not None:
- self._curs.close()
- self._curs = None
-
-
-class dbi_Cursor:
- """
- Ideally this should do a 'declare cursor' with a cursor
- name. Currently we are using a db-api-2.0 object so, this cannot
- be done. At a future stage we should be using the 'libpq'
- directly.
- """
-
- def __init__(self, dbi_con, conn, debug=False):
- self._dbi_con = dbi_con
- self._curs = conn.cursor()
- self._debug = debug
-
- def execute(self, *args, **kwargs):
- """
- """
- if self._dbi_con._trans_pending is False:
- self._curs.execute(*args, **kwargs)
- self.rowcount = self._curs.rowcount
- else:
- raise BasesDBIError("A transaction is pending")
-
- def fetchone(self):
- if self._dbi_con._trans_pending is False:
- r = self._curs.fetchone()
- self.rowcount = self._curs.rowcount
- return r
- else:
- raise BasesDBIError("A transaction is pending")
-
- def fetchall(self):
- if self._dbi_con._trans_pending is False:
- r = self._curs.fetchall()
- self.rowcount = self._curs.rowcount
- return r
- else:
- raise BasesDBIError("A transaction is pending")
-
- def close(self):
- self._curs.close()
- self._curs = None
- self._dbi_con = None
- self.execute = self.deadMethod
- self.fetchone = self.deadMethod
- self.fetchall = self.deadMethod
-
- def deadMethod(self, *args, **kwargs):
- raise BasesDBIError("Cannot use a dead cursor object")
-
- def __del__(self):
- if self._curs is not None:
- self._curs.close()
- self._curs = None
- self._dbi_con = None
From: <joe...@us...> - 2009-02-26 08:05:10
Revision: 21
http://bases.svn.sourceforge.net/bases/?rev=21&view=rev
Author: joe_steeve
Date: 2009-02-26 08:05:08 +0000 (Thu, 26 Feb 2009)
Log Message:
-----------
Changes in dbi broke bases-server; fixed them.
* bases/dbi/dbi.py (cleanupConnectionLeak): added empty function to
avoid panic
Modified Paths:
--------------
trunk/ChangeLog
trunk/bases/dbi/dbi.py
trunk/src/bases-server
Modified: trunk/ChangeLog
===================================================================
--- trunk/ChangeLog 2009-02-26 08:03:42 UTC (rev 20)
+++ trunk/ChangeLog 2009-02-26 08:05:08 UTC (rev 21)
@@ -1,5 +1,8 @@
2008-03-17 Joe Steeve <joe...@gm...>
+ * bases/dbi/dbi.py (cleanupConnectionLeak): added empty function
+ to avoid panic
+
* bases/util/util.py (loadModule): to load classes dynamically
* bases/dbi/pgsql.py: adapter code for pgsql
Modified: trunk/bases/dbi/dbi.py
===================================================================
--- trunk/bases/dbi/dbi.py 2009-02-26 08:03:42 UTC (rev 20)
+++ trunk/bases/dbi/dbi.py 2009-02-26 08:05:08 UTC (rev 21)
@@ -43,7 +43,7 @@
del(freeCons[conn_id])
freeCons_lock.release()
# Put book-keeping information
- c.bases_BookKeeping['usage_count'] ++
+ c.bases_BookKeeping['usage_count'] += 1
c.bases_BookKeeping['timestamp'] = utcnow.timestamp()
# Add object to active-connections list
activeCons_lock.acquire()
@@ -101,3 +101,6 @@
freeCons[conn_id].append(conn)
freeCons_lock.release()
return
+
+def cleanupConnectionLeak(timeout):
+ pass
Modified: trunk/src/bases-server
===================================================================
--- trunk/src/bases-server 2009-02-26 08:03:42 UTC (rev 20)
+++ trunk/src/bases-server 2009-02-26 08:05:08 UTC (rev 21)
@@ -8,7 +8,7 @@
from bases.core import broker
from bases import common
-from bases.util import dbi
+from bases.dbi import dbi
from bases.errors import *
stdout = sys.stdout
From: <joe...@us...> - 2009-02-26 08:03:46
Revision: 20
http://bases.svn.sourceforge.net/bases/?rev=20&view=rev
Author: joe_steeve
Date: 2009-02-26 08:03:42 +0000 (Thu, 26 Feb 2009)
Log Message:
-----------
dynamic module-klass loader
* bases/util/util.py (loadModule): to load classes dynamically
Modified Paths:
--------------
trunk/ChangeLog
Added Paths:
-----------
trunk/bases/util/util.py
Modified: trunk/ChangeLog
===================================================================
--- trunk/ChangeLog 2009-02-26 08:02:24 UTC (rev 19)
+++ trunk/ChangeLog 2009-02-26 08:03:42 UTC (rev 20)
@@ -1,5 +1,7 @@
2008-03-17 Joe Steeve <joe...@gm...>
+ * bases/util/util.py (loadModule): to load classes dynamically
+
* bases/dbi/pgsql.py: adapter code for pgsql
* bases/dbi/dbi.py (activeCons_lock): added simple db-specific
Added: trunk/bases/util/util.py
===================================================================
--- trunk/bases/util/util.py (rev 0)
+++ trunk/bases/util/util.py 2009-02-26 08:03:42 UTC (rev 20)
@@ -0,0 +1,23 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
+
+def loadModule(ModName):
+ """
+ Given a complete path to a 'class', I give back the class object
+ (not the instance).
+
+ Supposing I am given u.a.b.KoolClass, I load the required modules
+ and fetch the class object.
+ """
+
+ mlist = ModName.split('.')
+ klass = mlist.pop()
+ if len(mlist)>0:
+ m = __import__('.'.join(mlist))
+ mlist.pop(0)
+ for mname in mlist:
+ m = getattr(m, mname)
+ klass = getattr(m, klass)
+ else:
+ klass = globals()[klass]
+
+ return klass
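A minimal usage sketch of this loader (the dotted path is an illustrative example):

    from bases.util import util

    # Given 'u.a.b.KoolClass'-style input, loadModule() imports the modules
    # leading up to the final component and returns the class object itself.
    klass = util.loadModule("bases.core.component.BasesComponent_Stateless")
    # instantiate separately, with whatever arguments the class expects:
    # obj = klass(...)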
From: <joe...@us...> - 2009-02-26 08:02:26
Revision: 19
http://bases.svn.sourceforge.net/bases/?rev=19&view=rev
Author: joe_steeve
Date: 2009-02-26 08:02:24 +0000 (Thu, 26 Feb 2009)
Log Message:
-----------
Untabified the sources and added an Emacs-specific mode-line string
Modified Paths:
--------------
trunk/bases/app.py
trunk/bases/common.py
trunk/bases/core/broker.py
trunk/bases/core/component.py
trunk/bases/core/proxy.py
trunk/bases/core/transports/bbtrans.py
trunk/bases/core/transports/transports.py
trunk/bases/services/directory.py
trunk/bases/services/objrepo.py
trunk/bases/synclient/app.py
trunk/bases/synclient/broker.py
trunk/bases/synclient/component.py
trunk/bases/synclient/services.py
trunk/bases/synclient/transports/bbtrans.py
trunk/bases/synclient/transports/transports.py
trunk/bases/thirdparty/rencode.py
trunk/src/bases-server
trunk/test/test-client.py
trunk/test/test-echo/crepo/TestComponent.py
trunk/test/test-echo/test.py
trunk/test/test1.py
Modified: trunk/bases/app.py
===================================================================
--- trunk/bases/app.py 2009-02-26 07:59:13 UTC (rev 18)
+++ trunk/bases/app.py 2009-02-26 08:02:24 UTC (rev 19)
@@ -1,3 +1,4 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
from twisted.internet import protocol
from twisted.internet import reactor
Modified: trunk/bases/common.py
===================================================================
--- trunk/bases/common.py 2009-02-26 07:59:13 UTC (rev 18)
+++ trunk/bases/common.py 2009-02-26 08:02:24 UTC (rev 19)
@@ -1,4 +1,4 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
-
BASES_OID_COMPONENTDIR = "1"
Modified: trunk/bases/core/broker.py
===================================================================
--- trunk/bases/core/broker.py 2009-02-26 07:59:13 UTC (rev 18)
+++ trunk/bases/core/broker.py 2009-02-26 08:02:24 UTC (rev 19)
@@ -1,3 +1,5 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
+
"""The bases object broker.
This handles messages sent to remote bases-components. It keeps track
Modified: trunk/bases/core/component.py
===================================================================
--- trunk/bases/core/component.py 2009-02-26 07:59:13 UTC (rev 18)
+++ trunk/bases/core/component.py 2009-02-26 08:02:24 UTC (rev 19)
@@ -1,3 +1,4 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
import elementtree.ElementTree as ET
from bases.errors import *
Modified: trunk/bases/core/proxy.py
===================================================================
--- trunk/bases/core/proxy.py 2009-02-26 07:59:13 UTC (rev 18)
+++ trunk/bases/core/proxy.py 2009-02-26 08:02:24 UTC (rev 19)
@@ -1,3 +1,5 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
+
"""
"""
Modified: trunk/bases/core/transports/bbtrans.py
===================================================================
--- trunk/bases/core/transports/bbtrans.py 2009-02-26 07:59:13 UTC (rev 18)
+++ trunk/bases/core/transports/bbtrans.py 2009-02-26 08:02:24 UTC (rev 19)
@@ -1,3 +1,4 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
from bases.thirdparty import rencode
from twisted.internet import protocol
Modified: trunk/bases/core/transports/transports.py
===================================================================
--- trunk/bases/core/transports/transports.py 2009-02-26 07:59:13 UTC (rev 18)
+++ trunk/bases/core/transports/transports.py 2009-02-26 08:02:24 UTC (rev 19)
@@ -1,3 +1,4 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
from twisted.internet import protocol
from twisted.python import log
Modified: trunk/bases/services/directory.py
===================================================================
--- trunk/bases/services/directory.py 2009-02-26 07:59:13 UTC (rev 18)
+++ trunk/bases/services/directory.py 2009-02-26 08:02:24 UTC (rev 19)
@@ -1,3 +1,5 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
+
"""Provides the component-directory service for a bases-server"""
from twisted.internet import defer
Modified: trunk/bases/services/objrepo.py
===================================================================
--- trunk/bases/services/objrepo.py 2009-02-26 07:59:13 UTC (rev 18)
+++ trunk/bases/services/objrepo.py 2009-02-26 08:02:24 UTC (rev 19)
@@ -1,3 +1,5 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
+
"""Provides the object-repository service for a bases-server """
shafunc = None
Modified: trunk/bases/synclient/app.py
===================================================================
--- trunk/bases/synclient/app.py 2009-02-26 07:59:13 UTC (rev 18)
+++ trunk/bases/synclient/app.py 2009-02-26 08:02:24 UTC (rev 19)
@@ -1,3 +1,4 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
from bases.synclient import services
from bases.synclient.broker import BasesBroker
Modified: trunk/bases/synclient/broker.py
===================================================================
--- trunk/bases/synclient/broker.py 2009-02-26 07:59:13 UTC (rev 18)
+++ trunk/bases/synclient/broker.py 2009-02-26 08:02:24 UTC (rev 19)
@@ -1,3 +1,4 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
from bases.errors import *
from datetime import datetime
Modified: trunk/bases/synclient/component.py
===================================================================
--- trunk/bases/synclient/component.py 2009-02-26 07:59:13 UTC (rev 18)
+++ trunk/bases/synclient/component.py 2009-02-26 08:02:24 UTC (rev 19)
@@ -1,3 +1,4 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
class BasesComponentServerProxy:
"""A class to create sychronous proxies to remote BasesComponents"""
Modified: trunk/bases/synclient/services.py
===================================================================
--- trunk/bases/synclient/services.py 2009-02-26 07:59:13 UTC (rev 18)
+++ trunk/bases/synclient/services.py 2009-02-26 08:02:24 UTC (rev 19)
@@ -1,3 +1,4 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
from bases.errors import *
from bases import common
Modified: trunk/bases/synclient/transports/bbtrans.py
===================================================================
--- trunk/bases/synclient/transports/bbtrans.py 2009-02-26 07:59:13 UTC (rev 18)
+++ trunk/bases/synclient/transports/bbtrans.py 2009-02-26 08:02:24 UTC (rev 19)
@@ -1,3 +1,4 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
from datetime import datetime, timedelta
import struct, traceback
Modified: trunk/bases/synclient/transports/transports.py
===================================================================
--- trunk/bases/synclient/transports/transports.py 2009-02-26 07:59:13 UTC (rev 18)
+++ trunk/bases/synclient/transports/transports.py 2009-02-26 08:02:24 UTC (rev 19)
@@ -1,3 +1,4 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
from bases.errors import *
import socket
@@ -3,59 +4,59 @@
def SSLVerificationCB(conn, cert, errnum, depth, ok):
- #TBD: Server certificate verificaton should be done here.
- return ok
-
+ #TBD: Server certificate verificaton should be done here.
+ return ok
+
def createBBTransTransport(*args, **kwargs):
- from bases.synclient.transports import bbtrans
- return bbtrans._BBTransProtocol()
+ from bases.synclient.transports import bbtrans
+ return bbtrans._BBTransProtocol()
basesTransportsMap = \
- {"client": \
- {"bbtrans": createBBTransTransport},
- "secure": \
- {"sbbtrans":"bbtrans"}
- }
+ {"client": \
+ {"bbtrans": createBBTransTransport},
+ "secure": \
+ {"sbbtrans":"bbtrans"}
+ }
def getTransport(location, SSLKey=None, SSLCert=None, VerifyPeer=False,
- CA=None):
- """
- Given a location tuple, I create a transport object. If the
- requested protocol is a secure protocol, I use the key, cert and
- ca provided in SSLKey, SSLCert, CA to create a SSL connection to
- the remote location.
- """
+ CA=None):
+ """
+ Given a location tuple, I create a transport object. If the
+ requested protocol is a secure protocol, I use the key, cert and
+ ca provided in SSLKey, SSLCert, CA to create a SSL connection to
+ the remote location.
+ """
- # Sanity check
- secure = False
- proto = location[0]
- if location[0] in basesTransportsMap['secure']:
- if SSLKey == None or SSLCert == None:
- raise BasesTransportError("SSL details required for %s" % \
- (location[0]))
- if VerifyPeer is True and CA is None:
- raise BasesTransportError("CA needed to verify peer")
- secure = True
- proto = basesTransportsMap['secure'][location[0]]
- if proto not in basesTransportsMap['client']:
- raise BasesTransportError("Unknown protocol %s" % (proto))
- elif location[0] not in basesTransportsMap['client']:
- raise BasesTransportError("Unknown protocol %s" % (location[0]))
+ # Sanity check
+ secure = False
+ proto = location[0]
+ if location[0] in basesTransportsMap['secure']:
+ if SSLKey == None or SSLCert == None:
+ raise BasesTransportError("SSL details required for %s" % \
+ (location[0]))
+ if VerifyPeer is True and CA is None:
+ raise BasesTransportError("CA needed to verify peer")
+ secure = True
+ proto = basesTransportsMap['secure'][location[0]]
+ if proto not in basesTransportsMap['client']:
+ raise BasesTransportError("Unknown protocol %s" % (proto))
+ elif location[0] not in basesTransportsMap['client']:
+ raise BasesTransportError("Unknown protocol %s" % (location[0]))
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- if secure is True:
- ctx = SSL.Context(SSL.SSLv23_METHOD)
- ctx.set_verify(SSL.VERIFY_PEER, SSLVerificationCB)
- ctx.use_privatekey_file (self._SSLKey)
- ctx.use_certificate_file(self._SSLCert)
- ctx.load_verify_locations(self._CA)
- sock = SSL.Connection(ctx, sock)
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ if secure is True:
+ ctx = SSL.Context(SSL.SSLv23_METHOD)
+ ctx.set_verify(SSL.VERIFY_PEER, SSLVerificationCB)
+ ctx.use_privatekey_file (self._SSLKey)
+ ctx.use_certificate_file(self._SSLCert)
+ ctx.load_verify_locations(self._CA)
+ sock = SSL.Connection(ctx, sock)
- sock.connect((location[1], location[2]))
- t = basesTransportsMap['client'][proto]()
- t.conn = sock
- t.location = location
- t.connectionMade()
- return t
+ sock.connect((location[1], location[2]))
+ t = basesTransportsMap['client'][proto]()
+ t.conn = sock
+ t.location = location
+ t.connectionMade()
+ return t
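A brief usage sketch of getTransport() as documented above (addresses and file names are placeholders; the secure branch additionally expects pyOpenSSL's SSL module to be available):

    from bases.synclient.transports import transports

    # plain bbtrans: location is (protocol, host, port)
    t = transports.getTransport(("bbtrans", "127.0.0.1", 8001))

    # secure variant: key/cert are mandatory, CA only when verifying the peer
    # t = transports.getTransport(("sbbtrans", "10.0.0.5", 8001),
    #                             SSLKey="client.key", SSLCert="client.crt",
    #                             VerifyPeer=True, CA="ca.crt")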
Modified: trunk/bases/thirdparty/rencode.py
===================================================================
--- trunk/bases/thirdparty/rencode.py 2009-02-26 07:59:13 UTC (rev 18)
+++ trunk/bases/thirdparty/rencode.py 2009-02-26 08:02:24 UTC (rev 19)
@@ -1,3 +1,4 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
"""
rencode -- Web safe object pickling/unpickling.
Modified: trunk/src/bases-server
===================================================================
--- trunk/src/bases-server 2009-02-26 07:59:13 UTC (rev 18)
+++ trunk/src/bases-server 2009-02-26 08:02:24 UTC (rev 19)
@@ -1,4 +1,5 @@
#!/usr/bin/env python
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
import sys, time
import pprint, os
Modified: trunk/test/test-client.py
===================================================================
--- trunk/test/test-client.py 2009-02-26 07:59:13 UTC (rev 18)
+++ trunk/test/test-client.py 2009-02-26 08:02:24 UTC (rev 19)
@@ -1,3 +1,5 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
+
import sys
import getopt
Modified: trunk/test/test-echo/crepo/TestComponent.py
===================================================================
--- trunk/test/test-echo/crepo/TestComponent.py 2009-02-26 07:59:13 UTC (rev 18)
+++ trunk/test/test-echo/crepo/TestComponent.py 2009-02-26 08:02:24 UTC (rev 19)
@@ -1,3 +1,5 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
+
from bases.core.component import *
class TestComponentClass(BasesComponent_Stateless):
Modified: trunk/test/test-echo/test.py
===================================================================
--- trunk/test/test-echo/test.py 2009-02-26 07:59:13 UTC (rev 18)
+++ trunk/test/test-echo/test.py 2009-02-26 08:02:24 UTC (rev 19)
@@ -1,3 +1,4 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
import thread, time
from datetime import datetime
Modified: trunk/test/test1.py
===================================================================
--- trunk/test/test1.py 2009-02-26 07:59:13 UTC (rev 18)
+++ trunk/test/test1.py 2009-02-26 08:02:24 UTC (rev 19)
@@ -1,3 +1,5 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
+
"""
I am a test case to test the bbtrans protocol on the network.
@@ -16,131 +18,131 @@
class TestSFactory(protocol.Factory):
- def __init__(self, server=True):
- self._ServerMode = server
-
- def buildProtocol(self, addr):
- bb = bbtrans.BasesBinaryTransport()
- bb.factory = self
- bb.broker = self
- bb._server_side = self._ServerMode
- bb.Debug = True
- return bb
+ def __init__(self, server=True):
+ self._ServerMode = server
+
+ def buildProtocol(self, addr):
+ bb = bbtrans.BasesBinaryTransport()
+ bb.factory = self
+ bb.broker = self
+ bb._server_side = self._ServerMode
+ bb.Debug = True
+ return bb
- def processMessage(self, m):
- print "Incoming Msg:", m
+ def processMessage(self, m):
+ print "Incoming Msg:", m
class TestCFactory(protocol.ClientFactory):
- def __init__(self, server=True):
- self._ServerMode = server
-
- def buildProtocol(self, addr):
- bb = bbtrans.BasesBinaryTransport()
- bb.factory = self
- bb.broker = self
- bb._server_side = self._ServerMode
- bb.Debug = True
- return bb
-
-
+ def __init__(self, server=True):
+ self._ServerMode = server
+
+ def buildProtocol(self, addr):
+ bb = bbtrans.BasesBinaryTransport()
+ bb.factory = self
+ bb.broker = self
+ bb._server_side = self._ServerMode
+ bb.Debug = True
+ return bb
+
+
def test(ip, port, server=True):
-
- if server is True:
- f = TestSFactory()
- reactor.listenTCP (port, f, interface=ip)
- reactor.run()
- else:
- f = TestCFactory(server=False)
- reactor.connectTCP (ip, port, f)
- reactor.run()
+
+ if server is True:
+ f = TestSFactory()
+ reactor.listenTCP (port, f, interface=ip)
+ reactor.run()
+ else:
+ f = TestCFactory(server=False)
+ reactor.connectTCP (ip, port, f)
+ reactor.run()
def usage():
- print "Accepted options are:"
- print " -h | --help \t\tPrint this"
- print " -d | --debug \t\tEnable debug"
- print " -i | --listen-ip \tIP to listen on"
- print " -p | --listen-port \tPort to listen on"
- print " -s | --server \t\tRun in server mode"
- print " -c | --client \t\tRun in client mode"
- print " -I | --server-ip \tIP of server (when in client mode)"
- print " -P | --server-port \tPort of server (when in client mode)"
-
+ print "Accepted options are:"
+ print " -h | --help \t\tPrint this"
+ print " -d | --debug \t\tEnable debug"
+ print " -i | --listen-ip \tIP to listen on"
+ print " -p | --listen-port \tPort to listen on"
+ print " -s | --server \t\tRun in server mode"
+ print " -c | --client \t\tRun in client mode"
+ print " -I | --server-ip \tIP of server (when in client mode)"
+ print " -P | --server-port \tPort of server (when in client mode)"
+
if __name__ == "__main__":
- try:
- opts,args = getopt.getopt(sys.argv[1:], "hdsci:p:I:P:",
- ["help", "debug", "server", "client",
- "listen-ip", "listen-port",
- "server-ip", "server-port"])
- except getopt.GetoptError:
- usage()
- sys.exit(2)
+ try:
+ opts,args = getopt.getopt(sys.argv[1:], "hdsci:p:I:P:",
+ ["help", "debug", "server", "client",
+ "listen-ip", "listen-port",
+ "server-ip", "server-port"])
+ except getopt.GetoptError:
+ usage()
+ sys.exit(2)
- debug = False
- listen_ip = "127.0.0.1"
- listen_port = 8001
- server = False
- client = False
- server_ip = None
- server_port = None
-
- for o, a in opts:
- if o in ("-h", "--help"):
- usage()
- sys.exit()
- elif o in ("-d", "--debug"):
- debug = True
- elif o in ("-i", "--listen-ip"):
- listen_ip = a
- elif o in ("-p", "--listen-port"):
- listen_port = int(a)
- elif o in ("-s", "--server"):
- if client is True:
- print "Can be either client or server. Not both."
- sys.exit(2)
- server = True
- elif o in ("-c", "--client"):
- if server is True:
- print "Can be either client or server. Not both."
- sys.exit(2)
- client = True
- elif o in ("-I", "--server-ip"):
- server_ip = a
- elif o in ("-P", "--server-port"):
- server_port = int(a)
+ debug = False
+ listen_ip = "127.0.0.1"
+ listen_port = 8001
+ server = False
+ client = False
+ server_ip = None
+ server_port = None
+
+ for o, a in opts:
+ if o in ("-h", "--help"):
+ usage()
+ sys.exit()
+ elif o in ("-d", "--debug"):
+ debug = True
+ elif o in ("-i", "--listen-ip"):
+ listen_ip = a
+ elif o in ("-p", "--listen-port"):
+ listen_port = int(a)
+ elif o in ("-s", "--server"):
+ if client is True:
+ print "Can be either client or server. Not both."
+ sys.exit(2)
+ server = True
+ elif o in ("-c", "--client"):
+ if server is True:
+ print "Can be either client or server. Not both."
+ sys.exit(2)
+ client = True
+ elif o in ("-I", "--server-ip"):
+ server_ip = a
+ elif o in ("-P", "--server-port"):
+ server_port = int(a)
- if server is True:
- print "Working in 'server mode'"
- if server_ip is not None:
- print "WARNING: ignoring the 'server-ip'"
- if server_port is not None:
- print "WARNING: ignoring the 'server-port'"
- if listen_ip is None:
- print "ERROR: 'listen-ip' not given"
- sys.exit(2)
- if listen_port is None:
- print "ERROR: 'listen-port' not given"
- sys.exit(2)
-
- elif client is True:
- print "Working in 'client mode'"
- if server_ip is None:
- print "ERROR: 'server-ip' not given"
- sys.exit(2)
- if server_port is None:
- print "ERROR: 'server-port' not given"
- sys.exit(2)
- if listen_ip is not None:
- print "WARNING: ignoring the 'listen-ip'"
- if listen_port is not None:
- print "WARNING: ignoring the 'listen-port'"
-
- else:
- print "Neither 'client-mode' nor 'server-mode' set"
- sys.exit(2)
+ if server is True:
+ print "Working in 'server mode'"
+ if server_ip is not None:
+ print "WARNING: ignoring the 'server-ip'"
+ if server_port is not None:
+ print "WARNING: ignoring the 'server-port'"
+ if listen_ip is None:
+ print "ERROR: 'listen-ip' not given"
+ sys.exit(2)
+ if listen_port is None:
+ print "ERROR: 'listen-port' not given"
+ sys.exit(2)
+
+ elif client is True:
+ print "Working in 'client mode'"
+ if server_ip is None:
+ print "ERROR: 'server-ip' not given"
+ sys.exit(2)
+ if server_port is None:
+ print "ERROR: 'server-port' not given"
+ sys.exit(2)
+ if listen_ip is not None:
+ print "WARNING: ignoring the 'listen-ip'"
+ if listen_port is not None:
+ print "WARNING: ignoring the 'listen-port'"
+
+ else:
+ print "Neither 'client-mode' nor 'server-mode' set"
+ sys.exit(2)
- if client is True:
- test(server_ip, server_port, server=False)
- else:
- test(listen_ip, listen_port, server=True)
+ if client is True:
+ test(server_ip, server_port, server=False)
+ else:
+ test(listen_ip, listen_port, server=True)
From: <joe...@us...> - 2009-02-26 07:59:14
Revision: 18
http://bases.svn.sourceforge.net/bases/?rev=18&view=rev
Author: joe_steeve
Date: 2009-02-26 07:59:13 +0000 (Thu, 26 Feb 2009)
Log Message:
-----------
Support for transaction-aware db connections
* bases/dbi/pgsql.py: adapter code for pgsql
* bases/dbi/dbi.py (activeCons_lock): added simple db-specific
connection pooling support (usage sketch below).
* src/bases-server (LoadConfig): Added a section for 'hosted app
specific information'
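A rough sketch of how the pooled interface added here is intended to be used (database name, credentials and the query are placeholders):

    from bases.dbi import dbi

    # createConnection() hands back a pooled connection keyed on
    # (dbtype, dbname, user, host, port) when one is free; otherwise it
    # instantiates the adapter class registered in supportedDBs ('pgsql').
    conn = dbi.createConnection("pgsql", "appdb", "appuser", "secret",
                                host="localhost", port=5432)
    curs = conn.openCursor()
    curs.execute("SELECT 1")
    print curs.fetchone()
    conn.closeCursor(curs)

    # destroyConnection() parks the connection in the free pool, or closes
    # it once MAX_USAGE_COUNT / MAX_PERPOOL_CONNS are exceeded.
    dbi.destroyConnection(conn)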
Modified Paths:
--------------
trunk/ChangeLog
trunk/bases/errors.py
trunk/src/bases-server
Added Paths:
-----------
trunk/bases/dbi/
trunk/bases/dbi/__init__.py
trunk/bases/dbi/dbi.py
trunk/bases/dbi/pgsql.py
Removed Paths:
-------------
trunk/bases/util/dbi.py
Modified: trunk/ChangeLog
===================================================================
--- trunk/ChangeLog 2009-02-26 07:57:23 UTC (rev 17)
+++ trunk/ChangeLog 2009-02-26 07:59:13 UTC (rev 18)
@@ -1,3 +1,13 @@
+2008-03-17 Joe Steeve <joe...@gm...>
+
+ * bases/dbi/pgsql.py: adapter code for pgsql
+
+ * bases/dbi/dbi.py (activeCons_lock): added simple db-specific
+ connection pooling support.
+
+ * src/bases-server (LoadConfig): Added a section for 'hosted app
+ specific information'
+
2008-02-09 Joe Steeve <joe...@gm...>
* src/bases-server (sanitizeConfigOptions): changed the
Added: trunk/bases/dbi/dbi.py
===================================================================
--- trunk/bases/dbi/dbi.py (rev 0)
+++ trunk/bases/dbi/dbi.py 2009-02-26 07:59:13 UTC (rev 18)
@@ -0,0 +1,103 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
+
+from twisted.python import log
+from datetime import datetime, timedelta
+import thread
+
+from bases.errors import *
+from bases.util import util
+
+shafunc = None
+try:
+ import hashlib
+ shafunc = hashlib.sha512
+except:
+ import sha
+ shafunc = sha.sha
+
+
+MAX_USAGE_COUNT = 10
+MAX_PERPOOL_CONNS = 10
+
+# A map of DBs we support
+supportedDBs = {'pgsql':"bases.dbi.pgsql.dbi_Connection"}
+
+freeCons = {}
+freeCons_lock = thread.allocate_lock()
+activeCons = {}
+activeCons_lock = thread.allocate_lock()
+
+def createConnection (dbtype, dbname, user, passwd, host="localhost",
+ port=None, debug=False):
+ if dbtype not in supportedDBs:
+ raise BasesDBIError("%s not supported" % (dbtype))
+ # Create an ID for the connection-type
+ s = "%s:%s:%s:%s:%s" % (dbtype, dbname, user, host, str(port))
+ conn_id = shafunc(s).digest()
+
+ # Check whether there are free-connections in this group
+ freeCons_lock.acquire()
+ if conn_id in freeCons:
+ c = freeCons[conn_id].pop(0)
+ if len(freeCons[conn_id]) == 0:
+ del(freeCons[conn_id])
+ freeCons_lock.release()
+ # Put book-keeping information
+ c.bases_BookKeeping['usage_count'] ++
+ c.bases_BookKeeping['timestamp'] = utcnow.timestamp()
+ # Add object to active-connections list
+ activeCons_lock.acquire()
+ if conn_id not in activeCons:
+ activeCons[conn_id] = []
+ activeCons[conn_id].append(c)
+ activeCons_lock.release()
+ else:
+ freeCons_lock.release()
+
+ # No free connection available.
+ cKlass = util.loadModule(supportedDBs[dbtype])
+ c = cKlass(dbname, user, passwd, host=host, port=port)
+ c.bases_BookKeeping = {'usage_count':0,
+ 'timestamp':utcnow.timestamp(),
+ 'debug':c.traceback.extract_stack(limit=2).pop(0),
+ 'conn_id':conn_id}
+ activeCons_lock.acquire()
+ if conn_id not in activeCons:
+ activeCons[conn_id] = []
+ activeCons[conn_id].append(c)
+ activeCons_lock.release()
+ return c
+
+
+def destroyConnection (conn):
+ conn_id = conn.bases_BookKeeping['conn_id']
+
+ activeCons_lock.acquire()
+ if conn_id not in activeCons:
+ activeCons.release()
+ raise BasesDBIError("bad connection object (id=%s)" % conn_id)
+ activeCons[conn_id].remove(conn)
+ activeCons_lock.release()
+
+ if conn.bases_BookKeeping['usage_count'] > MAX_USAGE_COUNT:
+ conn.close
+ del(conn)
+ return
+
+ conn.flushStale()
+
+ freeCons_lock.acquire()
+ if conn_id not in freeCons:
+ freeCons[conn_id] = [conn,]
+ freeCons_lock.release()
+ return
+ else:
+ if len(freeCons[conn_id]) > MAX_PERPOOL_CONNS:
+ freeCons_lock.release()
+ conn.close
+ del(conn)
+ return
+ else:
+ freeCons[conn_id].append(conn)
+ freeCons_lock.release()
+ return
Added: trunk/bases/dbi/pgsql.py
===================================================================
--- trunk/bases/dbi/pgsql.py (rev 0)
+++ trunk/bases/dbi/pgsql.py 2009-02-26 07:59:13 UTC (rev 18)
@@ -0,0 +1,157 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
+
+import psycopg2 as dbmod
+from bases.errors import *
+
+class dbi_Connection:
+ def __init__(self, dbname, user, passwd, host='localhost',
+ port=None, debug=False):
+ if port is None:
+ port = 5432
+ self._dsn = "dbname='%s' user='%s' host='%s' password='%s' port='%s'"%\
+ (dbname, user, passwd, host, passwd, port)
+ self._conn = dbmod.connect(self._dsn)
+ self._debug = debug
+ self._trans_pending = False
+ self._trans_list = []
+ self._curs_list = []
+
+
+ def openCursor(self):
+ if self._trans_pending is False:
+ c = dbi_Cursor(self, self._conn, debug=self._debug)
+ self._curs_list.append(c)
+ return c
+ else:
+ raise BasesDBIError("Transaction in progress")
+
+
+ def closeCursor(self,curs):
+ if curs not in self._curs_list:
+ raise BasesDBIError("Unknown cursor object")
+ curs.close()
+ self._curs_list.remove(curs)
+
+
+ def beginTransaction(self):
+ """
+ Create a transaction object. A transaction object should do
+ the necessary to start and execute transaction reliably.
+ """
+ if self._trans_pending is False:
+ self._trans_pending = True
+ t = dbi_Transaction(self._conn, debug=self._debug)
+ self._trans_list.append(t)
+ return t
+ else:
+ raise BasesDBIError("Transaction in progress")
+
+ def endTransaction(self, trans):
+ if trans not in self._trans_list:
+ raise BasesDBIError("Unknown transaction object")
+ # TBD: Should catch transaction failures here
+ self._conn.commit()
+ trans.close()
+ self._trans_list.remove(trans)
+ self._trans_pending = False
+
+ def close(self):
+ self._conn.close()
+ self._conn = None
+
+ def flushStale(self):
+ pass
+
+
+class dbi_Transaction:
+ def __init__(self, conn, debug=False):
+ self._debug = debug
+ self._curs = conn.cursor()
+
+ def execute(self, *args, **kwargs):
+ """
+ Should check here for quoting
+ """
+ self._curs.execute(*args, **kwargs)
+ self.rowcount = self._curs.rowcount
+
+ def fetchone(self):
+ r = self._curs.fetchone()
+ self.rowcount = self._curs.rowcount
+ return r
+
+ def fetchall(self):
+ r = self._curs.fetchall()
+ self.rowcount = self._curs.rowcount
+ return r
+
+ def close(self):
+ self._curs.close()
+ self._curs = None
+ self.execute = self.deadMethod
+ self.fetchone = self.deadMethod
+ self.fetchall = self.deadMethod
+
+ def deadMethod(self, *args, **kwargs):
+ raise BasesDBIError("Cannot use a dead transaction object")
+
+ def __del__(self):
+ if self._curs is not None:
+ self._curs.close()
+ self._curs = None
+
+
+class dbi_Cursor:
+ """
+ Ideally this should do a 'declare cursor' with a cursor
+ name. Currently we are using a db-api-2.0 object so, this cannot
+ be done. At a future stage we should be using the 'libpq'
+ directly.
+ """
+
+ def __init__(self, dbi_con, conn, debug=False):
+ self._dbi_con = dbi_con
+ self._curs = conn.cursor()
+ self._debug = debug
+
+ def execute(self, *args, **kwargs):
+ """
+ """
+ if self._dbi_con._trans_pending is False:
+ self._curs.execute(*args, **kwargs)
+ self.rowcount = self._curs.rowcount
+ else:
+ raise BasesDBIError("A transaction is pending")
+
+ def fetchone(self):
+ if self._dbi_con._trans_pending is False:
+ r = self._curs.fetchone()
+ self.rowcount = self._curs.rowcount
+ return r
+ else:
+ raise BasesDBIError("A transaction is pending")
+
+ def fetchall(self):
+ if self._dbi_con._trans_pending is False:
+ r = self._curs.fetchall()
+ self.rowcount = self._curs.rowcount
+ return r
+ else:
+ raise BasesDBIError("A transaction is pending")
+
+ def close(self):
+ self._curs.close()
+ self._curs = None
+ self._dbi_con = None
+ self.execute = self.deadMethod
+ self.fetchone = self.deadMethod
+ self.fetchall = self.deadMethod
+
+ def deadMethod(self, *args, **kwargs):
+ raise BasesDBIError("Cannot use a dead cursor object")
+
+ def __del__(self):
+ if self._curs is not None:
+ self._curs.close()
+ self._curs = None
+ self._dbi_con = None
Modified: trunk/bases/errors.py
===================================================================
--- trunk/bases/errors.py 2009-02-26 07:57:23 UTC (rev 17)
+++ trunk/bases/errors.py 2009-02-26 07:59:13 UTC (rev 18)
@@ -1,29 +1,33 @@
+# -*- mode: python; indent-tabs-mode: nil; tab-width: 4; -*-
class BasesError(Exception):
- pass
+ pass
## Errors raised by classes in bases.core
class BasesBrokerError(BasesError):
- pass
+ pass
class BasesTransportError(BasesError):
- pass
+ pass
class BasesComponentError(BasesError):
- pass
+ pass
class BasesInterfaceError(BasesError):
- pass
+ pass
class BasesServerProxyError(BasesError):
- pass
+ pass
class BasesClientProxyError(BasesError):
- pass
+ pass
+class BasesObjRepoError(BasesError):
+ pass
+
class BasesDBIError(BasesError):
pass
-class BasesObjRepoError(BasesError):
+class BasesDBITransactionFailure(BasesError):
pass
Deleted: trunk/bases/util/dbi.py
===================================================================
--- trunk/bases/util/dbi.py 2009-02-26 07:57:23 UTC (rev 17)
+++ trunk/bases/util/dbi.py 2009-02-26 07:59:13 UTC (rev 18)
@@ -1,70 +0,0 @@
-
-from twisted.python import log
-from datetime import datetime, timedelta
-import thread
-
-conn_list = {}
-conn_list_lock = thread.allocate_lock()
-supportedDBs = ['pgdb']
-
-def createConnection (dbtype, connstr):
- global conn_list, conn_list_lock, supportedDBs
-
- if dbtype not in supportedDBs:
- raise BasesDBIError("%s not supported" % (dbtype))
-
- conn = None
- try:
- dbmod = __import__(dbtype)
- if(dbmod.threadsafety < 1):
- raise BasesDBIError("The mentioned adapter is not thread-safe")
- conn = dbmod.connect(connstr)
- except BasesError, e:
- raise(e)
- except Exception, e:
- raise BasesDBIError(str(e))
-
- conn_list_lock.acquire()
- conn_list[id(conn)] = datetime.utcnow()
- # Encode the creation location in the connection object for debug
- # purpose.
- conn.tn_creation_location = traceback.extract_stack(limit=2).pop(0)
- lock.release()
- return conn
-
-
-def destroyConnection (conn):
- global conn_list, conn_list_lock
-
- conn_list_lock.acquire()
- try:
- del(conn_list[id(conn)])
- finally:
- conn_list_lock.release()
- conn.close()
- del(conn)
-
-
-def cleanupConnectionLeak(maxTime):
- """
- This method closes connections which are older than maxTime
- seconds
- """
-
- global conn_list, conn_list_lock
- log.msg(3, "Checking for leaked DB connections")
-
- conn_list_lock.acquire()
- try:
- for c in conn_list:
- tnow = datetime.utcnow()
- td = timedelta(seconds=maxTime)
- if (tnow - conn_list[c]) > td:
- log.msg(1,"DB connection Leak at: %s" % \
- [c.tn_creation_location])
-
- del(conn_list[id(c)])
- c.close()
- del(c)
- finally:
- conn_list_lock.release()
Modified: trunk/src/bases-server
===================================================================
--- trunk/src/bases-server 2009-02-26 07:57:23 UTC (rev 17)
+++ trunk/src/bases-server 2009-02-26 07:59:13 UTC (rev 18)
@@ -97,7 +97,7 @@
conf["service_desc"] = {}
for sec in cp.sections():
name = string.lower(sec).strip()
- if name == "globals":
+ if name == "globals" or name == "app":
for opt in cp.options(sec):
conf[name+"."+string.lower(opt)] = \
string.strip(cp.get(sec,opt))
From: <joe...@us...> - 2009-02-26 07:57:32
Revision: 17
http://bases.svn.sourceforge.net/bases/?rev=17&view=rev
Author: joe_steeve
Date: 2009-02-26 07:57:23 +0000 (Thu, 26 Feb 2009)
Log Message:
-----------
Improve setup.py script.
* setup.py:
(): Improve the script in order to detect the packages and modules
automatically. This eliminates the need to update the setup.py for
each package or module introduced, unless otherwise required.
Modified Paths:
--------------
trunk/setup.py
Modified: trunk/setup.py
===================================================================
--- trunk/setup.py 2009-02-26 07:56:28 UTC (rev 16)
+++ trunk/setup.py 2009-02-26 07:57:23 UTC (rev 17)
@@ -1,18 +1,46 @@
from distutils.core import setup
+import os
+import sys
-packages = ['bases/core', 'bases/core/transports', 'bases/services',
- 'bases/synclient', 'bases/synclient/transports',
- 'bases/thirdparty', 'bases/util']
+def fullsplit(path, result=None):
+ """
+ Split a pathname into components (the opposite of os.path.join) in a
+ platform-neutral way.
+ """
+ if result is None:
+ result = []
+ head, tail = os.path.split(path)
+ if head == '':
+ return [tail] + result
+ if head == path:
+ return result
+ return fullsplit(head, [tail] + result)
+# Compile the list of packages available, because distutils doesn't have
+# an easy way to do this.
+packages, data_files = [], []
+root_dir = os.path.dirname(__file__)
+if root_dir != '':
+ os.chdir(root_dir)
+bases_dir = 'bases'
+
+for dirpath, dirnames, filenames in os.walk(bases_dir):
+ # Ignore dirnames that start with '.'
+ for i, dirname in enumerate(dirnames):
+ if dirname.startswith('.'): del dirnames[i]
+ if '__init__.py' in filenames:
+ packages.append('.'.join(fullsplit(dirpath)))
+ elif filenames:
+ data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]])
+
setup(
- name='bases',
- version='0.1',
+ name = "bases",
+ version = 0.1,
url = 'http://https://forge.hipro.co.in/projects/bases/',
author = 'HiPro team',
author_email = 'jo...@hi...',
description = 'Bases framework is a pythonic application development platform for n-tier enterprise applications.',
packages = packages,
- py_modules = ['bases/app', 'bases/common', 'bases/errors',
- 'bases/interface'],
+ data_files = data_files,
scripts = ['src/bases-server'],
- )
+)
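To make the discovery logic above concrete, a small sketch of what fullsplit() and the os.walk() scan produce (the directory is an example from the tree):

    import os

    def fullsplit(path, result=None):
        # same helper as in the diff above: split a path into all components
        if result is None:
            result = []
        head, tail = os.path.split(path)
        if head == '':
            return [tail] + result
        if head == path:
            return result
        return fullsplit(head, [tail] + result)

    print fullsplit(os.path.join("bases", "core", "transports"))
    # ['bases', 'core', 'transports'] -> joined to 'bases.core.transports',
    # so every directory under bases/ with an __init__.py becomes a package
    # entry without hand-editing setup.py.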
From: <joe...@us...> - 2009-02-26 07:56:33
Revision: 16
http://bases.svn.sourceforge.net/bases/?rev=16&view=rev
Author: joe_steeve
Date: 2009-02-26 07:56:28 +0000 (Thu, 26 Feb 2009)
Log Message:
-----------
Add relative paths to some modules.
* setup.py:
(setup): Refer to modules inside bases by their relative paths.
Modified Paths:
--------------
trunk/setup.py
Modified: trunk/setup.py
===================================================================
--- trunk/setup.py 2009-02-26 07:55:35 UTC (rev 15)
+++ trunk/setup.py 2009-02-26 07:56:28 UTC (rev 16)
@@ -12,6 +12,7 @@
author_email = 'jo...@hi...',
description = 'Bases framework is a pythonic application development platform for n-tier enterprise applications.',
packages = packages,
- py_modules = ['app', 'common', 'errors', 'interface'],
+ py_modules = ['bases/app', 'bases/common', 'bases/errors',
+ 'bases/interface'],
scripts = ['src/bases-server'],
)
From: <joe...@us...> - 2009-02-26 07:55:39
Revision: 15
http://bases.svn.sourceforge.net/bases/?rev=15&view=rev
Author: joe_steeve
Date: 2009-02-26 07:55:35 +0000 (Thu, 26 Feb 2009)
Log Message:
-----------
Basic setup.py script for making bases installation simple.
* setup.py:
(): New script to create Python distutils-based packages.
* MANIFEST.in:
(): MANIFEST include file to specify some directories and files to
be added as a part of the package.
Added Paths:
-----------
trunk/MANIFEST.in
trunk/setup.py
Added: trunk/MANIFEST.in
===================================================================
--- trunk/MANIFEST.in (rev 0)
+++ trunk/MANIFEST.in 2009-02-26 07:55:35 UTC (rev 15)
@@ -0,0 +1,4 @@
+include README
+include MANIFEST.in
+recursive-include docs *
+
Added: trunk/setup.py
===================================================================
--- trunk/setup.py (rev 0)
+++ trunk/setup.py 2009-02-26 07:55:35 UTC (rev 15)
@@ -0,0 +1,17 @@
+from distutils.core import setup
+
+packages = ['bases/core', 'bases/core/transports', 'bases/services',
+ 'bases/synclient', 'bases/synclient/transports',
+ 'bases/thirdparty', 'bases/util']
+
+setup(
+ name='bases',
+ version='0.1',
+ url = 'http://https://forge.hipro.co.in/projects/bases/',
+ author = 'HiPro team',
+ author_email = 'jo...@hi...',
+ description = 'Bases framework is a pythonic application development platform for n-tier enterprise applications.',
+ packages = packages,
+ py_modules = ['app', 'common', 'errors', 'interface'],
+ scripts = ['src/bases-server'],
+ )
Property changes on: trunk/setup.py
___________________________________________________________________
Added: svn:executable
+ *
|
|
From: <joe...@us...> - 2009-02-26 07:54:39
|
Revision: 14
http://bases.svn.sourceforge.net/bases/?rev=14&view=rev
Author: joe_steeve
Date: 2009-02-26 07:54:34 +0000 (Thu, 26 Feb 2009)
Log Message:
-----------
Major rework of basic areas
* Reworked major areas to lay the groundwork for 'peer-proxying'.
* The Obj-Repo no longer instantiates a stateless object for every
incoming call.
* Reworked the component-directory to cache component information
properly.
* Changed how per-message information travels to the component and
back. I use my favourite dicts now ;) (a small sketch follows below).
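For reference, a minimal sketch of the per-message context dict introduced in
this revision, as it appears in the broker diff below (the surrounding names
come from that diff; the comments are editorial):

    # Built by the broker for every incoming call and passed down to the
    # component proxy in place of the old bare MsgID:
    mctxt = {'brokerMsgID': mid,                          # broker-internal message id
             'brokerOwnerID': str(id(trans)),             # identifies the calling transport
             'basesService': trans.factory.basesService}  # service the call arrived on
    d = o.callMethod(mctxt, dmesg[BASES_MSG_METHODNAME],
                     dmesg[BASES_MSG_ARGS], dmesg[BASES_MSG_KWARGS])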
Modified Paths:
--------------
trunk/bases/core/broker.py
trunk/bases/core/component.py
trunk/bases/core/proxy.py
trunk/bases/core/transports/transports.py
trunk/bases/errors.py
trunk/bases/services/directory.py
trunk/bases/services/objrepo.py
trunk/src/bases-server
trunk/test/test-echo/crepo/TestComponent.py
trunk/test/test-echo/test.py
Modified: trunk/bases/core/broker.py
===================================================================
--- trunk/bases/core/broker.py 2009-02-26 07:51:44 UTC (rev 13)
+++ trunk/bases/core/broker.py 2009-02-26 07:54:34 UTC (rev 14)
@@ -164,7 +164,9 @@
d = None
try:
o = self.ObjRepo.getObject(dmesg[BASES_MSG_OBJID])
- d = o.callMethod(mid, dmesg[BASES_MSG_METHODNAME],
+ mctxt = {'brokerMsgID':mid, 'brokerOwnerID':str(id(trans)),
+ 'basesService':trans.factory.basesService}
+ d = o.callMethod(mctxt, dmesg[BASES_MSG_METHODNAME],
dmesg[BASES_MSG_ARGS], dmesg[BASES_MSG_KWARGS])
except:
self.in_Transports[id(trans)].remove(mid)
@@ -210,7 +212,7 @@
@param string mid: The internal-id for the incoming message.
"""
-
+ print "ErrorBack: _handleInMethodErrResponse(%s, %s)" % (response,mid)
try:
MsgID, trans, ts = self.in_MethodCalls[mid]
dmesg = [BASES_METHOD_ERESPONSE, MsgID, str(response)]
@@ -221,7 +223,6 @@
del(self.in_MethodCalls[mid])
except KeyError:
pass
- return response
def callRemote(self, location, ObjID, MethodName, args, kwargs):
@@ -342,11 +343,18 @@
del(self.out_MethodCalls[mid])
del(self.out_Transports[id(trans)])
-
+ # Cleanup object references in the object repository
+ self.ObjRepo.cleanupDeadOwnerObjs(str(id(trans)))
+
+
def getSnameOfMsg(self, MsgID):
trans = self.in_MethodCalls[MsgID][1]
return trans.factory.basesService
+ def getOwnerID(self, MsgID):
+ trans = self.in_MethodCalls[MsgID][1]
+ return str(id(trans))
+
class BasesAsyncPoser:
pass
Modified: trunk/bases/core/component.py
===================================================================
--- trunk/bases/core/component.py 2009-02-26 07:51:44 UTC (rev 13)
+++ trunk/bases/core/component.py 2009-02-26 07:54:34 UTC (rev 14)
@@ -4,52 +4,27 @@
BASES_STATELESS_COMPONENT = 1
BASES_STATEFUL_COMPONENT = 2
-BASES_STATEFULLONGLIVED_COMPONENT = 3
class BasesComponent:
- def __init__(self):
- self.componentInit()
+ pass
- def componentInit(self):
- raise BasesComponentError("componentInit: Not implemented")
- def __del__(self):
- self.componentDel()
-
- def componentDel(self):
- raise BasesComponentError("componentDel: Not implemented")
-
-
-
class BasesComponent_Stateless(BasesComponent):
- def __init__(self):
- self.ComponentType = BASES_STATELESS_COMPONENT
- BasesComponent.__init__(self)
+ bases_ComponentType = BASES_STATELESS_COMPONENT
+ bases_LongLived = False
- def __del__(self):
- BasesComponent.__del__(self)
-
class BasesComponent_Stateful(BasesComponent):
- def __init__(self):
- self.ComponentType = BASES_STATEFUL_COMPONENT
- BasesComponent.__init__(self)
+ bases_ComponentType = BASES_STATEFUL_COMPONENT
+ bases_LongLived = False
+
- def __del__(self):
- BasesComponent.__del__(self)
-
-
class BasesComponent_StatefulLongLived(BasesComponent):
- def __init__(self):
- self.ComponentType = BASES_STATEFULLONGLIVED_COMPONENT
- BasesComponent.__init__(self)
+ bases_ComponentType = BASES_STATEFUL_COMPONENT
+ bases_LongLived = True
- def __del__(self):
- BasesComponent.__del__(self)
-
-
class BasesComponentInterface:
def __init__ (self, file=None, interface=None):
@@ -70,7 +45,7 @@
self.KlassName = tree.findtext('KlassName')
if self.KlassName is None:
raise BasesInterfaceError("Bad interface: No 'KlassName'")
-
+
self.Methods = {}
miter = tree.getiterator("Method")
for m in miter:
Modified: trunk/bases/core/proxy.py
===================================================================
--- trunk/bases/core/proxy.py 2009-02-26 07:51:44 UTC (rev 13)
+++ trunk/bases/core/proxy.py 2009-02-26 07:54:34 UTC (rev 14)
@@ -167,18 +167,18 @@
self.callMethod = self._callRemoteMethod
- def callMethod(self, MsgID, MethodName, args, kwargs):
+ def callMethod(self, mctxt, MethodName, args, kwargs):
raise BasesClientProxyError("Unconfigured BasesComponentClientProxy")
- def _callRemoteMethod(self, MsgID, MethodName, args, kwargs):
+ def _callRemoteMethod(self, mctxt, MethodName, args, kwargs):
self._Interface.validateMethodInterface(MethodName, args, kwargs)
d = self._Broker.callRemote (self._Location, self._Object,
MethodName, args, kwargs)
return d
- def _callLocalMethod(self, MsgID, MethodName, args, kwargs):
+ def _callLocalMethod(self, mctxt, MethodName, args, kwargs):
self._Interface.validateMethodInterface(MethodName, args, kwargs)
m = getattr (self._Object, MethodName)
d = threads.deferToThread (m, *args, **kwargs)
Modified: trunk/bases/core/transports/transports.py
===================================================================
--- trunk/bases/core/transports/transports.py 2009-02-26 07:51:44 UTC (rev 13)
+++ trunk/bases/core/transports/transports.py 2009-02-26 07:54:34 UTC (rev 14)
@@ -23,7 +23,10 @@
def connectionLost(self,reason):
log.msg(3, str(reason))
- self.factory.basesBroker.cleanupTransReferences(self)
+ try:
+ self.factory.basesBroker.cleanupTransReferences(self)
+ except Exception, e:
+ print str(e)
def canHandle(self, LocationTuple):
"""
Modified: trunk/bases/errors.py
===================================================================
--- trunk/bases/errors.py 2009-02-26 07:51:44 UTC (rev 13)
+++ trunk/bases/errors.py 2009-02-26 07:54:34 UTC (rev 14)
@@ -24,3 +24,6 @@
class BasesDBIError(BasesError):
pass
+
+class BasesObjRepoError(BasesError):
+ pass
Modified: trunk/bases/services/directory.py
===================================================================
--- trunk/bases/services/directory.py 2009-02-26 07:51:44 UTC (rev 13)
+++ trunk/bases/services/directory.py 2009-02-26 07:54:34 UTC (rev 14)
@@ -18,7 +18,59 @@
class BasesComponentDirectory:
"""
I maintain a directory of known components and information about
- them.
+ them.
+
+ An incoming directory lookup request can be one of,
+
+ a) A local component wanting to locate another component
+ b) A remote client wanting to locate a component
+
+ In both cases, the component that is being located can either be a
+ locally published component or a remotely published component. The
+ BasesComponentDirectory maintains a directory of known components
+ (both local and remote).
+
+ An 'interface repository' is a directory containing *.interface
+ files (component interface description files). Every 'service'
+ defined in the configuration file should have an 'interface
+ repository' (set using 'interface_repo' setting). The
+ 'interface_repo' setting in the '[globals]' section points to the
+ interface repository of local components that are available to the
+ local components. At startup BasesComponentDirectory scans each
+ 'interface repository' and constructs the initial directory of
+ component locations.
+
+
+ TBD: Proxy peers:
+ -----------------
+
+ Each service can have its own proxy_peer setting. The
+ 'proxy_peers' are other bases-servers whose components are
+ proxied. A component proxy is one that abstracts the location of a
+ remote component to the client; i.e. the client will not know that
+ the component actually resides in a remote bases-server. The
+ client simply sends messages to the component-proxy and the
+ component-proxy calls the appropriate bases-server. Each service
+ proxies only the components that are on the 'proxy-peer' defined
+ under its section. The components available on a proxy-peer are
+ learned dynamically. i.e, the component lookup on a proxy-peer is
+ done only once. The results are cached so that future queries for
+ the same component are responded immediately without another
+ lookup.
+
+
+ TBD: Remote components:
+ -----------------------
+
+ The BasesComponentDirectory is capable of querying other
+ bases-servers for a component's information. In this case, the
+ client simply queries for a Component's information/location and
+ we lookup other known bases-servers for the required
+ information. The information is cached locally so that future
+ requests for the same component need not be searched. The other
+ neighbouring bases-servers are defined in the configuration file
+ under the '[globals]' section in 'bases_neighbours' setting.
+
"""
interface = """
@@ -39,8 +91,7 @@
self.srvConf = conf
self.ObjRepo = ObjRepo
self.Broker = Broker
- self.localComponents = {}
- self.remoteComponents = {}
+ self.ComponentDB = {}
self.scanLocalComponents(conf)
self._Interface = component.BasesComponentInterface(interface=self.interface)
@@ -52,13 +103,14 @@
# Components published by each service
for sn in conf["services"]:
sd = conf["service_desc"][sn]
- location = (str(sd['protocol']), str(sd['ip']),
- int(sd['port']))
- self._scanLocalComponents(sn, sd['interface_repo'], location)
+ self._scanLocalComponents(sn, sd['interface_repo'], sd['location'])
# Components available to be called locally
self._scanLocalComponents("__bases__local__",
- conf['globals.interface_repo'], None)
+ conf['globals.interface_repo'],
+ conf['globals.location'])
+ print self.ComponentDB
+
def _scanLocalComponents(self, sname, repodir, location):
import glob
@@ -70,12 +122,12 @@
cinfo['interfacef'] = f
cinfo['module'] = ciface.ModuleName
cinfo['klass'] = ciface.KlassName
- compid_s = "%s:%s" % (sname, ciface.ComponentName)
- compid = shafunc(compid_s).hexdigest()
- self.localComponents[compid] = cinfo
+ cinfo['location'] = location
+ compid = self.generateComponentID(sname, ciface.ComponentName)
+ self.ComponentDB[compid] = cinfo
- def callMethod(self, MsgID, MethodName, args, kwargs):
+ def callMethod(self, mctxt, MethodName, args, kwargs):
"""
I am usually called by the broker. The component-directory
behaves as its own proxy object. The broker directly delivers
@@ -83,19 +135,21 @@
"""
m = getattr (self, MethodName)
try:
- d = m(MsgID, *args, **kwargs)
+ d = m(mctxt, *args, **kwargs)
return d
except:
return defer.fail(failure.Failure())
-
- def getComponentInfo (self, msgid, ComponentName):
+
+ def getComponentInfo (self, mctxt, ComponentName):
"""
Lookup the component with name 'ComponentName' and return
- information about it.
+ information about it. I can be called either by a remote
+ client or a local client. The mctxt holds the context of the
+ request.
- @param[in] msgid A unique ID that identifies this call. This
- is sent to us by the Broker
+ @param[in] mctxt A message context generated by the broker
+ which contains information about the caller.
@param[in] ComponentName Name of the component (provided by
the client)
@@ -112,75 +166,104 @@
"""
# Get a service name
sname = "__bases__local__"
- if msgid is not None:
- sname = self.Broker.getSnameOfMsg(msgid)
+ if mctxt['brokerMsgID'] is not None:
+ sname = mctxt['basesService']
ci = {}
- # Check the locally published componenets
- compid_s = "%s:%s" % (sname, ComponentName)
- compid = shafunc(compid_s).hexdigest()
- if compid in self.localComponents:
- sd = self.srvConf['service_desc'][sname]
- ci['location'] = (str(sd['protocol']), str(sd['ip']),
- int(sd['port']))
- i = open(self.localComponents[compid]['interfacef']).read()
- ci['interface'] = i
- if msgid is not None:
- return defer.succeed(ci)
- else:
- return ci
- # Check the locally cached remote component information
- compid = shafunc(ComponentName).hexdigest()
- if compid in self.remoteComponents:
- ci['location'] = self.remoteComponents[compid]['location']
- i = open(self.remoteComponents[compid]['interfacef']).read()
- ci['interface'] = i
- if msgid is not None:
- return defer.succeed(ci)
- else:
- return ci
+ # lookup local cache
+ compid = self.generateComponentID(sname, ComponentName)
+ if compid in self.ComponentDB:
+ centry = self.ComponentDB[compid]
+ ci['location'] = centry['location']
+ if mctxt['brokerMsgID'] is None:
+ if ci['location'] in self.srvConf['globals.local_locations']:
+ ci['location'] = None
+ ci['interface'] = open(centry['interfacef']).read()
+ return defer.succeed(ci)
# Code to lookup the ComponentName with other bases-servers
# needed here. When we get a suitable information. We should
# give it back and keep a note of it.
raise BasesComponentError("Component not found")
-
- def getComponentOID(self, msgid, ComponentName):
+
+ def getComponentOID(self, mctxt, ComponentName):
"""
+ 1) The caller can be a remote client, who wants to access a
+ locally published component.
+
+ 2) The caller can be a remote client, who wants to access a
+ component published on another bases-server (peer-proxy).
+
+ 3) The caller can be a local component who wants to access
+ another locally published component on the '__bases__local__'
+ service. (mctxt['brokerMsgID']=None)
+
+ 4) The caller can be a local component who wants to access
+ another component on another
+ bases-server. (mctxt['brokerMsgID']=None)
+
+ Basically we have caller wanting to access either
+
+ (a) local component or
+ (b) remote component.
+
+ In both cases, we instantiate a BasesComponentClientProxy. For
+ case (a), the proxy is configured with location=None. And for
+ case (b) the proxy is configured with
+ location=remote-component-location.
"""
sname = "__bases__local__"
- if msgid is not None:
- sname = self.Broker.getSnameOfMsg(msgid)
+ if mctxt['brokerMsgID'] is not None:
+ sname = mctxt['basesService']
+
+ # Check the local cache
+ compid = self.generateComponentID(sname, ComponentName)
+ if compid in self.ComponentDB:
+ # Found the component in the local cache.
+ oid = self._doGetComponentOID(mctxt['brokerOwnerID'], compid,
+ self.ComponentDB[compid])
+ return defer.succeed(oid)
- # Check the locally published componenets
+ # Support to check remote Components should go here
+ raise BasesComponentError("Component not found")
+
+
+ def _doGetComponentOID(self, ownerid, compid, centry):
+ # Construct location tuple
+ location = centry['location']
+ # Construct interface object
+ iface = component.BasesComponentInterface(file=centry['interfacef'])
+
+ # Get the OID of the object
+ oid = None
+ if location in self.srvConf['globals.local_locations']:
+ oid = self.ObjRepo.getOID(ownerid, compid, None, iface,
+ mod=centry['module'],
+ klass=centry['klass'])
+ else:
+ # TBD: remote objects (peer-proxied objects) have to be
+ # instantiated on the other end. i.e, we should get the
+ # oid from the remote bases-server.
+ raise BasesObjRepo("Peer-Proxying not implemented")
+ return oid
+
+
+ def generateComponentID(self, sname, ComponentName):
+ """
+ Generate the component-ID using the given details
+ """
compid_s = "%s:%s" % (sname, ComponentName)
compid = shafunc(compid_s).hexdigest()
- if compid in self.localComponents:
- # Construct location tuple
- location = None
- if msgid is None:
- sd = self.srvConf['service_desc'][sname]
- location = (str(sd['protocol']), str(sd['ip']), int(sd['port']))
- # Construct interface object
- ifacef = self.localComponents[compid]['interfacef']
- iface = component.BasesComponentInterface(file=ifacef)
+ return compid
- # Get the object-id
-
- obj = __import__(self.localComponents[compid]['module'])
- klass = self.localComponents[compid]['klass']
- if klass is not None:
- k = getattr(obj, klass)
- obj = k()
- cproxy = proxy.BasesComponentClientProxy(iface, obj,
- location, self.Broker)
- # Add to object-repository
- oid = self.ObjRepo.addObject(cproxy, None)
- if msgid is not None:
- return defer.succeed(oid)
- else:
- return oid
- # Support to check remote Components should go here
- raise BasesComponentError("Component not found")
+class BasesCompDirClientProxy:
+ """
+ A client proxy to access the BasesComponentDirectory.
+
+ This class SHOULD NOT be used in the 'main-thread context'. This
+ class should be used by code running in other threads to talk to
+ the 'BasesComponentDirectory'.
+ """
+ pass
Modified: trunk/bases/services/objrepo.py
===================================================================
--- trunk/bases/services/objrepo.py 2009-02-26 07:51:44 UTC (rev 13)
+++ trunk/bases/services/objrepo.py 2009-02-26 07:54:34 UTC (rev 14)
@@ -10,11 +10,9 @@
from datetime import datetime
import os
+from bases.errors import *
+from bases.core import component, proxy
-class BasesObjRepoError(Exception):
- pass
-
-
class BasesObjectRepository:
"""
- Should keep a database of alive objects.
@@ -29,79 +27,186 @@
- Statefull objects: Should keep track of the client who created
the object (location). Should keep track of which service the
object is published on. Cannot be garbage-collected. The object
- can timeout and kill itself.
+ can timeout and kill itself.
"""
def __init__(self, Broker):
self.Broker = Broker
- self.ObjDB = {}
- self.StatelessObjDB={}
- self.Broker.ObjRepo = self
+ self.Broker.ObjRepo = self
+ self.ObjDB = {} #{'oid': (obj, reference_count, compid)}
+ self.ObjDBStateless = {} # {'component-id': (OID, timestamp)}
+ self.ObjOwnerDB = {} # {'owner-id': [OID1, OID2, ..]}
+
- def getObject(self, ObjID):
+ def getObject(self, oid):
"""
I return an object from the ObjDB with the given ObjID
"""
try:
- o = self.ObjDB[ObjID]
- return o[0]
+ return self.ObjDB[oid][0]
except KeyError:
- raise BasesObjRepoError("Bad object-id")
+ raise BasesObjRepoError("Bad object-id = %s" %(oid))
+
+ def getOID(self, ownerid, compid, location, interface,
+ mod=None, klass=None, roid=None):
+
+ if location is None:
+ if mod is None or klass is None:
+ raise BasesObjRepoError("No module/klass for local component")
+ oid = self._getLocalComponentOID(ownerid, compid, interface,
+ mod, klass)
+ return oid
+ else:
+ if roid is None:
+ raise BasesObjRepoError( \
+ "peer-proxy requires remote oid != None")
+ cproxy = proxy.BasesComponentClientProxy(interface, roid, location,
+ self.Broker)
+ oid = self.addObjectToDB(cproxy, ownerid, compid, stateless=False)
+ return oid
+
- def addObject(self, Object, OwnerID):
- """
- I put the given object into the ObjDB and return an ObjID
+ def _getLocalComponentOID(self, ownerid, compid, interface, mod, klass):
+ # Check the stateless-object cache
+ oid = self.lookupStatelessObjDB(compid)
+ if oid is not None:
+ self.incrementObjReference(oid, ownerid)
+ return oid
+
+ # Get the 'klass object'
+ mlist = mod.split('.')
+ kobj = None
+ if len(mlist)>0:
+ m = __import__(mod)
+ mlist.pop(0)
+ for mname in mlist:
+ m = getattr(m, mname)
+ kobj = getattr(m, klass)
- @param Object: The object that should be stored
+ if not hasattr(kobj, "bases_ComponentType"):
+ raise BasesObjRepoError("Component deployment error")
+
+ # Check klass type
+ if kobj.bases_ComponentType == \
+ component.BASES_STATELESS_COMPONENT:
+ obj = kobj()
+ cproxy = proxy.BasesComponentClientProxy(interface, obj, None,
+ self.Broker)
+ oid = self.addObjectToDB(cproxy, ownerid, compid, stateless=True)
+ return oid
+ elif kobj.bases_ComponentType == \
+ component.BASES_STATEFUL_COMPONENT:
+ # Stateful objects are not reused. Every new reference is
+ # a new object.
+ obj = kobj()
+ cproxy = proxy.BasesComponentClientProxy(interface, obj, None,
+ self.Broker)
+ oid = self.addObjectToDB(cproxy, ownerid, compid, stateless=False)
+ return oid
- @param CallerID: A string that identifies the owner. Should be
- the KeyID of the client application when available. Else this
- should be generated by the protocol-factory.
- """
- m = ""
- # We make sure we dont accidentally kill another
- # object. Checking for collisions is extreme paranoia.
+ def cleanupDeadOwnerObjs(self, ownerid):
+ try:
+ ol = self.ObjOwnerDB[ownerid][:]
+ except KeyError:
+ raise BasesObjRepoError("Bad owner-id: %s" % (ownerid))
+ for oid in ol:
+ self.decrementObjReference(oid, ownerid)
+ del(self.ObjOwnerDB[ownerid])
+
+
+ def lookupStatelessObjDB(self, compid):
+ if compid in self.ObjDBStateless:
+ return self.ObjDBStateless[compid][0]
+
+
+ def incrementObjReference(self, oid, ownerid):
+ # update the reference count and the ObjOwnerDB
+ try:
+ o = self.ObjDB[oid]
+ if ownerid in self.ObjOwnerDB:
+ ol = self.ObjOwnerDB[ownerid]
+ if oid not in ol:
+ self.ObjDB[oid] = (o[0], o[1]+1, o[2])
+ ol.append(oid)
+ else:
+ self.ObjOwnerDB[ownerid] = [oid,]
+ self.ObjDB[oid] = (o[0], o[1]+1, o[2])
+ except:
+ raise BasesObjRepoError("Bad object-id %s" % (oid))
+
+
+ def decrementObjReference(self, oid, ownerid):
+ try:
+ oe = self.ObjDB[oid]
+ self.ObjOwnerDB[ownerid].remove(oid)
+ except KeyError, e:
+ raise BasesObjRepoError("Bad object-id=%s or owner-id=%s" % \
+ (oid, ownerid))
+ oe = (oe[0], oe[1]-1, oe[2])
+
+ # If reference count is zero, and if the component is not
+ # a stateless component, then we'll have to del it.
+ if oe[1] == 0:
+ if oe[2] not in self.ObjDBStateless:
+ del(oe[0])
+ del(self.ObjDB[oid])
+ return
+ self.ObjDB[oid] = oe
+
+
+ def addObjectToDB(self, obj, ownerid, compid, stateless=True):
+ oid = ""
while True:
- s = "%s%s%s" % (id(Object), datetime.utcnow(), os.urandom(4))
- m = shafunc(s).hexdigest()
- if not self.ObjDB.has_key(m):
+ oid = self.generateOID(obj)
+ if not self.ObjDB.has_key(oid):
break
- o = (Object, datetime.utcnow(), OwnerID)
- self.ObjDB[m] = o
- return m
+ self.ObjDB[oid] = (obj, 1, compid)
+ if ownerid not in self.ObjOwnerDB:
+ self.ObjOwnerDB[ownerid]= []
+ self.ObjOwnerDB[ownerid].append(oid)
- def addStaticObject(self, ObjID, Object, OwnerID):
+ if stateless is True:
+ self.ObjDBStateless[compid] = (oid, datetime.utcnow())
+
+
+ def addStaticObjectToDB (self, obj, ownerid, compid, oid, stateless=True):
"""
I let the caller choose an object-id for the given
object. This is not for normal use. This is for internal use.
"""
- if self.ObjDB.has_key(ObjID):
- raise BasesObjRepoError("ObjectID=%s already exists" % (ObjID))
- o = (Object, datetime.utcnow(), OwnerID)
- self.ObjDB[ObjID] = o
+ if oid in self.ObjDB:
+ raise BasesObjRepoError("OID=%s already exists" % (oid))
+ if stateless is True:
+ if compid in self.ObjDBStateless:
+ raise BasesObjRepoError("Object for compid=%s exists" %(compid))
+
+ self.ObjDB[oid] = (obj, 1, compid)
+ if stateless is True:
+ self.ObjDBStateless[compid] = (oid, datetime.utcnow())
+ self.ObjOwnerDB[ownerid] = [oid,]
+ return oid
- def removeObject(self, ObjID):
- del(self.ObjDB[ObjID])
-
-
-
- def updateObject(self, ObjID):
+ def updateObjectDB(self, oid):
"""
- I recompute a new hash key for the given object and refile it
- in the DB.
+ If the object pointed to by oid is a 'stateless' object, I
+ update its timestamp.
"""
-
- o = self.ObjDB[ObjectID]
- s = "%s%s%s" % (id(o(0)), datetime.utcnow(), os.urandom(4))
- o1 = (o(0), datetime.utcnow(), o(2))
- m = shafunc(s).hexdigest()
- del(self.ObjDB[ObjID])
- self.ObjDB[m] = o1
- return m
+ try:
+ oe = self.ObjDB[oid]
+ if oe[2] in self.ObjDBStateless:
+ sloe = self.ObjDBStateless[oe[2]]
+ self.ObjDBStateless[oe[2]] = (sloe[0], datetime.utcnow())
+ except:
+ raise BasesObjRepoError("Bad OID = %s" % (oid))
+
+
+ def generateOID(self, obj):
+ oid_s = "%s%s%s" % (id(obj), datetime.utcnow(), os.urandom(4))
+ return shafunc(oid_s).hexdigest()
Modified: trunk/src/bases-server
===================================================================
--- trunk/src/bases-server 2009-02-26 07:51:44 UTC (rev 13)
+++ trunk/src/bases-server 2009-02-26 07:54:34 UTC (rev 14)
@@ -131,6 +131,8 @@
conf['globals.groupid'] = int(conf['globals.groupid'])
conf['globals.pidfile'] = conf['globals.pidfile']
conf['globals.bases_cache'] = conf['globals.bases_cache']
+ conf['globals.location'] = ("local", "local", 0)
+ conf['globals.local_locations']=[conf['globals.location']]
conf['services'] = conf['services']
if conf.has_key('globals.manhole'):
@@ -187,6 +189,9 @@
sd['ip'] = sd['ip']
sd['port'] = int(sd['port'])
sd['interface_repo'] = sd['interface_repo']
+ sd['location'] = (str(sd['protocol']), str(sd['ip']),
+ int(sd['port']))
+ conf['globals.local_locations'].append(sd['location'])
except KeyError, e:
print "Service=%s, Required option %s not defined" % (sn, str(e))
sys.exit(1)
@@ -386,7 +391,11 @@
obj_repo = objrepo.BasesObjectRepository(b)
comp_dir = directory.BasesComponentDirectory(conf, obj_repo, b)
# Register the comp_dir with the object-broker
- obj_repo.addStaticObject(common.BASES_OID_COMPONENTDIR, comp_dir, None)
+ compid = comp_dir.generateComponentID("__bases_service__",
+ "org.bases.bases.ComponentDirectory")
+ obj_repo.addStaticObjectToDB(comp_dir, common.BASES_OID_COMPONENTDIR,
+ compid, common.BASES_OID_COMPONENTDIR,
+ stateless=True)
## Registering the broker, component-dir and obj-repo to the
## builtins.
@@ -456,4 +465,9 @@
if __name__ == "__main__":
+ try:
+ import psyco
+ psyco.full()
+ except ImportError:
+ pass
main()
Modified: trunk/test/test-echo/crepo/TestComponent.py
===================================================================
--- trunk/test/test-echo/crepo/TestComponent.py 2009-02-26 07:51:44 UTC (rev 13)
+++ trunk/test/test-echo/crepo/TestComponent.py 2009-02-26 07:54:34 UTC (rev 14)
@@ -1,6 +1,6 @@
+from bases.core.component import *
-
-class TestComponentClass:
+class TestComponentClass(BasesComponent_Stateless):
def testEcho(self, str):
print "Echoing: %s" % (str)
Modified: trunk/test/test-echo/test.py
===================================================================
--- trunk/test/test-echo/test.py 2009-02-26 07:51:44 UTC (rev 13)
+++ trunk/test/test-echo/test.py 2009-02-26 07:54:34 UTC (rev 14)
@@ -24,15 +24,17 @@
print "Started %d threads. Waiting for them to finish .." % (nthreads)
go = 1
while go:
- time.sleep(0.3)
lock.acquire()
if lst[0] == i:
go = 0
lock.release()
t2 = datetime.utcnow()
- print "Time taken:", t2 - t1
-
+ td = t2 - t1
+ rc = lst[0] * 1000.0
+ print "Requests handled:", int(rc)
+ print "Time taken:", td
+ print "Handled %f requests per sec" % (rc/td.seconds)
def serialEcho():
try:
@@ -48,12 +50,11 @@
print e
def testEcho():
- time.sleep(1)
a = app.BasesClientApp(conf)
o = a.getBasesComponent("org.bases.apps.TestComponent")
p = o.testEcho("Hola")
print p
if __name__ == "__main__":
- main(10)
+ main(20)
|
|
From: <joe...@us...> - 2009-02-26 07:51:48
|
Revision: 13
http://bases.svn.sourceforge.net/bases/?rev=13&view=rev
Author: joe_steeve
Date: 2009-02-26 07:51:44 +0000 (Thu, 26 Feb 2009)
Log Message:
-----------
Refactored the proxy classes and cleaned up their documentation.
Modified Paths:
--------------
trunk/bases/core/proxy.py
Modified: trunk/bases/core/proxy.py
===================================================================
--- trunk/bases/core/proxy.py 2009-02-26 07:50:16 UTC (rev 12)
+++ trunk/bases/core/proxy.py 2009-02-26 07:51:44 UTC (rev 13)
@@ -43,7 +43,12 @@
class BasesComponentServerProxy:
"""
- A class to create proxies to remote BasesComponents
+ A class to create proxies to remote BasesComponents. This class
+ SHOULD NOT be used in the 'main thread context'. This class could
+ cause calls to be scheduled in the main-thread (where the reactor
+ is running).
+
+ This provides a synchronous interface.
"""
def __init__(self, Interface, instance, location=None, broker=None):
@@ -75,7 +80,10 @@
self._Broker = broker
if location is not None and broker is None:
raise BasesServerProxyError("Remote components need a broker")
-
+ if location is None:
+ self.MethodCallHander = self._LocalMethodCallHandler
+ else:
+ self.MethodCallHander = self._RemoteMethodCallHandler
def __getattr__(self, MethodName):
"""
@@ -83,23 +91,34 @@
MethodCallHandler.
"""
self._CalledMethod = MethodName
- return self.__MethodCallHandler
+ return self.MethodCallHandler
- def __MethodCallHandler(self, *args, **kwargs):
+ def MethodCallHandler(self, *args, **kwargs):
+ raise BasesServerProxy("Unconfigured BasesServerProxy")
+
+
+ def _LocalMethodCallHandler(self, *args, **kwargs):
+ # This calls the local component-object in the current
+ # thread-context.
+ self._Interface.validateMethodInterface(self._CalledMethod,
+ args, kwargs)
+ m = getattr (self._Object, self._CalledMethod)
+ r = m(*args, **kwargs)
+ return r
+
+
+ def _RemoteMethodCallHandler(self, *args, **kwargs):
# This is a blocking call. This blocks on the main-thread
# until the remote-call returns with a reply.
self._Interface.validateMethodInterface(self._CalledMethod,
args, kwargs)
- if self._Location is None:
- m = getattr (self._Object, self._CalledMethod)
- r = m(*args, **kwargs)
- else:
- r = callInMainThread(self._Broker.callRemote, self._Location,
- self.Object, self._CalledMethod, args, kwargs)
+ r = callInMainThread(self._Broker.callRemote, self._Location,
+ self.Object, self._CalledMethod, args, kwargs)
return r
+
class BasesComponentClientProxy:
"""
A proxy to emulate a normal caller. The broker delivers the the
@@ -107,6 +126,8 @@
method in a thread. When the call returns, I fire a callback so
that the result can be sent to whoever called us.
+ This class SHOULD BE USED ONLY IN THE MAIN-THREAD.
+
I am capable of proxying another peer. i.e, act as a proxy to
another peer.
"""
@@ -127,7 +148,7 @@
@param location: This is none when proxying a local
component. This contains the location-tuple when we are
- proxying a ComponentObject that exists on a known peer.
+ proxying a ComponentObject that exists on a peer (peer-proxy).
@param broker: When location is not None, then this is the
broker that should be used to communicate with the remote
@@ -140,16 +161,25 @@
self._Object.bases_Proxy = self
self._Location = location
self._Broker = broker
-
+ if location is None:
+ self.callMethod = self._callLocalMethod
+ else:
+ self.callMethod = self._callRemoteMethod
+
def callMethod(self, MsgID, MethodName, args, kwargs):
+ raise BasesClientProxyError("Unconfigured BasesComponentClientProxy")
+
+
+ def _callRemoteMethod(self, MsgID, MethodName, args, kwargs):
self._Interface.validateMethodInterface(MethodName, args, kwargs)
- d = None
- if self._Location is None:
- m = getattr (self._Object, MethodName)
- d = threads.deferToThread (m, *args, **kwargs)
- else:
- d = self._Broker.callRemote (self._Location, self._Object,
- MethodName, args, kwargs)
+ d = self._Broker.callRemote (self._Location, self._Object,
+ MethodName, args, kwargs)
return d
+
+ def _callLocalMethod(self, MsgID, MethodName, args, kwargs):
+ self._Interface.validateMethodInterface(MethodName, args, kwargs)
+ m = getattr (self._Object, MethodName)
+ d = threads.deferToThread (m, *args, **kwargs)
+ return d
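As an aside, a minimal, self-contained sketch of the __getattr__ dispatch
trick the server proxy above relies on. The class and method names here are
made up for illustration and are not part of Bases:

    class _DispatchSketch(object):
        """Record the attribute name, then hand back a single handler."""
        def __getattr__(self, name):
            # Only reached for attributes that do not already exist.
            self._called = name
            return self._handle

        def _handle(self, *args, **kwargs):
            return "would dispatch %s(*%r, **%r)" % (self._called, args, kwargs)

    p = _DispatchSketch()
    print p.testEcho("Hola")   # -> would dispatch testEcho(*('Hola',), **{})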
|
|
From: <joe...@us...> - 2009-02-26 07:50:18
|
Revision: 12
http://bases.svn.sourceforge.net/bases/?rev=12&view=rev
Author: joe_steeve
Date: 2009-02-26 07:50:16 +0000 (Thu, 26 Feb 2009)
Log Message:
-----------
Simple DB abstraction
The DB abstraction creates connections for supported DB types and
checks for connection leaks (a usage sketch follows below).
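A minimal usage sketch of the new DB abstraction, assuming a PyGreSQL
('pgdb') database is reachable. The connection string and query are
illustrative only:

    from bases.util import dbi

    # Hypothetical DSN -- adjust to a real database before running.
    conn = dbi.createConnection('pgdb', 'localhost:mydb:user:password')
    try:
        cur = conn.cursor()
        cur.execute("SELECT 1")
        print cur.fetchone()
    finally:
        # Removes the connection from the leak-tracking table and closes it.
        dbi.destroyConnection(conn)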
Modified Paths:
--------------
trunk/bases/errors.py
trunk/src/bases-server
Added Paths:
-----------
trunk/bases/util/
trunk/bases/util/__init__.py
trunk/bases/util/dbi.py
Modified: trunk/bases/errors.py
===================================================================
--- trunk/bases/errors.py 2009-02-26 07:48:14 UTC (rev 11)
+++ trunk/bases/errors.py 2009-02-26 07:50:16 UTC (rev 12)
@@ -22,3 +22,5 @@
class BasesClientProxyError(BasesError):
pass
+class BasesDBIError(BasesError):
+ pass
Added: trunk/bases/util/dbi.py
===================================================================
--- trunk/bases/util/dbi.py (rev 0)
+++ trunk/bases/util/dbi.py 2009-02-26 07:50:16 UTC (rev 12)
@@ -0,0 +1,70 @@
+
+from twisted.python import log
+from datetime import datetime, timedelta
+import thread
+
+conn_list = {}
+conn_list_lock = thread.allocate_lock()
+supportedDBs = ['pgdb']
+
+def createConnection (dbtype, connstr):
+ global conn_list, conn_list_lock, supportedDBs
+
+ if dbtype not in supportedDBs:
+ raise BasesDBIError("%s not supported" % (dbtype))
+
+ conn = None
+ try:
+ dbmod = __import__(dbtype)
+ if(dbmod.threadsafety < 1):
+ raise BasesDBIError("The mentioned adapter is not thread-safe")
+ conn = dbmod.connect(connstr)
+ except BasesError, e:
+ raise(e)
+ except Exception, e:
+ raise BasesDBIError(str(e))
+
+ conn_list_lock.acquire()
+ conn_list[id(conn)] = datetime.utcnow()
+ # Encode the creation location in the connection object for debug
+ # purpose.
+ conn.tn_creation_location = traceback.extract_stack(limit=2).pop(0)
+ lock.release()
+ return conn
+
+
+def destroyConnection (conn):
+ global conn_list, conn_list_lock
+
+ conn_list_lock.acquire()
+ try:
+ del(conn_list[id(conn)])
+ finally:
+ conn_list_lock.release()
+ conn.close()
+ del(conn)
+
+
+def cleanupConnectionLeak(maxTime):
+ """
+ This method closes connections which are older than maxTime
+ seconds
+ """
+
+ global conn_list, conn_list_lock
+ log.msg(3, "Checking for leaked DB connections")
+
+ conn_list_lock.acquire()
+ try:
+ for c in conn_list:
+ tnow = datetime.utcnow()
+ td = timedelta(seconds=maxTime)
+ if (tnow - conn_list[c]) > td:
+ log.msg(1,"DB connection Leak at: %s" % \
+ [c.tn_creation_location])
+
+ del(conn_list[id(c)])
+ c.close()
+ del(c)
+ finally:
+ conn_list_lock.release()
Modified: trunk/src/bases-server
===================================================================
--- trunk/src/bases-server 2009-02-26 07:48:14 UTC (rev 11)
+++ trunk/src/bases-server 2009-02-26 07:50:16 UTC (rev 12)
@@ -7,6 +7,8 @@
from bases.core import broker
from bases import common
+from bases.util import dbi
+from bases.errors import *
stdout = sys.stdout
stdin = sys.stdin
@@ -148,6 +150,11 @@
print "Required option %s not found in config file" % (str(e))
sys.exit(1)
+ if 'globals.db_conn_timeout' in conf:
+ conf['globals.db_conn_timeout'] = int(conf['globals.db_conn_timeout'])
+ else:
+ conf['globals.db_conn_timeout'] = 60
+
if conf.has_key('globals.verbosity'):
conf['globals.verbosity'] = int(conf['globals.verbosity'])
else:
@@ -226,6 +233,8 @@
util.switchUID(conf['globals.userid'], conf['globals.groupid'])
except:
pass
+
+ reactor.callLater(5, timedCalls, reactor, conf)
log.msg(1, "Starting bases-server [%s]" % (conf['globals.name']))
if conf['globals.profile_hs'] is True:
@@ -236,10 +245,11 @@
os.unlink(conf['globals.pidfile'])
-class BasesError(Exception):
- pass
+def timedCalls(reactor, conf):
+ dbi.cleanupConnectionLeak(conf['globals.db_conn_timeout'])
+ reactor.callLater(5, timedCalls, reactor, conf)
-
+
class BasesFileLogObserver(log.FileLogObserver):
bases_verbosity = 0
def emit (self, eventDict):
|
|
From: <joe...@us...> - 2009-02-26 07:48:19
|
Revision: 11
http://bases.svn.sourceforge.net/bases/?rev=11&view=rev
Author: joe_steeve
Date: 2009-02-26 07:48:14 +0000 (Thu, 26 Feb 2009)
Log Message:
-----------
Added a simple README on how to run the Echo-Test
Added Paths:
-----------
trunk/README
Added: trunk/README
===================================================================
--- trunk/README (rev 0)
+++ trunk/README 2009-02-26 07:48:14 UTC (rev 11)
@@ -0,0 +1,24 @@
+
+To run bases-server:
+--------------------
+
+ 1) You should have the 'bases' package in the PYTHONPATH.
+
+ 2) You should have the path to your components in the PYTHONPATH
+
+ 3) You should have a valid server.ini
+
+The source distribution comes with an echo-test component and
+client. Assuming you unpacked the sources into /home/foo/bases,
+follow these steps to run the echo-test.
+
+ $ cd src
+ $ PYTHONPATH=/home/foo/bases:/home/foo/bases/test/test-echo/crepo ./bases-server ../test/test-echo/server.ini start -d -vvv
+
+On another console,
+
+ $ cd test/test-echo
+ $ python test.py
+
+Enjoy :)
+
|
|
From: <joe...@us...> - 2009-02-26 07:46:57
|
Revision: 10
http://bases.svn.sourceforge.net/bases/?rev=10&view=rev
Author: joe_steeve
Date: 2009-02-26 07:46:56 +0000 (Thu, 26 Feb 2009)
Log Message:
-----------
Removed a nasty connection leak in 'synclient.app'.
* Cleaned up debug lines.
* Untabified some tabbed files.
Modified Paths:
--------------
trunk/bases/core/broker.py
trunk/bases/core/transports/transports.py
trunk/bases/services/directory.py
trunk/bases/services/objrepo.py
trunk/bases/synclient/app.py
trunk/bases/synclient/broker.py
trunk/bases/synclient/services.py
trunk/bases/synclient/transports/bbtrans.py
trunk/test/test-echo/server.ini
trunk/test/test-echo/test.py
Modified: trunk/bases/core/broker.py
===================================================================
--- trunk/bases/core/broker.py 2009-02-26 07:43:30 UTC (rev 9)
+++ trunk/bases/core/broker.py 2009-02-26 07:46:56 UTC (rev 10)
@@ -164,7 +164,6 @@
d = None
try:
o = self.ObjRepo.getObject(dmesg[BASES_MSG_OBJID])
- print dmesg
d = o.callMethod(mid, dmesg[BASES_MSG_METHODNAME],
dmesg[BASES_MSG_ARGS], dmesg[BASES_MSG_KWARGS])
except:
@@ -188,14 +187,16 @@
@param string mid: The internal-id for the incoming message.
"""
- MsgID, trans, ts = self.in_MethodCalls[mid]
- dmesg = [BASES_METHOD_RESPONSE, MsgID, response]
- print "returning: ", dmesg
try:
- trans.queueMsg(dmesg)
- finally:
- self.in_Transports[id(trans)].remove(mid)
- del(self.in_MethodCalls[mid])
+ MsgID, trans, ts = self.in_MethodCalls[mid]
+ dmesg = [BASES_METHOD_RESPONSE, MsgID, response]
+ try:
+ trans.queueMsg(dmesg)
+ finally:
+ self.in_Transports[id(trans)].remove(mid)
+ del(self.in_MethodCalls[mid])
+ except KeyError:
+ pass
def _handleInMethodErrResponse(self, response, mid):
@@ -210,14 +211,17 @@
@param string mid: The internal-id for the incoming message.
"""
- MsgID, trans, ts = self.in_MethodCalls[mid]
- dmesg = [BASES_METHOD_ERESPONSE, MsgID, response]
- print "returning error: ", dmesg
try:
- trans.queueMsg(dmesg)
- finally:
- self.in_Transports[id(trans)].remove(mid)
- del(self.in_MethodCalls[mid])
+ MsgID, trans, ts = self.in_MethodCalls[mid]
+ dmesg = [BASES_METHOD_ERESPONSE, MsgID, str(response)]
+ try:
+ trans.queueMsg(dmesg)
+ finally:
+ self.in_Transports[id(trans)].remove(mid)
+ del(self.in_MethodCalls[mid])
+ except KeyError:
+ pass
+ return response
def callRemote(self, location, ObjID, MethodName, args, kwargs):
@@ -270,21 +274,15 @@
@param trans: The bases-transport that got this message
"""
- mref = None
+ mref = self.out_MethodCalls[dmesg[BASES_MSG_MSGID]]
try:
- mref = self.out_MethodCalls[dmesg[BASES_MSG_MSGID]]
- except:
- raise BasesBrokerError \
- ("Unexpected response to MsgId=%d from %s" %
- (dmesg[BASES_MSG_MSGID], trans.remoteAddr))
-
- try:
mref[0].callback(dmesg[BASES_MSG_RESPONSE])
finally:
del(self.out_MethodCalls[dmesg[BASES_MSG_MSGID]])
- self.out_Transports[id(trans)]['msgs'].remove(dmesg[BASES_MSG_MSGID])
+ self.out_Transports[id(trans)]['msgs'].remove(
+ dmesg[BASES_MSG_MSGID])
+
-
def _handleOutMethodErrResponse(self, dmesg, trans):
"""
I handle the error reponse for an outgoing MethodCall from
@@ -295,19 +293,13 @@
@param trans: The bases-transport that got this message
"""
- mref = None
+ mref = self.out_MethodCalls[dmesg[BASES_MSG_MSGID]]
try:
- mref = self.out_MethodCalls[dmesg[BASES_MSG_MSGID]]
- except:
- raise BasesBrokerError \
- ("Unexpected response to MsgId=%d from %s" %
- (dmesg[BASES_MSG_MSGID], trans.remoteAddr))
-
- try:
mref[0].errback(dmesg[BASES_MSG_RESPONSE])
finally:
del(self.out_MethodCalls[dmesg[BASES_MSG_MSGID]])
- self.out_Transports[id(trans)]['msgs'].remove(dmesg[BASES_MSG_MSGID])
+ self.out_Transports[id(trans)]['msgs'].remove(
+ dmesg[BASES_MSG_MSGID])
def getTransport(self, LocationTuple):
@@ -328,26 +320,29 @@
tmap = transports.basesTransportsMap
if LocationTuple[0] in tmap['secure']:
secure = True
-########## incomplete here
- def cleanupTransReferences(self, trans, server=False):
+ def cleanupTransReferences(self, trans):
"""
I should be called when a transport is dying to clear out its
references and do cleanup job.
"""
- if server is True:
+ print "Transport cleanup", trans
+ if id(trans) in self.in_Transports:
# Should clear up all in-coming related book-keeping
- pass
- else:
- # Should clear up all out-going related book-keeping
- pass
-
- #TBD: cleanup references of the transport from the in_MethodCalls
- #TBD: cleanup references of the transport from the in_Transports
- pass
+ mlist = self.in_Transports[id(trans)]
+ for mid in mlist:
+ del(self.in_MethodCalls[mid])
+ del(self.in_Transports[id(trans)])
+ if id(trans) in self.out_Transports:
+ # Should clear up all out-going related book-keeping
+ mlist = self.out_Transports[id(trans)]
+ for mid in mlist:
+ del(self.out_MethodCalls[mid])
+ del(self.out_Transports[id(trans)])
+
def getSnameOfMsg(self, MsgID):
trans = self.in_MethodCalls[MsgID][1]
return trans.factory.basesService
Modified: trunk/bases/core/transports/transports.py
===================================================================
--- trunk/bases/core/transports/transports.py 2009-02-26 07:43:30 UTC (rev 9)
+++ trunk/bases/core/transports/transports.py 2009-02-26 07:46:56 UTC (rev 10)
@@ -23,15 +23,14 @@
def connectionLost(self,reason):
log.msg(3, str(reason))
- self.factory.basesBroker.cleanupTransReferences(self,
- server=self._ServerSide)
+ self.factory.basesBroker.cleanupTransReferences(self)
def canHandle(self, LocationTuple):
"""
I check whether I can handle data for the given LocationTuple
and return a boolean True if I can. Else, I return a False.
"""
- pass
+ return False
def makeSSLContext(Key, Cert, verify=False, CA=None):
Modified: trunk/bases/services/directory.py
===================================================================
--- trunk/bases/services/directory.py 2009-02-26 07:43:30 UTC (rev 9)
+++ trunk/bases/services/directory.py 2009-02-26 07:46:56 UTC (rev 10)
@@ -1,6 +1,7 @@
"""Provides the component-directory service for a bases-server"""
from twisted.internet import defer
+from twisted.python import failure
import thread
from bases import common
from bases.core import component, proxy
@@ -42,7 +43,6 @@
self.remoteComponents = {}
self.scanLocalComponents(conf)
self._Interface = component.BasesComponentInterface(interface=self.interface)
- print self.localComponents
def scanLocalComponents(self, conf):
"""
@@ -63,6 +63,7 @@
def _scanLocalComponents(self, sname, repodir, location):
import glob
ifiles = glob.glob ("%s/*.interface" % (repodir))
+ print ifiles
for f in ifiles:
cinfo = {}
ciface = component.BasesComponentInterface(file=f)
@@ -81,9 +82,11 @@
to me and I dispatch it.
"""
m = getattr (self, MethodName)
- d = m(MsgID, *args, **kwargs)
- print "Got back", type(d)
- return d
+ try:
+ d = m(MsgID, *args, **kwargs)
+ return d
+ except:
+ return defer.fail(failure.Failure())
def getComponentInfo (self, msgid, ComponentName):
@@ -123,7 +126,6 @@
i = open(self.localComponents[compid]['interfacef']).read()
ci['interface'] = i
if msgid is not None:
- print "Calling defer.succeed with", ci
return defer.succeed(ci)
else:
return ci
@@ -134,7 +136,6 @@
i = open(self.remoteComponents[compid]['interfacef']).read()
ci['interface'] = i
if msgid is not None:
- print "Calling defer.succeed with", ci
return defer.succeed(ci)
else:
return ci
@@ -142,11 +143,7 @@
# Code to lookup the ComponentName with other bases-servers
# needed here. When we get a suitable information. We should
# give it back and keep a note of it.
- if msgid is not None:
- print "Calling defer.fail"
- return defer.fail(result="Component Not Found")
- else:
- raise BasesComponentError("Component not found")
+ raise BasesComponentError("Component not found")
def getComponentOID(self, msgid, ComponentName):
@@ -169,7 +166,8 @@
ifacef = self.localComponents[compid]['interfacef']
iface = component.BasesComponentInterface(file=ifacef)
- # Instantiate the module and the class
+ # Get the object-id
+
obj = __import__(self.localComponents[compid]['module'])
klass = self.localComponents[compid]['klass']
if klass is not None:
@@ -185,7 +183,4 @@
return oid
# Support to check remote Components should go here
- if msgid is not None:
- return defer.fail(result="Component Not Found")
- else:
- raise BasesComponentError("Component not found")
+ raise BasesComponentError("Component not found")
Modified: trunk/bases/services/objrepo.py
===================================================================
--- trunk/bases/services/objrepo.py 2009-02-26 07:43:30 UTC (rev 9)
+++ trunk/bases/services/objrepo.py 2009-02-26 07:46:56 UTC (rev 10)
@@ -2,27 +2,27 @@
shafunc = None
try:
- import hashlib
- shafunc = hashlib.sha512
+ import hashlib
+ shafunc = hashlib.sha512
except:
- import sha
- shafunc = sha.sha
-
+ import sha
+ shafunc = sha.sha
+
from datetime import datetime
import os
class BasesObjRepoError(Exception):
- pass
+ pass
class BasesObjectRepository:
- """
- - Should keep a database of alive objects.
+ """
+ - Should keep a database of alive objects.
- TBD: Garbage collection. Should hook a method into twisted's
- calllater and get it to do cleanup.
-
- - Stateless objects: Need not keep track of who created this
+ TBD: Garbage collection. Should hook a method into twisted's
+ calllater and get it to do cleanup.
+
+ - Stateless objects: Need not keep track of who created this
object. Should verify whether this object is published by a
given service.
@@ -30,77 +30,78 @@
the object (location). Should keep track of which service the
object is published on. Cannot be garbage-collected. The object
can timeout and kill itself.
-
- """
+
+ """
- def __init__(self, Broker):
- self.Broker = Broker
- self.ObjDB = {}
- self.Broker.ObjRepo = self
+ def __init__(self, Broker):
+ self.Broker = Broker
+ self.ObjDB = {}
+ self.StatelessObjDB={}
+ self.Broker.ObjRepo = self
-
- def getObject(self, ObjID):
- """
- I return an object from the ObjDB with the given ObjID
- """
-
- try:
- o = self.ObjDB[ObjID]
- return o[0]
- except KeyError:
- raise BasesObjRepoError("Bad object-id")
-
-
- def addObject(self, Object, OwnerID):
- """
- I put the given object into the ObjDB and return an ObjID
+
+ def getObject(self, ObjID):
+ """
+ I return an object from the ObjDB with the given ObjID
+ """
+
+ try:
+ o = self.ObjDB[ObjID]
+ return o[0]
+ except KeyError:
+ raise BasesObjRepoError("Bad object-id")
+
+
+ def addObject(self, Object, OwnerID):
+ """
+ I put the given object into the ObjDB and return an ObjID
- @param Object: The object that should be stored
+ @param Object: The object that should be stored
- @param CallerID: A string that identifies the owner. Should be
- the KeyID of the client application when available. Else this
- should be generated by the protocol-factory.
- """
+ @param CallerID: A string that identifies the owner. Should be
+ the KeyID of the client application when available. Else this
+ should be generated by the protocol-factory.
+ """
- m = ""
- # We make sure we dont accidentally kill another
- # object. Checking for collisions is extreme paranoia.
- while True:
- s = "%s%s%s" % (id(Object), datetime.utcnow(), os.urandom(4))
- m = shafunc(s).hexdigest()
- if not self.ObjDB.has_key(m):
- break
- o = (Object, datetime.utcnow(), OwnerID)
- self.ObjDB[m] = o
- return m
+ m = ""
+ # We make sure we dont accidentally kill another
+ # object. Checking for collisions is extreme paranoia.
+ while True:
+ s = "%s%s%s" % (id(Object), datetime.utcnow(), os.urandom(4))
+ m = shafunc(s).hexdigest()
+ if not self.ObjDB.has_key(m):
+ break
+ o = (Object, datetime.utcnow(), OwnerID)
+ self.ObjDB[m] = o
+ return m
- def addStaticObject(self, ObjID, Object, OwnerID):
- """
- I let the caller choose an object-id for the given
- object. This is not for normal use. This is for internal use.
- """
- if self.ObjDB.has_key(ObjID):
- raise BasesObjRepoError("ObjectID=%s already exists" % (ObjID))
- o = (Object, datetime.utcnow(), OwnerID)
- self.ObjDB[ObjID] = o
-
-
- def removeObject(self, ObjID):
- del(self.ObjDB[ObjID])
+ def addStaticObject(self, ObjID, Object, OwnerID):
+ """
+ I let the caller choose an object-id for the given
+ object. This is not for normal use. This is for internal use.
+ """
+ if self.ObjDB.has_key(ObjID):
+ raise BasesObjRepoError("ObjectID=%s already exists" % (ObjID))
+ o = (Object, datetime.utcnow(), OwnerID)
+ self.ObjDB[ObjID] = o
+
+
+ def removeObject(self, ObjID):
+ del(self.ObjDB[ObjID])
-
-
- def updateObject(self, ObjID):
- """
- I recompute a new hash key for the given object and refile it
- in the DB.
- """
-
- o = self.ObjDB[ObjectID]
- s = "%s%s%s" % (id(o(0)), datetime.utcnow(), os.urandom(4))
- o1 = (o(0), datetime.utcnow(), o(2))
- m = shafunc(s).hexdigest()
- del(self.ObjDB[ObjID])
- self.ObjDB[m] = o1
- return m
+
+
+ def updateObject(self, ObjID):
+ """
+ I recompute a new hash key for the given object and refile it
+ in the DB.
+ """
+
+ o = self.ObjDB[ObjectID]
+ s = "%s%s%s" % (id(o(0)), datetime.utcnow(), os.urandom(4))
+ o1 = (o(0), datetime.utcnow(), o(2))
+ m = shafunc(s).hexdigest()
+ del(self.ObjDB[ObjID])
+ self.ObjDB[m] = o1
+ return m
Modified: trunk/bases/synclient/app.py
===================================================================
--- trunk/bases/synclient/app.py 2009-02-26 07:43:30 UTC (rev 9)
+++ trunk/bases/synclient/app.py 2009-02-26 07:46:56 UTC (rev 10)
@@ -22,20 +22,12 @@
self.DirClient = services.BasesDirectoryClient(conf['dircache'])
self.DirClient.Broker = self.Broker
self.DirClient.addServers(slist)
- print self.DirClient.slist
def getBasesComponent(self, ComponentName):
ci = self.DirClient.getComponentInfo(ComponentName)
- print "Got component-info:", ci
oid = self.DirClient.getComponentOID(ComponentName)
- print "Got Object-ID:", oid
i = BasesComponentInterface(interface=ci['interface'])
p = BasesComponentServerProxy(ci["location"], i, oid, self.Broker)
return p
-
- def __del__(self):
- self.Broker.stop()
- self.Broker = None
- self.DirClient = None
Modified: trunk/bases/synclient/broker.py
===================================================================
--- trunk/bases/synclient/broker.py 2009-02-26 07:43:30 UTC (rev 9)
+++ trunk/bases/synclient/broker.py 2009-02-26 07:46:56 UTC (rev 10)
@@ -59,7 +59,6 @@
# Construct the message-list
msg = [BASES_METHOD_CALL, MsgID, ObjID, MethodName, args, kwargs]
- print msg
# get a transport
t = self.getTransport(location)
# send it across
@@ -72,11 +71,11 @@
raise BasesBrokerError ("Response for unexpected msg-id %s" \
% (r[BASES_MSG_MSGID]))
if r[BASES_MSG_MSGTYPE] == BASES_METHOD_ERESPONSE:
- raise BasesComponentError(r[BASES_MSG_RESPONSE])
- except Exception, e:
+ raise BasesComponentError(str(r[BASES_MSG_RESPONSE]))
+ except:
t.conn.close()
del self.out_transports[location]
- raise Exception(e)
+ raise
return r[BASES_MSG_RESPONSE]
@@ -97,8 +96,7 @@
return t
def stop(self):
+ print "cleaning up %d transports" % (len(self.out_transports))
for l in self.out_transports:
- self.out_transports[l].conn.shutdown()
self.out_transports[l].conn.close()
- self.out_transports[l].conn = None
del(self.out_transports[l])
Modified: trunk/bases/synclient/services.py
===================================================================
--- trunk/bases/synclient/services.py 2009-02-26 07:43:30 UTC (rev 9)
+++ trunk/bases/synclient/services.py 2009-02-26 07:46:56 UTC (rev 10)
@@ -41,7 +41,7 @@
ci = proxy.getComponentInfo(ComponentName)
break
except BasesError,e:
- print e
+ pass
except:
raise
Modified: trunk/bases/synclient/transports/bbtrans.py
===================================================================
--- trunk/bases/synclient/transports/bbtrans.py 2009-02-26 07:43:30 UTC (rev 9)
+++ trunk/bases/synclient/transports/bbtrans.py 2009-02-26 07:46:56 UTC (rev 10)
@@ -63,7 +63,7 @@
self._InMsgQueue = []
self._InitDone = False
self._Msg_TransitState = STATE_GET_HOLA
- self.Debug = True
+ self.Debug = False
self._Buffer = None
def connectionMade(self):
@@ -164,7 +164,6 @@
@return tuple(int, int): A tuple of (length, type). (1) length
of the incoming message and (2) type of the incoming message"""
- print "Checking for hOla"
try:
hola,length,msgtype = struct.unpack(BBTRANS_HOLA_FMT,data)
except:
@@ -291,4 +290,3 @@
self.conn.send(''.join((hola,emsg)))
self._InitDone = True
- print "Done init"
Modified: trunk/test/test-echo/server.ini
===================================================================
--- trunk/test/test-echo/server.ini 2009-02-26 07:43:30 UTC (rev 9)
+++ trunk/test/test-echo/server.ini 2009-02-26 07:46:56 UTC (rev 10)
@@ -3,8 +3,8 @@
# daemon stuff
userid=1
groupid=1
-pidfile=/home/joe/work/bases/test/bases-server.pid
-log=/home/joe/work/bases/test/bases-server.log
+pidfile=/home/joe/work/bases/bases/test/test-echo/bases-server.pid
+log=/home/joe/work/bases/bases/test/test-echo/bases-server.log
log_keep_days = 5
# a custom prefix string for this server
syslog_prefix = bases-server
@@ -13,8 +13,8 @@
verbosity=2
# component repo for Inter-Component interaction locally.
-interface_repo = /home/joe/work/bases/test/cirepo
-bases_cache = /home/joe/work/bases/test/cache
+interface_repo = /home/joe/work/bases/bases/test/test-echo/cirepo
+bases_cache = /home/joe/work/bases/bases/test/test-echo/cache
[service:serv1]
# network options
protocol=bbtrans
@@ -22,4 +22,4 @@
port=10000
# component repo
-interface_repo=/home/joe/work/bases/test/cirepo
+interface_repo=/home/joe/work/bases/bases/test/test-echo/cirepo
Modified: trunk/test/test-echo/test.py
===================================================================
--- trunk/test/test-echo/test.py 2009-02-26 07:43:30 UTC (rev 9)
+++ trunk/test/test-echo/test.py 2009-02-26 07:46:56 UTC (rev 10)
@@ -1,23 +1,59 @@
+import thread, time
+from datetime import datetime
+
import sys
sys.path.append("/home/joe/work/bases/bases/")
-
from bases.synclient import app
conf = {"bases-servers":"bbtrans:127.0.0.1:10000",
"dircache":"/home/joe/work/bases/test/cache"
}
-def main():
- testEcho()
+lock = thread.allocate_lock()
+lst = [0]
+
+
+def main(nthreads):
+ t1 = datetime.utcnow()
+ i = 0
+ while i < nthreads:
+ thread.start_new_thread(serialEcho, ())
+ i += 1
+
+ print "Started %d threads. Waiting for them to finish .." % (nthreads)
+ go = 1
+ while go:
+ time.sleep(0.3)
+ lock.acquire()
+ if lst[0] == i:
+ go = 0
+ lock.release()
+
+ t2 = datetime.utcnow()
+ print "Time taken:", t2 - t1
+
+def serialEcho():
+ try:
+ for i in range(0,1000):
+ try:
+ testEcho()
+ except Exception,e:
+ print e
+ lock.acquire()
+ lst[0] += 1
+ lock.release()
+ except Exception,e:
+ print e
+
def testEcho():
+ time.sleep(1)
a = app.BasesClientApp(conf)
o = a.getBasesComponent("org.bases.apps.TestComponent")
p = o.testEcho("Hola")
- print "Returned: %s" % (p)
-
-
+ print p
+
if __name__ == "__main__":
- main()
+ main(10)
|
|
From: <joe...@us...> - 2009-02-26 07:43:36
|
Revision: 9
http://bases.svn.sourceforge.net/bases/?rev=9&view=rev
Author: joe_steeve
Date: 2009-02-26 07:43:30 +0000 (Thu, 26 Feb 2009)
Log Message:
-----------
A simple Echo Component works :)
Modified Paths:
--------------
trunk/ChangeLog
trunk/TODO
trunk/bases/__init__.py
trunk/bases/app.py
trunk/bases/core/broker.py
trunk/bases/core/component.py
trunk/bases/core/proxy.py
trunk/bases/core/transports/bbtrans.py
trunk/bases/core/transports/transports.py
trunk/bases/services/directory.py
trunk/bases/services/objrepo.py
trunk/bases/synclient/app.py
trunk/bases/synclient/broker.py
trunk/bases/synclient/component.py
trunk/bases/synclient/services.py
trunk/bases/synclient/transports/bbtrans.py
trunk/bases/synclient/transports/transports.py
trunk/src/bases-server
trunk/src/server.ini
Added Paths:
-----------
trunk/bases/common.py
trunk/bases/errors.py
trunk/bases/synclient/transports/__init__.py
trunk/docs/
trunk/docs/bases-interface.text
trunk/test/test-echo/
trunk/test/test-echo/cirepo/
trunk/test/test-echo/cirepo/TestComponent.interface
trunk/test/test-echo/crepo/
trunk/test/test-echo/crepo/TestComponent.py
trunk/test/test-echo/server.ini
trunk/test/test-echo/test.py
Modified: trunk/ChangeLog
===================================================================
--- trunk/ChangeLog 2009-02-26 07:12:16 UTC (rev 8)
+++ trunk/ChangeLog 2009-02-26 07:43:30 UTC (rev 9)
@@ -1,3 +1,17 @@
+2008-02-09 Joe Steeve <joe...@gm...>
+
+ * src/bases-server (sanitizeConfigOptions): changed the
+ arrangement of the 'conf' structure so that we don't have to
+ iterate every time we want a service-description
+
+2008-02-03 Joe Steeve <joe...@gm...>
+
+ * bases/synclient/services.py (BasesDirectoryClient): initial code
+ to behave as a client to the bases-directory-service.
+
+ * bases/synclient/transports/transports.py (getTransport): added
+ to create a transport object based on the given protocol name.
+
2008-01-23 Joe Steeve <joe...@gm...>
* src/bases-server (sanitizeConfigOptions): convert config option
Modified: trunk/TODO
===================================================================
--- trunk/TODO 2009-02-26 07:12:16 UTC (rev 8)
+++ trunk/TODO 2009-02-26 07:43:30 UTC (rev 9)
@@ -1,9 +1,23 @@
+* bases-server
+- Support the creation of a manhole service
+- Support manhole service over SSL
+
+
* Broker related stuff
+
* Things to verify
- Does the logger rotate the logs?
- Does the privilege dropping code work?
+
+* Synclient
+
+- Creating SSL context. Make sure it actually works.
+
+* ComponentProxy
+
+- Complete Interface validation
Modified: trunk/bases/__init__.py
===================================================================
--- trunk/bases/__init__.py 2009-02-26 07:12:16 UTC (rev 8)
+++ trunk/bases/__init__.py 2009-02-26 07:43:30 UTC (rev 9)
@@ -0,0 +1 @@
+
Modified: trunk/bases/app.py
===================================================================
--- trunk/bases/app.py 2009-02-26 07:12:16 UTC (rev 8)
+++ trunk/bases/app.py 2009-02-26 07:43:30 UTC (rev 9)
@@ -6,10 +6,10 @@
from bases.core import broker
-
+
def getBasesComponent(compName):
- """
- Get a bases component object.
- """
-
- pass
+ """
+ Get a bases component object.
+ """
+
+ pass
Added: trunk/bases/common.py
===================================================================
--- trunk/bases/common.py (rev 0)
+++ trunk/bases/common.py 2009-02-26 07:43:30 UTC (rev 9)
@@ -0,0 +1,4 @@
+
+
+BASES_OID_COMPONENTDIR = "1"
+
Modified: trunk/bases/core/broker.py
===================================================================
--- trunk/bases/core/broker.py 2009-02-26 07:12:16 UTC (rev 8)
+++ trunk/bases/core/broker.py 2009-02-26 07:43:30 UTC (rev 9)
@@ -4,17 +4,19 @@
of sent messages, and acts on obtaining a response."""
from datetime import datetime
+import os
from twisted.internet import defer, error, protocol
from bases.core.transports import transports
+from bases.errors import *
shafunc = None
try:
- import hashlib
- shafunc = hashlib.sha512
+ import hashlib
+ shafunc = hashlib.sha512
except:
- import sha
- shafunc = sha.sha
+ import sha
+ shafunc = sha.sha
## Order of values in the message-list
@@ -29,268 +31,327 @@
## Values for the MsgType in a message-list
BASES_METHOD_CALL = 1
BASES_METHOD_RESPONSE = 2
+BASES_METHOD_ERESPONSE = 3
-## Transport types we support
-BASES_TRANS_BBTRANS = 1
-## Convenience strings
-BASES_TRANSPORTS_STR = ["",
- "bbtrans"]
-
class BasesBrokerError(Exception):
pass
class BasesBroker:
- """
- The BasesBroker keeps track of messages to/from an Object. It does
- not care about the details of the message. There are two types of
- messages that can come to a broker (either from a remote or a
- local)
+ """
+ The BasesBroker keeps track of messages to/from an Object. It does
+ not care about the details of the message. There are two types of
+ messages that can come to a broker (either from a remote or a
+ local)
- a. BASES_METHOD_CALL: This is a message which initiates a
- method-call.
+ a. BASES_METHOD_CALL: This is a message which initiates a
+ method-call.
- b. BASES_METHOD_RESPONSE: This is a message which holds a
- 'method-response'.
+ b. BASES_METHOD_RESPONSE: This is a message which holds a
+ 'method-response'.
- Every 'method-call' is completed with a 'method-response'.
+ Every 'method-call' is completed with a 'method-response'.
- When a local object tries to call a method on a remote object, the
- broker on the calling side (calling-broker) generates a unique
- MsgID for the method-call. This MsgID is used purely for message
- call-response tracking. The calling-broker generates a
- 'BASES_METHOD_CALL' type message and sends it across to the other
- end. It also keeps a note of the MsgID. The remote-broker that
- gets this message, calls the appropriate object's method and gives
- its response back with a 'BASES_METHOD_RESPONSE' type message. A
- 'BASES_METHOD_RESPONSE' type message holds the same MsgID that was
- given in the respective 'BASES_METHOD_CALL' message. This is used
- by the calling-broker to send the response back to the correct
- object that is waiting for the response.
+ When a local object tries to call a method on a remote object, the
+ broker on the calling side (calling-broker) generates a unique
+ MsgID for the method-call. This MsgID is used purely for message
+ call-response tracking. The calling-broker generates a
+ 'BASES_METHOD_CALL' type message and sends it across to the other
+ end. It also keeps a note of the MsgID. The remote-broker that
+ gets this message, calls the appropriate object's method and gives
+ its response back with a 'BASES_METHOD_RESPONSE' type message. A
+ 'BASES_METHOD_RESPONSE' type message holds the same MsgID that was
+ given in the respective 'BASES_METHOD_CALL' message. This is used
+ by the calling-broker to send the response back to the correct
+ object that is waiting for the response.
- Based on the MsgType, there can be two forms of message-object.
+ Based on the MsgType, there can be two forms of message-object.
- 1) MsgType = BASES_METHOD_CALL
-
- '[MsgType, MsgID, ObjID, MethodName, args, kwargs]'
-
- int MsgType: This says whether a message is a 'method-call' or a
- 'method-response'.
+ 1) MsgType = BASES_METHOD_CALL
+
+ '[MsgType, MsgID, ObjID, MethodName, args, kwargs]'
+
+ int MsgType: This says whether a message is a 'method-call' or a
+ 'method-response'.
- string MsgID: An ID uniquely identifying a message. This ID is
- used to map a 'method-response' to its 'method-call'. This is used
- by the BasesBroker to keep track of messages and their respective
- replies.
+ string MsgID: An ID uniquely identifying a message. This ID is
+ used to map a 'method-response' to its 'method-call'. This is used
+ by the BasesBroker to keep track of messages and their respective
+ replies.
- int ObjID: An ID that uniquely identifies an object to which the
- 'method-call' should be sent to. This is used to obtain the
- correct object from the object repository and deliver the message
- to it.
-
- string MethodName: The name of the method that should be called.
+ int ObjID: An ID that uniquely identifies an object to which the
+ 'method-call' should be sent to. This is used to obtain the
+ correct object from the object repository and deliver the message
+ to it.
+
+ string MethodName: The name of the method that should be called.
- list args: The list of arguments to the method.
+ list args: The list of arguments to the method.
- dict kwargs: A dictionary of keyword arguments.
+ dict kwargs: A dictionary of keyword arguments.
- 2) MsgType = BASES_METHOD_RESPONSE
+ 2) MsgType = BASES_METHOD_RESPONSE
- '[MsgType, MsgID, ResponseObject]'
+ '[MsgType, MsgID, ResponseObject]'
- string MsgID: The MsgID for which this is a response.
+ string MsgID: The MsgID for which this is a response.
- ResponseObject: The object that was returned as a response.
- """
+ ResponseObject: The object that was returned as a response.
+ """
- def __init__(self):
- self.in_MethodCalls = {}
- self.out_MethodCalls = {}
- self.in_Transports = {}
- self.out_Transports = {}
- self.ObjRepo = None
-
-
- def processInMessage(self, dmesg, trans):
- """I process a given message object. This method is called
- from a bases-transport. The message is either a
- BASES_METHOD_CALL or a BASES_METHOD_RESPONSE.
+ def __init__(self):
+ self.in_MethodCalls = {}
+ self.out_MethodCalls = {}
+ self.in_Transports = {}
+ self.out_Transports = {}
+ self.ObjRepo = None
+
+
+ def processInMessage(self, dmesg, trans):
+ """I process a given message object. This method is called
+ from a bases-transport. The message is either a
+ BASES_METHOD_CALL or a BASES_METHOD_RESPONSE.
- @param dmesg: a message object from the transport-protocol
+ @param dmesg: a message object from the transport-protocol
- @param trans: a bases-transport object that has called us
- """
-
- if dmesg[BASES_MSG_TYPE] == BASES_METHOD_CALL:
- self._handleInMethodCall(dmesg,trans)
- elif dmesg[BASES_MSG_TYPE] == BASES_METHOD_RESPONSE:
- self._handleOutMethodResponse(dmesg,trans)
- else:
- raise BasesBrokerError \
- ("Unknown MsgType=%d in message" % (dmesg[0]))
+ @param trans: a bases-transport object that has called us
+ """
+
+ if dmesg[BASES_MSG_MSGTYPE] == BASES_METHOD_CALL:
+ self._handleInMethodCall(dmesg,trans)
+ elif dmesg[BASES_MSG_MSGTYPE] == BASES_METHOD_RESPONSE:
+ self._handleOutMethodResponse(dmesg,trans)
+ elif dmesg[BASES_MSG_MSGTYPE] == BASES_METHOD_ERESPONSE:
+ self._handleOutMethodErrResponse(dmesg,trans)
+ else:
+ raise BasesBrokerError \
+ ("Unknown MsgType=%d in message" % (dmesg[0]))
- def _handleInMethodCall(self, dmesg, trans):
- """
- I handle an incoming MethodCall. I lookup the requested
- object, and do the method call.
+ def _handleInMethodCall(self, dmesg, trans):
+ """
+ I handle an incoming MethodCall. I lookup the requested
+ object, and do the method call.
- @param dmesg: The message from the transport
+ @param dmesg: The message from the transport
- @param trans: The bases-transport object that got this
- incoming message.
- """
+ @param trans: The bases-transport object that got this
+ incoming message.
+ """
- # We generate an internal ID to index this message. We cannot
- # rely on the uniqueness of the MsgID that was provided by the
- # remote server.
- mid = ""
- while True:
- mid_s = "%s%s%s%s" % (id(trans), dmesg[BASES_MSG_MSGID],
- datetime.utcnow(), os.urandom(4))
- mid = shafunc(mid_s).hexdigest()
- if not self.in_MethodCalls.has_key(mid):
- break
-
- # Get the object from the 'object repository' and get the
- # MethodCall started.
- o = self.ObjRepo.getObject(dmesg[BASES_MSG_OBJID])
- d = o.callMethod(dmesg[BASES_MSG_METHODNAME],
- dmesg[BASES_MSG_ARGS], dmesg[BASES_MSG_KWARGS])
- # Add message info to in_MethodCalls
- self.in_MethodCalls[mid] = (dmesg[BASES_MSG_MSGID], trans,
- datetime.utcnow())
- # Add mid to in_Transports
- if not self.in_Transports.has_key(id(trans)):
- self.in_Transports[id(trans)] = []
- self.in_Transports[id(trans)].append(mid)
- d.addCallback(self._handleInMethodResponse, mid)
+ # We generate an internal ID to index this message. We cannot
+ # rely on the uniqueness of the MsgID that was provided by the
+ # remote server.
+ mid = ""
+ while True:
+ mid_s = "%s%s%s%s" % (id(trans), dmesg[BASES_MSG_MSGID],
+ datetime.utcnow(), os.urandom(4))
+ mid = shafunc(mid_s).hexdigest()
+ if not self.in_MethodCalls.has_key(mid):
+ break
+ # Add message info to in_MethodCalls
+ self.in_MethodCalls[mid] = (dmesg[BASES_MSG_MSGID], trans,
+ datetime.utcnow())
+ # Add mid to in_Transports
+ if not self.in_Transports.has_key(id(trans)):
+ self.in_Transports[id(trans)] = []
+ self.in_Transports[id(trans)].append(mid)
- def _handleInMethodResponse(self, response, mid):
- """
- I handle the response for an incoming MethodCall. This
- response should be sent to the remote bases-server that called
- us.
+ # Get the object from the 'object repository' and get the
+ # MethodCall started.
+ d = None
+ try:
+ o = self.ObjRepo.getObject(dmesg[BASES_MSG_OBJID])
+ print dmesg
+ d = o.callMethod(mid, dmesg[BASES_MSG_METHODNAME],
+ dmesg[BASES_MSG_ARGS], dmesg[BASES_MSG_KWARGS])
+ except:
+ self.in_Transports[id(trans)].remove(mid)
+ del(self.in_MethodCalls[mid])
+ raise
- @param object response: The result that is obtained from the
- method call.
+ d.addCallback(self._handleInMethodResponse, mid).addErrback(
+ self._handleInMethodErrResponse,mid)
- @param string mid: The internal-id for the incoming message.
- """
- MsgID, trans, ts = self.in_MethodCalls[mid]
- dmesg = [BASES_METHOD_RESPONSE, MsgID, response]
- try:
- trans.queueMsg(dmesg)
- finally:
- self.in_Transports[id(trans)].remove(mid)
- del(self.in_MethodCalls[mid])
+ def _handleInMethodResponse(self, response, mid):
+ """
+ I handle the response for an incoming MethodCall. This
+ response should be sent to the remote bases-server that called
+ us.
+ @param object response: The result that is obtained from the
+ method call.
- def callRemote(self, location, ObjID, MethodName, *args, **kwargs):
- """
- I am called by a BasesComponentServerProxy object. I do a
- method call to a component-object on a remote bases-server
+ @param string mid: The internal-id for the incoming message.
+ """
- @param tuple location: The location tuple
+ MsgID, trans, ts = self.in_MethodCalls[mid]
+ dmesg = [BASES_METHOD_RESPONSE, MsgID, response]
+ print "returning: ", dmesg
+ try:
+ trans.queueMsg(dmesg)
+ finally:
+ self.in_Transports[id(trans)].remove(mid)
+ del(self.in_MethodCalls[mid])
- @param str ObjID: the ID of the component-object on the remote
- bases-erver.
- @param str MethodName: Name of the method on the object=ObjID to
- be called on the remote side.
+ def _handleInMethodErrResponse(self, response, mid):
+ """
+ I handle the error response for an incoming MethodCall. This
+ response should be sent to the remote bases-server that called
+ us.
- @param list args: A list of arguments
-
- @param dict kwargs: A dict of keyword arguments
+ @param object response: The result that is obtained from the
+ method call.
- """
- t = self.getTransport(location)
- d = defer.Deferred()
+ @param string mid: The internal-id for the incoming message.
+ """
- # Generate a unique MsgID
- MsgID = ""
- while True:
- MsgID_s = "%s%s%s%s" % (id(d), ObjID, datetime.utcnow(),
- os.urandom(4))
- MsgID = shafunc(MsgID_s).hexdigest()
- if not self.out_MethodCalls.has_key(MsgID):
- break
-
- msg = [BASES_METHOD_CALL, MsgID, ObjectID, MethodName, args, kwargs]
- t.queueMsg(msg)
- # Book-keeping
- self.out_MethodCalls[MsgID] = (d, datetime.utcnow())
- if not self.out_Transports.has_key(id(t)):
- self.out_Transports[id(t)] = {"trans":t, "msgs":[]}
- self.out_Transports[id(t)]['msgs'].append(MsgID)
- return d
+ MsgID, trans, ts = self.in_MethodCalls[mid]
+ dmesg = [BASES_METHOD_ERESPONSE, MsgID, response]
+ print "returning error: ", dmesg
+ try:
+ trans.queueMsg(dmesg)
+ finally:
+ self.in_Transports[id(trans)].remove(mid)
+ del(self.in_MethodCalls[mid])
-
- def _handleOutMethodResponse(self, dmesg, trans):
- """
- I handle the reponse for an outgoing MethodCall from this
- bases-server. I lookup the deferred associated with the
- message, and fire it with the provided response.
- @param dmesg: The message list from the transport
+ def callRemote(self, location, ObjID, MethodName, args, kwargs):
+ """
+ I am called by a BasesComponentServerProxy object. I do a
+ method call to a component-object on a remote bases-server
- @param trans: The bases-transport that got this message
- """
- mref = None
- try:
- mref = self.out_MethodCalls[dmesg[BASES_MSG_MSGID]]
- except:
- raise BasesBrokerError \
- ("Unexpected response to MsgId=%d from %s" %
- (dmesg[BASES_MSG_MSGID], trans.remoteAddr))
+ @param tuple location: The location tuple
- try:
- mref[0].Callback(dmesg[BASES_MSG_RESPONSE])
- finally:
- del(self.out_MethodCalls[dmesg[BASES_MSG_MSGID]])
- self.out_Transports[id(trans)]['msgs'].remove(dmesg[BASES_MSG_MSGID])
+ @param str ObjID: the ID of the component-object on the remote
+ bases-server.
-
- def getTransport(self, LocationTuple):
- """Get a transport object for the given LocationTuple
+ @param str MethodName: Name of the method on the object=ObjID to
+ be called on the remote side.
- @param LocationTuple: A tuple in the following format
- (protocol, host/ip, port)
+ @param list args: A list of arguments
+
+ @param dict kwargs: A dict of keyword arguments
- @rtype: A BasesTransport object that can talk to the given
- location. If there is an existing connection, then that object
- is returned.
- """
- for tid in self.out_Transports:
- if self.out_Transports[tid]['trans'].canHandle(LocationTuple):
- return self.out_Transports[tid]['trans']
+ """
+ t = self.getTransport(location)
+ d = defer.Deferred()
- secure = False
- tmap = transports.basesTransportsMap
- if LocationTuple[0] in tmap['secure']:
- secure = True
-########## incomplete here
+ # Generate a unique MsgID
+ MsgID = ""
+ while True:
+ MsgID_s = "%s%s%s%s" % (id(d), ObjID, datetime.utcnow(),
+ os.urandom(4))
+ MsgID = shafunc(MsgID_s).hexdigest()
+ if not self.out_MethodCalls.has_key(MsgID):
+ break
+
+ msg = [BASES_METHOD_CALL, MsgID, ObjID, MethodName, args, kwargs]
+ t.queueMsg(msg)
+ # Book-keeping
+ self.out_MethodCalls[MsgID] = (d, datetime.utcnow())
+ if not self.out_Transports.has_key(id(t)):
+ self.out_Transports[id(t)] = {"trans":t, "msgs":[]}
+ self.out_Transports[id(t)]['msgs'].append(MsgID)
+ return d
-
+
+ def _handleOutMethodResponse(self, dmesg, trans):
+ """
+ I handle the response for an outgoing MethodCall from this
+ bases-server. I lookup the deferred associated with the
+ message, and fire it with the provided response.
- def cleanupTransReferences(self, trans, server=False):
- """
- I should be called when a transport is dying to clear out its
- references and do cleanup job.
- """
- if server is True:
- # Should clear up all in-coming related book-keeping
- pass
- else:
- # Should clear up all out-going related book-keeping
- pass
-
- #TBD: cleanup references of the transport from the in_MethodCalls
- #TBD: cleanup references of the transport from the in_Transports
- pass
+ @param dmesg: The message list from the transport
-
+ @param trans: The bases-transport that got this message
+ """
+ mref = None
+ try:
+ mref = self.out_MethodCalls[dmesg[BASES_MSG_MSGID]]
+ except:
+ raise BasesBrokerError \
+ ("Unexpected response to MsgId=%d from %s" %
+ (dmesg[BASES_MSG_MSGID], trans.remoteAddr))
+
+ try:
+ mref[0].callback(dmesg[BASES_MSG_RESPONSE])
+ finally:
+ del(self.out_MethodCalls[dmesg[BASES_MSG_MSGID]])
+ self.out_Transports[id(trans)]['msgs'].remove(dmesg[BASES_MSG_MSGID])
+
+
+ def _handleOutMethodErrResponse(self, dmesg, trans):
+ """
+ I handle the error response for an outgoing MethodCall from
+ this bases-server. I lookup the deferred associated with the
+ message, and fire it with the provided response.
+
+ @param dmesg: The message list from the transport
+
+ @param trans: The bases-transport that got this message
+ """
+ mref = None
+ try:
+ mref = self.out_MethodCalls[dmesg[BASES_MSG_MSGID]]
+ except:
+ raise BasesBrokerError \
+ ("Unexpected response to MsgId=%d from %s" %
+ (dmesg[BASES_MSG_MSGID], trans.remoteAddr))
+
+ try:
+ mref[0].errback(dmesg[BASES_MSG_RESPONSE])
+ finally:
+ del(self.out_MethodCalls[dmesg[BASES_MSG_MSGID]])
+ self.out_Transports[id(trans)]['msgs'].remove(dmesg[BASES_MSG_MSGID])
+
+
+ def getTransport(self, LocationTuple):
+ """Get a transport object for the given LocationTuple
+
+ @param LocationTuple: A tuple in the following format
+ (protocol, host/ip, port)
+
+ @rtype: A BasesTransport object that can talk to the given
+ location. If there is an existing connection, then that object
+ is returned.
+ """
+ for tid in self.out_Transports:
+ if self.out_Transports[tid]['trans'].canHandle(LocationTuple):
+ return self.out_Transports[tid]['trans']
+
+ secure = False
+ tmap = transports.basesTransportsMap
+ if LocationTuple[0] in tmap['secure']:
+ secure = True
+########## incomplete here
+
+
+ def cleanupTransReferences(self, trans, server=False):
+ """
+ I should be called when a transport is dying to clear out its
+ references and do its cleanup job.
+ """
+ if server is True:
+ # Should clear up all in-coming related book-keeping
+ pass
+ else:
+ # Should clear up all out-going related book-keeping
+ pass
+
+ #TBD: cleanup references of the transport from the in_MethodCalls
+ #TBD: cleanup references of the transport from the in_Transports
+ pass
+
+
+ def getSnameOfMsg(self, MsgID):
+ trans = self.in_MethodCalls[MsgID][1]
+ return trans.factory.basesService
+
+
class BasesAsyncPoser:
- pass
+ pass
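
The broker docstring above describes the wire format as a plain list whose first element is the message type. A sketch of what a call and its matching response could look like; the field order follows the BASES_MSG_* indices defined in broker.py, while the concrete values are made up purely for illustration.

BASES_METHOD_CALL = 1
BASES_METHOD_RESPONSE = 2

call = [BASES_METHOD_CALL,     # MsgType
        "a1b2c3",              # MsgID (a sha hexdigest in the real code)
        "42",                  # ObjID of the target component-object
        "testEcho",            # MethodName
        ["Hola"],              # args
        {}]                    # kwargs

# The remote broker answers with the same MsgID so the calling broker can
# match the response to its pending deferred.
response = [BASES_METHOD_RESPONSE, call[1], "Hola"]
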
Modified: trunk/bases/core/component.py
===================================================================
--- trunk/bases/core/component.py 2009-02-26 07:12:16 UTC (rev 8)
+++ trunk/bases/core/component.py 2009-02-26 07:43:30 UTC (rev 9)
@@ -1,151 +1,107 @@
+import elementtree.ElementTree as ET
+from bases.errors import *
+
BASES_STATELESS_COMPONENT = 1
BASES_STATEFUL_COMPONENT = 2
BASES_STATEFULLONGLIVED_COMPONENT = 3
-class BasesComponentError(Exception):
- pass
-
class BasesComponent:
- def __init__(self):
- self.componentInit()
+ def __init__(self):
+ self.componentInit()
- def componentInit(self):
- raise BasesComponentError("componentInit: Not implemented")
+ def componentInit(self):
+ raise BasesComponentError("componentInit: Not implemented")
- def __del__(self):
- self.componentDel()
+ def __del__(self):
+ self.componentDel()
- def componentDel(self):
- raise BasesComponentError("componentDel: Not implemented")
+ def componentDel(self):
+ raise BasesComponentError("componentDel: Not implemented")
class BasesComponent_Stateless(BasesComponent):
- def __init__(self):
- self.ComponentType = BASES_STATELESS_COMPONENT
- BasesComponent.__init__(self)
+ def __init__(self):
+ self.ComponentType = BASES_STATELESS_COMPONENT
+ BasesComponent.__init__(self)
- def __del__(self):
- BasesComponent.__del__(self)
+ def __del__(self):
+ BasesComponent.__del__(self)
class BasesComponent_Stateful(BasesComponent):
- def __init__(self):
- self.ComponentType = BASES_STATEFUL_COMPONENT
- BasesComponent.__init__(self)
+ def __init__(self):
+ self.ComponentType = BASES_STATEFUL_COMPONENT
+ BasesComponent.__init__(self)
- def __del__(self):
- BasesComponent.__del__(self)
+ def __del__(self):
+ BasesComponent.__del__(self)
class BasesComponent_StatefulLongLived(BasesComponent):
- def __init__(self):
- self.ComponentType = BASES_STATEFULLONGLIVED_COMPONENT
- BasesComponent.__init__(self)
+ def __init__(self):
+ self.ComponentType = BASES_STATEFULLONGLIVED_COMPONENT
+ BasesComponent.__init__(self)
- def __del__(self):
- BasesComponent.__del__(self)
+ def __del__(self):
+ BasesComponent.__del__(self)
-
-class BasesComponentServerProxy:
- """A class to create proxies to remote BasesComponents"""
-
- def __init__(self, Interface, local, instance, transport=None):
- """
- @param Interface: Interface name in the namespace
- @param local: Boolean flag that says whether the proxied
- object is available locally or remote.
- @param instance: When local=True, this points to the instance
- of the component. Else, this should be the object-id obtained
- from the remote bases-server.
+class BasesComponentInterface:
+
+ def __init__ (self, file=None, interface=None):
+ tree = None
+ if file is not None:
+ tree = ET.ElementTree(file=file)
+ elif interface is not None:
+ tree = ET.fromstring(interface)
+ else:
+ raise BasesInterfaceError("interface-description required")
+
+ self.ComponentName = tree.findtext('ComponentName')
+ if self.ComponentName is None:
+ raise BasesInterfaceError("Bad interface: No 'ComponentName'")
+ self.ModuleName = tree.findtext('ModuleName')
+ if self.ModuleName is None:
+ raise BasesInterfaceError("Bad interface: No 'ModuleName'")
+ self.KlassName = tree.findtext('KlassName')
+ if self.KlassName is None:
+ raise BasesInterfaceError("Bad interface: No 'KlassName'")
- @param transport: When local=False, this points to a
- BasesTransport object which can be used to connect to the
- remote bases-server. The provided transport is assumed to be
- configured with the location where the bases-server exists.
+ self.Methods = {}
+ miter = tree.getiterator("Method")
+ for m in miter:
+ piter = m.getiterator("param")
+ plist = []
+ for p in piter:
+ pdict={}
+ pdict['name'] = p.attrib.get('name')
+ if pdict['name'] is None:
+ raise BasesInterfaceError("%s:%s param needs 'name'" % \
+ (self.ComponentName,
+ m.attrib.get('name')))
+ pdict['type'] = p.attrib.get('type')
+ if pdict['type'] is None:
+ raise BasesInterfaceError("%s:%s:%s param needs 'type'" % \
+ (self.ComponentName,
+ m.attrib.get('name'),
+ pdict['name']))
+ plist.append(pdict)
+ self.Methods[m.attrib.get('name')] = plist
- """
- self._Interface = Interface
- self._LocalObject = local
- self._Object = instance
- self._Transport = transport
- if local == False and transport == None:
- raise "Remote components need a Transport"
+ def validateMethodInterface(self, MethodName, args, kwargs):
+ """
+ I validate a Method's parameters with the stored interface.
+ """
+ try:
+ plist = self.Methods[MethodName]
+ except KeyError:
+ raise BasesInterfaceError("Method=%s not in %s" %
+ (MethodName, self.ComponentName))
+ return
-
- def __getattr__(self, MethodName):
- """I simply route all component-method invocations to the
- MethodCallHandler."""
-
- self._CalledMethod = MethodName
- return self.__MethodCallHandler
-
- def __MethodCallHandler(self, *args, **kwargs):
- # Validate the called method and its parameters against the
- # published interface.
- bValidInterface = bases_ValidateInterface(self._Interface,
- self._CalledMethod,
- args, kwargs)
- if bValidInterface is False:
- raise ""
- if self._LocalObject is True:
- m = getattr (self._Object, self._CalledMethod)
- r = m(*args, **kwargs)
- else:
- r = self.__DoRemoteMethod(self._CalledMethod, args, kwargs)
- return r
-
-
- def __DoRemoteMethod(self, *args, **kwargs):
- """I convert the method-call to a list and give it to the
- transport for the remote call. I block the current thread
- until I get a response."""
-
- #TBD:
-
- pass
-
-
-class BasesComponentClientProxy:
- def __init__(self):
-
- pass
-
-
-class BasesComponentFactory:
- """I create component proxy objects and configure them
- properly. Any application that wants to use a component should use
- me to get an instance to the desired component."""
-
- def __init__(self, ComponentName):
- """
- I create a component-proxy instance for the requested component.
-
- @param ComponentName: The full namespace of a component.
- """
-
-
- def _GetLocation(self, ComponentName):
- """I search the ComponentDirectory to find the location of
- this component.
-
- @param ComponentName: The full namespace of a component"""
- pass
-
-
- def _GetInterfaceFromRemote(self, ComponentName, Location):
- """I connect to the remote bases-server given in 'location'
- and fetch the Interface for the given ComponentName
-
- @param ComponentName: The full namespace of a component
-
- @param Location: The location of a given component. Given as
- an Bases-URL"""
- pass
-
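
BasesComponentInterface above pulls ComponentName, ModuleName, KlassName and the Method/param elements out of an XML interface description. The TestComponent.interface file itself is not included in this mail, so the snippet below is only a guess at its shape, parsed with the same elementtree calls used in the diff.

import elementtree.ElementTree as ET

# Hypothetical interface description; the tag and attribute names mirror
# the findtext()/getiterator() calls in BasesComponentInterface.
SKETCH = """
<Interface>
  <ComponentName>org.bases.apps.TestComponent</ComponentName>
  <ModuleName>TestComponent</ModuleName>
  <KlassName>TestComponent</KlassName>
  <Method name="testEcho">
    <param name="text" type="str"/>
  </Method>
</Interface>
"""

tree = ET.fromstring(SKETCH)
print tree.findtext('ComponentName')
for m in tree.getiterator("Method"):
    print m.attrib.get('name'), [p.attrib for p in m.getiterator("param")]
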
Modified: trunk/bases/core/proxy.py
===================================================================
--- trunk/bases/core/proxy.py 2009-02-26 07:12:16 UTC (rev 8)
+++ trunk/bases/core/proxy.py 2009-02-26 07:43:30 UTC (rev 9)
@@ -1,42 +1,155 @@
+"""
+"""
+import threading, sys
+from twisted.internet import reactor, threads
+from bases.errors import *
-class BasesComponentProxyError(Exception):
- pass
+def callInMainThread(func, *args, **kwargs):
+ """
+ """
+ e = threading.Event()
+ l = []
+ def wrapped_func():
+ try:
+ d = func(*args, **kwargs)
+ d.addCallback(deferCallBack).addErrback(deferErrBack)
+ except:
+ l.append(sys.exc_info())
+ l.append(1)
+ e.set()
+ def deferCallBack(result):
+ l.append(result)
+ l.append(0)
+ e.set()
+ def deferErrBack(reason):
+ l.append(reason)
+ l.append(2)
+ e.set()
+
+ reactor.callFromThread(wrapped_func)
+ e.wait()
+ result, status = l
+ if status == 0:
+ return result
+ if status == 1:
+ # Whee! Cross-thread exceptions!
+ raise result[0], result[1], result[2]
+ elif status == 2:
+ raise BasesComponentError(str(result))
+ else:
+ raise BasesError("Unknown status=%d" % (status))
+
+class BasesComponentServerProxy:
+ """
+ A class to create proxies to remote BasesComponents
+ """
+
+ def __init__(self, Interface, instance, location=None, broker=None):
+ """
+ @param Interface: An instance of BasesComponentInterface
+ configured with a valid interface-description. This interface
+ is used to validate outgoing calls and the return values.
-class BasesComponentProxy:
- """
- I behave like a client that is calling a component object's
- methods.
- """
-
- def __init__(self):
- pass
+ @param instance: When 'location' is None, then the
+ ComponentObject is available locally, and this holds its
+ instance.
+ When the ComponentObject is a remote-object, 'location' is not
+ None. And, this holds the Obj-ID of the remote
+ ComponentObject.
- def callMethod(self, MethodName, args, kwargs):
- """
- I invoke a method of name 'MethodName' with the given 'args'
- and 'kwargs' for the configured component. Before calling the
- method I do a validation of the parameters with the published
- interface. Once the parameters are validated, I start the
- appropriate method in a thread, and return a deferred back.
+ @param location: When the proxy is for a locally available
+ ComponentObject, this is None. When the proxy is for a remote
+ ComponentObject, then this should hold the location tuple of
+ the remote component.
- On completion of the method (in the thread), I call the
- callbacks on the deferred.
+ @param broker: When location is not None, then this is the
+ broker that should be used to communicate with the remote
+ bases-server holding the ComponentObject.
+ """
+ self._Interface = Interface
+ self._Object = instance
+ self._Location = location
+ self._Broker = broker
+ if location is not None and broker is None:
+ raise BasesServerProxyError("Remote components need a broker")
- @param MethodName: A string containing the name of the method
- to be invoked.
+
+ def __getattr__(self, MethodName):
+ """
+ I simply route all component-method invocations to the
+ MethodCallHandler.
+ """
+ self._CalledMethod = MethodName
+ return self.__MethodCallHandler
- @param args: A list of arguments that should be passed
- @param kwargs: A dictionary of keyword arguments that should
- be passed to the method.
+ def __MethodCallHandler(self, *args, **kwargs):
+ # This is a blocking call. This blocks on the main-thread
+ # until the remote-call returns with a reply.
+ self._Interface.validateMethodInterface(self._CalledMethod,
+ args, kwargs)
+ if self._Location is None:
+ m = getattr (self._Object, self._CalledMethod)
+ r = m(*args, **kwargs)
+ else:
+ r = callInMainThread(self._Broker.callRemote, self._Location,
+ self._Object, self._CalledMethod, args, kwargs)
+ return r
- @return deferred: A deferred that will notify the completion
- of the method-call with the returned object.
- """
- pass
-
-
-
+
+class BasesComponentClientProxy:
+ """
+ A proxy to emulate a normal caller. The broker delivers the
+ method call to me. I validate the call, and call the appropriate
+ method in a thread. When the call returns, I fire a callback so
+ that the result can be sent to whoever called us.
+
+ I am capable of proxying another peer, i.e. acting as a proxy for a
+ ComponentObject that lives on a different bases-server.
+ """
+
+ def __init__(self, interface, instance, location=None, broker=None):
+ """
+ @param interface: An instance of BasesComponentInterface
+ configured with a valid interface-description. This interface
+ is used to validate the method invocations.
+
+ @param instance: When we are receiving messages for a locally
+ available ComponentObject, this points to the instance of the
+ ComponentObject.
+
+ When we are acting as a proxy to a non-local ComponentObject,
+ this is the Obj-ID of the ComponentObject that we are
+ proxying.
+
+ @param location: This is None when proxying a local
+ component. This contains the location-tuple when we are
+ proxying a ComponentObject that exists on a known peer.
+
+ @param broker: When location is not None, then this is the
+ broker that should be used to communicate with the remote
+ bases-server holding the ComponentObject we are proxying for.
+ """
+ if location is not None and broker is None:
+ raise BasesClientProxyError("Remote components need a broker")
+ self._Interface = interface
+ self._Object = instance
+ self._Object.bases_Proxy = self
+ self._Location = location
+ self._Broker = broker
+
+
+ def callMethod(self, MsgID, MethodName, args, kwargs):
+ self._Interface.validateMethodInterface(MethodName, args, kwargs)
+ d = None
+ if self._Location is None:
+ m = getattr (self._Object, MethodName)
+ d = threads.deferToThread (m, *args, **kwargs)
+ else:
+ d = self._Broker.callRemote (self._Location, self._Object,
+ MethodName, args, kwargs)
+ return d
+
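
callInMainThread() above bridges a blocking caller thread to the Twisted reactor: the call is scheduled with reactor.callFromThread, the caller parks on a threading.Event, and the deferred's result travels back through a shared list. A condensed sketch of that hand-off follows; the names are illustrative, and Twisted's own threads.blockingCallFromThread implements a similar idea.

import threading
from twisted.internet import reactor

def blocking_call(func, *args, **kwargs):
    # Run a deferred-returning func on the reactor thread and block the
    # calling (worker) thread until the deferred fires.
    done = threading.Event()
    box = []

    def collect(value, ok):
        box[:] = [value, ok]
        done.set()

    def run():
        try:
            d = func(*args, **kwargs)
            d.addCallbacks(lambda r: collect(r, True),
                           lambda f: collect(f, False))
        except Exception, e:
            collect(e, False)

    reactor.callFromThread(run)     # hop onto the reactor thread
    done.wait()                     # block the worker thread
    value, ok = box
    if not ok:
        raise Exception(str(value))
    return value
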
Modified: trunk/bases/core/transports/bbtrans.py
===================================================================
--- trunk/bases/core/transports/bbtrans.py 2009-02-26 07:12:16 UTC (rev 8)
+++ trunk/bases/core/transports/bbtrans.py 2009-02-26 07:43:30 UTC (rev 9)
@@ -5,8 +5,9 @@
import struct, traceback
from bases.core.transports import transports
+from bases.errors import *
-class BBTransError(Exception):
+class BBTransError(BasesTransportError):
pass
# A limit on the maximum length of a bbtrans message.
@@ -27,10 +28,10 @@
BBTRANS_VER_MINOR = 1
# Init advertisement data
BBTRANS_INIT = {"ver_major":BBTRANS_VER_MAJOR,
- "ver_minor":BBTRANS_VER_MINOR,
- "limit":BBTRANS_MAX_MSG_SIZE,
- "keepalive":True,
- "async":True}
+ "ver_minor":BBTRANS_VER_MINOR,
+ "limit":BBTRANS_MAX_MSG_SIZE,
+ "keepalive":True,
+ "async":True}
# How many consecutive bad messages do we tolerate
BBTRANS_ERROR_TOLERANCE=10
@@ -56,294 +57,294 @@
class _BBTransProtocol(transports._BasesTransport):
- """
- The bases binary transport (bbtrans) protocol.
- """
-
- def __init__(self):
- self._InMsgQueue = []
- self._OutMsgQueue = []
- self._ErrTolerance = 0
- self._RemoteSizeLimit = 0
- self._KeepConnectionAlive = False
- self.Debug = False
- self._Msg_TransitState = STATE_GET_HOLA
- self._Buffer = None
- self._ServerSide = True
- transports._BasesTransport.__init__(self)
-
+ """
+ The bases binary transport (bbtrans) protocol.
+ """
+
+ def __init__(self):
+ self._InMsgQueue = []
+ self._OutMsgQueue = []
+ self._ErrTolerance = 0
+ self._RemoteSizeLimit = 0
+ self._KeepConnectionAlive = False
+ self.Debug = False
+ self._Msg_TransitState = STATE_GET_HOLA
+ self._Buffer = None
+ self._ServerSide = True
+ transports._BasesTransport.__init__(self)
+
- def connectionMade(self):
- if self._ServerSide is True:
- self.sendInit()
+ def connectionMade(self):
+ if self._ServerSide is True:
+ self.sendInit()
- def dataReceived(self,data):
- """I recieve data and break them into messages. The messages
- are then delivered to the broker for processing.
+ def dataReceived(self,data):
+ """I receive data and break them into messages. The messages
+ are then delivered to the broker for processing.
- @param data: a stream of bytes straight from the network"""
+ @param data: a stream of bytes straight from the network"""
- data_len = len(data)
- bytes_left = len(data)
- pos = 0
- try:
- # Parse the stream and break it into messages
- while bytes_left != 0:
- if self._Msg_TransitState == STATE_GET_HOLA:
- log.msg(3, "_Msg_TransitState == STATE_GET_HOLA")
- pos = self._handleHola(data,pos)
- elif self._Msg_TransitState == STATE_GET_MSG:
- log.msg(3, "_Msg_TransitState == STATE_GET_MSG")
- pos = self._handleMsg(data,pos)
- elif self._Msg_TransitState == STATE_GET_INIT:
- log.msg(3, "_Msg_TransitState == STATE_GET_INIT")
- pos = self._handleInit(data,pos)
- elif self._Msg_TransitState == STATE_GET_INITRESPONSE:
- log.msg(3, "_Msg_TransitState == STATE_GET_INITRESPONSE")
- pos = self._handleInitR(data,pos)
- else:
- log.msg(1, "ERROR: Undefined state = %d" % \
- (self._Msg_TransitState))
- bytes_left = data_len - pos
- log.msg(3,"processed %d bytes, left %d" % (pos, bytes_left))
-
-
- # Process the messages that we have recieved.
- while len(self._InMsgQueue) != 0:
- emsg = self._InMsgQueue.pop(0)
- try:
- dmsg = rencode.loads(emsg)
- self.factory.basesBroker.processInMessage(dmesg, self)
- self._ErrTolerance = 0
- except Exception, e:
- log.msg(3, "Exception while processing _InMsgQueue")
- log.msg(3, str(e))
- if self.Debug is True:
- s = traceback.format_exc()
- log.msg(3, s)
- self._ErrTolerance += 1
-
- if self._ErrTolerance > BBTRANS_ERROR_TOLERANCE:
- log.msg(1, "ERROR: _ErrTolerance exceeded, closing")
- self.transport.loseConnection()
-
- except Exception, e:
- log.msg(3, "Exception in DataRecieved")
- log.msg(3, str(e))
- if self.Debug is True:
- s = traceback.format_exc()
- log.msg(3, s)
- self.transport.loseConnection()
+ data_len = len(data)
+ bytes_left = len(data)
+ pos = 0
+ try:
+ # Parse the stream and break it into messages
+ while bytes_left != 0:
+ if self._Msg_TransitState == STATE_GET_HOLA:
+ log.msg(3, "_Msg_TransitState == STATE_GET_HOLA")
+ pos = self._handleHola(data,pos)
+ elif self._Msg_TransitState == STATE_GET_MSG:
+ log.msg(3, "_Msg_TransitState == STATE_GET_MSG")
+ pos = self._handleMsg(data,pos)
+ elif self._Msg_TransitState == STATE_GET_INIT:
+ log.msg(3, "_Msg_TransitState == STATE_GET_INIT")
+ pos = self._handleInit(data,pos)
+ elif self._Msg_TransitState == STATE_GET_INITRESPONSE:
+ log.msg(3, "_Msg_TransitState == STATE_GET_INITRESPONSE")
+ pos = self._handleInitR(data,pos)
+ else:
+ log.msg(1, "ERROR: Undefined state = %d" % \
+ (self._Msg_TransitState))
+ bytes_left = data_len - pos
+ log.msg(3,"processed %d bytes, left %d" % (pos, bytes_left))
+
+
+ # Process the messages that we have received.
+ while len(self._InMsgQueue) != 0:
+ emsg = self._InMsgQueue.pop(0)
+ try:
+ dmsg = rencode.loads(emsg)
+ self.factory.basesBroker.processInMessage(dmsg, self)
+ self._ErrTolerance = 0
+ except Exception, e:
+ log.msg(3, "Exception while processing _InMsgQueue")
+ log.msg(3, str(e))
+ if self.Debug is True:
+ s = traceback.format_exc()
+ log.msg(3, s)
+ self._ErrTolerance += 1
+
+ if self._ErrTolerance > BBTRANS_ERROR_TOLERANCE:
+ log.msg(1, "ERROR: _ErrTolerance exceeded, closing")
+ self.transport.loseConnection()
+
+ except Exception, e:
+ log.msg(3, "Exception in dataReceived")
+ log.msg(3, str(e))
+ if self.Debug is True:
+ s = traceback.format_exc()
+ log.msg(3, s)
+ self.transport.loseConnection()
- def _handleHola(self,data,pos):
- done, npos, emsg = self._readData(data,pos,BBTRANS_HOLA_LEN)
- if done is True:
- l, t = self._checkHolaSig(emsg)
- self._Msg_Len = l
- if t == BBTRANS_HOLA_TYPE_MSG:
- self._Msg_TransitState = STATE_GET_MSG
- elif t == BBTRANS_HOLA_TYPE_INIT:
- self._Msg_TransitState = STATE_GET_INIT
- elif t == BBTRANS_HOLA_TYPE_INITRESPONSE:
- self._Msg_TransitState = STATE_GET_INITRESPONSE
- return npos
+ def _handleHola(self,data,pos):
+ done, npos, emsg = self._readData(data,pos,BBTRANS_HOLA_LEN)
+ if done is True:
+ l, t = self._checkHolaSig(emsg)
+ self._Msg_Len = l
+ if t == BBTRANS_HOLA_TYPE_MSG:
+ self._Msg_TransitState = STATE_GET_MSG
+ elif t == BBTRANS_HOLA_TYPE_INIT:
+ self._Msg_TransitState = STATE_GET_INIT
+ elif t == BBTRANS_HOLA_TYPE_INITRESPONSE:
+ self._Msg_TransitState = STATE_GET_INITRESPONSE
+ return npos
- def _checkHolaSig(self,data):
- """I check the given data stream for a HOLA message and return
- the encoded length of the message
+ def _checkHolaSig(self,data):
+ """I check the given data stream for a HOLA message and return
+ the encoded length of the message
- @param data: a packed string of length BBTRANS_HOLA_LEN
+ @param data: a packed string of length BBTRANS_HOLA_LEN
- @return tuple(int, int): A tuple of (length, type). (1) length
- of the incoming message and (2) type of the incoming message"""
+ @return tuple(int, int): A tuple of (length, type). (1) length
+ of the incoming message and (2) type of the incoming message"""
- log.msg(3, "Checking for hOla")
- try:
- hola,length,msgtype = struct.unpack(BBTRANS_HOLA_FMT,data)
- except:
- raise BBTransError("HOLA signature format error")
- # Check for hola signature
- if hola != "h0La":
- raise BBTransError \
- ("HOLA signature error. Expected:%s, Got:%s" % \
- ("h0La", hola))
- # Check the hola-msg-type
- if msgtype not in (BBTRANS_HOLA_TYPE_MSG, BBTRANS_HOLA_TYPE_INIT,
- BBTRANS_HOLA_TYPE_INITRESPONSE):
- raise BBTransError \
- ("HOLA msgtype error. Got unknown type:%d" % \
- (msgtype))
- # Check the hola-msg-len
- if msgtype==BBTRANS_HOLA_TYPE_MSG and length > BBTRANS_MAX_MSG_SIZE:
- raise BBTransError \
- ("HOLA type=%d size=%d exceeds MAX=%d" % \
- (msgtype, length, BBTRANS_MAX_MSG_SIZE))
- elif (msgtype in (BBTRANS_HOLA_TYPE_INIT, BBTRANS_HOLA_TYPE_INIT)) \
- and length > BBTRANS_MAX_INIT_SIZE:
- raise BBTransError \
- ("HOLA type=%d size=%d exceeds MAX=%d" % \
- (msgtype, length, BBTRANS_MAX_INIT_SIZE))
+ log.msg(3, "Checking for hOla")
+ try:
+ hola,length,msgtype = struct.unpack(BBTRANS_HOLA_FMT,data)
+ except:
+ raise BBTransError("HOLA signature format error")
+ # Check for hola signature
+ if hola != "h0La":
+ raise BBTransError \
+ ("HOLA signature error. Expected:%s, Got:%s" % \
+ ("h0La", hola))
+ # Check the hola-msg-type
+ if msgtype not in (BBTRANS_HOLA_TYPE_MSG, BBTRANS_HOLA_TYPE_INIT,
+ BBTRANS_HOLA_TYPE_INITRESPONSE):
+ raise BBTransError \
+ ("HOLA msgtype error. Got unknown type:%d" % \
+ (msgtype))
+ # Check the hola-msg-len
+ if msgtype==BBTRANS_HOLA_TYPE_MSG and length > BBTRANS_MAX_MSG_SIZE:
+ raise BBTransError \
+ ("HOLA type=%d size=%d exceeds MAX=%d" % \
+ (msgtype, length, BBTRANS_MAX_MSG_SIZE))
+ elif (msgtype in (BBTRANS_HOLA_TYPE_INIT, BBTRANS_HOLA_TYPE_INITRESPONSE)) \
+ and length > BBTRANS_MAX_INIT_SIZE:
+ raise BBTransError \
+ ("HOLA type=%d size=%d exceeds MAX=%d" % \
+ (msgtype, length, BBTRANS_MAX_INIT_SIZE))
- return (length, msgtype)
+ return (length, msgtype)
-
- def _handleMsg(self,data,pos):
- done, npos, emsg = self._readData(data,pos,self._Msg_Len)
- if done is True:
- self._InMsgQueue.append(emsg)
- self._Msg_TransitState = STATE_GET_HOLA
- return npos
+
+ def _handleMsg(self,data,pos):
+ done, npos, emsg = self._readData(data,pos,self._Msg_Len)
+ if done is True:
+ self._InMsgQueue.append(emsg)
+ self._Msg_TransitState = STATE_GET_HOLA
+ return npos
- def _handleInit(self,data,pos):
- done, npos, emsg = self._readData(data,pos,self._Msg_Len)
- if done is True:
- self.recievedInit(emsg,respond=True)
- self._Msg_TransitState = STATE_GET_HOLA
- return npos
+ def _handleInit(self,data,pos):
+ done, npos, emsg = self._readData(data,pos,self._Msg_Len)
+ if done is True:
+ self.recievedInit(emsg,respond=True)
+ self._Msg_TransitState = STATE_GET_HOLA
+ return npos
- def _handleInitR(self,data,pos):
- done, npos, emsg = self._readData(data,pos,self._Msg_Len)
- if done is True:
- self.recievedInit(emsg)
- self._Msg_TransitState = STATE_GET_HOLA
- return npos
+ def _handleInitR(self,data,pos):
+ done, npos, emsg = self._readData(data,pos,self._Msg_Len)
+ if done is True:
+ self.recievedInit(emsg)
+ self._Msg_TransitState = STATE_GET_HOLA
+ return npos
- def _readData(self,data,pos,maxLen=0):
- """I read bytes from the given 'data' stream from position=pos
- bytes until I get a byte stream of length maxLen. If the given
- data-stream was not enough, I return a tuple saying so. I
- should be called with more chunks of data till get the
- required stream length.
+ def _readData(self,data,pos,maxLen=0):
+ """I read bytes from the given 'data' stream from position=pos
+ bytes until I get a byte stream of length maxLen. If the given
+ data-stream was not enough, I return a tuple saying so. I
+ should be called with more chunks of data till get the
+ required stream length.
- When the given data-stream did not have enough bytes, I return
- a (False, new-position, None).
+ When the given data-stream did not have enough bytes, I return
+ a (False, new-position, None).
- Once I get all the required bytes, I return a (True,
- new-position, byte-stream)
+ Once I get all the required bytes, I return a (True,
+ new-position, byte-stream)
- @param data: data stream to read data from
+ @param data: data stream to read data from
- @param pos: position from which data should be read
+ @param pos: position from which data should be read
- @param maxLen: The maximum length of the bytestream that I
- should gather
+ @param maxLen: The maximum length of the bytestream that I
+ should gather
- @return tuple (bool, int, str): I return a tuple containing
- the following: (1) boolean saying whether I have collected the
- byte-stream completely or whether I would need more data. (2)
- int saying the new-position from where the next read should be
- done. (3) byte-stream containing the requested bytestream
- collection after I have finished reading the entire length."""
-
- left = len(data) - pos
- assert left > 0
-
- if self._Buffer is None:
- self._Buffer = []
- assert maxLen != 0
- self._bufferRequiredLen = maxLen
-
- if self._bufferRequiredLen > left:
- npos = pos + left
- self._Buffer.append(data[pos:npos])
- self._bufferRequiredLen -= left
- return (False, npos, None)
- else:
- npos = pos + self._bufferRequiredLen
- self._Buffer.append(data[pos:npos])
- b = ''.join(self._Buffer)
- self._Buffer = None
- self._bufferRequiredLen = 0
- return (True, npos, b)
+ @return tuple (bool, int, str): I return a tuple containing
+ the following: (1) boolean saying whether I have collected the
+ byte-stream completely or whether I would need more data. (2)
+ int saying the new-position from where the next read should be
+ done. (3) byte-stream containing the requested bytestream
+ collection after I have finished reading the entire length."""
+
+ left = len(data) - pos
+ assert left > 0
+
+ if self._Buffer is None:
+ self._Buffer = []
+ assert maxLen != 0
+ self._bufferRequiredLen = maxLen
+
+ if self._bufferRequiredLen > left:
+ npos = pos + left
+ self._Buffer.append(data[pos:npos])
+ self._bufferRequiredLen -= left
+ return (False, npos, None)
+ else:
+ npos = pos + self._bufferRequiredLen
+ self._Buffer.append(data[pos:npos])
+ b = ''.join(self._Buffer)
+ self._Buffer = None
+ self._bufferRequiredLen = 0
+ return (True, npos, b)
- def queueMsg(self, m):
- """
- Serialize the message and queue it for sending to the other
- side
+ def queueMsg(self, m):
+ """
+ Serialize the message and queue it for sending to the other
+ side
- TBD: I should check whether the remote's accepted message
- size.
- """
- emsg = rencode.dumps(m)
- l = len(emsg)
- if l > BBTRANS_MAX_MSG_SIZE:
- raise BBTransError\
- ("Object too long=%d to transmit" % (l))
- hola = struct.pack(BBTRANS_HOLA_FMT,"h0La", l, BBTRANS_HOLA_TYPE_MSG)
- self._OutMsgQueue.append((hola,emsg))
- self.sendMsgs()
-
+ TBD: I should check whether the remote's accepted message
+ size.
+ """
+ emsg = rencode.dumps(m)
+ l = len(emsg)
+ if l > BBTRANS_MAX_MSG_SIZE:
+ raise BBTransError\
+ ("Object too long=%d to transmit" % (l))
+ hola = struct.pack(BBTRANS_HOLA_FMT,"h0La", l, BBTRANS_HOLA_TYPE_MSG)
+ self._OutMsgQueue.append((hola,emsg))
+ self.sendMsgs()
+
- def sendInit(self):
- emsg = rencode.dumps(BBTRANS_INIT)
- l = len(emsg)
- if l > BBTRANS_MAX_INIT_SIZE:
- raise BBTransError\
- ("Object too long=%d to transmit" % (l))
- hola = struct.pack(BBTRANS_HOLA_FMT,"h0La", l, BBTRANS_HOLA_TYPE_INIT)
- self._OutMsgQueue.append((hola,emsg))
- self.sendMsgs()
-
-
- def recievedInit(self,einit,respond=False):
- """I am called whenever we recieve a init-sequence from the
- other end of the line. I see whether we are ok with it. When
- respond is True, I send back a init-response."""
+ def sendInit(self):
+ emsg = rencode.dumps(BBTRANS_INIT)
+ l = len(emsg)
+ if l > BBTRANS_MAX_INIT_SIZE:
+ raise BBTransError\
+ ("Object too long=%d to transmit" % (l))
+ hola = struct.pack(BBTRANS_HOLA_FMT,"h0La", l, BBTRANS_HOLA_TYPE_INIT)
+ self._OutMsgQueue.append((hola,emsg))
+ self.sendMsgs()
+
+
+ def recievedInit(self,einit,respond=False):
+ """I am called whenever we receive an init-sequence from the
+ other end of the line. I see whether we are ok with it. When
+ respond is True, I send back a init-response."""
- init = rencode.loads(einit)
- # checking the protocol version
- if (init['ver_major'], init['ver_minor']) != \
- (BBTRANS_VER_MAJOR, BBTRANS_VER_MINOR):
- raise BBTransError \
- ("BBTrans version mismatch. Expected:%d.%d, Got:%d.%d" % \
- (BBTRANS_VER_MAJOR, BBTRANS_VER_MINOR, \
- init['ver_major'], init['ver_minor']))
- if init['limit'] < BBTRANS_MAX_INIT_SIZE:
- raise BBTransError \
- ("Remote wont take even a init")
- self._RemoteSizeLimit = init['limit']
- self._KeepConnectionAlive = init['keepalive']
+ init = rencode.loads(einit)
+ # checking the protocol version
+ if (init['ver_major'], init['ver_minor']) != \
+ (BBTRANS_VER_MAJOR, BBTRANS_VER_MINOR):
+ raise BBTransError \
+ ("BBTrans version mismatch. Expected:%d.%d, Got:%d.%d" % \
+ (BBTRANS_VER_MAJOR, BBTRANS_VER_MINOR, \
+ init['ver_major'], init['ver_minor']))
+ if init['limit'] < BBTRANS_MAX_INIT_SIZE:
+ raise BBTransError \
+ ("Remote won't take even an init")
+ self._RemoteSizeLimit = init['limit']
+ self._KeepConnectionAlive = init['keepalive']
- if respond is True:
- emsg = rencode.dumps(BBTRANS_INIT)
- l = len(emsg)
- if l > BBTRANS_MAX_INIT_SIZE:
- raise BBTransError\
- ("Object too long=%d to transmit" % (l))
- hola = struct.pack(BBTRANS_HOLA_FMT,"h0La", l,
- BBTRANS_HOLA_TYPE_INITRESPONSE)
- self._OutMsgQueue.append((hola,emsg))
- self.sendMsgs()
+ if respond is True:
+ emsg = rencode.dumps(BBTRANS_INIT)
+ l = len(emsg)
+ if l > BBTRANS_MAX_INIT_SIZE:
+ raise BBTransError\
+ ("Object too long=%d to transmit" % (l))
+ hola = struct.pack(BBTRANS_HOLA_FMT,"h0La", l,
+ BBTRANS_HOLA_TYPE_INITRESPONSE)
+ self._OutMsgQueue.append((hola,emsg))
+ self.sendMsgs()
- def sendMsgs(self):
- """Send the serialized messages in the queue to the other side"""
+ def sendMsgs(self):
+ """Send the serialized messages in the queue to the other side"""
- while len(self._OutMsgQueue) != 0:
- h, m = self._OutMsgQueue.pop(0)
- self.transport.write(''.join((h,m)))
+ while len(self._OutMsgQueue) != 0:
+ h, m = self._OutMsgQueue.pop(0)
+ self.transport.write(''.join((h,m)))
class BBTransServerFactory(transports._BasesServerFactory):
- def buildProtocol(self, addr):
- bb = _BBTransProtocol()
- bb.factory = self
- bb.remoteAddr = addr
- bb._ServerSide = True
- bb.Debug = self.Debug
- return bb
-
+ def buildProtocol(self, addr):
+ bb = _BBTransProtocol()
+ bb.factory = self
+ bb.remoteAddr = addr
+ bb._ServerSide = True
+ bb.Debug = True
+ return bb
+
class BBTransClientFactory(transports._BasesClientFactory):
- def buildProtocol(self, addr):
- bb = _BBTransProtocol()
- bb.factory = self
- bb._ServerSide = False
- bb.Debug = self.Debug
- return bb
+ def buildProtocol(self, addr):
+ bb = _BBTransProtocol()
+ bb.factory = self
+ bb._ServerSide = False
+ bb.Debug = self.Debug
+ return bb
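
Every bbtrans payload is preceded by a fixed "HOLA" header packed with struct: the literal signature "h0La", the payload length, and a message-type code. BBTRANS_HOLA_FMT itself is defined outside the hunks quoted here, so the format string and type value in this sketch are assumptions made purely for illustration.

import struct

BBTRANS_HOLA_FMT = "!4sIB"      # assumed: 4-byte signature, uint32 length, uint8 type
BBTRANS_HOLA_TYPE_MSG = 1       # assumed value; not visible in this mail

payload = "hello"               # already rencode-serialized in the real code
hola = struct.pack(BBTRANS_HOLA_FMT, "h0La", len(payload), BBTRANS_HOLA_TYPE_MSG)
wire = hola + payload

# Receiving side: peel off the fixed-size header, then read `length` bytes.
hdr_len = struct.calcsize(BBTRANS_HOLA_FMT)
sig, length, msgtype = struct.unpack(BBTRANS_HOLA_FMT, wire[:hdr_len])
assert sig == "h0La" and length == len(payload)
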
Modified: trunk/bases/core/transports/transports.py
===================================================================
--- trunk/bases/core/transports/transports.py 2009-02-26 07:12:16 UTC (rev 8)
+++ trunk/bases/core/transports/transports.py 2009-02-26 07:43:30 UTC (rev 9)
@@ -2,87 +2,86 @@
from twisted.internet import protocol
from twisted.python import log
-class BasesTransportError(Exception):
- pass
+from bases.errors import *
class _BasesServerFactory(protocol.ServerFactory):
- def __init__(self, sname, broker, poser, Debug=False):
- self.basesService = sname
- self.basesBroker = broker
- self.basesPoser = poser
- self.Debug = Debug
+ def __init__(self, sname, broker, poser, Debug=False):
+ self.basesService = sname
+ self.basesBroker = broker
+ ...
[truncated message content]
From: <joe...@us...> - 2009-02-26 07:12:25
Revision: 8
http://bases.svn.sourceforge.net/bases/?rev=8&view=rev
Author: joe_steeve
Date: 2009-02-26 07:12:16 +0000 (Thu, 26 Feb 2009)
Log Message:
-----------
More work on the synclient front
Modified Paths:
--------------
trunk/bases/core/broker.py
trunk/bases/synclient/app.py
trunk/bases/synclient/broker.py
trunk/bases/synclient/services.py
trunk/bases/synclient/transports/bbtrans.py
Added Paths:
-----------
trunk/bases/synclient/transports/transports.py
Modified: trunk/bases/core/broker.py
===================================================================
--- trunk/bases/core/broker.py 2009-02-26 07:09:40 UTC (rev 7)
+++ trunk/bases/core/broker.py 2009-02-26 07:12:16 UTC (rev 8)
@@ -195,7 +195,7 @@
@param tuple location: The location tuple
- @param int ObjID: the ID of the component-object on the remote
+ @param str ObjID: the ID of the component-object on the remote
bases-server.
@param str MethodName: Name of the method on the object=ObjID to
@@ -271,7 +271,7 @@
tmap = transports.basesTransportsMap
if LocationTuple[0] in tmap['secure']:
secure = True
-
+########## incomplete here
Modified: trunk/bases/synclient/app.py
===================================================================
--- trunk/bases/synclient/app.py 2009-02-26 07:09:40 UTC (rev 7)
+++ trunk/bases/synclient/app.py 2009-02-26 07:12:16 UTC (rev 8)
@@ -1,39 +1,34 @@
from bases.synclient import services
+from bases.synclient.broker import BasesBroker
from bases.synclient.component import sBasesComponentServerProxy
-from bases.synclient.broker import sBasesBroker
-def createBasesContext(conf):
- """
- I initialize a 'Bases application context'. The context is need to
- communicate with other bases-servers.
+class BasesClientApp:
+ def __init__(self, conf):
+ slist = conf['bases-servers']
+ try:
+ self.SSLKey = conf['SSLKey']
+ self.SSLCert = conf['SSLCert']
+ self.CA = conf['CA']
+ except:
+ self.SSLKey = None
+ self.SSLCert = None
+ self.CA = None
+ self.Broker = BasesBroker()
+ self.Broker.app = self
+ self.DirClient = services.BasesDirectoryClient(conf['dircache'])
+ self.DirClient.broker = self.Broker
+ self.DirClient.addServers(slist)
+
- @param conf: A configuration dictionary.
- """
- c = {}
- b = sBasesBroker()
- o = services.sBasesObjectRepoClient()
- o.broker = b
- d = services.sBasesDirectoryClient(conf['dircache'])
- d.broker = b
- d.objrepo = o
+ def getBasesComponent(self, ComponentName):
+ i, oid = self.DirClient.getObject(ComponentName)
+ p = BasesComponentServerProxy(i, oid, self.Broker)
+ return p
- c['dir'] = d
- c['objrepo'] = o
- c['broker'] = b
- return c
-
-
-def getBasesComponent(cxt, ComponentName):
- """
- I get a proxy object to a remote bases-component. The proxy can be
- used to invoke all methods advertized in the interface.
- """
- i = cxt['dir'].getInterface(ComponentName)
- l = cxt['dir'].getLocation(ComponentName)
- obj = cxt['objrepo'].getObject(l, ComponentName)
-
- p = sBasesComponentServerProxy(l, i, obj, cxt['broker'])
- return p
+ def __del__(self):
+ self.Broker.stop()
+ self.Broker = None
+ self.DirClient = None
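
BasesClientApp above takes its settings as a plain dict: 'bases-servers', 'dircache', and optionally SSLKey/SSLCert/CA. Here is a sketch of configuring and using a synchronous client with SSL; the key names come from __init__ above, while the paths and certificate files are invented for illustration.

from bases.synclient import app

conf = {
    "bases-servers": "bbtrans:127.0.0.1:10000",   # protocol:host:port
    "dircache": "/tmp/bases-cache",
    # Optional: without these three keys the client falls back to a
    # plain, non-SSL connection.
    "SSLKey": "/etc/bases/client.key",
    "SSLCert": "/etc/bases/client.crt",
    "CA": "/etc/bases/ca.crt",
}

a = app.BasesClientApp(conf)
echo = a.getBasesComponent("org.bases.apps.TestComponent")
print echo.testEcho("Hola")
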
Modified: trunk/bases/synclient/broker.py
===================================================================
--- trunk/bases/synclient/broker.py 2009-02-26 07:09:40 UTC (rev 7)
+++ trunk/bases/synclient/broker.py 2009-02-26 07:12:16 UTC (rev 8)
@@ -1,67 +1,40 @@
-from bases.synclient.transports import bbtrans
-from OpenSSL import SSL
+from datetime import datetime
+import os
+shafunc = None
+try:
+ import hashlib
+ shafunc = hashlib.sha512
+except:
+ import sha
+ shafunc = sha.sha
+from bases.synclient.transports import transports
+
+## Order of values in the message-list
+BASES_MSG_MSGTYPE = 0
+BASES_MSG_MSGID = 1
+BASES_MSG_OBJID = 2
+BASES_MSG_METHODNAME = 3
+BASES_MSG_ARGS = 4
+BASES_MSG_KWARGS = 5
+BASES_MSG_RESPONSE = 2
+
## Values for the MsgType in a message-list
BASES_METHOD_CALL = 1
BASES_METHOD_RESPONSE = 2
-def SSLVerificationCB(conn, cert, errnum, depth, ok):
- #TBD: Server certificate verificaton should be done here.
- return ok
+class BasesBrokerError(Exception):
+ pass
-
-class sBasesTransportFactory:
- """
- I am different from the implementation in bases.core.broker. I
- manage the transports for synchronous clients.
- """
-
- def __init__(self, SSLKey=None, SSLCert=None, CA=None):
- if SSLKey is not None:
- self._SSLKey = SSLKey
- self._SSLCert = SSLCert
- self._CA = CA
- else:
- self._SSLKey = None
- self._SSLCert = None
- self._CA = None
-
- def getTransport(self, location):
- """
- Given a location tuple, I create a transport object. I do a
- SSL connection to the remote location, and connect the
- transport to it.
- """
- #TBD: Check the proto in location to obtain the correct
- #transport. We should do this automagially.
- sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+class BasesBroker:
+ def __init__(self,Debug=False):
+ self.out_transports={}
+ self.Debug = Debug
+
- if self._SSLKey is not None:
- ctx = SSL.Context(SSL.SSLv23_METHOD)
- ctx.set_verify(SSL.VERIFY_PEER, SSLVerificationCB)
- ctx.use_privatekey_file (self._SSLKey)
- ctx.use_certificate_file(self._SSLCert)
- ctx.load_verify_locations(self._CA)
- sock = SSL.Connection(ctx, sock)
-
- sock.connect((location(1), location(2)))
-
- bb = bbtrans.sBasesBinaryTransport()
- bb.conn = sock
- bb.location = location
- bb.connectionMade()
- return bb
-
-
-class sBasesBroker:
- def __init__(self):
- self._transports={}
- self._tfactory = sBasesTransportFactory()
- self.next_msgid = id(self._tfactory)
-
def callRemote(self, location, ObjID, MethodName, *args, **kwargs):
"""
I am called by a BasesComponentServerProxy object. I do a
@@ -69,7 +42,7 @@
@param tuple location: The location tuple
- @param int ObjID: the ID of the component-object on the remote
+ @param str ObjID: the ID of the component-object on the remote
bases-server.
@param str MethodName: Name of the method on the object=ObjID to
@@ -81,34 +54,51 @@
"""
- if self.Debug is True:
- for m in out_method_calls:
- if m[1] == self.next_msgid:
- raise sBasesBrokerError \
- ("Inconsistent next_msgid-%d" % (next_msgid))
-
- msg = [BASES_METHOD_CALL, self.next_msgid, ObjectID, MethodName,
- args, kwargs]
+ # Generate a unique MsgID
+ MsgID = ""
+ MsgID_s = "%s%s%s%s" % (id(self), ObjID, datetime.utcnow(),
+ os.urandom(4))
+ MsgID = shafunc(MsgID_s).hexdigest()
+
+ # Construct the message-list
+ msg = [BASES_METHOD_CALL, MsgID, ObjID, MethodName, args, kwargs]
+ # get a transport
t = self.getTransport(location)
+ # send it across
try:
t.sendMsg(msg)
r = t.recvMsg()
- if r[1] != self.next_msgid:
- raise sBasesBroker ("Other side is giving us junk")
+ if r[BASES_MSG_MSGTYPE] != BASES_METHOD_RESPONSE:
+ raise BasesBrokerError ("Cannot handle method calls")
+ if r[BASES_MSG_MSGID] != MsgID:
+ raise BasesBrokerError ("Response for unexpected msg-id %s" \
+ % (r[BASES_MSG_MSGID]))
except Exception, e:
- l = t.location
t.conn.shutdown()
t.conn.close()
- del self._transports[l]
+ del self.out_transports[location]
raise Exception(e)
- return r
+ return r[BASES_MSG_RESPONSE]
def getTransport(self, location):
- if self._transports.has_key(location):
- return self._transports[location]
+ """
+ I return a transport from the available transports. If I don't
+ have one, then I instantiate the appropriate transport.
+ """
+ if self.out_transports.has_key(location):
+ return self.out_transports[location]
else:
- t = self._tfactory.getTransport(location)
- self._transports[location] = t
+ t = transports.getTransport(location,
+ SSLKey=self.app.SSLKey,
+ SSLCert=self.app.SSLCert,
+ CA=self.app.CA)
+ self.out_transports[location] = t
return t
+ def stop(self):
+ for l in list(self.out_transports.keys()):
+ self.out_transports[l].conn.shutdown()
+ self.out_transports[l].conn.close()
+ self.out_transports[l].conn = None
+ del(self.out_transports[l])
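
Note how broker.py replaces the old counter-based next_msgid with a hashed,
effectively unique MsgID, and now checks both the message type and the echoed
MsgID of the reply. A standalone sketch of that ID scheme and of the
message-list layout follows; the helper name make_msgid is mine, the sketch
assumes hashlib is available (the diff falls back to the old sha module when
it is not), and repr() around os.urandom is a small portability tweak that is
not in the diff:

    import hashlib
    import os
    from datetime import datetime

    BASES_METHOD_CALL = 1

    def make_msgid(broker, obj_id):
        # id(broker) + object-id + UTC timestamp + 4 random bytes, hashed
        # with SHA-512 -- the same ingredients callRemote mixes above.
        seed = "%s%s%s%s" % (id(broker), obj_id, datetime.utcnow(),
                             repr(os.urandom(4)))
        return hashlib.sha512(seed.encode("utf-8")).hexdigest()

    # Message-list layout, per the BASES_MSG_* indices above:
    # [msg-type, msg-id, obj-id, method-name, args, kwargs]
    msg = [BASES_METHOD_CALL, make_msgid(object(), "obj-1"), "obj-1",
           "ping", (), {}]
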
Modified: trunk/bases/synclient/services.py
===================================================================
--- trunk/bases/synclient/services.py 2009-02-26 07:09:40 UTC (rev 7)
+++ trunk/bases/synclient/services.py 2009-02-26 07:12:16 UTC (rev 8)
@@ -1,20 +1,8 @@
-class sBasesObjectRepoClient:
- """
- I talk to a remote bases-server's object-repo to get a reference
- to a remote-object.
- """
+class BasesDirectoryClient:
def __init__(self):
- self.broker = None
pass
- def getObject(self, location, componentname):
- pass
-class sBasesDirectoryClient:
-
- def __init__(self):
- self.broker = None
- pass
Modified: trunk/bases/synclient/transports/bbtrans.py
===================================================================
--- trunk/bases/synclient/transports/bbtrans.py 2009-02-26 07:09:40 UTC (rev 7)
+++ trunk/bases/synclient/transports/bbtrans.py 2009-02-26 07:12:16 UTC (rev 8)
@@ -52,21 +52,21 @@
return result
-class sBasesBinaryTransportError(Exception):
+class BBTransError(Exception):
pass
-class sBasesBinaryTransport():
-
+class _BBTransProtocol:
def __init__(self):
self.conn = None
- self._InitTimeStamp = None
self._InMsgQueue = []
self._InitDone = False
-
+
def connectionMade(self):
- self.doInit()
+ while self._InitDone is False:
+ data = self.conn.recv(1024)
+ self.dataReceived(data)
def sendMsg(self, m):
@@ -76,7 +76,6 @@
@param m: A serializable python object.
"""
- self.doInit()
emsg = rencode.dumps(m)
l = len(emsg)
if l > BBTRANS_MAX_MSG_SIZE:
@@ -93,34 +92,15 @@
@rtype: A python object holding a message that we got
"""
- self.doInit()
while len(self._InMsgQueue) == 0:
data = self.conn.recv(1024)
- self.dataRecieved(data)
+ self.dataReceived(data)
emsg = self._InMsgQueue.pop(0)
r = rencode.loads(emsg)
return r
-
- def doInit(self):
- """
- I initiate a 'Init sequence', and wait for a response.
- """
- if self._InitDone is True:
- d = timedelta()
- d = datetime.utcnow() - self._InitTimeStamp
- if d.seconds <= sBBTRANS_REINIT_TIMEOUT:
- return
- self._InitTimeStamp = datetime.utcnow()
- self._InitDone = False
- self.sendInit()
- while self._InitDone is False:
- data = self.conn.recv(1024)
- self.dataRecieved(data)
-
-
def dataReceived(self, data):
"""I receive data and break it into messages. The messages
are then delivered to the broker for processing.
@@ -227,6 +207,7 @@
self._Msg_TransitState = STATE_GET_HOLA
return npos
+
def _handleInitR(self,data,pos):
done, npos, emsg = self._readData(data,pos,self._Msg_Len)
if done is True:
@@ -236,7 +217,6 @@
def _readData(self,data,pos,maxLen=0):
-
"""I read bytes from the given 'data' stream from position=pos
bytes until I get a byte stream of length maxLen. If the given
data-stream was not enough, I return a tuple saying so. I
@@ -284,16 +264,6 @@
self._bufferRequiredLen = 0
return (True, npos, b)
-
- def sendInit(self):
- emsg = rencode.dumps(BBTRANS_INIT)
- l = len(emsg)
- if l > BBTRANS_MAX_INIT_SIZE:
- raise sBasesBinaryTransportError\
- ("Object too long=%d to transmit" % (l))
- hola = struct.pack(BBTRANS_HOLA_FMT,"h0La", l, BBTRANS_HOLA_TYPE_INIT)
- self.conn.send(''.join((hola,emsg)))
-
def recievedInit(self,einit,respond=False):
"""I am called whenever we receive an init-sequence from the
Added: trunk/bases/synclient/transports/transports.py
===================================================================
--- trunk/bases/synclient/transports/transports.py (rev 0)
+++ trunk/bases/synclient/transports/transports.py 2009-02-26 07:12:16 UTC (rev 8)
@@ -0,0 +1,60 @@
+import socket
+from OpenSSL import SSL
+class BasesTransportError(Exception):
+ pass
+
+
+def SSLVerificationCB(conn, cert, errnum, depth, ok):
+ #TBD: Server certificate verification should be done here.
+ return ok
+
+
+def createBBTransTransport(*args, **kwargs):
+ from bases.synclient.transports import bbtrans
+ return bbtrans._BBTransProtocol()
+
+
+basesTransportsMap = \
+ {"client": \
+ {"bbtrans": createBBTransTransport},
+ "secure": \
+ {"sbbtrans":"bbtrans"}
+ }
+
+
+def getTransport(location, SSLKey=None, SSLCert=None, CA=None):
+ """
+ Given a location tuple, I create a transport object. If the
+ requested protocol is a secure protocol, I use the key, cert and
+ ca provided in SSLKey, SSLCert, CA to create a SSL connection to
+ the remote location.
+ """
+
+ # Sanity check
+ secure = False
+ proto = location[0]
+ if location[0] in basesTransportsMap['secure']:
+ if SSLKey == None or SSLCert == None or CA ==None:
+ raise Exception("SSL details required for %s" % (location[0]))
+ secure = True
+ proto = basesTransportsMap['secure'][location[0]]
+ if proto not in basesTransportsMap['client']:
+ raise Exception("Unknown protocol %s" % (proto))
+ elif location[0] not in basesTransportsMap['client']:
+ raise Exception("Unknown protocol %s" % (location[0]))
+
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ if secure is True:
+ ctx = SSL.Context(SSL.SSLv23_METHOD)
+ ctx.set_verify(SSL.VERIFY_PEER, SSLVerificationCB)
+ ctx.use_privatekey_file(SSLKey)
+ ctx.use_certificate_file(SSLCert)
+ ctx.load_verify_locations(CA)
+ sock = SSL.Connection(ctx, sock)
+
+ sock.connect((location[1], location[2]))
+ t = basesTransportsMap['client'][proto]()
+ t.conn = sock
+ t.location = location
+ t.connectionMade()
+ return t
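
The new getTransport resolves the wire protocol through the two-level
basesTransportsMap: names in the 'secure' sub-map alias a plain client
protocol and require SSL material, and anything else must appear directly in
the 'client' sub-map. A standalone sketch of just that lookup; the function
name resolve_proto is mine, the map contents mirror the diff, and it raises
plain Exception because that is what the diff does:

    def resolve_proto(proto, have_ssl_material):
        secure = {"sbbtrans": "bbtrans"}  # secure name -> plain client protocol
        client = ("bbtrans",)             # protocols with a registered factory
        if proto in secure:
            if not have_ssl_material:
                raise Exception("SSL details required for %s" % proto)
            return secure[proto], True
        if proto not in client:
            raise Exception("Unknown protocol %s" % proto)
        return proto, False

    # resolve_proto("sbbtrans", True)  -> ("bbtrans", True)
    # resolve_proto("bbtrans", False)  -> ("bbtrans", False)
    # resolve_proto("sbbtrans", False) raises "SSL details required for sbbtrans"
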