From: <pc...@us...> - 2006-12-31 12:56:07
|
Revision: 39 http://svn.sourceforge.net/firebird/?rev=39&view=rev Author: pcisar Date: 2006-12-31 04:56:05 -0800 (Sun, 31 Dec 2006) Log Message: ----------- v1.0 Implemented ability to Run tests from editor and update test stdout and stderr from captured test run aoutput. Modified Paths: -------------- qa/trunk/qmedit/README.TXT qa/trunk/qmedit/qmedit_model.py qa/trunk/qmedit/ui.py qa/trunk/qmedit/ui.wxg qa/trunk/testsuite/QMTest/fbqa.py Property Changed: ---------------- qa/trunk/qmedit/ Property changes on: qa/trunk/qmedit ___________________________________________________________________ Name: svn:ignore + *.bak Modified: qa/trunk/qmedit/README.TXT =================================================================== --- qa/trunk/qmedit/README.TXT 2006-12-30 18:57:18 UTC (rev 38) +++ qa/trunk/qmedit/README.TXT 2006-12-31 12:56:05 UTC (rev 39) @@ -1,5 +1,5 @@ -qmEditor v0.95 -============== +qmEditor v1.0 +============= Requirements ------------ @@ -13,9 +13,13 @@ Run qmedit.py (python qmedit.py) from within the qmTest test database. +Running tests from qmEditor +--------------------------- -Known Issues ------------- - -Right now you can only edit tests, not resources (well, we don't use them much anyway), and you can't run and (re)bench tests from the editor. - +Starting from version 1.0, you can run test directly from qmEditor and rebench tests +from actual output from test run. You will need updated fbqa.py file in test suite QMTest +subdirectory. To tun test, simply select the desired test version and hit the (now enabled) +Run button. You'll be asked for QMTest context file. After the test run is finished, a dialog +window with QMTest output and test stdout and stderr is displayed. To move test output to the +editor, use the Apply button (it will only update test stdout and stderr fields, so you +have to save the chenages). Test run is equal to QMTest RUN command. 
Modified: qa/trunk/qmedit/qmedit_model.py =================================================================== --- qa/trunk/qmedit/qmedit_model.py 2006-12-30 18:57:18 UTC (rev 38) +++ qa/trunk/qmedit/qmedit_model.py 2006-12-31 12:56:05 UTC (rev 39) @@ -26,10 +26,14 @@ # Contributor(s): ______________________________________. import os +from StringIO import StringIO +import sys +import qm from qm.test import base from qm.test.database import TestDescriptor from qm.test.classes.xml_database import XMLDatabase from qm.extension import get_class_arguments +from qm.test.cmdline import QMTest TEST_CLASS_NAME = 'fbqa.FirebirdTest' @@ -41,6 +45,13 @@ outcomes = ['PASS','FAIL','UNTESTED','ERROR'] def __init__(self,databasePath=None): + # Next code initializes QMTest + qm_home = 'c:/Python24' + qm.prefix = qm_home + qm.common.program_name = "QMTest" + qm.diagnostic.load_messages("test") + qm.rc.Load("test") + # Wrapper Initialization self.current_test = [] self.current_test_version = None if databasePath == None: @@ -92,6 +103,29 @@ #---public---#000000#80FFFF-------------------------------------------------- + def runTest(self,testid,contextFile='context'): + command = QMTest(['run','-C',contextFile,'-c','SaveOutput=test', + '-f','full','-o','testrun.qmr',testid], '') + saved_stdout = sys.stdout + saved_stderr = sys.stderr + stdout = test_stdout = "STDOUT NOT AVAILABLE" + try: + sys.stdout = StringIO() + sys.stderr = sys.stdout + exit_code = command.Execute() + stdout = sys.stdout.getvalue() + f = open('test.out','r') + test_stdout = f.read() + f.close() + f = open('test.err','r') + test_stderr = f.read() + f.close() + os.remove('test.out') + os.remove('test.err') + finally: + sys.stdout = saved_stdout + sys.stderr = saved_stderr + return (exit_code, stdout, test_stdout,test_stderr) def getSuiteIds(self,scan_subdirs=0): return self.db.GetSuiteIds(scan_subdirs=scan_subdirs) def getAllSuiteIds(self,scan_subdirs=0): Modified: qa/trunk/qmedit/ui.py 
=================================================================== --- qa/trunk/qmedit/ui.py 2006-12-30 18:57:18 UTC (rev 38) +++ qa/trunk/qmedit/ui.py 2006-12-31 12:56:05 UTC (rev 39) @@ -503,7 +503,7 @@ def __set_properties(self): # begin wxGlade: qmEditFrame.__set_properties - self.SetTitle("qmEditor v0.95") + self.SetTitle("qmEditor v1.0") self.SetSize((850, 600)) self.statusbar.SetStatusWidths([-1]) # statusbar fields Modified: qa/trunk/qmedit/ui.wxg =================================================================== --- qa/trunk/qmedit/ui.wxg 2006-12-30 18:57:18 UTC (rev 38) +++ qa/trunk/qmedit/ui.wxg 2006-12-31 12:56:05 UTC (rev 39) @@ -1,14 +1,14 @@ <?xml version="1.0"?> -<!-- generated by wxGlade 0.4.1 on Wed Sep 27 19:05:14 2006 --> +<!-- generated by wxGlade 0.4.1 on Sat Dec 30 19:02:22 2006 --> <application path="D:\job\python-projects\qmedit\ui.py" name="" class="" option="0" language="python" top_window="mainFrame" encoding="ISO-8859-1" use_gettext="0" overwrite="0" use_new_namespace="1" for_version="2.6"> <object class="qmEditFrame" name="mainFrame" base="EditFrame"> <style>wxDEFAULT_FRAME_STYLE</style> - <title>qmEditor v0.95</title> + <title>qmEditor v1.0</title> <menubar>1</menubar> <centered>1</centered> <statusbar>1</statusbar> - <size>850, 600</size> + <size>905, 600</size> <object class="wxMenuBar" name="menubar" base="EditMenuBar"> <menus> </menus> @@ -454,7 +454,7 @@ <border>0</border> <option>1</option> <object class="wxTextCtrl" name="testDescription" base="EditTextCtrl"> - <style>wxTE_MULTILINE|wxTE_LINEWRAP|wxNO_BORDER</style> + <style>wxTE_MULTILINE|wxNO_BORDER</style> <events> <handler event="EVT_TEXT">evtTestChanged</handler> </events> @@ -1364,6 +1364,27 @@ </events> </object> </object> + <object class="sizeritem"> + <flag>wxADJUST_MINSIZE</flag> + <border>0</border> + <option>0</option> + <object class="spacer" name="spacer" base="EditSpacer"> + <height>20</height> + <width>20</width> + </object> + </object> + <object 
class="sizeritem"> + <flag>wxADJUST_MINSIZE</flag> + <border>0</border> + <option>0</option> + <object class="wxButton" name="btnRunTest" base="EditButton"> + <label>Run</label> + <disabled>1</disabled> + <events> + <handler event="EVT_BUTTON">evtRunTest</handler> + </events> + </object> + </object> </object> </object> </object> @@ -1713,7 +1734,7 @@ </object> </object> </object> - <object class="DialogNewSubstitution" name="dialog_2" base="EditDialog"> + <object class="DialogNewSubstitution" name="dlgNewSubstitution" base="EditDialog"> <style>wxDEFAULT_DIALOG_STYLE|wxRESIZE_BORDER|wxTHICK_FRAME|wxSTAY_ON_TOP</style> <title>New substitution</title> <object class="wxBoxSizer" name="sizer_64" base="EditBoxSizer"> @@ -1825,4 +1846,231 @@ </object> </object> </object> + <object class="DialogRunTest" name="dlgRunTest" base="EditDialog"> + <style>wxDEFAULT_DIALOG_STYLE</style> + <title>dialog_1</title> + <size>448, 113</size> + <object class="wxBoxSizer" name="sizer_69" base="EditBoxSizer"> + <orient>wxVERTICAL</orient> + <object class="sizeritem"> + <flag>wxALL|wxEXPAND</flag> + <border>3</border> + <option>1</option> + <object class="wxBoxSizer" name="sizer_71" base="EditBoxSizer"> + <orient>wxHORIZONTAL</orient> + <object class="sizeritem"> + <flag>wxRIGHT|wxALIGN_CENTER_VERTICAL|wxADJUST_MINSIZE</flag> + <border>3</border> + <option>0</option> + <object class="wxStaticText" name="lblContextFile" base="EditStaticText"> + <attribute>1</attribute> + <label>Context File:</label> + </object> + </object> + <object class="sizeritem"> + <flag>wxRIGHT|wxALIGN_CENTER_VERTICAL|wxADJUST_MINSIZE</flag> + <border>3</border> + <option>1</option> + <object class="wxTextCtrl" name="edContextFile" base="EditTextCtrl"> + <focused>1</focused> + </object> + </object> + <object class="sizeritem"> + <flag>wxALIGN_CENTER_VERTICAL|wxADJUST_MINSIZE</flag> + <border>0</border> + <option>0</option> + <object class="wxButton" name="btnSelectContextFile" base="EditButton"> + <label>...</label> + 
<events> + <handler event="EVT_BUTTON">evtPickContextFile</handler> + </events> + <size>30, -1</size> + </object> + </object> + </object> + </object> + <object class="sizeritem"> + <flag>wxALL|wxALIGN_CENTER_HORIZONTAL</flag> + <border>3</border> + <option>0</option> + <object class="wxBoxSizer" name="sizer_70" base="EditBoxSizer"> + <orient>wxHORIZONTAL</orient> + <object class="sizeritem"> + <flag>wxADJUST_MINSIZE</flag> + <border>0</border> + <option>0</option> + <object class="wxButton" name="btnRun" base="EditButton"> + <default>1</default> + <label>Run</label> + <id>wx.ID_OK</id> + </object> + </object> + <object class="sizeritem"> + <flag>wxADJUST_MINSIZE</flag> + <border>0</border> + <option>0</option> + <object class="spacer" name="spacer" base="EditSpacer"> + <height>20</height> + <width>20</width> + </object> + </object> + <object class="sizeritem"> + <flag>wxADJUST_MINSIZE</flag> + <border>0</border> + <option>0</option> + <object class="wxButton" name="btnCancel" base="EditButton"> + <label>Cancel</label> + <id>wx.ID_CANCEL</id> + </object> + </object> + </object> + </object> + </object> + </object> + <object class="DialogRunResults" name="dlgRunResults" base="EditDialog"> + <style>wxDEFAULT_DIALOG_STYLE|wxRESIZE_BORDER|wxTHICK_FRAME</style> + <title>Test Run Result</title> + <centered>1</centered> + <size>673, 300</size> + <object class="wxBoxSizer" name="sizer_72" base="EditBoxSizer"> + <orient>wxVERTICAL</orient> + <object class="sizeritem"> + <flag>wxEXPAND</flag> + <border>0</border> + <option>1</option> + <object class="wxNotebook" name="ntbRunDetails" base="EditNotebook"> + <style>0</style> + <tabs> + <tab window="pageQMTestOutput">QMTest Output</tab> + <tab window="pageTest_stdout">Test stdout</tab> + <tab window="pageTest_stderr">Test stderr</tab> + </tabs> + <object class="wxPanel" name="pageQMTestOutput" base="EditPanel"> + <style>wxTAB_TRAVERSAL</style> + <object class="wxBoxSizer" name="sizer_74" base="EditBoxSizer"> + 
<orient>wxHORIZONTAL</orient> + <object class="sizeritem"> + <flag>wxEXPAND|wxADJUST_MINSIZE</flag> + <border>0</border> + <option>1</option> + <object class="wxTextCtrl" name="edQMTestOutput" base="EditTextCtrl"> + <style>wxTE_MULTILINE|wxTE_READONLY|wxHSCROLL</style> + <focused>1</focused> + <font> + <size>10</size> + <family>modern</family> + <style>normal</style> + <weight>normal</weight> + <underlined>0</underlined> + <face></face> + </font> + </object> + </object> + </object> + </object> + <object class="wxPanel" name="pageTest_stdout" base="EditPanel"> + <style>wxTAB_TRAVERSAL</style> + <object class="wxBoxSizer" name="sizer_75" base="EditBoxSizer"> + <orient>wxHORIZONTAL</orient> + <object class="sizeritem"> + <flag>wxEXPAND|wxADJUST_MINSIZE</flag> + <border>0</border> + <option>1</option> + <object class="wxTextCtrl" name="edTest_stdout" base="EditTextCtrl"> + <style>wxTE_MULTILINE|wxTE_READONLY|wxHSCROLL</style> + <font> + <size>10</size> + <family>modern</family> + <style>normal</style> + <weight>normal</weight> + <underlined>0</underlined> + <face></face> + </font> + </object> + </object> + </object> + </object> + <object class="wxPanel" name="pageTest_stderr" base="EditPanel"> + <style>wxTAB_TRAVERSAL</style> + <object class="wxBoxSizer" name="sizer_76" base="EditBoxSizer"> + <orient>wxHORIZONTAL</orient> + <object class="sizeritem"> + <flag>wxEXPAND|wxADJUST_MINSIZE</flag> + <border>0</border> + <option>1</option> + <object class="wxTextCtrl" name="edTest_stderr" base="EditTextCtrl"> + <style>wxTE_MULTILINE|wxTE_READONLY|wxHSCROLL</style> + <font> + <size>10</size> + <family>modern</family> + <style>normal</style> + <weight>normal</weight> + <underlined>0</underlined> + <face></face> + </font> + </object> + </object> + </object> + </object> + </object> + </object> + <object class="sizeritem"> + <flag>wxALL|wxALIGN_CENTER_HORIZONTAL</flag> + <border>3</border> + <option>0</option> + <object class="wxBoxSizer" name="sizer_73" base="EditBoxSizer"> + 
<orient>wxHORIZONTAL</orient> + <object class="sizeritem"> + <flag>wxADJUST_MINSIZE</flag> + <border>0</border> + <option>0</option> + <object class="wxButton" name="button_1" base="EditButton"> + <default>1</default> + <label>OK</label> + <id>wx.ID_OK</id> + </object> + </object> + <object class="sizeritem"> + <flag>wxADJUST_MINSIZE</flag> + <border>0</border> + <option>0</option> + <object class="spacer" name="spacer" base="EditSpacer"> + <height>20</height> + <width>20</width> + </object> + </object> + <object class="sizeritem"> + <flag>wxADJUST_MINSIZE</flag> + <border>0</border> + <option>0</option> + <object class="wxButton" name="btnBenchTest" base="EditButton"> + <label>Rebench Test</label> + <id>wx.ID_APPLY</id> + <events> + <handler event="EVT_BUTTON">evtDoApply</handler> + </events> + </object> + </object> + <object class="sizeritem"> + <flag>wxADJUST_MINSIZE</flag> + <border>0</border> + <option>0</option> + <object class="spacer" name="spacer" base="EditSpacer"> + <height>20</height> + <width>20</width> + </object> + </object> + <object class="sizeritem"> + <flag>wxADJUST_MINSIZE</flag> + <border>0</border> + <option>0</option> + <object class="wxButton" name="button_3" base="EditButton"> + <label>Cancel</label> + <id>wx.ID_CANCEL</id> + </object> + </object> + </object> + </object> + </object> + </object> </application> Modified: qa/trunk/testsuite/QMTest/fbqa.py =================================================================== --- qa/trunk/testsuite/QMTest/fbqa.py 2006-12-30 18:57:18 UTC (rev 38) +++ qa/trunk/testsuite/QMTest/fbqa.py 2006-12-31 12:56:05 UTC (rev 39) @@ -699,6 +699,13 @@ exc_info = sys.exc_info() self.__result[Result.EXCEPTION]= "%s: %s" % exc_info[:2] else: + if self.__context.has_key("SaveOutput"): + f = open(self.__context["SaveOutput"]+'.out',"w") + f.write(self.__OutputStrip(stdout)) + f.close() + f = open(self.__context["SaveOutput"]+'.err',"w") + f.write(stderr) + f.close() stdout_stripped= self.__StringStrip(stdout) # strip 
whole stdout stdout_e_stripped= self.__StringStrip(self.result_string) # strip whole expected stdout stderr_stripped= self.__StringStrip(stderr) # strip whole stderr @@ -774,6 +781,12 @@ stderr_a_strp.splitlines() ), "\n") self.__result.Fail("Expected error output from %s does not match actual error output." % desc) + def __OutputStrip(self, string): + for regex in self.__isqlsubs: + string = re.sub(regex, "", string).lstrip() + for pattern, replacement in self.substitutions: + string= re.compile(pattern.encode('UTF8'), re.M).sub(replacement.encode('UTF8'), string) + return string def __StringStrip(self, string, isql=True): """Strip command prompts and superfluous whitespace which might cause comparisons to fail on insignificant differences @@ -783,7 +796,7 @@ return string if isql: for regex in self.__isqlsubs: - string= re.sub(regex, "", string) + string = re.sub(regex, "", string) for pattern, replacement in self.substitutions: string= re.compile(pattern.encode('UTF8'), re.M).sub(replacement.encode('UTF8'), string) return self.__SpaceStrip(string) @@ -903,6 +916,13 @@ else: stdout_a = "STDOUT NOT AVAILABLE IN DEBUG MODE" sys.stdout= saved_out + if self.__context.has_key("SaveOutput"): + f = open(self.__context["SaveOutput"]+',out',"w") + f.write(stdout_a) + f.close() + f = open(self.__context["SaveOutput"]+'.err',"w") + f.write('') + f.close() stdout_e_stripped= self.__StringStrip(self.result_string.encode('UTF8'), isql=False) stdout_a_stripped= self.__StringStrip(stdout_a, isql=False) if stdout_a_stripped == stdout_e_stripped: This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <pc...@us...> - 2007-08-16 16:33:40
|
Revision: 370 http://firebird.svn.sourceforge.net/firebird/?rev=370&view=rev Author: pcisar Date: 2007-08-16 09:33:41 -0700 (Thu, 16 Aug 2007) Log Message: ----------- Separate test suite for tests that require non-standard setup. Added Paths: ----------- qa/trunk/specsetupsuite/ qa/trunk/specsetupsuite/README.TXT qa/trunk/specsetupsuite/access_restriction.qms/ qa/trunk/specsetupsuite/access_restriction.qms/database.qms/ Added: qa/trunk/specsetupsuite/README.TXT =================================================================== --- qa/trunk/specsetupsuite/README.TXT (rev 0) +++ qa/trunk/specsetupsuite/README.TXT 2007-08-16 16:33:41 UTC (rev 370) @@ -0,0 +1,7 @@ +Firebird QA Special Setup Test Suite +==================================== + +Tests in this test suite require non-standard server setup to run. + +Best regards +Pavel Cisar <pcisar _AT_ users.sourceforge.net> This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <pc...@us...> - 2007-11-26 09:24:50
|
Revision: 437 http://firebird.svn.sourceforge.net/firebird/?rev=437&view=rev Author: pcisar Date: 2007-11-26 01:24:53 -0800 (Mon, 26 Nov 2007) Log Message: ----------- Firebird Benchmark 1.0.1 Added Paths: ----------- qa/trunk/benchmark/ qa/trunk/benchmark/fbench.py qa/trunk/benchmark/fbenchdb.fbk Added: qa/trunk/benchmark/fbench.py =================================================================== --- qa/trunk/benchmark/fbench.py (rev 0) +++ qa/trunk/benchmark/fbench.py 2007-11-26 09:24:53 UTC (rev 437) @@ -0,0 +1,541 @@ +#!/usr/bin/python +# +# PROGRAM: Firebird Benchmark +# MODULE: fbench.py +# DESCRIPTION: Firebird Benchmark +# +# The contents of this file are subject to the Initial +# Developer's Public License Version 1.0 (the "License"); +# you may not use this file except in compliance with the +# License. You may obtain a copy of the License at +# http://www.firebirdsql.org/index.php?op=doc&id=idpl +# +# Software distributed under the License is distributed AS IS, +# WITHOUT WARRANTY OF ANY KIND, either express or implied. +# See the License for the specific language governing rights +# and limitations under the License. +# +# The Original Code was created by Pavel Cisar +# for the Firebird Open Source RDBMS project. +# http://www.firebirdsql.org +# +# Copyright (c) 2007 Pavel Cisar <pc...@us...> +# and all contributors signed below. +# +# All Rights Reserved. +# Contributor(s): ______________________________________. 
+ +import kinterbasdb as kdb +import time +import sys +import stats +import csv +from optparse import OptionParser + +version="%prog v1.0.1" + +class Benchmark(object): + results = [] + fetch = 1 + loop = 10 + + def prepare(self,db): + self.results = [] + self.db = db + self.c = db.cursor() + self.prep = self.c.prep(self.statement) + self.plan = self.prep.plan + + def close(self): + self.c.close() + self.median = stats.lmedian(self.results[1:]) + self.runtime = float(self.median) / float(self.loop) + + def execute(self): + for i in xrange(self.loop): + self.c.execute(self.prep) + if self.fetch == -1: + for r in self.c: + pass + else: + for i in xrange(self.fetch): + self.c.fetchone() + + def run(self): + start = time.clock() + self.execute() + stop = time.clock() + elapsed = stop-start + self.results.append(elapsed) + if self.fetch == 0: + self.c.fetchone() + return elapsed + + def name(self): + name = self.__class__.__name__ + if name[:5] == 'bench': + name = name[5:] + return name + +class BenchmarkRunner(object): + benchmarks = [] + samples = 5 + verbose = False + host = 'localhost' + + def __init__(self,dbfile): + self.dbfile = dbfile + + def sampling(self,benchmark): + benchmark.prepare(self.db) + # Throw away the first run + for i in xrange(self.samples+1): + print '.', + benchmark.run() + print + benchmark.close() + + def printResults(self,benchmark): + i = 1 + for r in benchmark.results[1:]: + print '%04i %f' % (i,r) + i = i+1 + print 'median',benchmark.median + print '1 run %f' % benchmark.runtime + print + + def run(self,host=None): + if host: + dsn = '%s:%s' % (host,self.dbfile) + else: + dsn = self.dbfile + print 'Database ',dsn + self.db = kdb.connect(dsn=dsn,user='sysdba',password='masterkey') + self.db.begin() + for benchmark in self.benchmarks: + print "Running %s %ix%i" % (benchmark.name(),self.samples,benchmark.loop), + self.sampling(benchmark) + if self.verbose: + self.printResults(benchmark) + self.db.commit() + + def 
addBenchmarks(self,benchmarkList,strict=True): + benchmarkList.sort() + for name in benchmarkList: + if strict and name[:5] != 'bench': + pass + else: + self.benchmarks.append(globals()[name]()) + +class benchComputeClient(Benchmark): + """Calls SP that returns one value 100000 times. + Compare it with result of benchComputeServer divided by 100000 to + see the client interface call overhead. + """ + def __init__(self): + self.statement = 'select * from SP_COMPUTE(1)' + self.loop = 100000 + self.fetch = -1 # fetch all ! + +class benchComputeServer(Benchmark): + """Calls SP once that returns 100000 values/rows. + Compare result divided by 100000 it with result of benchComputeClient to + see the client interface call overhead. + """ + def __init__(self): + self.statement = 'select * from SP_COMPUTE(100000)' + self.loop = 1 + self.fetch = -1 # fetch all ! + +class benchComputeClientProxy(Benchmark): + """Calls SP that returns one value 1000000 times via proxy SP. + Compare it with result of benchComputeClient to see internal SP call + overhead. + """ + def __init__(self): + self.statement = 'select * from SP_COMPUTE_PROXY(1)' + self.loop = 100000 + self.fetch = -1 # fetch all ! + +class benchComputeServerProxy(Benchmark): + """Calls SP once that returns 100000 values/rows via proxy SP. + Compare it with result of benchComputeServer to see internal SP call + overhead. + """ + def __init__(self): + self.statement = 'select * from SP_COMPUTE_PROXY(100000)' + self.loop = 1 + self.fetch = -1 # fetch all ! + +class benchComputeServerProxyNofetch(Benchmark): + """Calls SP once that calls value-generating SP 100000 times but returns single value. + Basic raw SP performance benchmark. + """ + def __init__(self): + self.statement = 'select * from SP_COMPUTE_PROXY_2(100000)' + self.loop = 1 + +class benchGeneratorClient(Benchmark): + """Calls SP that returns one value 100000 times. + This version uses GENERATOR to generate the value. 
+ """ + def __init__(self): + self.statement = 'select * from SP_GENERATOR(1)' + self.loop = 100000 + self.fetch = -1 # fetch all ! + +class benchGeneratorServer(Benchmark): + """Calls SP once that returns one 100000 values/rows. + This version uses GENERATOR to generate the value. + """ + def __init__(self): + self.statement = 'select * from SP_GENERATOR(100000)' + self.loop = 1 + self.fetch = -1 # fetch all ! + +class benchGeneratorClientProxy(Benchmark): + """Calls SP that returns one value 100000 times via proxy SP. + This version uses GENERATOR to generate the value. + """ + def __init__(self): + self.statement = 'select * from SP_GENERATOR_PROXY(1)' + self.loop = 100000 + self.fetch = -1 # fetch all ! + +class benchGeneratorServerProxy(Benchmark): + """Calls SP once that returns one 100000 values/rows via proxy SP. + This version uses GENERATOR to generate the value. + """ + def __init__(self): + self.statement = 'select * from SP_GENERATOR_PROXY(100000)' + self.loop = 1 + self.fetch = -1 # fetch all ! + +class benchGeneratorServerProxyNofetch(Benchmark): + """Calls SP once that calls value-generating SP 100000 times but returns single value. + This version uses GENERATOR to generate the value. + """ + def __init__(self): + self.statement = 'select * from SP_GENERATOR_PROXY_2(100000)' + self.loop = 1 + +class benchGeneratorTableClient(Benchmark): + """Calls SP that returns one value 100000 times. + This version uses GENERATOR and TABLE to generate the value. + """ + def __init__(self): + self.statement = 'select * from SP_GENERATOR_TABLE(1)' + self.loop = 10000 + self.fetch = -1 # fetch all ! + +class benchGeneratorTableServer(Benchmark): + """Calls SP once that returns one 100000 values/rows. + This version uses GENERATOR and TABLE to generate the value. + """ + def __init__(self): + self.statement = 'select * from SP_GENERATOR_TABLE(100000)' + self.loop = 1 + self.fetch = -1 # fetch all ! 
+ +class benchGeneratorTableClientProxy(Benchmark): + """Calls SP that returns one value 100000 times via proxy SP. + This version uses GENERATOR and TABLE to generate the value. + """ + def __init__(self): + self.statement = 'select * from SP_GENERATOR_TABLE_PROXY(1)' + self.loop = 10000 + self.fetch = -1 # fetch all ! + +class benchGeneratorTableServerProxy(Benchmark): + """Calls SP once that returns one 100000 values/rows via proxy SP. + This version uses GENERATOR and TABLE to generate the value. + """ + def __init__(self): + self.statement = 'select * from SP_GENERATOR_TABLE_PROXY(100000)' + self.loop = 1 + self.fetch = -1 # fetch all ! + +class benchGeneratorTableServerProxyNofetch(Benchmark): + """Calls SP once that calls value-generating SP 100000 times but returns single value. + This version uses GENERATOR and TABLE to generate the value. + """ + def __init__(self): + self.statement = 'select * from SP_GENERATOR_TABLE_PROXY_2(100000)' + self.loop = 1 + +class benchNaturalFullNofetch(Benchmark): + """Retrieve 1 000 000 rows from table via natural scan with NO FETCH. + """ + def __init__(self): + self.statement = 'select * from T2' + self.loop = 1000 + self.fetch = 0 + +class benchNaturalFullOne(Benchmark): + """Retrieve 1 000 000 rows from table via natural scan with ONE fetch. + """ + def __init__(self): + self.statement = 'select * from T2' + self.loop = 10 + self.fetch = 1 + +class benchNaturalFullAll(Benchmark): + """Retrieve 1 000 000 rows from table via natural scan with fetch ALL. + """ + def __init__(self): + self.statement = 'select * from T2' + self.loop = 5 + self.fetch = -1 # fetch all ! + +class benchNaturalFilterNofetch(Benchmark): + """Retrieve 8966 from 1 mil rows from table via natural scan with NO FETCH. 
+ """ + def __init__(self): + self.statement = 'select * from T2 where data+0 = 50' + self.loop = 1000 + self.fetch = 0 + +class benchNaturalFilterOne(Benchmark): + """Retrieve 8966 from 1 mil rows from table via natural scan with ONE fetch. + """ + def __init__(self): + self.statement = 'select * from T2 where data+0 = 50' + self.loop = 10 + self.fetch = 1 + +class benchNaturalFilterAll(Benchmark): + """Retrieve 8966 from 1 mil rows from table via natural scan with fetch ALL. + """ + def __init__(self): + self.statement = 'select * from T2 where data+0 = 50' + self.loop = 10 + self.fetch = -1 # fetch all ! + +class benchIndexFilterNofetch(Benchmark): + """Retrieve 8966 from 1 mil rows from table via index scan with NO FETCH. + """ + def __init__(self): + self.statement = 'select * from T2 where data = 50' + self.loop = 1000 + self.fetch = 0 + +class benchIndexFilterOne(Benchmark): + """Retrieve 8966 from 1 mil rows from table via index scan with ONE fetch. + """ + def __init__(self): + self.statement = 'select * from T2 where data = 50' + self.loop = 10 + self.fetch = 1 + +class benchIndexFilterAll(Benchmark): + """Retrieve 8966 from 1 mil rows from table via index scan with fetch ALL. + """ + def __init__(self): + self.statement = 'select * from T2 where data = 50' + self.loop = 10 + self.fetch = -1 # fetch all ! + +class benchIndex2OrFilterNofetch(Benchmark): + """Retrieve 17985 from 1 mil rows from table via (A OR B) index scan with NO FETCH. + """ + def __init__(self): + self.statement = 'select * from T2 where data = 50 or data = 60' + self.loop = 1000 + self.fetch = 0 + +class benchIndex2OrFilterOne(Benchmark): + """Retrieve 17985 from 1 mil rows from table via (A OR B) index scan with ONE fetch. + """ + def __init__(self): + self.statement = 'select * from T2 where data = 50 or data = 60' + self.loop = 100 + self.fetch = 1 + +class benchIndex2OrFilterAll(Benchmark): + """Retrieve 17985 from 1 mil rows from table via (A OR B) index scan with fetch ALL. 
+ """ + def __init__(self): + self.statement = 'select * from T2 where data = 50 or data = 60' + self.loop = 100 + self.fetch = -1 # fetch all ! + +class benchIndex2AndFilterNofetch(Benchmark): + """Retrieve 4413 from 1 mil rows from table via (A AND B) index scan with NO FETCH. + """ + def __init__(self): + self.statement = 'select * from t2 where data = 50 and id >= 500000' + self.loop = 500 + self.fetch = 0 + +class benchIndex2AndFilterOne(Benchmark): + """Retrieve 4413 from 1 mil rows from table via (A AND B) index scan with ONE fetch. + """ + def __init__(self): + self.statement = 'select * from t2 where data = 50 and id >= 500000' + self.loop = 100 + self.fetch = 1 + +class benchIndex2AndFilterAll(Benchmark): + """Retrieve 4413 from 1 mil rows from table via (A AND B) index scan with fetch ALL. + """ + def __init__(self): + self.statement = 'select * from t2 where data = 50 and id >= 500000' + self.loop = 100 + self.fetch = -1 # fetch all ! + +class benchIndex3OrAndFilterNofetch(Benchmark): + """Retrieve 8918 from 1 mil rows from table via (A OR B) AND C index scan with NO FETCH. + """ + def __init__(self): + self.statement = 'select * from t2 where (data = 50 or data = 60) and id >= 500000' + self.loop = 100 + self.fetch = 0 + +class benchIndex3OrAndFilterOne(Benchmark): + """Retrieve 8918 from 1 mil rows from table via (A OR B) AND C index scan with ONE fetch. + """ + def __init__(self): + self.statement = 'select * from t2 where (data = 50 or data = 60) and id >= 500000' + self.loop = 100 + self.fetch = 1 + +class benchIndex3OrAndFilterAll(Benchmark): + """Retrieve 8918 from 1 mil rows from table via (A OR B) AND C index scan with fetch ALL. + """ + def __init__(self): + self.statement = 'select * from t2 where (data = 50 or data = 60) and id >= 500000' + self.loop = 100 + self.fetch = -1 # fetch all ! + +class benchSortNofetch(Benchmark): + """Sort 1 000 000 rows into 9 groups using SORT with NO FETCH. 
+ """ + def __init__(self): + self.statement = 'select cardinality+0,count(*) from t1 group by 1' + self.loop = 1000 + self.fetch = 0 + +class benchSortOne(Benchmark): + """Sort 1 000 000 rows into 9 groups using SORT with ONE fetch. + """ + def __init__(self): + self.statement = 'select cardinality+0,count(*) from t1 group by 1' + self.loop = 5 + self.fetch = 1 + +class benchSortAll(Benchmark): + """Sort 1 000 000 rows into 9 groups using SORT with fetch ALL. + """ + def __init__(self): + self.statement = 'select cardinality+0,count(*) from t1 group by 1' + self.loop = 5 + self.fetch = -1 # fetch all ! + +class benchOrderNofetch(Benchmark): + """Sort 1 000 000 rows into 9 groups using index ORDER with NO FETCH. + """ + def __init__(self): + self.statement = 'select cardinality,count(*) from t1 group by 1' + self.loop = 1000 + self.fetch = 0 + +class benchOrderOne(Benchmark): + """Sort 1 000 000 rows into 9 groups using index ORDER with ONE fetch. + """ + def __init__(self): + self.statement = 'select cardinality,count(*) from t1 group by 1' + self.loop = 5 + self.fetch = 1 + +class benchOrderAll(Benchmark): + """Sort 1 000 000 rows into 9 groups using index ORDER with fetch ALL. + """ + def __init__(self): + self.statement = 'select cardinality,count(*) from t1 group by 1' + self.loop = 5 + self.fetch = -1 # fetch all ! + +class benchJoinNofetch(Benchmark): + """Join 1 000 000 rows with 1 000 000 rows using FK index with NO FETCH. + """ + def __init__(self): + self.statement = 'select t1.id,t2.id,t2.data from t1 join t2 on t1.t2id = t2.id' + self.loop = 1000 + self.fetch = 0 + +class benchJoinOne(Benchmark): + """Join 1 000 000 rows with 1 000 000 rows using FK index with ONE fetch. + """ + def __init__(self): + self.statement = 'select t1.id,t2.id,t2.data from t1 join t2 on t1.t2id = t2.id' + self.loop = 10 + self.fetch = 1 + +class benchJoinAll(Benchmark): + """Join 1 000 000 rows with 1 000 000 rows using FK index with fetch ALL. 
+ """ + def __init__(self): + self.statement = 'select t1.id,t2.id,t2.data from t1 join t2 on t1.t2id = t2.id' + self.loop = 1 + self.fetch = -1 # fetch all ! + + +def main(options, args): + runner = BenchmarkRunner(args[0]) + runner.verbose = options.verbose + runner.samples = options.samples + runner.host = options.host + if options.benchmark: + benchlist = options.benchmark.split(',') + runner.addBenchmarks(['bench'+b for b in benchlist]) + else: + runner.addBenchmarks(globals().keys()) + runner.run(options.host) + if options.output: + f = open(options.output,'w') + frm = ['Name','Iterations','Statement','Plan'] + for i in xrange(runner.samples): + frm.append('Sample%i' % (i+1)) + frm.extend(('Median','Runtime')) + row = dict().fromkeys(frm) + f.write(','.join(frm)) + f.write('\n') + csvfile = csv.DictWriter(f,frm) + for benchmark in runner.benchmarks: + row['Name'] = benchmark.name() + row['Iterations'] = benchmark.loop + row['Statement'] = benchmark.statement + row['Plan'] = benchmark.plan + for i in xrange(runner.samples): + row['Sample%i' % (i+1)] = benchmark.results[i+1] + row['Median'] = benchmark.median + row['Runtime'] = benchmark.runtime + csvfile.writerow(row) + f.close() + +if __name__ == '__main__': + usage = "usage: %prog [options] database" + parser = OptionParser(usage, version=version) + parser.add_option("-t", "--host", help="host database server") + parser.add_option("-l", "--list", action="store_true", help="list benchmark names") + parser.add_option("-b", "--benchmark", help="run only specified BENCHMARKs") + parser.add_option("-s", "--samples", help="number of sample runs", + default="5",type="int") + parser.add_option("-v", "--verbose", help="print results on screen", + action="store_true", dest="verbose") + parser.add_option("-o", "--output", + metavar="FILE", help="write output to FILE") + + (options, args) = parser.parse_args() + if options.list: + runner = BenchmarkRunner(None) + runner.addBenchmarks(globals().keys()) + print 'List of 
available benchmarks:' + print + for benchmark in runner.benchmarks: + print benchmark.name() + elif len(args) != 1: + parser.error("database name not specified") + else: + main(options, args) + Added: qa/trunk/benchmark/fbenchdb.fbk =================================================================== (Binary files differ) Property changes on: qa/trunk/benchmark/fbenchdb.fbk ___________________________________________________________________ Name: svn:mime-type + application/octet-stream This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <pc...@us...> - 2007-11-26 09:30:30
|
Revision: 438 http://firebird.svn.sourceforge.net/firebird/?rev=438&view=rev Author: pcisar Date: 2007-11-26 01:30:34 -0800 (Mon, 26 Nov 2007) Log Message: ----------- kinterbasdb fixes for Firebird 2.1 Added Paths: ----------- qa/trunk/kinterbasdb/ qa/trunk/kinterbasdb/__init__.py Added: qa/trunk/kinterbasdb/__init__.py =================================================================== --- qa/trunk/kinterbasdb/__init__.py (rev 0) +++ qa/trunk/kinterbasdb/__init__.py 2007-11-26 09:30:34 UTC (rev 438) @@ -0,0 +1,2353 @@ +# KInterbasDB Python Package - Python Wrapper for Core +# +# Version 3.2 +# +# The following contributors hold Copyright (C) over their respective +# portions of code (see license.txt for details): +# +# [Original Author (maintained through version 2.0-0.3.1):] +# 1998-2001 [alex] Alexander Kuznetsov <al...@us...> +# [Maintainers (after version 2.0-0.3.1):] +# 2001-2002 [maz] Marek Isalski <kin...@ma...> +# 2002-2006 [dsr] David Rushby <woo...@ro...> +# [Contributors:] +# 2001 [eac] Evgeny A. Cherkashin <eug...@ic...> +# 2001-2002 [janez] Janez Jere <jan...@vo...> + +# The doc strings throughout this module explain what API *guarantees* +# kinterbasdb makes. +# Notably, the fact that users can only rely on the return values of certain +# functions/methods to be sequences or mappings, not instances of a specific +# class. This policy is still compliant with the DB API spec, and is more +# future-proof than implying that all of the classes defined herein can be +# relied upon not to change. Module members whose names begin with an +# underscore cannot be expected to have stable interfaces. 
+ +__version__ = (3, 2, 0, 'final', 0) +__timestamp__ = '2006.08.11.18.22.07.UTC' + +import os, struct, sys + +if sys.platform.lower().startswith('win'): + import os.path + + # Better out-of-box support for embedded DB engine on Windows: if the + # client library is detected in the same directory as kinterbasdb, or in + # the 'embedded' subdirectory of kinterbasdb's directory, or in the same + # directory as the Python executable, give that precedence over the + # location listed in the registry. + # + # Client programmers who happen to read this should note that the embedded + # engine malfunctions in various ways unless it's located in the same + # directory as the actual executable (python.exe). + _clientLibDir = None + _kinterbasdbDir = os.path.dirname(os.path.abspath(__file__)) + for _clientLibName in ( + 'firebird.dll', # Vulcan + 'fbclient.dll', # FB 1.5, 2.x + ): + for _location in ( + os.path.join(os.getcwd(), _clientLibName), + os.path.join(_kinterbasdbDir, _clientLibName), + os.path.join(os.path.join(_kinterbasdbDir, 'embedded'), + _clientLibName + ), + os.path.join(os.path.dirname(sys.executable), _clientLibName), + ): + if os.path.isfile(_location): + _clientLibDir = os.path.dirname(_location) + break + + if _clientLibDir: + break + + if _clientLibDir: + os.environ['PATH'] = _clientLibDir + os.pathsep + os.environ['PATH'] + # At least with FB 1.5.2, the FIREBIRD environment variable must also + # be set in order for all features to work properly. + os.environ['FIREBIRD'] = _clientLibDir + else: + # FB 1.5 RC7 and later, when installed via the packaged installer or + # the "instreg.exe" command-line tool, record their installation dir in + # the registry. If no client library was detected earlier, we'll add + # the "bin" subdirectory of the directory from the registry to the + # *end* of the PATH, so it'll be used as a last resort. 
+ import _winreg + + _reg = _winreg.ConnectRegistry(None, _winreg.HKEY_LOCAL_MACHINE) + try: + try: + _dbInstPathsKey = _winreg.OpenKey( + _reg, r'SOFTWARE\Firebird Project\Firebird Server' + '\Instances' + ) + try: + _instPath = _winreg.QueryValueEx( + _dbInstPathsKey, 'DefaultInstance' + )[0] + finally: + _dbInstPathsKey.Close() + del _dbInstPathsKey + except WindowsError: + # Versions of IB/FB prior to FB 1.5 RC7 don't have this _reg + # entry, but they install the client library into a system + # library directory, so there's no problem. + pass + else: + os.environ['PATH'] += os.pathsep + os.path.join( + _instPath, 'bin' + ) + del _instPath + finally: + _reg.Close() + + del _winreg, _reg + + del _clientLibDir, _kinterbasdbDir, _clientLibName, _location + +# The underlying C module: +import _kinterbasdb as _k + +# Import database API constants into the namespace of this module as Python +# objects: +_k.init_kidb_basic_header_constants(globals()) + +# Export utility members: +FB_API_VER = _k.FB_API_VER + +portable_int = _k.portable_int +raw_timestamp_to_tuple = _k.raw_timestamp_to_tuple + +DEFAULT_CONCURRENCY_LEVEL = _k.DEFAULT_CONCURRENCY_LEVEL +get_concurrency_level = _k.concurrency_level_get + +# Initialize the k_exceptions so that other Python modules in kinterbasdb can +# have access to kinterbasdb's exceptions without a circular import. 
+import k_exceptions + +Warning = k_exceptions.Warning = _k.Warning +Error = k_exceptions.Error = _k.Error +InterfaceError = k_exceptions.InterfaceError = _k.InterfaceError +DatabaseError = k_exceptions.DatabaseError = _k.DatabaseError +DataError = k_exceptions.DataError = _k.DataError +OperationalError = k_exceptions.OperationalError = _k.OperationalError +IntegrityError = k_exceptions.IntegrityError = _k.IntegrityError +InternalError = k_exceptions.InternalError = _k.InternalError + +ProgrammingError = k_exceptions.ProgrammingError = _k.ProgrammingError +TransactionConflict = k_exceptions.TransactionConflict = _k.TransactionConflict + +NotSupportedError = k_exceptions.NotSupportedError = _k.NotSupportedError + +_EVENT_HANDLING_SUPPORTED = hasattr(_k, 'ConduitWasClosed') +if _EVENT_HANDLING_SUPPORTED: + ConduitWasClosed = k_exceptions.ConduitWasClosed = _k.ConduitWasClosed + +_CONNECTION_TIMEOUT_SUPPORTED = hasattr(_k, 'ConnectionTimedOut') +if _CONNECTION_TIMEOUT_SUPPORTED: + ConnectionTimedOut = k_exceptions.ConnectionTimedOut = \ + _k.ConnectionTimedOut + import _connection_timeout + +_ALL_EXCEPTION_CLASSES = [ + Warning, + Error, + InterfaceError, + DatabaseError, + DataError, + OperationalError, + IntegrityError, + InternalError, + ProgrammingError, + NotSupportedError, + ] + +if _EVENT_HANDLING_SUPPORTED: + _ALL_EXCEPTION_CLASSES.append(ConduitWasClosed) + +if _CONNECTION_TIMEOUT_SUPPORTED: + _ALL_EXCEPTION_CLASSES.append(ConnectionTimedOut) + +_ALL_EXCEPTION_CLASSES = tuple(_ALL_EXCEPTION_CLASSES) + +########################################## +## PUBLIC CONSTANTS: BEGIN ## +########################################## + +# Note: Numerous database API constants were imported into the global +# namespace of this module by an earlier call to +# _k.init_kidb_basic_header_constants. See _kinterbasdb_constants.c for more +# info. 
+ +apilevel = '2.0' +threadsafety = 1 +paramstyle = 'qmark' + +# Named positional constants to be used as indices into the description +# attribute of a cursor (these positions are defined by the DB API spec). +# For example: +# nameOfFirstField = cursor.description[0][kinterbasdb.DESCRIPTION_NAME] + +DESCRIPTION_NAME = 0 +DESCRIPTION_TYPE_CODE = 1 +DESCRIPTION_DISPLAY_SIZE = 2 +DESCRIPTION_INTERNAL_SIZE = 3 +DESCRIPTION_PRECISION = 4 +DESCRIPTION_SCALE = 5 +DESCRIPTION_NULL_OK = 6 + +# Default transaction parameter buffer: +default_tpb = ( + # isc_tpb_version3 is a *purely* infrastructural value. kinterbasdb will + # gracefully handle user-specified TPBs that don't start with + # isc_tpb_version3 (as well as those that do start with it). + isc_tpb_version3 + + + isc_tpb_write # Access mode + + isc_tpb_read_committed + isc_tpb_rec_version # Isolation level + + isc_tpb_wait # Lock resolution strategy +# + isc_tpb_shared # Table reservation + # access method + ) + +from _request_buffer_builder import RequestBufferBuilder as _RequestBufferBuilder +_request_buffer_builder.portable_int = portable_int + +########################################## +## PUBLIC CONSTANTS: END ## +########################################## + + +################################################### +## DYNAMIC TYPE TRANSLATION CONFIGURATION: BEGIN ## +################################################### +# Added deferred loading of dynamic type converters to facilitate the +# elimination of all dependency on the mx package. The implementation is quite +# ugly due to backward compatibility constraints. 
+ +BASELINE_TYPE_TRANSLATION_FACILITIES = ( + # Date and time translator names: + 'date_conv_in', 'date_conv_out', + 'time_conv_in', 'time_conv_out', + 'timestamp_conv_in', 'timestamp_conv_out', + + # Fixed point translator names: + 'fixed_conv_in_imprecise', 'fixed_conv_in_precise', + 'fixed_conv_out_imprecise', 'fixed_conv_out_precise', + + # Optional unicode converters: + 'OPT:unicode_conv_in', 'OPT:unicode_conv_out', + + # DB API 2.0 standard date and time type constructors: + 'Date', 'Time', 'Timestamp', + 'DateFromTicks', 'TimeFromTicks', 'TimestampFromTicks', + ) + +# The next three will be modified by the init function as appropriate: +_MINIMAL_TYPE_TRANS_TYPES = ('DATE', 'TIME', 'TIMESTAMP', 'FIXED',) +_NORMAL_TYPE_TRANS_IN = None +_NORMAL_TYPE_TRANS_OUT = None + + +initialized = False +def init(type_conv=1, concurrency_level=_k.DEFAULT_CONCURRENCY_LEVEL): + global initialized, _MINIMAL_TYPE_TRANS_TYPES, \ + _NORMAL_TYPE_TRANS_IN, _NORMAL_TYPE_TRANS_OUT + + if initialized: + raise ProgrammingError('Cannot initialize module more than once.') + + if _k.DEFAULT_CONCURRENCY_LEVEL == 0: + if concurrency_level != 0: + raise ProgrammingError('Support for concurrency was disabled at' + ' compile time, so only Level 0 is available.' + ) + # Since only Level 0 is available and it's already active, there's no + # need to do anything. + else: + if concurrency_level not in (1,2): + raise ProgrammingError('Only Levels 1 and 2 are accessible at' + ' runtime; Level 0 can only be activated at compile time.' 
+ ) + _k.concurrency_level_set(concurrency_level) + + _k.provide_refs_to_python_entities( + _RowMapping, + _make_output_translator_return_type_dict_from_trans_dict, + _look_up_array_descriptor, + _look_up_array_subtype, + _Cursor_execute_exception_type_filter, + ) + + globalz = globals() + if not isinstance(type_conv, int): + typeConvModule = type_conv + else: + typeConvOptions = { + 0: 'typeconv_naked', + 1: 'typeconv_backcompat', # the default + 100: 'typeconv_23plus', + 199: 'typeconv_23plus_lowmem', + 200: 'typeconv_24plus', # considered the "ideal" as of KIDB 3.2 + } + chosenTypeConvModuleName = typeConvOptions[type_conv] + typeConvModule = __import__('kinterbasdb.' + chosenTypeConvModuleName, + globalz, locals(), (chosenTypeConvModuleName,) + ) + if type_conv > 1: + _MINIMAL_TYPE_TRANS_TYPES = \ + _MINIMAL_TYPE_TRANS_TYPES + ('TEXT_UNICODE',) + + for name in BASELINE_TYPE_TRANSLATION_FACILITIES: + if not name.startswith('OPT:'): + typeConvModuleMember = getattr(typeConvModule, name) + else: + # Members whose entries in BASELINE_TYPE_TRANSLATION_FACILITIES + # begin with 'OPT:' are not required. + name = name[4:] + try: + typeConvModuleMember = getattr(typeConvModule, name) + except AttributeError: + continue + + globalz[name] = typeConvModuleMember + + # Modify the initial, empty version of the DB API type singleton DATETIME, + # transforming it into a fully functional version. + # The fact that the object is *modified* rather than replaced is crucial to + # the preservation of compatibility with the 'from kinterbasdb import *' + # form of importation. + DATETIME.values = ( + # Date, Time, and Timestamp refer to functions just loaded from the + # typeConvModule in the loop above. 
+ type(Date(2003,12,31)), + type(Time(23,59,59)), + type(Timestamp(2003,12,31,23,59,59)) + ) + + _NORMAL_TYPE_TRANS_IN = { + 'DATE': date_conv_in, + 'TIME': time_conv_in, + 'TIMESTAMP': timestamp_conv_in, + 'FIXED': fixed_conv_in_imprecise, + } + _NORMAL_TYPE_TRANS_OUT = { + 'DATE': date_conv_out, + 'TIME': time_conv_out, + 'TIMESTAMP': timestamp_conv_out, + 'FIXED': fixed_conv_out_imprecise, + } + if type_conv > 1: + _NORMAL_TYPE_TRANS_IN['TEXT_UNICODE'] = unicode_conv_in + _NORMAL_TYPE_TRANS_OUT['TEXT_UNICODE'] = unicode_conv_out + + initialized = True + +def _ensureInitialized(): + if not initialized: + init() + +# The following constructors will be replaced when kinterbasdb.init is called, +# whether implicitly or explicitly. If one of the constructors is called +# before kinterbasdb.init, it will trigger its own replacement by calling +# _ensureInitialized. +def Date(year, month, day): + _ensureInitialized() + return Date(year, month, day) + +def Time(hour, minute, second): + _ensureInitialized() + return Time(hour, minute, second) + +def Timestamp(year, month, day, hour, minute, second): + _ensureInitialized() + return Timestamp(year, month, day, hour, minute, second) + +def DateFromTicks(ticks): + _ensureInitialized() + return DateFromTicks(ticks) + +def TimeFromTicks(ticks): + _ensureInitialized() + return TimeFromTicks(ticks) + +def TimestampFromTicks(ticks): + _ensureInitialized() + return TimestampFromTicks(ticks) + + +################################################### +## DYNAMIC TYPE TRANSLATION CONFIGURATION: END ## +################################################### + + +############################################ +## PUBLIC DB-API TYPE CONSTRUCTORS: BEGIN ## +############################################ + +# All date/time constructors are loaded dynamically by the init function. 
+ +# Changed from buffer to str in 3.1, with the possible addition of a lazy BLOB +# reader at some point in the future: +Binary = str + + +# DBAPITypeObject implementation is the DB API's suggested implementation. +class DBAPITypeObject: # Purposely remains a "classic class". + def __init__(self, *values): + self.values = values + def __cmp__(self, other): + if other in self.values: + return 0 + if other < self.values: + return 1 + else: + return -1 + + +STRING = DBAPITypeObject(str, unicode) + +BINARY = DBAPITypeObject(str, buffer) + +NUMBER = DBAPITypeObject(int, long, float) + +# DATETIME is loaded in a deferred manner (in the init function); this initial +# version remains empty only temporarily. +DATETIME = DBAPITypeObject() + +ROWID = DBAPITypeObject() + +############################################ +## PUBLIC DB-API TYPE CONSTRUCTORS: END ## +############################################ + + +########################################## +## PUBLIC FUNCTIONS: BEGIN ## +########################################## + +def connect(*args, **keywords_args): + """ + Minimal arguments: keyword args $dsn, $user, and $password. + Establishes a kinterbasdb.Connection to a database. See the docstring + of kinterbasdb.Connection for details. + """ + return Connection(*args, **keywords_args) + + +def create_database(*args): + """ + Creates a new database with the supplied "CREATE DATABASE" statement. + Returns an active kinterbasdb.Connection to the newly created database. + + Parameters: + $sql: string containing the CREATE DATABASE statement. Note that you may + need to specify a username and password as part of this statement (see + the Firebird SQL Reference for syntax). + $dialect: (optional) the SQL dialect under which to execute the statement + """ + _ensureInitialized() + + # For a more general-purpose immediate execution facility (the non-"CREATE + # DATABASE" variant of isc_dsql_execute_immediate, for those who care), see + # Connection.execute_immediate. 
+ + C_con = _k.create_database(*args) + return Connection(_CConnection=C_con) + + +def raw_byte_to_int(raw_byte): + """ + Convert the byte in the single-character Python string $raw_byte into a + Python integer. This function is essentially equivalent to the built-in + function ord, but is different in intent (see the database_info method). + """ + _ensureInitialized() + + if len(raw_byte) != 1: + raise ValueError('raw_byte must be exactly one byte, not %d bytes.' + % len(raw_byte) + ) + return struct.unpack('b', raw_byte)[0] + + +########################################## +## PUBLIC FUNCTIONS: END ## +########################################## + + +########################################## +## PUBLIC CLASSES: BEGIN ## +########################################## + +# BlobReader, PreparedStatement and Cursor can't be instantiated from Python, +# but are exposed here to support isinstance(o, kinterbasdb.Class) and the +# like. +BlobReader = _k.BlobReader +PreparedStatement = _k.PreparedStatement +Cursor = _k.Cursor + +if _EVENT_HANDLING_SUPPORTED: + EventConduit = _k.EventConduit + +class Connection(object): + """ + Represents a connection between the database client (the Python process) + and the database server. + + The basic behavior of this class is documented by the Python DB API + Specification 2.0; this docstring covers only some extensions. Also see + the KInterbasDB Usage Guide (docs/usage.html). + + Attributes: + $dialect (read-only): + The Interbase SQL dialect of the connection. One of: + 1 for Interbase < 6.0 + 2 for "migration mode" + 3 for Interbase >= 6.0 and Firebird (the default) + + $precision_mode (read/write): (DEPRECATED) + precision_mode is deprecated in favor of dynamic type translation + (see the [set|get]_type_trans_[in|out] methods, and the Usage Guide). + --- + precision_mode 0 (the default) represents database fixed point values + (NUMERIC/DECIMAL fields) as Python floats, potentially losing precision. 
+ precision_mode 1 represents database fixed point values as scaled + Python integers, preserving precision. + For more information, see the KInterbasDB Usage Guide. + + $server_version (read-only): + The version string of the database server to which this connection + is connected. + + $default_tpb (read/write): + The transaction parameter buffer (TPB) that will be used by default + for new transactions opened in the context of this connection. + For more information, see the KInterbasDB Usage Guide. + """ + + def __init__(self, *args, **keywords_args): + # self._C_con is the instance of ConnectionType that represents this + # connection in the underlying C module _k. + + _ensureInitialized() + + # Optional DB API Extension: Make the module's exception classes + # available as connection attributes to ease cross-module portability: + for exc_class in _ALL_EXCEPTION_CLASSES: + setattr(self, exc_class.__name__, exc_class) + + # Inherit the module-level default TPB. + self._default_tpb = default_tpb + + # Allow other code WITHIN THIS MODULE to obtain an instance of + # ConnectionType some other way and provide it to us instead of us + # creating one via Connection_connect. (The create_database function + # uses this facility, for example.) + if '_CConnection' in keywords_args: + self._C_con = keywords_args['_CConnection'] + # Since we were given a pre-existing CConnection instance rather than + # creating it ourselves, we need to explicitly give it a reference + # to its Python companion (self): + _k.Connection_python_wrapper_obj_set(self._C_con, self) + else: + n_nonkeyword = len(args) + n_keyword = len(keywords_args) + + if n_nonkeyword == 0 and n_keyword == 0: + raise ProgrammingError( + 'connect() requires at least 3 keyword arguments.' + ) + elif n_keyword > 0 and n_nonkeyword == 0: + source_dict = keywords_args # The typical case. + else: + # This case is for backward compatibility ONLY: + import warnings # Lazy import. 
+ warnings.warn('The non-keyword-argument form of the connect()' + ' function is deprecated. Use' + ' connect(dsn=..., user=..., password=...) rather than' + ' connect(..., ..., ...)', + DeprecationWarning + ) + if n_keyword > 0: + raise ProgrammingError('Do not specify both non-keyword' + ' args and keyword args (keyword-only is preferred).' + ) + elif n_nonkeyword != 3: + raise ProgrammingError('If using non-keyword args, must' + ' provide exactly 3: dsn, user, password.' + ) + else: + # Transform the argument tuple into an argument dict. + source_dict = {'dsn': args[0], 'user': args[1], + 'password': args[2] + } + + timeout = keywords_args.pop('timeout', None) + + if timeout is not None: + if not _CONNECTION_TIMEOUT_SUPPORTED: + raise ProgrammingError("The connection timeout feature is" + " disabled in this build." + ) + + _connection_timeout.startTimeoutThreadIfNecessary( + _k.ConnectionTimeoutThread_main, _k.CTM_halt + ) + + # Pre-render the requisite buffers (plus the dialect), then send + # them down to the C level. _k.Connection_connect() will give us a + # C-level connection structure (self._C_con, of type + # ConnectionType) in return. self will then serve as a proxy for + # self._C_con. + # + # Notice that once rendered by _build_connect_structures, the + # connection parameters are retained in self._C_con_params in case + # kinterbasdb's internals need to clone this connection. + b = _DPBBuilder() + b.buildFromParamDict(source_dict) + self._charset = b.charset + self._C_con_params = (b.dsn, b.dpb, b.dialect) + self._C_con = _k.Connection_connect(self, + b.dsn, b.dpb, b.dialect, timeout + ) + + self._normalize_type_trans() + # 2003.03.30: Moved precision_mode up to the Python level (it's + # deprecated). + self._precision_mode = 0 + + + def __del__(self): + # This method should not call the Python implementation of close(). 
+ self._close_physical_connection(raiseExceptionOnError=False) + + + def drop_database(self): + """ + Drops the database to which this connection is attached. + + Unlike plain file deletion, this method behaves responsibly, in that + it removes shadow files and other ancillary files for this database. + """ + self._ensure_group_membership(False, "Cannot drop database via" + " connection that is part of a ConnectionGroup." + ) + _k.Connection_drop_database(self._C_con) + + + def begin(self, tpb=None): + """ + Starts a transaction explicitly. This is never *required*; a + transaction will be started implicitly if necessary. + + Parameters: + $tpb: Optional transaction parameter buffer (TPB) populated with + kinterbasdb.isc_tpb_* constants. See the Interbase API guide for + these constants' meanings. + """ + if self.group is not None: + self.group.begin() + return + + if tpb is None: + tpb = self.default_tpb + else: + tpb = _validateTPB(tpb) + + if not isinstance(tpb, str): + tpb = tpb.render() + + _k.Connection_begin(self._C_con, tpb) + + + def prepare(self): + """ + Manually triggers the first phase of a two-phase commit (2PC). Use + of this method is optional; if preparation is not triggered manually, + it will be performed implicitly by commit() in a 2PC. + See also the method ConnectionGroup.prepare. + """ + if self.group is not None: + self.group.prepare() + return + + _k.Connection_prepare(self._C_con) + + + def commit(self, retaining=False): + """ + Commits (permanently applies the actions that have taken place as + part of) the active transaction. + + Parameters: + $retaining (optional boolean that defaults to False): + If True, the transaction is immediately cloned after it has been + committed. This retains system resources associated with the + transaction and leaves undisturbed the state of any cursors open on + this connection. In effect, retaining commit keeps the transaction + "open" across commits. + See IB 6 API Guide pages 75 and 291 for more info. 
+ """ + if self.group is not None: + self.group.commit(retaining=retaining) + return + + _k.Connection_commit(self._C_con, retaining) + + + def savepoint(self, name): + """ + Establishes a SAVEPOINT named $name. + To rollback to this SAVEPOINT, use rollback(savepoint=name). + + Example: + con.savepoint('BEGINNING_OF_SOME_SUBTASK') + ... + con.rollback(savepoint='BEGINNING_OF_SOME_SUBTASK') + """ + self.execute_immediate('SAVEPOINT ' + name) + + + def rollback(self, retaining=False, savepoint=None): + """ + Rolls back (cancels the actions that have taken place as part of) the + active transaction. + + Parameters: + $retaining (optional boolean that defaults to False): + If True, the transaction is immediately cloned after it has been + rolled back. This retains system resources associated with the + transaction and leaves undisturbed the state of any cursors open on + this connection. In effect, retaining rollback keeps the + transaction "open" across rollbacks. + See IB 6 API Guide pages 75 and 373 for more info. + $savepoint (string name of the SAVEPOINT): + If a savepoint name is supplied, only rolls back as far as that + savepoint, rather than rolling back the entire transaction. + """ + if savepoint is None: + if self.group is not None: + self.group.rollback(retaining=retaining) + return + + _k.Connection_rollback(self._C_con, retaining) + else: + self.execute_immediate('ROLLBACK TO ' + savepoint) + + + def execute_immediate(self, sql): + """ + Executes a statement without caching its prepared form. The + statement must NOT be of a type that returns a result set. + + In most cases (especially cases in which the same statement--perhaps + a parameterized statement--is executed repeatedly), it is better to + create a cursor using the connection's cursor() method, then execute + the statement using one of the cursor's execute methods. 
+ """ + _k.Connection_execute_immediate(self._C_con, sql) + + + def database_info(self, request, result_type): + """ + Wraps the Interbase C API function isc_database_info. + + For documentation, see the IB 6 API Guide section entitled + "Requesting information about an attachment" (p. 51). + + Note that this method is a VERY THIN wrapper around the IB C API + function isc_database_info. This method does NOT attempt to interpret + its results except with regard to whether they are a string or an + integer. + + For example, requesting isc_info_user_names will return a string + containing a raw succession of length-name pairs. A thicker wrapper + might interpret those raw results and return a Python tuple, but it + would need to handle a multitude of special cases in order to cover + all possible isc_info_* items. + + Note: Some of the information available through this method would be + more easily retrieved with the Services API (see submodule + kinterbasdb.services). + + Parameters: + $result_type must be either: + 's' if you expect a string result, or + 'i' if you expect an integer result + """ + # Note: Server-side implementation for most of isc_database_info is in + # jrd/inf.cpp. + res = _k.Connection_database_info(self._C_con, request, result_type) + + # 2004.12.12: + # The result buffers for a few request codes don't follow the generic + # conventions, so we need to return their full contents rather than + # omitting the initial infrastructural bytes. + if ( result_type == 's' + and request not in _DATABASE_INFO__KNOWN_LOW_LEVEL_EXCEPTIONS + ): + res = res[3:] + + return res + + + def db_info(self, request): + # Contributed by Pavel Cisar; incorporated 2004.09.10; heavily modified + # 2004.12.12. + """ + Higher-level convenience wrapper around the database_info method that + parses the output of database_info into Python-friendly objects instead + of returning raw binary buffers in the case of complex result types. 
+ If an unrecognized code is requested, ValueError is raised. + + Parameters: + $request must be either: + - A single kinterbasdb.isc_info_* info request code. + In this case, a single result is returned. + - A sequence of such codes. + In this case, a mapping of (info request code -> result) is + returned. + """ + # Notes: + # + # - IB 6 API Guide page 391: "In InterBase, integer values... + # are returned in result buffers in a generic format where + # the least significant byte is first, and the most + # significant byte last." + + # We process request as a sequence of info codes, even if only one code + # was supplied by the caller. + requestIsSingleton = isinstance(request, int) + if requestIsSingleton: + request = (request,) + + results = {} + for infoCode in request: + if infoCode == isc_info_base_level: + # (IB 6 API Guide page 52) + buf = self.database_info(infoCode, 's') + # Ignore the first byte. + baseLevel = struct.unpack('B', buf[1])[0] + results[infoCode] = baseLevel + elif infoCode == isc_info_db_id: + # (IB 6 API Guide page 52) + buf = self.database_info(infoCode, 's') + pos = 0 + + conLocalityCode = struct.unpack('B', buf[pos])[0] + pos += 1 + + dbFilenameLen = struct.unpack('B', buf[1])[0] + pos += 1 + + dbFilename = buf[pos:pos+dbFilenameLen] + pos += dbFilenameLen + + siteNameLen = struct.unpack('B', buf[pos])[0] + pos += 1 + + siteName = buf[pos:pos+siteNameLen] + pos += siteNameLen + + results[infoCode] = (conLocalityCode, dbFilename, siteName) + elif infoCode == isc_info_implementation: + # (IB 6 API Guide page 52) + buf = self.database_info(infoCode, 's') + # Skip the first four bytes. + pos = 1 + + implNumber = struct.unpack('B', buf[pos])[0] + pos += 1 + + classNumber = struct.unpack('B', buf[pos])[0] + pos += 1 + + results[infoCode] = (implNumber, classNumber) + elif infoCode in (isc_info_version, isc_info_firebird_version): + # (IB 6 API Guide page 53) + buf = self.database_info(infoCode, 's') + # Skip the first byte. 
+ pos = 1 + + versionStringLen = struct.unpack('B', buf[pos])[0] + pos += 1 + + versionString = buf[pos:pos+versionStringLen] + + results[infoCode] = versionString + elif infoCode == isc_info_user_names: + # (IB 6 API Guide page 54) + # + # The isc_info_user_names results buffer does not exactly match + # the format declared on page 54 of the IB 6 API Guide. + # The buffer is formatted as a sequence of clusters, each of + # which begins with the byte isc_info_user_names, followed by a + # two-byte cluster length, followed by a one-byte username + # length, followed by a single username. + # I don't understand why the lengths are represented + # redundantly (the two-byte cluster length is always one + # greater than the one-byte username length), but perhaps it's + # an attempt to adhere to the general format of an information + # cluster declared on page 51 while also [trying, but failing + # to] adhere to the isc_info_user_names-specific format + # declared on page 54. + buf = self.database_info(infoCode, 's') + + usernames = [] + pos = 0 + while pos < len(buf): + if struct.unpack('B', buf[pos])[0] != isc_info_user_names: + raise OperationalError('While trying to service' + ' isc_info_user_names request, found unexpected' + ' results buffer contents at position %d of [%s]' + % (pos, buf) + ) + pos += 1 + + # The two-byte cluster length: + nameClusterLen = struct.unpack('<H', buf[pos:pos+2])[0] + pos += 2 + + # The one-byte username length: + nameLen = struct.unpack('B', buf[pos])[0] + assert nameLen == nameClusterLen - 1 + pos += 1 + + usernames.append(buf[pos:pos+nameLen]) + pos += nameLen + + # The client-exposed return value is a dictionary mapping + # username -> number of connections by that user. 
                # Client-exposed result: map each username to the number of
                # connections currently held by that user.
                res = {}
                for un in usernames:
                    res[un] = res.get(un, 0) + 1

                results[infoCode] = res
            elif infoCode in _DATABASE_INFO_CODES_WITH_INT_RESULT:
                # Simple integer result; the C layer extracts it directly.
                results[infoCode] = self.database_info(infoCode, 'i')
            elif infoCode in _DATABASE_INFO_CODES_WITH_COUNT_RESULTS:
                buf = self.database_info(infoCode, 's')
                countsByRelId = _extractDatabaseInfoCounts(buf)
                # Decided not to convert the relation IDs to relation names
                # for two reasons:
                #  1) Performance + Principle of Least Surprise
                #     If the client program is trying to do some delicate
                #     performance measurements, it's not helpful for
                #     kinterbasdb to be issuing unexpected queries behind the
                #     scenes.
                #  2) Field RDB$RELATIONS.RDB$RELATION_NAME is a CHAR field,
                #     which means its values emerge from the database with
                #     trailing whitespace, yet it's not safe in general to
                #     strip that whitespace because actual relation names can
                #     have trailing whitespace (think
                #     'create table "table1 " (f1 int)').
                results[infoCode] = countsByRelId
            elif infoCode in _DATABASE_INFO_CODES_WITH_TIMESTAMP_RESULT:
                buf = self.database_info(infoCode, 's')
                timestampTuple = raw_timestamp_to_tuple(buf)
                # Route the raw timestamp through the connection's registered
                # outbound TIMESTAMP converter so the caller receives whatever
                # type it has configured via dynamic type translation.
                registeredConverter = self.get_type_trans_out()['TIMESTAMP']
                timestamp = registeredConverter(timestampTuple)
                results[infoCode] = timestamp
            else:
                raise ValueError('Unrecognized database info code %s'
                    % str(infoCode)
                  )

        # If the caller supplied a single info code rather than a sequence,
        # return the bare result rather than a single-entry dict.
        if requestIsSingleton:
            return results[request[0]]
        else:
            return results


    def transaction_info(self, request, result_type):
        """
        Low-level transaction-information accessor (delegates to the C
        layer's isc_transaction_info wrapper).

        request     -- a single transaction info code.
        result_type -- 's' for a raw string buffer, 'i' for an integer
                       (the two forms used by trans_info below).

        Raises ProgrammingError if this connection has no active transaction.
        """
        if not self._has_transaction():
            raise ProgrammingError('This connection has no active transaction.')

        return _k.Connection_transaction_info(self._C_con, request, result_type)


    def trans_info(self, request):
        """
        Higher-level wrapper around transaction_info:  accepts either a
        single info code or a sequence of codes, and returns either a bare
        result or a dict mapping info code -> result, respectively.
        """
        # We process request as a sequence of info codes, even if only one code
        # was supplied by the caller.
        requestIsSingleton = isinstance(request, int)
        if requestIsSingleton:
            request = (request,)

        results = {}
        for infoCode in request:
            # isc_info_tra_isolation may not be defined by every supported
            # engine version, hence the globals().get with a -1 fallback
            # (no real info code is -1, so the elif branch is taken instead).
            if infoCode == globals().get('isc_info_tra_isolation', -1):
                buf = self.transaction_info(infoCode, 's')
                # Skip the leading info-code byte and the two-byte ('h')
                # length field to reach the payload.
                buf = buf[1 + struct.calcsize('h'):]
                if len(buf) == 1:
                    results[infoCode] = portable_int(buf)
                else:
                    # For isolation level isc_info_tra_read_committed, the
                    # first byte indicates the isolation level
                    # (isc_info_tra_read_committed), while the second indicates
                    # the record version flag (isc_info_tra_rec_version or
                    # isc_info_tra_no_rec_version).
                    isolationLevelByte, recordVersionByte = struct.unpack('cc', buf)
                    isolationLevel = portable_int(isolationLevelByte)
                    recordVersion = portable_int(recordVersionByte)
                    results[infoCode] = (isolationLevel, recordVersion)
            else:
                # At the time of this writing (2006.02.09),
                # isc_info_tra_isolation is the only known return value of
                # isc_transaction_info that's not a simple integer.
                results[infoCode] = self.transaction_info(infoCode, 'i')

        if requestIsSingleton:
            return results[request[0]]
        else:
            return results


    def cursor(self):
        """Creates a new cursor that operates within the context of this
        connection."""
        return Cursor(self)


    def close(self):
        "Closes the connection to the database server."
        # A connection that belongs to a ConnectionGroup (distributed
        # transaction) must be removed from the group before it can close.
        self._ensure_group_membership(False, "Cannot close a connection that"
            " is a member of a ConnectionGroup."
          )
        self._close_physical_connection(raiseExceptionOnError=True)


    # closed read-only property:
    def _closed_get(self):
        return _k.Connection_closed_get(self._C_con)
    closed = property(_closed_get)


    def _close_physical_connection(self, raiseExceptionOnError=True):
        # Sever the physical connection to the database server and replace our
        # underlying _kinterbasdb.ConnectionType object with a null instance
        # of that type, so that post-close() method calls on this connection
        # will raise ProgrammingErrors, as required by the DB API Spec.
        try:
            if getattr(self, '_C_con', None) is not None:
                if (_k
                    and self._C_con is not _k.null_connection
                    and not _k.Connection_closed_get(self._C_con)
                   ):
                    try:
                        _k.Connection_close(self._C_con)
                    except ProgrammingError:
                        if raiseExceptionOnError:
                            raise
                # Swap in the null connection so subsequent method calls fail
                # with ProgrammingError instead of operating on a dead handle.
                self._C_con = _k.null_connection
            elif raiseExceptionOnError:
                raise ProgrammingError('Connection is already closed.')
        except:
            # Best-effort mode (raiseExceptionOnError=False) deliberately
            # swallows any error raised during teardown.
            if raiseExceptionOnError:
                raise


    def _has_db_handle(self):
        # True while this connection still owns a live C-level handle.
        return self._C_con is not _k.null_connection


    def _has_transaction(self):
        # Does this connection currently have an active transaction (including
        # a distributed transaction)?
        return _k.Connection_has_transaction(self._C_con)

    def _normalize_type_trans(self):
        # Set the type translation dictionaries to their "normal" form--the
        # minimum required for standard kinterbasdb operation.
        self.set_type_trans_in(_NORMAL_TYPE_TRANS_IN)
        self.set_type_trans_out(_NORMAL_TYPE_TRANS_OUT)

    def _enforce_min_trans(self, trans_dict, translator_source):
        # Any $trans_dict that the Python programmer supplies for a
        # Connection must have entries for at least the types listed in
        # _MINIMAL_TYPE_TRANS_TYPES, because kinterbasdb uses dynamic type
        # translation even if it is not explicitly configured by the Python
        # client programmer.
        # The Cursor.set_type_trans* methods need not impose the same
        # requirement, because "translator resolution" will bubble upward from
        # the cursor to its connection.
        # This method inserts the required translators into the incoming
        # $trans_dict if that $trans_dict does not already contain them.
        # Note that $translator_source will differ between in/out translators.
        for type_name in _MINIMAL_TYPE_TRANS_TYPES:
            if type_name not in trans_dict:
                trans_dict[type_name] = translator_source[type_name]

    def set_type_trans_out(self, trans_dict):
        """
        Changes the outbound type translation map.
        For more information, see the "Dynamic Type Translation" section of
        the KInterbasDB Usage Guide.
        """
        _trans_require_dict(trans_dict)
        self._enforce_min_trans(trans_dict, _NORMAL_TYPE_TRANS_OUT)
        return _k.set_Connection_type_trans_out(self._C_con, trans_dict)

    def get_type_trans_out(self):
        """
        Retrieves the outbound type translation map.
        For more information, see the "Dynamic Type Translation" section of
        the KInterbasDB Usage Guide.
        """
        return _k.get_Connection_type_trans_out(self._C_con)

    def set_type_trans_in(self, trans_dict):
        """
        Changes the inbound type translation map.
        For more information, see the "Dynamic Type Translation" section of
        the KInterbasDB Usage Guide.
        """
        _trans_require_dict(trans_dict)
        self._enforce_min_trans(trans_dict, _NORMAL_TYPE_TRANS_IN)
        return _k.set_Connection_type_trans_in(self._C_con, trans_dict)

    def get_type_trans_in(self):
        """
        Retrieves the inbound type translation map.
        For more information, see the "Dynamic Type Translation" section of
        the KInterbasDB Usage Guide.
        """
        return _k.get_Connection_type_trans_in(self._C_con)


    # event_conduit is bound conditionally at class-definition time:  if the
    # C layer was compiled without event support, calling it simply raises
    # NotSupportedError.
    if not _EVENT_HANDLING_SUPPORTED:
        def event_conduit(self, event_names):
            raise NotSupportedError("Event handling was not enabled when"
                " kinterbasdb's C layer was compiled."
              )
    else:
        def event_conduit(self, event_names):
            # Delegate conduit creation to the C layer, passing the original
            # connection parameters along with the requested event names.
            return _k.EventConduit_create(self._C_con, self._C_con_params,
                event_names
              )


    # default_tpb read-write property:
    def _default_tpb_get(self):
        return self._default_tpb
    def _default_tpb_set(self, value):
        # _validateTPB validates (and possibly normalizes) before storage.
        self._default_tpb = _validateTPB(value)
    default_tpb = property(_default_tpb_get, _default_tpb_set)

    # The C layer of KInterbasDB uses this read-only property when it needs a
    # TPB that's strictly a memory buffer, rather than potentially a TPB
    # instance.
    def __default_tpb_str_get_(self):
        defTPB = self.default_tpb
        if not isinstance(defTPB, str):
            # Non-string default TPBs are rendered to their buffer form.
            defTPB = defTPB.render()
        return defTPB
    _default_tpb_str_ = property(__default_tpb_str_get_)


    # dialect read-write property:
    def _dialect_get(self):
        return _k.Connection_dialect_get(self._C_con)
    def _dialect_set(self, value):
        _k.Connection_dialect_set(self._C_con, value)
    dialect = property(_dialect_get, _dialect_set)


    # precision_mode read-write property (deprecated):
    def _precision_mode_get(self):
        # Postpone this warning until a later version:
        #import warnings # Lazy import.
        #warnings.warn(
        #    'precision_mode is deprecated in favor of dynamic type'
        #    ' translation (see the [set|get]_type_trans_[in|out] methods).',
        #    DeprecationWarning
        #  )
        return self._precision_mode
    def _precision_mode_set(self, value):
        # Postpone this warning until a later version:
        #import warnings # Lazy import.
        #warnings.warn(
        #    'precision_mode is deprecated in favor of dynamic type'
        #    ' translation (see the [set|get]_type_trans_[in|out] methods).',
        #    DeprecationWarning
        #  )
        value = bool(value)

        # Preserve the previous DTT settings that were in place before this
        # call to the greatest extent possible (although dynamic type
        # translation and the precision_mode attribute really aren't meant to
        # be used together).
        trans_in = self.get_type_trans_in()
        trans_out = self.get_type_trans_out()

        if value: # precise:
            trans_in['FIXED'] = fixed_conv_in_precise
            trans_out['FIXED'] = fixed_conv_out_precise
        else: # imprecise:
            trans_in['FIXED'] = fixed_conv_in_imprecise
            trans_out['FIXED'] = fixed_conv_out_imprecise

        self.set_type_trans_in(trans_in)
        self.set_type_trans_out(trans_out)

        self._precision_mode = value
    precision_mode = property(_precision_mode_get, _precision_mode_set)


    # server_version read-only property:
    def _server_version_get(self):
        return self.db_info(isc_info_version)
    server_version = property(_server_version_get)


    # charset read-only property:
    def _charset_get(self):
        return self._charset
    def _charset_set(self, value):
        # More informative error message:
        raise AttributeError("A connection's 'charset' property can be"
            " specified upon Connection creation as a keyword argument to"
            " kinterbasdb.connect, but it cannot be modified thereafter."
          )
    charset = property(_charset_get, _charset_set)


    # group read-only property:
    def _group_get(self):
        return _k.Connection_get_group(self._C_con)
    group = property(_group_get)


    def _set_group(self, group):
        # This package-private method allows ConnectionGroup's membership
        # management functionality to bypass the conceptually read-only nature
        # of the Connection.group property.
        if group is not None:
            # A connection may only ever belong to one group at a time.
            assert _k.Connection_get_group(self._C_con) is None
        # Unless the group is being cleared (set to None), pass a *WEAK*
        # reference down to the C level (otherwise, even the cyclic garbage
        # collector can't collect ConnectionGroups or their Connections because
        # the Connections' references are held at the C level, not the Python
        # level).
        if group is None:
            _k.Connection_set_group(self._C_con, None)
        else:
            _k.Connection_set_group(self._C_con, group)


    def _ensure_group_membership(self, must_be_member, err_msg):
        # Raise ProgrammingError(err_msg) unless this connection's group
        # membership matches $must_be_member.
        if must_be_member:
            if self.group is None:
                raise ProgrammingError(err_msg)
        else:
            if not hasattr(self, 'group'):
                # The 'group' property couldn't even be evaluated (presumably
                # a partially constructed connection); nothing to enforce.
                return
            if self.group is not None:
                raise ProgrammingError(err_msg)


    def _timeout_enabled_get(self):
        return _k.Connection_timeout_enabled(self._C_con)
    _timeout_enabled = property(_timeout_enabled_get)


    def _activity_stamps(self):
        return _k.Connection__read_activity_stamps(self._C_con)


class ConnectionGroup(object):
    # XXX: ConnectionGroup objects currently are not thread-safe.  Since
    # separate Connections can be manipulated simultaneously by different
    # threads in kinterbasdb, it would make sense for a container of multiple
    # connections to be safely manipulable simultaneously by multiple threads.

    # XXX: Adding two connections to the same database freezes the DB client
    # library.  However, I've no way to detect with certainty whether any
    # given con1 and con2 are connected to the same database, what with
    # database aliases, IP host name aliases, remote-vs-local protocols, etc.
    # Therefore, a warning must be added to the docs.

    def __init__(self, connections=()):
        # Module-level initialization must have completed before a group can
        # be created.
        _ensureInitialized()

        self._cons = []
        self._trans_handle = None

        for con in connections:
            self.add(con)


    def __del__(self):
        self.disband()


    def disband(self):
        # Notice that the ConnectionGroup rollback()s itself BEFORE releasing
        # its Connection references.
        # Resolve (roll back) any unresolved distributed transaction before
        # releasing the member connections.  getattr/hasattr guards tolerate
        # a group whose __init__ never completed.
        if getattr(self, '_trans_handle', None) is not None:
            self.rollback()
        if hasattr(self, '_cons'):
            self.clear()


    # Membership methods:
    def add(self, con):
        ### CONSTRAINTS ON $con: ###
        # con must be an instance of kinterbasdb.Connection:
        if not isinstance(con, Connection):
            raise TypeError('con must be an instance of'
                ' kinterbasdb.Connection'
              )
        # con cannot already be a member of this group:
        if con in self:
            raise ProgrammingError('con is already a member of this group.')
        # con cannot belong to more than one group at a time:
        if con.group:
            raise ProgrammingError('con is already a member of another group;'
                ' it cannot belong to more than one group at once.'
              )
        # con cannot be added if it has an active transaction:
        if con._has_transaction():
            raise ProgrammingError('con already has an active transaction;'
                ' that must be resolved before con can join this group.'
              )
        # con must be connected to a database; it must not have been closed.
        if not con._has_db_handle():
            raise ProgrammingError('con has been closed; it cannot join a'
                ' group.'
              )

        if con._timeout_enabled:
            raise ProgrammingError('Connections with timeout enabled cannot'
                ' participate in distributed transactions.'
              )

        ### CONSTRAINTS ON $self: ###
        # self cannot accept new members while self has an unresolved
        # transaction:
        self._require_transaction_state(False,
            'Cannot add connection to group that has an unresolved'
            ' transaction.'
          )
        # self cannot have more than DIST_TRANS_MAX_DATABASES members:
        if self.count() >= DIST_TRANS_MAX_DATABASES:
            raise ProgrammingError('The database engine limits the number of'
                ' database handles that can participate in a single'
                ' distributed transaction to %d or fewer; this group already'
                ' has %d members.'
                % (DIST_TRANS_MAX_DATABASES, self.count())
              )

        ### CONSTRAINTS FINISHED ###

        # Can't set con.group directly (read-only); must use package-private
        # method.
        con._set_group(self)
        self._cons.append(con)


    def remove(self, con):
        if con not in self:
            raise ProgrammingError('con is not a member of this group.')
        # The following assertion was invalidated by the introduction of weak
        # refs:
        #assert con.group is self
        self._require_transaction_state(False,
            'Cannot remove connection from group that has an unresolved'
            ' transaction.'
          )

        con._set_group(None)
        self._cons.remove(con)


    def clear(self):
        # Remove every member connection; only legal while no distributed
        # transaction is pending.
        self._require_transaction_state(False,
            'Cannot clear group that has an unresolved transaction.'
          )
        for con in self.members():
            self.remove(con)
        assert self.count() == 0


    def members(self):
        return self._cons[:] # return a *copy* of the internal list


    def count(self):
        return len(self._cons)


    def contains(self, con):
        return con in self._cons
    __contains__ = contains # alias to support the 'in' operator


    def __iter__(self):
        return iter(self._cons)


    # Transactional methods:
    def _require_transaction_state(self, must_be_active, err_msg=''):
        # Raise ProgrammingError(err_msg) unless the presence or absence of
        # the distributed transaction handle matches $must_be_active.
        trans_handle = self._trans_handle
        if (
            (must_be_active and trans_handle is None)
            or (not must_be_active and trans_handle is not None)
           ):
            raise ProgrammingError(err_msg)


    def _require_non_empty_group(self, operation_name):
        if self.count() == 0:
            raise ProgrammingError('Cannot %s distributed transaction with'
                ' an empty ConnectionGroup.' % operation_name
              )


    def begin(self):
        # Start a distributed transaction spanning all member connections.
        self._require_transaction_state(False,
            'Must resolve current transaction before starting another.'
          )
        self._require_non_empty_group('start')
        self._trans_handle = _k.distributed_begin(self._cons)


    def prepare(self):
        """
        Manually triggers the first phase of a two-phase commit (2PC).  Use
        of this method is optional; if preparation is not triggered manually,
        it will be performed implicitly by commit() in a 2PC.
        """
        self._require_non_empty_group('prepare')
        self._require_transaction_state(True,
            'This group has no transaction to prepare.'
          )

        _k.distributed_prepare(self._trans_handle)


    def commit(self, retaining=False):
        self._require_non_empty_group('commit')
        # The consensus among Python DB API experts is that transactions should
        # always be started implicitly, even if that means allowing a commit()
        # or rollback() without an actual transaction.
        if self._trans_handle is None:
            return

        _k.distributed_commit(self._trans_handle, retaining)
        self._trans_handle = None

        # Reset each member's transaction statistics now that the
        # distributed transaction has been resolved.
        for con in self._cons: # 2005.07.22
            _k.Connection_clear_transaction_stats(con._C_con)


    def rollback(self, retaining=False):
        self._require_non_empty_group('roll back')
        # The consensus among Python DB API experts is that transactions should
        # always be started implicitly, even if that means allowing a commit()
        # or rollback() without an actual transaction.
        if self._trans_handle is None:
            return

        _k.distributed_rollback(self._trans_handle, retaining)
        self._trans_handle = None

        # Reset each member's transaction statistics now that the
        # distributed transaction has been resolved.
        for con in self._cons: # 2005.07.22
            _k.Connection_clear_transaction_stats(con._C_con)


##########################################
##        PUBLIC CLASSES: END           ##
##########################################

class _RowMapping(object):
    """
    An internal kinterbasdb class that wraps a row of results in order to map
    field name to field value.

    kinterbasdb makes ABSOLUTELY NO GUARANTEES about the return value of the
    fetch(one|many|all) methods except that it is a sequence indexed by field
    position, and no guarantees about the return value of the
    fetch(one|many|all)map methods except that it is a mapping of field name
    to field value.
    Therefore, client programmers should NOT rely on the return value being
    an instance of a particular class or type.
+ """ + + def __init__(self, description, row): + self._description = description + fields = self._fields = {} + pos = 0 + for fieldSpec in description: + # It's possible for a result set from the database engine to return + # multiple fields with the same name, but kinterbasdb's key-based + # row interface only honors the first (thus setdefault, which won't + # store the position if it's already present in self._fields). + fields.setdefault(fieldSpec[DESCRIPTION_NAME], row[pos]) + pos += 1 + + + def __len__(self): + return len(self._fields) + + + def __getitem__(self, fieldName): + fields = self._fields + # Straightforward, unnormalized lookup will work if the fieldName is + # already uppercase and/or if it refers to a database field whose + # name is case-sensitive. + if fieldName in fields: + return fields[fieldName] + else: + fieldNameNormalized = _normalizeDatabaseIdentifier(fieldName) + try: + return fields[fieldNameNormalized] + except KeyError: + raise KeyError('Result set has no field named "%s". The field' + ' name must be one of: (%s)' + % (fieldName, ', '.join(fields.keys())) + ) + + + def get(self, fieldName, defaultValue=None): + try: + return self[fieldName] + except KeyError: + return defaultValue + + + def __contains__(self, fieldName): + try: + self[fieldName] + except KeyError: + return False + else: + return True + + + def __str__(self): + # Return an easily readable dump of this row's field names and their + # corresponding... [truncated message content] |