From: <jr...@us...> - 2009-02-18 19:44:32
|
Revision: 6922 http://matplotlib.svn.sourceforge.net/matplotlib/?rev=6922&view=rev Author: jrevans Date: 2009-02-18 19:44:29 +0000 (Wed, 18 Feb 2009) Log Message: ----------- Unit-Test harness initial check-in. Added Paths: ----------- trunk/matplotlib/test/ trunk/matplotlib/test/README.txt trunk/matplotlib/test/mplTest/ trunk/matplotlib/test/mplTest/MplNosePlugin.py trunk/matplotlib/test/mplTest/MplTestCase.py trunk/matplotlib/test/mplTest/TestTEMPLATE.py trunk/matplotlib/test/mplTest/__init__.py trunk/matplotlib/test/mplTest/compare.py trunk/matplotlib/test/mplTest/directories.py trunk/matplotlib/test/mplTest/path_utils.py trunk/matplotlib/test/mplTest/units/ trunk/matplotlib/test/mplTest/units/Duration.py trunk/matplotlib/test/mplTest/units/Epoch.py trunk/matplotlib/test/mplTest/units/EpochConverter.py trunk/matplotlib/test/mplTest/units/StrConverter.py trunk/matplotlib/test/mplTest/units/UnitDbl.py trunk/matplotlib/test/mplTest/units/UnitDblConverter.py trunk/matplotlib/test/mplTest/units/UnitDblFormatter.py trunk/matplotlib/test/mplTest/units/__init__.py trunk/matplotlib/test/run-mpl-test.py trunk/matplotlib/test/test_artists/ trunk/matplotlib/test/test_backends/ trunk/matplotlib/test/test_backends/TestAgg.py trunk/matplotlib/test/test_basemap/ trunk/matplotlib/test/test_cxx/ trunk/matplotlib/test/test_mathtext/ trunk/matplotlib/test/test_matplotlib/ trunk/matplotlib/test/test_matplotlib/TestAxes.py trunk/matplotlib/test/test_matplotlib/TestCookbook.py trunk/matplotlib/test/test_matplotlib/TestTickers.py trunk/matplotlib/test/test_matplotlib/baseline/ trunk/matplotlib/test/test_matplotlib/baseline/TestAxes/ trunk/matplotlib/test/test_matplotlib/baseline/TestAxes/default_datetime.png trunk/matplotlib/test/test_matplotlib/baseline/TestAxes/empty_datetime.png trunk/matplotlib/test/test_matplotlib/baseline/TestAxes/formatter_ticker_001.png trunk/matplotlib/test/test_matplotlib/baseline/TestAxes/formatter_ticker_002.png 
trunk/matplotlib/test/test_matplotlib/baseline/TestAxes/formatter_ticker_003.png trunk/matplotlib/test/test_matplotlib/baseline/TestAxes/formatter_ticker_004.png trunk/matplotlib/test/test_matplotlib/baseline/TestAxes/formatter_ticker_005.png trunk/matplotlib/test/test_matplotlib/baseline/TestTickers/ trunk/matplotlib/test/test_matplotlib/baseline/TestTickers/DateFormatter_fractionalSeconds.png trunk/matplotlib/test/test_matplotlib/baseline/TestTickers/RRuleLocator_bounds.png trunk/matplotlib/test/test_numerix/ trunk/matplotlib/test/test_plots/ trunk/matplotlib/test/test_plots/TestAnnotation.py trunk/matplotlib/test/test_plots/TestPlot.py trunk/matplotlib/test/test_plots/TestPolar.py trunk/matplotlib/test/test_plots/TestSpan.py trunk/matplotlib/test/test_plots/baseline/ trunk/matplotlib/test/test_plots/baseline/TestAnnotation/ trunk/matplotlib/test/test_plots/baseline/TestAnnotation/offset_points.png trunk/matplotlib/test/test_plots/baseline/TestAnnotation/polar_axes.png trunk/matplotlib/test/test_plots/baseline/TestAnnotation/polar_coords.png trunk/matplotlib/test/test_plots/baseline/TestPlot/ trunk/matplotlib/test/test_plots/baseline/TestPlot/const_xy.png trunk/matplotlib/test/test_plots/baseline/TestPlot/shaped_data.png trunk/matplotlib/test/test_plots/baseline/TestPlot/single_date.png trunk/matplotlib/test/test_plots/baseline/TestPlot/single_point.png trunk/matplotlib/test/test_plots/baseline/TestPolar/ trunk/matplotlib/test/test_plots/baseline/TestPolar/polar_units.png trunk/matplotlib/test/test_plots/baseline/TestPolar/polar_wrap_180.png trunk/matplotlib/test/test_plots/baseline/TestPolar/polar_wrap_360.png trunk/matplotlib/test/test_plots/baseline/TestSpan/ trunk/matplotlib/test/test_plots/baseline/TestSpan/axhspan_epoch.png trunk/matplotlib/test/test_plots/baseline/TestSpan/axvspan_epoch.png trunk/matplotlib/test/test_pylab/ trunk/matplotlib/test/test_transforms/ Added: trunk/matplotlib/test/README.txt 
=================================================================== --- trunk/matplotlib/test/README.txt (rev 0) +++ trunk/matplotlib/test/README.txt 2009-02-18 19:44:29 UTC (rev 6922) @@ -0,0 +1,290 @@ +======================================================================== + matplotlib test structure +======================================================================== + +===== How To Use + += Running + +Run the 'run-mpl-test.py' script to execute the test harness. This must +be run with the version of python that you wish to test matplotlib with. +This means that it must have nose installed (and PIL if image comparison +is to be done). By default this will pick up whatever python is on your +path, so make sure it is the correct one. + +- Command-Line Options +In addition to the standard nose command-line options, there are several +specific to the matplotlib test harness. They are as follows: + + -t TAG, --with-tag=TAG + Will only run test cases that have the specified tag. + Each test case should have a 'tag' attribute (if a + case does not have one, then it is assumed to be an + empty list). The 'tag' attribute is a list of + strings, where each value is a representative property + of the test case. Example tags are 'qt' or 'units'. + This can be specified multiple times. + --without-tag=TAG This will run those test cases that do not have the + specified tags. + --clean This will remove all output files and saved results. + If this is specified, no other processing will be + performed. + --all This will run all test programs regardless of working + directory. + --keep Keep any generated output files in a directory called + 'saved-results'. This directory will be created if it + doesn't already exist. This directory is in the same + location as the test case whose results are being + saved. + --keep-failed This acts just like '--keep' except will only keep + the results from tests that error or fail. 
+ --make-test=testName + Creates a template test case file in the current + directory with the name TestFoo. Where 'Foo' is the + provided test name. + + +- Running Specific Tests +In order to specify the exact test case you want to run use the +standard nose mechanism. For example, if you have the following setup: + +TestFoo.py + def test_func(): + ... + + class TestFoo: + def test_bar( self ): + ... + def test_bug( self ): + ... + +Then to test everything in TestFoo.py do the following: +$> run-mpl-test.py TestFoo.py + +To run all tests in the test class TestFoo do this: +$> run-mpl-test.py TestFoo.py:TestFoo + +To run the specific 'test_bar' method do the following: +$> run-mpl-test.py TestFoo.py:TestFoo.test_bar + + += Detecting Test Cases + +When running the matplotlib test script it will search for all tests +in the current working directory and below (unless '--all' is specified). +This is provided that the current working directory is a sub-directory +of the matplotlib test directory. In the event that it is not, then the +matplotlib root test directory will be used and all appropriate test cases +will be run. + +This will not search outside of the test structure and will not look in +the mplTest module. This will only search for test cases in the root +test directory and any of its sub-directories. + += Saving Results + +When using the keep flag any generated files in the 'output' directory +are copied to the 'saved-results/<classname>' directory, where <classname> +is the name of the unit-test class. This means that for each test case +within a given test class, all output files should have unique names. + +The 'saved-results' directory will always contain the results from the +last test run. This is considered a volatile directory since running +the test cases without the '--keep' flag will remove any existing +'saved-results' directory. This is to ensure the integrity of the +saved results, they will always match the last test run. 
+ += Filtering Tests + +In the case of filtering via tags, a unit-test can have multiple tags. +When running the test program if any tags are specified as 'skip' then +this will take precedence over any tags that might say 'process'. For +example, if a test case has both the 'gui' and 'qt' tag, but the command- +line is specified with the following flags: + '--with-tag=gui --without-tag=qt' +then the example test case will not be run because it matches the skip +tag. + + +===== Directory Structure + +There are several directories in the matplotlib test structure. The first +directory is the 'mplTest' directory. This is the matplotlib test module +and contains the various python scripts that the test harness needs to +run. The remaining directories are as follows and contain the various test +cases for matplotlib. + +mplTest + This directory does not contain any test cases, rather it is the location + of the matplotlib specific utilities for performing unit tests. + +test_artists + This directory contains tests that focus on the rendering aspects of + the various artists. Essentially the artist derived functionality. + +test_backends + This directory contains various tests that focus on making sure the + various backend targets work. + +test_basemap + This directory contains test cases that exercise the basemap add-on + module. + +test_cxx + This directory contains tests that focus on testing the interface of + the compiled code contained in matplotlib. + +test_mathtext + This directory contains tests that focus on exercising the mathtext + sub-system. + +test_numerix + This directory contains tests that focus on validating the numerix + component. + +test_plots + This directory contains tests that validate the various plot functions. + +test_pylab + This directory has pylab specific test cases. + +test_transforms + This directory has test cases that focus on testing the various + transformation and projection functions. 
+ +test_matplotlib + This directory has all other test cases. This contains tests that focus + on making sure that Axis, Axes, Figure, etc are all acting properly. This + has test cases that are general to the overall functionality of matplotlib. + + +===== Writing Test Cases + += The Test Case + +As per the nose implementation, a test case is ultimately any function that +has the phrase 'test' in its name. The matplotlib cases however are grouped +into directories, by what is being tested, and from there are grouped into +classes (one class per file), by similarity. + +It is desirable that all matplotlib tests follow the same structure to +not only facilitate the writing of test cases, but to make things easier +for maintaining them and keeping things uniform. + +There is a class 'MplTestCase' provided to be the base class for all matplotlib +test classes. This class provides some extra functionality in the form of +verification functions and test data management. + += Comparison Functions + +There are several methods provided for testing whether or not a particular +test case should fail or succeed. The following methods are provided by +the base matplotlib test class: + +- MplTestCase.checkEq( expected, actual, msg = "" ) + Fail if the values are not equal, with the given message. + +- MplTestCase.checkNeq( expected, actual, msg = "" ) + Fail if the values are equal, with the given message. + +- MplTestCase.checkClose( expected, actual, relTol=None, absTol=None, msg="" ) + Fail if the floating point values are not close enough, with the given message. + You can specify a relative tolerance, absolute tolerance, or both. + +- MplTestCase.checkImage( filename, tol = 1.0e-3, msg = "" ) + Check to see if the image is similar to the one stored in the baseline + directory. filename can be a fully qualified name (via the 'outFile' method), + or it can be the name of the file (to be passed into the 'outFile' method). 
+ The default tolerance is typically fine, but might need to be adjusted in some + cases (see the 'compareImages' function for more details). Fails with + the specified message. + +Note that several of the tests will perform image comparison for validation +of a specific plot. Though not 100% accurate it at least flags potential +failures and signals a human to come and take a closer look. If an image has +changed and after a human deems the change is acceptable, then updating the +baseline image with the appropriate image from the 'saved-results' directory +(when using the '--keep' or '--keep-failed' command-line arguments) will make +the test pass properly. + +Image comparison depends on the python imaging library (PIL) being installed. +If PIL is not installed, then any test cases that rely on it will not +pass. To not run these test cases, then pass the '--without-tag=PIL' +option on the command-line. + += Directories + +Input data files for a given test case should be placed in a directory +called 'inputs' with the test case that uses it. A convenience function +is provided with each test class for accessing input files. + +For example if a test case has an input file of the name 'inputs.txt' +you can get the path to the file by calling 'self.inFile("inputs.txt")'. +This is to allow for a uniform convention that all test cases can follow. + +Output files are handled just like input files with the exception that +they are written to the 'output' directory and the path name can be +had by calling 'self.outFile'. It is more important to use this mechanism +for getting the pathname for an output file because it allows for the +management of cleaning up and saving generated output files (It also +significantly reduces the probability of typo errors when specifying +where to place the files). + +A third and final directory used by the test cases is the 'baseline' +directory. This is where data files used for verifying test results +are stored. 
The path name can be had by using the 'self.baseFile' +method. + +Accessing these directories can be made simple (and reduce the chance of a +typo) via the following MplTestCase methods: + +- MplTestCase.inFile( filename ) + Returns the full pathname of filename in the input data directory. + +- MplTestCase.outFile( filename ) + Returns the full pathname of filename in the output data directory. + +- MplTestCase.baseFile( filename ) + Returns the full pathname of filename in the baseline data directory. + += Units + +Located in the mplTest directory is a set of unit classes. These classes +are provided for testing the various unitized data interfaces that matplotlib +supports (ie unit conversion). These are used because they provide a very +strict enforcement of unitized data which will test the entire spectrum of how +unitized data might be used (it is not always meaningful to convert to +a float without specific units given). This allows us to test for cases that +might accidentally be performing operations that really do not make sense +physically for unitized data. + +The provided classes are as follows: +- UnitDbl + UnitDbl is essentially a unitized floating point number. It has a + minimal set of supported units (enough for testing purposes). All + of the mathematical operations are provided to fully test any behaviour + that might occur with unitized data. Remember that unitized data has + rules as to how it can be applied to one another (a value of distance + cannot be added to a value of time). Thus we need to guard against any + accidental "default" conversion that will strip away the meaning of the + data and render it neutered. + +- Epoch + Epoch is different than a UnitDbl of time. Time is something that can be + measured where an Epoch is a specific moment in time. Epochs are typically + referenced as an offset from some predetermined epoch. Conceptually an Epoch + is like saying 'January 1, 2000 at 12:00 UTC'. 
It is a specific + time, but more importantly it is a time with a frame. In the example + the frame is 'UTC'. This class is provided to test the functionality of + matplotlib's various routines and mechanisms for dealing with datetimes. + +- Duration + A difference of two epochs is a Duration. The distinction between a + Duration and a UnitDbl of time is made because an Epoch can have different + frames (or units). In the case of our test Epoch class the two allowed + frames are 'UTC' and 'ET' (Note that these are rough estimates provided for + testing purposes and should not be used in production code where accuracy + of time frames is desired). As such a Duration also has a frame of + reference and therefore needs to be called out as different that a simple + measurement of time since a delta-t in one frame may not be the same in another. + Property changes on: trunk/matplotlib/test/README.txt ___________________________________________________________________ Added: svn:eol-style + LF Added: trunk/matplotlib/test/mplTest/MplNosePlugin.py =================================================================== --- trunk/matplotlib/test/mplTest/MplNosePlugin.py (rev 0) +++ trunk/matplotlib/test/mplTest/MplNosePlugin.py 2009-02-18 19:44:29 UTC (rev 6922) @@ -0,0 +1,836 @@ +#======================================================================= + +import os +import sys +import shutil +import os.path +import optparse + +import nose.case +from nose.plugins import Plugin + +from path_utils import * +import directories as dirs +from MplTestCase import MplTestCase + +#======================================================================= + +__all__ = [ 'MplNosePlugin' ] + +#======================================================================= +def getInstance( test ): + """Given a nose test case, will return the actual unit test instance. + + We do this with a function call in case the method for getting the + actual unit test instance needs to change. 
+ """ + assert isinstance( test, nose.case.Test ) + + if isinstance( test.test, nose.case.MethodTestCase ): + return test.test.inst + elif isinstance( test.test, nose.case.FunctionTestCase ): + return test.test.test + # elif isinstance( test.test, unittest.TestCase ): + else: + return test.test + + +#======================================================================= +class MplNosePlugin( Plugin ): + + enabled = True + name = "MplNosePlugin" + score = 0 + + KEEP_NONE = 0 + KEEP_FAIL = 1 + KEEP_ALL = 2 + + TEST_ERRORED = -1 + TEST_FAILED = 0 + TEST_PASSED = 1 + + #-------------------------------------------------------------------- + # Some 'property' functions + def getRootDir( self ): + # The bottom directory of the stack is the root directory. + return self.dirStack[0] + + def getInputDir( self ): + return os.path.join( self.currentDir, dirs.inputDirName ) + + def getOutputDir( self ): + return os.path.join( self.currentDir, dirs.outputDirName ) + + def getBaselineRootDir( self ): + return os.path.join( self.currentDir, dirs.baselineDirName ) + + def getSaveRootDir( self ): + return os.path.join( self.currentDir, dirs.saveDirName ) + + rootDir = property( getRootDir ) + inputDir = property( getInputDir ) + outputDir = property( getOutputDir ) + baselineRootDir = property( getBaselineRootDir ) + saveRootDir = property( getSaveRootDir ) + + def getBaselineDir( self, test ): + t = getInstance( test ) + return os.path.join( self.baselineRootDir, t.__class__.__name__ ) + + def getSaveDir( self, test ): + t = getInstance( test ) + return os.path.join( self.saveRootDir, t.__class__.__name__ ) + + #-------------------------------------------------------------------- + def saveResults( self, test ): + """Save the output directory for the gived test.""" + saveDir = self.getSaveDir( test ) + if not os.path.exists( saveDir ): + mkdir( saveDir, recursive = True ) + + outDir = getInstance( test ).outputDir + + for fname in walk( outDir ): + if os.path.isdir( fname ): + 
shutil.copytree( fname, saveDir ) + else: + shutil.copy( fname, saveDir ) + + #-------------------------------------------------------------------- + def filterTestItem( self, item ): + """Return true if you want the main test selector to collect tests from + this class, false if you don't, and None if you don't care. + + Parameters: + item : An instance of the testable item that has a 'tag' attribute. + """ + + reallyWant = False + reallyDontWant = False + + if hasattr( item, 'tags' ): + itemTags = item.tags + else: + itemTags = [] + + for tag in self.skipTags: + if tag in itemTags: + reallyDontWant = True + break + + for tag in self.includeTags: + if tag in itemTags: + reallyWant = True + else: + reallyDontWant = True + break + + if self.includeTags and not itemTags: + reallyDontWant = True + + if reallyDontWant: + return False + if reallyWant: + return True + + return None + + #-------------------------------------------------------------------- + def addError( self, test, err ): + """Called when a test raises an uncaught exception. DO NOT return a value + unless you want to stop other plugins from seeing that the test has + raised an error. + + Parameters: + test : nose.case.Test + the test case + err : 3-tuple + sys.exc_info() tuple + """ + self.testResults.append( (test, self.TEST_ERRORED, err) ) + + #-------------------------------------------------------------------- + def addFailure( self, test, err ): + """Called when a test fails. DO NOT return a value unless you want to + stop other plugins from seeing that the test has failed. + + Parameters: + test : nose.case.Test + the test case + err : 3-tuple + sys.exc_info() tuple + """ + self.testResults.append( (test, self.TEST_FAILED, err) ) + + #-------------------------------------------------------------------- + def addSuccess( self, test ): + """Called when a test passes. DO NOT return a value unless you want to + stop other plugins from seeing the passing test. 
+ + Parameters: + test : nose.case.Test + the test case + """ + self.testResults.append( (test, self.TEST_PASSED, None) ) + + #-------------------------------------------------------------------- + def afterContext( self ): + """Called after a context (generally a module) has been lazy-loaded, + imported, setup, had its tests loaded and executed, and torn down. + """ + return None + + #-------------------------------------------------------------------- + def afterDirectory( self, path ): + """Called after all tests have been loaded from directory at path and run. + + Parameters: + path : string + the directory that has finished processing + """ + # Set the current directory to the previous directory + self.currentDir = self.dirStack.pop() + chdir( self.currentDir ) + return None + + #-------------------------------------------------------------------- + def afterImport( self, filename, module ): + """Called after module is imported from filename. afterImport is called + even if the import failed. + + Parameters: + filename : string + The file that was loaded + module : string + The name of the module + """ + return None + + #-------------------------------------------------------------------- + def afterTest( self, test ): + """Called after the test has been run and the result recorded + (after stopTest). + + Parameters: + test : nose.case.Test + the test case + """ + return None + + #-------------------------------------------------------------------- + def beforeContext( self ): + """Called before a context (generally a module) is examined. Since the + context is not yet loaded, plugins don't get to know what the + context is; so any context operations should use a stack that is + pushed in beforeContext and popped in afterContext to ensure they + operate symmetrically. + + beforeContext and afterContext are mainly useful for tracking and + restoring global state around possible changes from within a + context, whatever the context may be. 
If you need to operate on + contexts themselves, see startContext and stopContext, which are + passed the context in question, but are called after it has been + loaded (imported in the module case). + """ + return None + + #-------------------------------------------------------------------- + def beforeDirectory( self, path ): + """Called before tests are loaded from directory at path. + + Parameters: + path : string + the directory that is about to be processed + """ + # Save the cuurent directory and set to the new directory. + self.dirStack.append( self.currentDir ) + self.currentDir = path + chdir( self.currentDir ) + + # Remove any existing 'saved-results' directory + #NOTE: We must do this after setting 'self.currentDir' + rmdir( self.saveRootDir ) + + return None + + #-------------------------------------------------------------------- + def beforeImport( self, filename, module ): + """Called before module is imported from filename. + + Parameters: + filename : string + The file that will be loaded + module : string + The name of the module found in file + """ + return None + + #-------------------------------------------------------------------- + def beforeTest( self, test ): + """Called before the test is run (before startTest). + + Parameters: + test : nose.case.Test + the test case + """ + return None + + #-------------------------------------------------------------------- + def begin( self ): + """Called before any tests are collected or run. Use this to perform + any setup needed before testing begins. + """ + return None + + #-------------------------------------------------------------------- + def configure( self, options, conf ): + """Called after the command line has been parsed, with the parsed + options and the config container. Here, implement any config + storage or changes to state or operation that are set by command + line options. 
+ + Do not return a value from this method unless you want to stop all + other plugins from being configured. + """ + self.includeTags = [ t for t in options.mpl_process_tags ] + self.skipTags = [ t for t in options.mpl_skip_tags ] + self.keepLevel = options.mpl_keep + + self.currentDir = os.getcwd() + self.dirStack = [] + + self.testResults = [] + + #-------------------------------------------------------------------- + def describeTest( self, test ): + """Return a test description. Called by nose.case.Test.shortDescription. + + Parameters: + test : nose.case.Test + the test case + """ + return None + + #-------------------------------------------------------------------- + def finalize( self, result ): + """Called after all report output, including output from all plugins, + has been sent to the stream. Use this to print final test results + or perform final cleanup. Return None to allow other plugins to + continue printing, any other value to stop them. + + Note + When tests are run under a test runner other than + nose.core.TextTestRunner, for example when tests are run via + 'python setup.py test', this method may be called before the default + report output is sent. + """ + return None + + #-------------------------------------------------------------------- + def formatError( self, test, err ): + """Called in result.addError, before plugin.addError. If you want to + replace or modify the error tuple, return a new error tuple. + + Parameters: + test : nose.case.Test + the test case + err : 3-tuple + sys.exc_info() tuple + """ + return err + + #-------------------------------------------------------------------- + def formatFailure( self, test, err ): + """Called in result.addFailure, before plugin.addFailure. If you want to + replace or modify the error tuple, return a new error tuple. 
Since + this method is chainable, you must return the test as well, so you + you'll return something like: + return (test, err) + + Parameters: + test : nose.case.Test + the test case + err : 3-tuple + sys.exc_info() tuple + """ + return None + + #-------------------------------------------------------------------- + def handleError( self, test, err ): + """Called on addError. To handle the error yourself and prevent normal + error processing, return a true value. + + Parameters: + test : nose.case.Test + the test case + err : 3-tuple + sys.exc_info() tuple + """ + if (self.keepLevel == self.KEEP_FAIL) or (self.keepLevel == self.KEEP_ALL): + self.saveResults( test ) + + return None + + #-------------------------------------------------------------------- + def handleFailure( self, test, err ): + """Called on addFailure. To handle the failure yourself and prevent + normal failure processing, return a true value. + + Parameters: + test : nose.case.Test + the test case + err : 3-tuple + sys.exc_info() tuple + """ + if (self.keepLevel == self.KEEP_FAIL) or (self.keepLevel == self.KEEP_ALL): + self.saveResults( test ) + + return None + + #-------------------------------------------------------------------- + def loadTestsFromDir( self, path ): + """Return iterable of tests from a directory. May be a generator. + Each item returned must be a runnable unittest.TestCase + (or subclass) instance or suite instance. Return None if your + plugin cannot collect any tests from directory. + + Parameters: + path : string + The path to the directory. + """ + return None + + #-------------------------------------------------------------------- + def loadTestsFromFile( self, filename ): + """Return tests in this file. Return None if you are not interested in + loading any tests, or an iterable if you are and can load some. May + be a generator. If you are interested in loading tests from the file + and encounter no errors, but find no tests, yield False or + return [False]. 
+ + Parameters: + filename : string + The full path to the file or directory. + """ + return None + + #-------------------------------------------------------------------- + def loadTestsFromModule( self, module ): + """Return iterable of tests in a module. May be a generator. Each + item returned must be a runnable unittest.TestCase (or subclass) + instance. Return None if your plugin cannot collect any tests + from module. + + Parameters: + module : python module + The module object + """ + return None + + #-------------------------------------------------------------------- + def loadTestsFromName( self, name, module=None, importPath=None ): + """Return tests in this file or module. Return None if you are not able + to load any tests, or an iterable if you are. May be a generator. + + Parameters: + name : string + The test name. May be a file or module name plus a test + callable. Use split_test_name to split into parts. Or it might + be some crazy name of your own devising, in which case, do + whatever you want. + module : python module + Module from which the name is to be loaded + """ + return None + + #-------------------------------------------------------------------- + def loadTestsFromNames( self, names, module=None ): + """Return a tuple of (tests loaded, remaining names). Return None if you + are not able to load any tests. Multiple plugins may implement + loadTestsFromNames; the remaining name list from each will be passed + to the next as input. + + Parameters: + names : iterable + List of test names. + module : python module + Module from which the names are to be loaded + """ + return None + + #-------------------------------------------------------------------- + def loadTestsFromTestCase( self, cls ): + """Return tests in this test case class. Return None if you are not able + to load any tests, or an iterable if you are. May be a generator. + + Parameters: + cls : class + The test case class. Must be subclass of unittest.TestCase. 
+ """ + return None + + #-------------------------------------------------------------------- + def loadTestsFromTestClass( self, cls ): + """Return tests in this test class. Class will not be a unittest.TestCase + subclass. Return None if you are not able to load any tests, an + iterable if you are. May be a generator. + + Parameters: + cls : class + The test class. Must NOT be subclass of unittest.TestCase. + """ + return None + + #-------------------------------------------------------------------- + def makeTest( self, obj, parent ): + """Given an object and its parent, return or yield one or more test + cases. Each test must be a unittest.TestCase (or subclass) instance. + This is called before default test loading to allow plugins to load + an alternate test case or cases for an object. May be a generator. + + Parameters: + obj : any object + The object to be made into a test + parent : class, module or other object + The parent of obj (eg, for a method, the class) + """ + return None + + #-------------------------------------------------------------------- + def options( self, parser, env = os.environ ): + """Called to allow plugin to register command line options with the parser. + + Do not return a value from this method unless you want to stop all other + plugins from setting their options. + + NOTE: By default, parser is a Python optparse.OptionParser instance. + """ + helpMsg = "The following are options specific to the matplotlib test harness" + group = optparse.OptionGroup( parser, "Matplotlib Options", helpMsg ) + + # Options to handle tags + helpMsg = "Will only run test cases that have the specified tag. Each " + helpMsg += "test case should have a 'tag' attribute (if a case does not h" + helpMsg += "ave one, then it is assumed to be an empty list). The 'tag' " + helpMsg += "attribute is a list of strings, where each value is a " + helpMsg += "representative propery of the test case. Example tags are " + helpMsg += "'qt' or 'units'. 
This can be specified multiple times." + group.add_option( '-t', '--with-tag', + action = 'append', type = 'string', dest = 'mpl_process_tags', + default = [], metavar = 'TAG', help = helpMsg ) + + helpMsg = "This will run those test cases that do not have the specified tags." + group.add_option( '--without-tag', + action = 'append', type = 'string', dest = 'mpl_skip_tags', + default = [], metavar = 'TAG', help = helpMsg ) + + + # Some Miscellaneous options + helpMsg = "This will remove all output files, saved results, and .pyc files. " + helpMsg += "If this is specified, no other processing will be performed." + group.add_option( '--clean', + action = "store_true", dest = "mpl_clean", + default = False, help = helpMsg ) + + helpMsg = "This will run all test programs regardless of working directory." + group.add_option( '--all', + action = "store_true", dest = "mpl_all", + default = False, help = helpMsg ) + + + # Options to handle generated data files + helpMsg = "Keep any generated output files in a directory called " + helpMsg += "'saved-results'. This directory will be created if it " + helpMsg += "doesn't already exist. This directory is in the same " + helpMsg += "location as the test case whose results are being saved." + group.add_option( '--keep', + action = "store_const", dest = "mpl_keep", + default = self.KEEP_NONE, const = self.KEEP_ALL, help = helpMsg ) + + helpMsg = "This acts just like '--keep' except will only keeps the results " + helpMsg += "from tests that error or fail." + group.add_option( '--keep-failed', + action = "store_const", dest = "mpl_keep", + default = self.KEEP_NONE, const = self.KEEP_FAIL, help = helpMsg ) + + + # Options to create a test case file + helpMsg = "Creates a template test case file in the current directory " + helpMsg += "with the name TestFoo. Where 'Foo' is the provided test name." 
+ group.add_option( '--make-test', + action = 'store', dest = 'mpl_make_test', + default = False, metavar = 'testName', help = helpMsg ) + + + parser.add_option_group( group ) + + #-------------------------------------------------------------------- + def prepareTest( self, test ): + """Called before the test is run by the test runner. Please note the + article the in the previous sentence: prepareTest is called only once, + and is passed the test case or test suite that the test runner will + execute. It is not called for each individual test case. If you return + a non-None value, that return value will be run as the test. Use this + hook to wrap or decorate the test with another function. If you need + to modify or wrap individual test cases, use prepareTestCase instead. + + Parameters: + test : nose.case.Test + the test case + """ + return None + + #-------------------------------------------------------------------- + def prepareTestCase( self, test ): + """Prepare or wrap an individual test case. Called before execution of + the test. The test passed here is a nose.case.Test instance; the case + to be executed is in the test attribute of the passed case. To modify + the test to be run, you should return a callable that takes one + argument (the test result object) -- it is recommended that you do not + side-effect the nose.case.Test instance you have been passed. + + Keep in mind that when you replace the test callable you are replacing + the run() method of the test case -- including the exception handling + and result calls, etc. + + Parameters: + test : nose.case.Test + the test case + """ + # Save the dir names in the test class instance to make it available + # to the individual test cases. 
+ t = getInstance( test ) + t.inputDir = self.inputDir + t.outputDir = self.outputDir + t.baselineDir = self.getBaselineDir( test ) + t.workingDir = self.currentDir + + return None + + #-------------------------------------------------------------------- + def prepareTestLoader( self, loader ): + """Called before tests are loaded. To replace the test loader, return a + test loader. To allow other plugins to process the test loader, + return None. Only one plugin may replace the test loader. Only valid + when using nose.TestProgram. + + Parameters: + loader : nose.loader.TestLoader or other loader instance + the test loader + """ + return None + + #-------------------------------------------------------------------- + def prepareTestResult( self, result ): + """Called before the first test is run. To use a different test result + handler for all tests than the given result, return a test result + handler. NOTE however that this handler will only be seen by tests, + that is, inside of the result proxy system. The TestRunner and + TestProgram -- whether nose's or other -- will continue to see the + original result handler. For this reason, it is usually better to + monkeypatch the result (for instance, if you want to handle some + exceptions in a unique way). Only one plugin may replace the result, + but many may monkeypatch it. If you want to monkeypatch and stop + other plugins from doing so, monkeypatch and return the patched result. + + Parameters: + result : nose.result.TextTestResult or other result instance + the test result + """ + return None + + #-------------------------------------------------------------------- + def prepareTestRunner( self, runner ): + """Called before tests are run. To replace the test runner, return a + test runner. To allow other plugins to process the test runner, + return None. Only valid when using nose.TestProgram. 
+ + Parameters: + runner : nose.core.TextTestRunner or other runner instance + the test runner + """ + return None + + #-------------------------------------------------------------------- + def report( self, stream ): + """Called after all error output has been printed. Print your plugin's + report to the provided stream. Return None to allow other plugins to + print reports, any other value to stop them. + + Parameters: + stream : file-like object + stream object; send your output here + """ + return None + + #-------------------------------------------------------------------- + def setOutputStream( self, stream ): + """Called before test output begins. To direct test output to a new + stream, return a stream object, which must implement a write(msg) + method. If you only want to note the stream, not capture or redirect + it, then return None. + + Parameters: + stream : file-like object + the original output stream + """ + return None + + #-------------------------------------------------------------------- + def startContext( self, context ): + """Called before context setup and the running of tests in the context. + Note that tests have already been loaded from the context before this call. + + Parameters: + context : module, class or other object + the context about to be setup. May be a module or class, or + any other object that contains tests. + """ + return None + + #-------------------------------------------------------------------- + def startTest( self, test ): + """Called before each test is run. DO NOT return a value unless you want + to stop other plugins from seeing the test start. + + Parameters: + test : nose.case.Test + the test case + """ + # make sure there is a fresh output directory to use. 
+ rmdir( self.outputDir ) + mkdir( self.outputDir, recursive = True ) + + # sys.stdout.write( "%s\n %s \n" % (test.id(), test.shortDescription()) ) + print "%s" % (test.id()) + print " %s" % (test.shortDescription()) + + #-------------------------------------------------------------------- + def stopContext( self, context ): + """Called after the tests in a context have run and the context has been + torn down. + + Parameters: + context : module, class or other object + the context that has just been torn down. + """ + return None + + #-------------------------------------------------------------------- + def stopTest( self, test ): + """Called after each test is run. DO NOT return a value unless you want + to stop other plugins from seeing that the test has stopped. + + Parameters: + test : nose.case.Test + the test case + """ + assert test == self.testResults[-1][0] + + if self.keepLevel == self.KEEP_ALL: + self.saveResults( test ) + + # KEEP_FAIL is handled by the 'handleError' and 'handleFailed' methods. + + rmdir( self.outputDir ) + + #-------------------------------------------------------------------- + def testName( self, test ): + """Return a short test name. Called by nose.case.Test.__str__. + + Parameters: + test : nose.case.Test + the test case + """ + return None + + #-------------------------------------------------------------------- + def wantClass( self, cls ): + """Return true if you want the main test selector to collect tests from + this class, false if you don't, and None if you don't care. 
+ + Parameters: + cls : class + The class being examined by the selector + """ + # Filter out classes that do not inherit from MplTestCase + if not issubclass( cls, MplTestCase ): + return False + + return self.filterTestItem( cls ) + + #-------------------------------------------------------------------- + def wantDirectory( self, dirname ): + """Return true if you want test collection to descend into this + directory, false if you do not, and None if you don't care. + + Parameters: + dirname : string + Full path to directory being examined by the selector + """ + # Skip the unit-test utility module. + if dirname == os.path.join( self.rootDir, 'mplTest' ): + return False + + return None + + #-------------------------------------------------------------------- + def wantFile( self, file ): + """Return true if you want to collect tests from this file, false if + you do not and None if you don't care. + + Parameters: + file : string + Full path to file being examined by the selector + """ + # Skip anything not under the root test directory + if self.rootDir not in file: + return False + + return None + + #-------------------------------------------------------------------- + def wantFunction( self, function ): + """Return true to collect this function as a test, false to prevent it + from being collected, and None if you don't care. + + Parameters: + function : function + The function object being examined by the selector + """ + #TODO: Filter out functions that exist outside of the test-structure + name = function.__name__.lower() + if "disabled" in name: return False + return self.filterTestItem( function ) + + #-------------------------------------------------------------------- + def wantMethod( self, method ): + """Return true to collect this method as a test, false to prevent it + from being collected, and None if you don't care. 
+ + Parameters: + method : unbound method + The method object being examined by the selector + """ + #TODO: Filter out methods that exist outside of the test-structure + name = method.__name__.lower() + if "disabled" in name: return False + return self.filterTestItem( method ) + + #-------------------------------------------------------------------- + def wantModule( self, module ): + """Return true if you want to collection to descend into this module, + false to prevent the collector from descending into the module, and + None if you don't care. + + Parameters: + module : python module + The module object being examined by the selector + """ + #TODO: Filter out modules that exist outside of the test-structure + name = module.__name__.lower() + if "disabled" in name: return False + return self.filterTestItem( module ) + + Added: trunk/matplotlib/test/mplTest/MplTestCase.py =================================================================== --- trunk/matplotlib/test/mplTest/MplTestCase.py (rev 0) +++ trunk/matplotlib/test/mplTest/MplTestCase.py 2009-02-18 19:44:29 UTC (rev 6922) @@ -0,0 +1,117 @@ +#======================================================================= +"""Defines the base matplotlib test-case.""" +#======================================================================= + +import os +import os.path +import unittest + +import compare +import path_utils + +#======================================================================= + +__all__ = [ 'MplTestCase' ] + +#======================================================================= +class MplTestCase( unittest.TestCase ): + """This is the base class for the matplotlib unit-tests. + + It provides a few utility functions for accessing managed directories: + - inputs - All input files for the test case are stored here. + - outputs - All output files for the test case are written here. + - baseline - All baseline files (those used for verifying results) for + athe test case are stored here. 
+ """ + #-------------------------------------------------------------------- + def inFile( self, fname ): + """Returns the pathname of the specified input file.""" + return os.path.join( self.inputDir, fname ) + + def outFile( self, fname ): + """Returns the pathname of the specified output file.""" + return os.path.join( self.outputDir, fname ) + + def baseFile( self, fname ): + """Returns the pathname of the specified baseline file.""" + return os.path.join( self.baselineDir, fname ) + + #-------------------------------------------------------------------- + def checkImage( self, outfname, tol = 1.0e-3, msg = "" ): + """Check to see if the image is similar to one stored in the + baseline directory. + """ + if self.outputDir in outfname: + # We are passed the path name and just want the file name. + actualImage = outfname + basename = path_utils.name( outfname ) + else: + basename = outfname + actualImage = self.outFile( basename ) + + baselineImage = self.baseFile( basename ) + + errorMessage = compare.compareImages( baselineImage, actualImage, tol ) + + if errorMessage: + self.fail( msg + "\n" + errorMessage ) + + #-------------------------------------------------------------------- + def checkEq( expected, actual, msg = "" ): + """Fail if the values are not equal, with the given message.""" + if not expected == actual: + expectedStr = str( expected ) + actualStr = str( actual ) + isMultiLine = ( "\n" in expectedStr or "\n" in actualStr or + len( expectedStr ) > 70 or len( actualStr ) > 70 ) + + if isMultiLine: + if msg: + msg += "\n\n" + msg += "Expected:\n" + msg += expectedStr + "\n\n" + msg += "Actual:\n" + msg += actualStr + "\n" + else: + if msg: + msg += "\n" + msg += " Expected: " + expectedStr + "\n" + msg += " Actual: " + actualStr + "\n" + + self.fail( msg ) + + #-------------------------------------------------------------------- + def checkNeq( expected, actual, msg = "" ): + """Fail if the values are equal, with the given message.""" + if expected 
== actual: + expectedStr = str( expected ) + isMultiLine = ( "\n" in expectedStr or len( expectedStr ) > 55 ) + + if isMultiLine: + if msg: + msg += "\n\n" + msg += "Expected and actual should not be equal.\n" + msg += "Expected and actual:\n" + msg += expectedStr + "\n" + else: + if msg: + msg += "\n" + msg += " Expected and actual should not be equal.\n" + msg += " Expected and actual: " + expectedStr + "\n" + + self.fail( msg ) + + #-------------------------------------------------------------------- + def checkClose( expected, actual, relTol = None, absTol = None, msg = "" ): + """Fail if the floating point values are not close enough, with + the given message. + + You can specify a relative tolerance, absolute tolerance, or both. + """ + errorMessage = compare.compareFloat( expected, actual, relTol, absTol ) + + if errorMessage: + self.fail( msg + "\n" + errorMessage ) + + #-------------------------------------------------------------------- + Added: trunk/matplotlib/test/mplTest/TestTEMPLATE.py =================================================================== --- trunk/matplotlib/test/mplTest/TestTEMPLATE.py (rev 0) +++ trunk/matplotlib/test/mplTest/TestTEMPLATE.py 2009-02-18 19:44:29 UTC (rev 6922) @@ -0,0 +1,62 @@ +#======================================================================= +"""The UNITTEST unit-test class implementation.""" +#======================================================================= + +from mplTest import * + +#======================================================================= +# Add import modules below. 
+import matplotlib +matplotlib.use( "Agg", warn = False ) + +import pylab +import numpy as npy +# +#======================================================================= + +#======================================================================= +class TestUNITTEST( MplTestCase ): + """UNITTEST unit test class.""" + + # Uncomment any appropriate tags + tags = [ + # 'gui', # requires the creation of a gui window + # 'agg', # uses agg in the backend + # 'agg-only', # uses only agg in the backend + # 'wx', # uses wx in the backend + # 'qt', # uses qt in the backend + # 'ps', # uses the postscript backend + # 'pdf', # uses the PDF backend + # 'units', # uses units in the test + # 'PIL', # uses PIL for image comparison + ] + + #-------------------------------------------------------------------- + def setUp( self ): + """Setup any data needed for the unit test.""" + #TODO: Put set-up code here + pass + + #-------------------------------------------------------------------- + def tearDown( self ): + """Clean-up any generated files here.""" + #TODO: Put clean-up code here + pass + + #-------------------------------------------------------------------- + def test_case_001( self ): + """TODO: A very brief description of the test case.""" + #TODO: Put test-case code here + + fname = self.outFile( "test_case_001a" ) + fout = open( fname, 'w' ) + fout.write( "A UNITTEST.test_case_001 output file.\n" ) + fout.close() + + fname = self.outFile( "test_case_001b" ) + fout = open( fname, 'w' ) + fout.write( "Another UNITTEST.test_case_001 output file.\n" ) + fout.close() + + pass + Added: trunk/matplotlib/test/mplTest/__init__.py =================================================================== --- trunk/matplotlib/test/mplTest/__init__.py (rev 0) +++ trunk/matplotlib/test/mplTest/__init__.py 2009-02-18 19:44:29 UTC (rev 6922) @@ -0,0 +1,13 @@ + +""" +A matplotlib unit test module. This module provides several utilities for +performing unit-tests on matplotlib. 
 This module depends on a properly +installed version of 'nose'. +""" + +from directories import * + +from mplTest.MplNosePlugin import MplNosePlugin +from mplTest.MplTestCase import MplTestCase + +import mplTest.units as units Added: trunk/matplotlib/test/mplTest/compare.py =================================================================== --- trunk/matplotlib/test/mplTest/compare.py (rev 0) +++ trunk/matplotlib/test/mplTest/compare.py 2009-02-18 19:44:29 UTC (rev 6922) @@ -0,0 +1,121 @@ +#======================================================================= +""" A set of utilities for comparing results. +""" +#======================================================================= + +import math +import operator + +#======================================================================= + +__all__ = [ + 'compareFloat', + 'compareImages', + ] + +#----------------------------------------------------------------------- +def compareFloat( expected, actual, relTol = None, absTol = None ): + """Fail if the floating point values are not close enough, with + the given message. + + You can specify a relative tolerance, absolute tolerance, or both. + """ + if relTol is None and absTol is None: + exMsg = "You haven't specified a 'relTol' relative tolerance " + exMsg += "or a 'absTol' absolute tolerance function argument. " + exMsg += "You must specify one." + raise ValueError, exMsg + + msg = "" + + if absTol is not None: + absDiff = abs( expected - actual ) + if absTol < absDiff: + expectedStr = str( expected ) + actualStr = str( actual ) + absDiffStr = str( absDiff ) + absTolStr = str( absTol ) + + msg += "\n" + msg += " Expected: " + expectedStr + "\n" + msg += " Actual: " + actualStr + "\n" + msg += " Abs Diff: " + absDiffStr + "\n" + msg += " Abs Tol: " + absTolStr + "\n" + + if relTol is not None: + # The relative difference of the two values. If the expected value is + # zero, then return the absolute value of the difference. 
+ relDiff = abs( expected - actual ) + if expected: + relDiff = relDiff / abs( expected ) + + if relTol < relDiff: + + # The relative difference is a ratio, so it's always unitless. + relDiffStr = str( relDiff ) + relTolStr = str( relTol ) + + expectedStr = str( expected ) + actualStr = str( actual ) + + msg += "\n" + msg += " Expected: " + expectedStr + "\n" + msg += " Actual: " + actualStr + "\n" + msg += " Rel Diff: " + relDiffStr + "\n" + msg += " Rel Tol: " + relTolStr + "\n" + + if msg: + return msg + else: + return None + +#----------------------------------------------------------------------- +def compareImages( expected, ac... [truncated message content] |