From: <ds...@us...> - 2007-10-19 14:43:50
Revision: 3967 http://matplotlib.svn.sourceforge.net/matplotlib/?rev=3967&view=rev Author: dsdale Date: 2007-10-19 07:43:21 -0700 (Fri, 19 Oct 2007) Log Message: ----------- removed a gsave/grestore pair surrounding _draw_ps Modified Paths: -------------- trunk/matplotlib/CHANGELOG trunk/matplotlib/lib/matplotlib/backends/backend_ps.py Modified: trunk/matplotlib/CHANGELOG =================================================================== --- trunk/matplotlib/CHANGELOG 2007-10-19 14:23:48 UTC (rev 3966) +++ trunk/matplotlib/CHANGELOG 2007-10-19 14:43:21 UTC (rev 3967) @@ -1,3 +1,8 @@ +2007-10-19 Removed a gsave/grestore pair surrounding _draw_ps, which + was causing a loss graphics state info (see "EPS output + problem - scatter & edgecolors" on mpl-dev, 2007-10-29) + - DSD + 2007-10-15 Fixed a bug in patches.Ellipse that was broken for aspect='auto'. Scale free ellipses now work properly for equal and auto on Agg and PS, and they fall back on a Modified: trunk/matplotlib/lib/matplotlib/backends/backend_ps.py =================================================================== --- trunk/matplotlib/lib/matplotlib/backends/backend_ps.py 2007-10-19 14:23:48 UTC (rev 3966) +++ trunk/matplotlib/lib/matplotlib/backends/backend_ps.py 2007-10-19 14:43:21 UTC (rev 3967) @@ -915,7 +915,6 @@ """ # local variable eliminates all repeated attribute lookups write = self._pswriter.write - write('gsave\n') if debugPS and command: write("% "+command+"\n") @@ -949,7 +948,6 @@ write("stroke\n") if cliprect: write("grestore\n") - write('grestore\n') def push_gc(self, gc, store=1): """ This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
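The pair removed here wrapped the whole of RendererPS._draw_ps, so graphics state pushed by push_gc (for example the stroke color of scatter edges) was discarded before the stroke commands ran, which is what the mpl-dev thread reported for EPS output. A minimal reproduction along those lines might look like the sketch below; the scatter call and the output filename are assumptions, not taken from the bug report.

    import matplotlib
    matplotlib.use('PS')              # the problem was specific to the PS/EPS backend
    import pylab as p

    fig = p.figure()
    ax = fig.add_subplot(111)
    # markers whose face and edge colors differ exercise the graphics-state
    # handling that the stray gsave/grestore pair was discarding
    ax.scatter(range(10), range(10), c='yellow', edgecolors='red')
    fig.savefig('scatter_edgecolors.eps')   # edge colors were lost before r3967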
From: <jd...@us...> - 2007-10-21 16:02:11
Revision: 3975 http://matplotlib.svn.sourceforge.net/matplotlib/?rev=3975&view=rev Author: jdh2358 Date: 2007-10-21 09:02:07 -0700 (Sun, 21 Oct 2007) Log Message: ----------- added a show method for gtk fig manager Modified Paths: -------------- trunk/matplotlib/lib/matplotlib/__init__.py trunk/matplotlib/lib/matplotlib/backends/backend_gtk.py trunk/matplotlib/setup.py trunk/matplotlib/setupext.py Modified: trunk/matplotlib/lib/matplotlib/__init__.py =================================================================== --- trunk/matplotlib/lib/matplotlib/__init__.py 2007-10-20 21:57:26 UTC (rev 3974) +++ trunk/matplotlib/lib/matplotlib/__init__.py 2007-10-21 16:02:07 UTC (rev 3975) @@ -55,7 +55,7 @@ """ from __future__ import generators -NEWCONFIG = True +NEWCONFIG = False __version__ = '0.90.1' __revision__ = '$Revision$' Modified: trunk/matplotlib/lib/matplotlib/backends/backend_gtk.py =================================================================== --- trunk/matplotlib/lib/matplotlib/backends/backend_gtk.py 2007-10-20 21:57:26 UTC (rev 3974) +++ trunk/matplotlib/lib/matplotlib/backends/backend_gtk.py 2007-10-21 16:02:07 UTC (rev 3975) @@ -390,7 +390,7 @@ self.window = gtk.Window() self.window.set_title("Figure %d" % num) - + self.vbox = gtk.VBox() self.window.add(self.vbox) self.vbox.show() @@ -439,7 +439,10 @@ gtk.main_level() >= 1: gtk.main_quit() - + def show(self): + # show the figure window + self.window.show() + def full_screen_toggle (self): self._full_screen_flag = not self._full_screen_flag if self._full_screen_flag: Modified: trunk/matplotlib/setup.py =================================================================== --- trunk/matplotlib/setup.py 2007-10-20 21:57:26 UTC (rev 3974) +++ trunk/matplotlib/setup.py 2007-10-21 16:02:07 UTC (rev 3975) @@ -24,8 +24,8 @@ # it. 
It makes very nice antialiased output and also supports alpha # blending BUILD_AGG = 1 -BUILD_GTKAGG = 'auto' -BUILD_GTK = 'auto' +BUILD_GTKAGG = 1 +BUILD_GTK = 1 # build TK GUI with Agg renderer ; requires Tkinter Python extension # and Tk includes @@ -271,7 +271,7 @@ # packagers: set rc['numerix'] and rc['backend'] here to override the auto # defaults, eg #rc['numerix'] = numpy -#rc['backend'] = GTKAgg +#rc['backend'] = 'GTKAgg' if sys.platform=='win32': rc = dict(backend='TkAgg', numerix='numpy') template = file('matplotlibrc.template').read() Modified: trunk/matplotlib/setupext.py =================================================================== --- trunk/matplotlib/setupext.py 2007-10-20 21:57:26 UTC (rev 3974) +++ trunk/matplotlib/setupext.py 2007-10-21 16:02:07 UTC (rev 3975) @@ -43,9 +43,10 @@ import os + basedir = { 'win32' : ['win32_static',], - 'linux2' : ['/usr/local', '/usr',], + 'linux2' : ['/usr/local', '/usr'], 'linux' : ['/usr/local', '/usr',], 'cygwin' : ['/usr/local', '/usr',], 'darwin' : ['/sw/lib/freetype2', '/sw/lib/freetype219', '/usr/local', @@ -170,6 +171,7 @@ if sys.platform == 'win32': has_pkgconfig.cache = False else: + #print 'environ', os.environ['PKG_CONFIG_PATH'] status, output = commands.getstatusoutput("pkg-config --help") has_pkgconfig.cache = (status == 0) return has_pkgconfig.cache @@ -192,6 +194,9 @@ status, output = commands.getstatusoutput( "%s %s %s" % (pkg_config_exec, flags, packages)) + #if packages.startswith('pygtk'): + # print 'status', status, output + # raise SystemExit if status == 0: for token in output.split(): attr = _flags.get(token[:2], None) This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
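The user-visible piece of this commit is the new show() method on the GTK figure manager, which simply maps the figure window. A sketch of how it can be reached through the pylab interface follows; going via get_current_fig_manager() and calling gtk.main() explicitly are assumptions about typical usage, not part of the commit.

    import matplotlib
    matplotlib.use('GTK')                     # requires pygtk
    from pylab import figure, get_current_fig_manager

    fig = figure()
    ax = fig.add_subplot(111)
    ax.plot([1, 2, 3])

    manager = get_current_fig_manager()       # a FigureManagerGTK instance
    manager.show()                            # new in r3975: shows the figure window

    import gtk
    gtk.main()                                # hand control to the GTK main loop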
From: <ef...@us...> - 2007-10-24 22:15:01
Revision: 3999 http://matplotlib.svn.sourceforge.net/matplotlib/?rev=3999&view=rev Author: efiring Date: 2007-10-24 15:14:57 -0700 (Wed, 24 Oct 2007) Log Message: ----------- Added ax kwarg to pyplot.colorbar and Figure.colorbar Modified Paths: -------------- trunk/matplotlib/API_CHANGES trunk/matplotlib/CHANGELOG trunk/matplotlib/lib/matplotlib/colorbar.py trunk/matplotlib/lib/matplotlib/figure.py trunk/matplotlib/lib/matplotlib/pyplot.py Modified: trunk/matplotlib/API_CHANGES =================================================================== --- trunk/matplotlib/API_CHANGES 2007-10-24 19:42:49 UTC (rev 3998) +++ trunk/matplotlib/API_CHANGES 2007-10-24 22:14:57 UTC (rev 3999) @@ -1,6 +1,11 @@ - Changed cbook.reversed so it yields a tuple rather than a + Added ax kwarg to pyplot.colorbar and Figure.colorbar so that + one can specify the axes object from which space for the colorbar + is to be taken, if one does not want to make the colorbar axes + manually. + + Changed cbook.reversed so it yields a tuple rather than a (index, tuple). This agrees with the python reversed builtin, - and cbook only defines reversed if python doesnt provide the + and cbook only defines reversed if python doesnt provide the builtin. Made skiprows=1 the default on csv2rec Modified: trunk/matplotlib/CHANGELOG =================================================================== --- trunk/matplotlib/CHANGELOG 2007-10-24 19:42:49 UTC (rev 3998) +++ trunk/matplotlib/CHANGELOG 2007-10-24 22:14:57 UTC (rev 3999) @@ -1,3 +1,5 @@ +2007-10-24 Added ax kwarg to Figure.colorbar and pyplot.colorbar - EF + 2007-10-19 Removed a gsave/grestore pair surrounding _draw_ps, which was causing a loss graphics state info (see "EPS output problem - scatter & edgecolors" on mpl-dev, 2007-10-29) @@ -12,7 +14,7 @@ unit/ellipse_compare.py to compare spline with vertex approx for both aspects. JDH -2007-10-05 remove generator expressions from texmanager and mpltraits. +2007-10-05 remove generator expressions from texmanager and mpltraits. generator expressions are not supported by python-2.3 - DSD 2007-10-01 Made matplotlib.use() raise an exception if called after Modified: trunk/matplotlib/lib/matplotlib/colorbar.py =================================================================== --- trunk/matplotlib/lib/matplotlib/colorbar.py 2007-10-24 19:42:49 UTC (rev 3998) +++ trunk/matplotlib/lib/matplotlib/colorbar.py 2007-10-24 22:14:57 UTC (rev 3999) @@ -70,21 +70,27 @@ colorbar_doc = ''' Add a colorbar to a plot. -Function signatures: +Function signatures for the pyplot interface; all but the first are +also method signatures for the Figure.colorbar method: colorbar(**kwargs) - colorbar(mappable, **kwargs) + colorbar(mappable, cax=cax, **kwargs) + colorbar(mappable, ax=ax, **kwargs) - colorbar(mappable, cax, **kwargs) + arguments: + mappable: the image, ContourSet, etc. to which the colorbar applies; + this argument is mandatory for the Figure.colorbar + method but optional for the pyplot.colorbar function, + which sets the default to the current image. -The optional arguments mappable and cax may be included in the kwargs; -they are image, ContourSet, etc. to which the colorbar applies, and -the axes object in which the colorbar will be drawn. Defaults are -the current image and a new axes object created next to that image -after resizing the image. 
+ keyword arguments: + cax: None | axes object into which the colorbar will be drawn + ax: None | parent axes object from which space for a new + colorbar axes will be stolen -kwargs are in two groups: + +**kwargs are in two groups: axes properties: %s colorbar properties: Modified: trunk/matplotlib/lib/matplotlib/figure.py =================================================================== --- trunk/matplotlib/lib/matplotlib/figure.py 2007-10-24 19:42:49 UTC (rev 3998) +++ trunk/matplotlib/lib/matplotlib/figure.py 2007-10-24 22:14:57 UTC (rev 3999) @@ -769,9 +769,9 @@ self.canvas.print_figure(*args, **kwargs) - def colorbar(self, mappable, cax=None, **kw): - orientation = kw.get('orientation', 'vertical') - ax = self.gca() + def colorbar(self, mappable, cax=None, ax=None, **kw): + if ax is None: + ax = self.gca() if cax is None: cax, kw = cbar.make_axes(ax, **kw) cb = cbar.Colorbar(cax, mappable, **kw) Modified: trunk/matplotlib/lib/matplotlib/pyplot.py =================================================================== --- trunk/matplotlib/lib/matplotlib/pyplot.py 2007-10-24 19:42:49 UTC (rev 3998) +++ trunk/matplotlib/lib/matplotlib/pyplot.py 2007-10-24 22:14:57 UTC (rev 3999) @@ -1080,10 +1080,10 @@ from matplotlib.colorbar import colorbar_doc -def colorbar(mappable = None, cax=None,**kw): +def colorbar(mappable=None, cax=None, ax=None, **kw): if mappable is None: mappable = gci() - ret = gcf().colorbar(mappable, cax = cax, **kw) + ret = gcf().colorbar(mappable, cax = cax, ax=ax, **kw) draw_if_interactive() return ret colorbar.__doc__ = colorbar_doc This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
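With the new keyword the parent axes no longer has to be the current axes; a rough usage sketch (data and layout are illustrative only):

    import numpy as npy
    from matplotlib.pyplot import figure, show

    fig = figure()
    ax1 = fig.add_subplot(121)
    ax2 = fig.add_subplot(122)
    im = ax1.imshow(npy.random.rand(10, 10))
    ax2.plot(npy.random.rand(20))

    # space for the colorbar is taken from ax1 only; before r3999 it always
    # came from gca()
    fig.colorbar(im, ax=ax1)
    show()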
From: <jd...@us...> - 2007-10-29 18:53:06
Revision: 4061 http://matplotlib.svn.sourceforge.net/matplotlib/?rev=4061&view=rev Author: jdh2358 Date: 2007-10-29 11:52:41 -0700 (Mon, 29 Oct 2007) Log Message: ----------- fixed some examples bugs Modified Paths: -------------- trunk/matplotlib/examples/keypress_demo.py trunk/matplotlib/examples/units/bar_unit_demo.py trunk/matplotlib/lib/matplotlib/axes.py trunk/matplotlib/lib/matplotlib/mlab.py trunk/matplotlib/lib/matplotlib/patches.py Modified: trunk/matplotlib/examples/keypress_demo.py =================================================================== --- trunk/matplotlib/examples/keypress_demo.py 2007-10-29 18:37:17 UTC (rev 4060) +++ trunk/matplotlib/examples/keypress_demo.py 2007-10-29 18:52:41 UTC (rev 4061) @@ -1,21 +1,23 @@ #!/usr/bin/env python """ Show how to connect to keypress events - -Note, on the wx backend on some platforms (eg linux), you have to -first click on the figure before the keypress events are activated. -If you know how to fix this, please email us! """ -from pylab import * +import numpy as n +from pylab import figure, show def press(event): print 'press', event.key - if event.key=='g': - grid() - draw() + if event.key=='x': + visible = xl.get_visible() + xl.set_visible(not visible) + fig.canvas.draw() -connect('key_press_event', press) +fig = figure() +ax = fig.add_subplot(111) -title('press g to toggle grid') -plot(rand(12), rand(12), 'go') +fig.canvas.mpl_connect('key_press_event', press) + +ax.plot(n.random.rand(12), n.random.rand(12), 'go') +xl = ax.set_xlabel('easy come, easy go') + show() Modified: trunk/matplotlib/examples/units/bar_unit_demo.py =================================================================== --- trunk/matplotlib/examples/units/bar_unit_demo.py 2007-10-29 18:37:17 UTC (rev 4060) +++ trunk/matplotlib/examples/units/bar_unit_demo.py 2007-10-29 18:52:41 UTC (rev 4061) @@ -4,15 +4,16 @@ N = 5 menMeans = (150*cm, 160*cm, 146*cm, 172*cm, 155*cm) -menStd = (20*cm, 30*cm, 32*cm, 10*cm, 20*cm) +menStd = ( 20*cm, 30*cm, 32*cm, 10*cm, 20*cm) fig = figure() ax = fig.add_subplot(111) ind = nx.arange(N) # the x locations for the groups -width = 0.35 # the width of the bars +width = 0.35 # the width of the bars p1 = ax.bar(ind, menMeans, width, color='r', bottom=0*cm, yerr=menStd) + womenMeans = (145*cm, 149*cm, 172*cm, 165*cm, 200*cm) womenStd = (30*cm, 25*cm, 20*cm, 31*cm, 22*cm) p2 = ax.bar(ind+width, womenMeans, width, color='y', bottom=0*cm, yerr=womenStd) Modified: trunk/matplotlib/lib/matplotlib/axes.py =================================================================== --- trunk/matplotlib/lib/matplotlib/axes.py 2007-10-29 18:37:17 UTC (rev 4060) +++ trunk/matplotlib/lib/matplotlib/axes.py 2007-10-29 18:52:41 UTC (rev 4061) @@ -1173,6 +1173,8 @@ # Otherwise, it will compute the bounds of it's current data # and the data in xydata xys = npy.asarray(xys) + + self.dataLim.update_numerix_xy(xys, -1) @@ -3242,22 +3244,7 @@ patches = [] - # lets do some conversions now - if self.xaxis is not None: - xconv = self.xaxis.converter - if xconv is not None: - units = self.xaxis.get_units() - left = xconv.convert( left, units ) - width = xconv.convert( width, units ) - if self.yaxis is not None: - yconv = self.yaxis.converter - if yconv is not None : - units = self.yaxis.get_units() - bottom = yconv.convert( bottom, units ) - height = yconv.convert( height, units ) - - if align == 'edge': pass elif align == 'center': @@ -3645,23 +3632,24 @@ a list of error bar cap lines, the third element is a list of line collections for the horizontal and 
vertical error ranges """ + self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs) if not self._hold: self.cla() - # make sure all the args are iterable arrays - if not iterable(x): x = npy.array([x]) - else: x = npy.asarray(x) + # make sure all the args are iterable; use lists not arrays to preserve units + if not iterable(x): + x = [x] - if not iterable(y): y = npy.array([y]) - else: y = npy.asarray(y) + if not iterable(y): + y = [y] if xerr is not None: - if not iterable(xerr): xerr = npy.array([xerr]) - else: xerr = npy.asarray(xerr) + if not iterable(xerr): + xerr = [xerr] if yerr is not None: - if not iterable(yerr): yerr = npy.array([yerr]) - else: yerr = npy.asarray(yerr) + if not iterable(yerr): + yerr = [yerr] l0 = None @@ -3677,7 +3665,9 @@ if 'lw' in kwargs: lines_kw['lw']=kwargs['lw'] - if not iterable(lolims): lolims = npy.array([lolims]*len(x), bool) + # arrays fine here, they are booleans and hence not units + if not iterable(lolims): + lolims = npy.asarray([lolims]*len(x), bool) else: lolims = npy.asarray(lolims, bool) if not iterable(uplims): uplims = npy.array([uplims]*len(x), bool) @@ -3699,12 +3689,14 @@ plot_kw['mew']=kwargs['mew'] if xerr is not None: - if len(xerr.shape) == 1: - left = x-xerr - right = x+xerr + if iterable(xerr) and len(xerr)==2: + # using list comps rather than arrays to preserve units + left = [thisx-thiserr for (thisx, thiserr) in zip(x,xerr[0])] + right = [thisx+thiserr for (thisx, thiserr) in zip(x,xerr[1])] else: - left = x-xerr[0] - right = x+xerr[1] + # using list comps rather than arrays to preserve units + left = [thisx-thiserr for (thisx, thiserr) in zip(x,xerr)] + right = [thisx+thiserr for (thisx, thiserr) in zip(x,xerr)] barcols.append( self.hlines(y, left, right, **lines_kw ) ) if capsize > 0: @@ -3723,12 +3715,14 @@ caplines.extend( self.plot(right, y, 'k|', **plot_kw) ) if yerr is not None: - if len(yerr.shape) == 1: - lower = y-yerr - upper = y+yerr + if iterable(yerr) and len(yerr)==2: + # using list comps rather than arrays to preserve units + lower = [thisy-thiserr for (thisy, thiserr) in zip(y,yerr[0])] + upper = [thisy+thiserr for (thisy, thiserr) in zip(y,yerr[1])] else: - lower = y-yerr[0] - upper = y+yerr[1] + # using list comps rather than arrays to preserve units + lower = [thisy-thiserr for (thisy, thiserr) in zip(y,yerr)] + upper = [thisy+thiserr for (thisy, thiserr) in zip(y,yerr)] barcols.append( self.vlines(x, lower, upper, **lines_kw) ) if capsize > 0: Modified: trunk/matplotlib/lib/matplotlib/mlab.py =================================================================== --- trunk/matplotlib/lib/matplotlib/mlab.py 2007-10-29 18:37:17 UTC (rev 4060) +++ trunk/matplotlib/lib/matplotlib/mlab.py 2007-10-29 18:52:41 UTC (rev 4061) @@ -1455,7 +1455,116 @@ for row in r: writer.writerow(map(str, row)) fh.close() + +try: + import pyExcelerator as excel +except ImportError: + pass +else: + class Format: + xlstyle = None + def convert(self, x): + return x + + class FormatFloat(Format): + def __init__(self, precision=4): + self.xlstyle = excel.XFStyle() + zeros = ''.join(['0']*precision) + self.xlstyle.num_format_str = '#,##0.%s;[RED]-#,##0.%s'%(zeros, zeros) + + class FormatInt(Format): + convert = int + def __init__(self): + + self.xlstyle = excel.XFStyle() + self.xlstyle.num_format_str = '#,##;[RED]-#,##' + + class FormatPercent(Format): + def __init__(self, precision=4): + self.xlstyle = excel.XFStyle() + zeros = ''.join(['0']*precision) + self.xlstyle.num_format_str = '0.%s%;[RED]-0.%s%'%(zeros, zeros) + + class 
FormatThousands(FormatFloat): + def __init__(self, precision=1): + FormatFloat.__init__(self, precision) + + def convert(self, x): + return x/1e3 + + class FormatMillions(FormatFloat): + def __init__(self, precision=1): + FormatFloat.__init__(self, precision) + + def convert(self, x): + return x/1e6 + + class FormatDate(Format): + def __init__(self, fmt='%Y-%m-%d'): + self.fmt = fmt + + def convert(self, val): + return val.strftime(self.fmt) + + class FormatDatetime(Format): + def __init__(self, fmt='%Y-%m-%d %H:%M:%S'): + self.fmt = fmt + + def convert(self, val): + return val.strftime(self.fmt) + + class FormatObject(Format): + + def convert(self, x): + return str(x) + + def rec2excel(ws, r, formatd=None, rownum=0): + """ + save record array r to excel pyExcelerator worksheet ws + starting at rownum + + formatd is a dictionary mapping dtype name -> Format instances + """ + + if formatd is None: + formatd = dict() + + formats = [] + for i, name in enumerate(r.dtype.names): + dt = r.dtype[name] + format = formatd.get(name) + if format is None: + format = rec2excel.formatd.get(dt.type, FormatObject()) + + ws.write(rownum, i, name) + formats.append(format) + + rownum+=1 + + ind = npy.arange(len(r.dtype.names)) + for row in r: + for i in ind: + val = row[i] + format = formats[i] + val = format.convert(val) + if format.xlstyle is None: + ws.write(rownum, i, val) + else: + ws.write(rownum, i, val, format.xlstyle) + rownum += 1 + rec2excel.formatd = { + npy.int16 : FormatInt(), + npy.int32 : FormatInt(), + npy.int64 : FormatInt(), + npy.float32 : FormatFloat(), + npy.float64 : FormatFloat(), + npy.object_ : FormatObject(), + npy.string_ : Format(), + } + + + # some record array helpers def rec_append_field(rec, name, arr, dtype=None): 'return a new record array with field name populated with data from array arr' Modified: trunk/matplotlib/lib/matplotlib/patches.py =================================================================== --- trunk/matplotlib/lib/matplotlib/patches.py 2007-10-29 18:37:17 UTC (rev 4060) +++ trunk/matplotlib/lib/matplotlib/patches.py 2007-10-29 18:52:41 UTC (rev 4061) @@ -350,8 +350,10 @@ Return the vertices of the rectangle """ x, y = self.xy - left, right = self.convert_xunits((x, x + self.width)) - bottom, top = self.convert_yunits((y, y + self.height)) + left = self.convert_xunits(x) + right = self.convert_xunits(x + self.width) + bottom = self.convert_yunits(y) + top = self.convert_yunits(y+self.height) return ( (left, bottom), (left, top), (right, top), (right, bottom), @@ -806,8 +808,15 @@ def get_verts(self): xcenter, ycenter = self.center + width, height = self.width, self.height - width, height = self.width, self.height + xcenter = self.convert_xunits(xcenter) + width = self.convert_xunits(width) + ycenter = self.convert_yunits(ycenter) + height = self.convert_xunits(height) + + + angle = self.angle theta = npy.arange(0.0, 360.0, 1.0)*npy.pi/180.0 @@ -820,8 +829,6 @@ [npy.sin(rtheta), npy.cos(rtheta)], ]) - x = self.convert_xunits(x) - y = self.convert_yunits(y) x, y = npy.dot(R, npy.array([x, y])) x += xcenter @@ -857,6 +864,8 @@ x, y = self.center x = self.convert_xunits(x) y = self.convert_yunits(y) + w = self.convert_xunits(self.width)/2. + h = self.convert_yunits(self.height)/2. theta = self.angle * npy.pi/180. T = npy.array([ @@ -864,10 +873,8 @@ [0, 1, y], [0, 0, 1]]) - w, h = self.width/2, self.height/2. 
- w = self.convert_xunits(w) - h = self.convert_yunits(h) + S = npy.array([ [w, 0, 0], This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
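The errorbar changes keep x, y, xerr and yerr as plain sequences rather than converting them to arrays, so unit-bearing values survive, and a length-2 yerr/xerr is now read as separate lower and upper errors. A small sketch with plain floats; unitized data such as the cm objects from the units examples should behave the same way, though that is an assumption on the editor's part:

    from matplotlib.pyplot import figure, show

    fig = figure()
    ax = fig.add_subplot(111)
    x = [1, 2, 3, 4]
    y = [2.0, 4.0, 3.0, 5.0]
    yerr = [[0.3, 0.4, 0.2, 0.5],    # lower errors
            [0.6, 0.2, 0.5, 0.3]]    # upper errors
    ax.errorbar(x, y, yerr=yerr, fmt='o')
    show()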
From: <ds...@us...> - 2007-10-31 19:53:53
Revision: 4073 http://matplotlib.svn.sourceforge.net/matplotlib/?rev=4073&view=rev Author: dsdale Date: 2007-10-31 12:53:35 -0700 (Wed, 31 Oct 2007) Log Message: ----------- add STIX license agreement Added Paths: ----------- trunk/matplotlib/lib/matplotlib/mpl-data/fonts/otf/LICENSE_STIX trunk/matplotlib/license/LICENSE_STIX Added: trunk/matplotlib/lib/matplotlib/mpl-data/fonts/otf/LICENSE_STIX =================================================================== --- trunk/matplotlib/lib/matplotlib/mpl-data/fonts/otf/LICENSE_STIX (rev 0) +++ trunk/matplotlib/lib/matplotlib/mpl-data/fonts/otf/LICENSE_STIX 2007-10-31 19:53:35 UTC (rev 4073) @@ -0,0 +1,71 @@ +TERMS AND CONDITIONS + + 1. Permission is hereby granted, free of charge, to any person +obtaining a copy of the STIX Fonts-TM set accompanying this license +(collectively, the "Fonts") and the associated documentation files +(collectively with the Fonts, the "Font Software"), to reproduce and +distribute the Font Software, including the rights to use, copy, merge +and publish copies of the Font Software, and to permit persons to whom +the Font Software is furnished to do so same, subject to the following +terms and conditions (the "License"). + + 2. The following copyright and trademark notice and these Terms and +Conditions shall be included in all copies of one or more of the Font +typefaces and any derivative work created as permitted under this +License: + + Copyright (c) 2001-2005 by the STI Pub Companies, consisting of +the American Institute of Physics, the American Chemical Society, the +American Mathematical Society, the American Physical Society, Elsevier, +Inc., and The Institute of Electrical and Electronic Engineers, Inc. +Portions copyright (c) 1998-2003 by MicroPress, Inc. Portions copyright +(c) 1990 by Elsevier, Inc. All rights reserved. STIX Fonts-TM is a +trademark of The Institute of Electrical and Electronics Engineers, Inc. + + 3. You may (a) convert the Fonts from one format to another (e.g., +from TrueType to PostScript), in which case the normal and reasonable +distortion that occurs during such conversion shall be permitted and (b) +embed or include a subset of the Fonts in a document for the purposes of +allowing users to read text in the document that utilizes the Fonts. In +each case, you may use the STIX Fonts-TM mark to designate the resulting +Fonts or subset of the Fonts. + + 4. You may also (a) add glyphs or characters to the Fonts, or modify +the shape of existing glyphs, so long as the base set of glyphs is not +removed and (b) delete glyphs or characters from the Fonts, provided +that the resulting font set is distributed with the following +disclaimer: "This [name] font does not include all the Unicode points +covered in the STIX Fonts-TM set but may include others." In each case, +the name used to denote the resulting font set shall not include the +term "STIX" or any similar term. + + 5. You may charge a fee in connection with the distribution of the +Font Software, provided that no copy of one or more of the individual +Font typefaces that form the STIX Fonts-TM set may be sold by itself. + + 6. THE FONT SOFTWARE IS PROVIDED "AS IS," WITHOUT WARRANTY OF ANY +KIND, EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, ANY WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK OR OTHER RIGHT. 
IN NO EVENT SHALL +MICROPRESS OR ANY OF THE STI PUB COMPANIES BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, INCLUDING, BUT NOT LIMITED TO, ANY GENERAL, +SPECIAL, INDIRECT, INCIDENTAL OR CONSEQUENTIAL DAMAGES, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM OR OUT OF THE USE OR +INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER DEALINGS IN THE FONT +SOFTWARE. + + 7. Except as contained in the notice set forth in Section 2, the +names MicroPress Inc. and STI Pub Companies, as well as the names of the +companies/organizations that compose the STI Pub Companies, shall not be +used in advertising or otherwise to promote the sale, use or other +dealings in the Font Software without the prior written consent of the +respective company or organization. + + 8. This License shall become null and void in the event of any +material breach of the Terms and Conditions herein by licensee. + + 9. A substantial portion of the STIX Fonts set was developed by +MicroPress Inc. for the STI Pub Companies. To obtain additional +mathematical fonts, please contact MicroPress, Inc., 68-30 Harrow +Street, Forest Hills, NY 11375, USA - Phone: (718) 575-1816. + Added: trunk/matplotlib/license/LICENSE_STIX =================================================================== --- trunk/matplotlib/license/LICENSE_STIX (rev 0) +++ trunk/matplotlib/license/LICENSE_STIX 2007-10-31 19:53:35 UTC (rev 4073) @@ -0,0 +1,71 @@ +TERMS AND CONDITIONS + + 1. Permission is hereby granted, free of charge, to any person +obtaining a copy of the STIX Fonts-TM set accompanying this license +(collectively, the "Fonts") and the associated documentation files +(collectively with the Fonts, the "Font Software"), to reproduce and +distribute the Font Software, including the rights to use, copy, merge +and publish copies of the Font Software, and to permit persons to whom +the Font Software is furnished to do so same, subject to the following +terms and conditions (the "License"). + + 2. The following copyright and trademark notice and these Terms and +Conditions shall be included in all copies of one or more of the Font +typefaces and any derivative work created as permitted under this +License: + + Copyright (c) 2001-2005 by the STI Pub Companies, consisting of +the American Institute of Physics, the American Chemical Society, the +American Mathematical Society, the American Physical Society, Elsevier, +Inc., and The Institute of Electrical and Electronic Engineers, Inc. +Portions copyright (c) 1998-2003 by MicroPress, Inc. Portions copyright +(c) 1990 by Elsevier, Inc. All rights reserved. STIX Fonts-TM is a +trademark of The Institute of Electrical and Electronics Engineers, Inc. + + 3. You may (a) convert the Fonts from one format to another (e.g., +from TrueType to PostScript), in which case the normal and reasonable +distortion that occurs during such conversion shall be permitted and (b) +embed or include a subset of the Fonts in a document for the purposes of +allowing users to read text in the document that utilizes the Fonts. In +each case, you may use the STIX Fonts-TM mark to designate the resulting +Fonts or subset of the Fonts. + + 4. 
You may also (a) add glyphs or characters to the Fonts, or modify +the shape of existing glyphs, so long as the base set of glyphs is not +removed and (b) delete glyphs or characters from the Fonts, provided +that the resulting font set is distributed with the following +disclaimer: "This [name] font does not include all the Unicode points +covered in the STIX Fonts-TM set but may include others." In each case, +the name used to denote the resulting font set shall not include the +term "STIX" or any similar term. + + 5. You may charge a fee in connection with the distribution of the +Font Software, provided that no copy of one or more of the individual +Font typefaces that form the STIX Fonts-TM set may be sold by itself. + + 6. THE FONT SOFTWARE IS PROVIDED "AS IS," WITHOUT WARRANTY OF ANY +KIND, EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, ANY WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT +OF COPYRIGHT, PATENT, TRADEMARK OR OTHER RIGHT. IN NO EVENT SHALL +MICROPRESS OR ANY OF THE STI PUB COMPANIES BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, INCLUDING, BUT NOT LIMITED TO, ANY GENERAL, +SPECIAL, INDIRECT, INCIDENTAL OR CONSEQUENTIAL DAMAGES, WHETHER IN AN +ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM OR OUT OF THE USE OR +INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER DEALINGS IN THE FONT +SOFTWARE. + + 7. Except as contained in the notice set forth in Section 2, the +names MicroPress Inc. and STI Pub Companies, as well as the names of the +companies/organizations that compose the STI Pub Companies, shall not be +used in advertising or otherwise to promote the sale, use or other +dealings in the Font Software without the prior written consent of the +respective company or organization. + + 8. This License shall become null and void in the event of any +material breach of the Terms and Conditions herein by licensee. + + 9. A substantial portion of the STIX Fonts set was developed by +MicroPress Inc. for the STI Pub Companies. To obtain additional +mathematical fonts, please contact MicroPress, Inc., 68-30 Harrow +Street, Forest Hills, NY 11375, USA - Phone: (718) 575-1816. + This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <ef...@us...> - 2007-11-01 07:21:11
Revision: 4081 http://matplotlib.svn.sourceforge.net/matplotlib/?rev=4081&view=rev Author: efiring Date: 2007-11-01 00:21:00 -0700 (Thu, 01 Nov 2007) Log Message: ----------- Made contour auto level generation work with log color scale Modified Paths: -------------- trunk/matplotlib/CHANGELOG trunk/matplotlib/lib/matplotlib/contour.py Modified: trunk/matplotlib/CHANGELOG =================================================================== --- trunk/matplotlib/CHANGELOG 2007-11-01 03:40:06 UTC (rev 4080) +++ trunk/matplotlib/CHANGELOG 2007-11-01 07:21:00 UTC (rev 4081) @@ -1,3 +1,6 @@ +2007-10-31 Made log color scale easier to use with contourf; + automatic level generation now works. - EF + 2007-10-24 Added ax kwarg to Figure.colorbar and pyplot.colorbar - EF 2007-10-19 Removed a gsave/grestore pair surrounding _draw_ps, which Modified: trunk/matplotlib/lib/matplotlib/contour.py =================================================================== --- trunk/matplotlib/lib/matplotlib/contour.py 2007-11-01 03:40:06 UTC (rev 4080) +++ trunk/matplotlib/lib/matplotlib/contour.py 2007-11-01 07:21:00 UTC (rev 4081) @@ -401,6 +401,15 @@ self.antialiased = kwargs.get('antialiased', True) self.nchunk = kwargs.get('nchunk', 0) self.locator = kwargs.get('locator', None) + if (isinstance(norm, colors.LogNorm) + or isinstance(self.locator, ticker.LogLocator)): + self.logscale = True + if norm is None: + norm = colors.LogNorm() + if self.extend is not 'neither': + raise ValueError('extend kwarg does not work yet with log scale') + else: + self.logscale = False if self.origin is not None: assert(self.origin in ['lower', 'upper', 'image']) @@ -493,7 +502,10 @@ three levels to provide boundaries for both regions. ''' if self.locator is None: - self.locator = ticker.MaxNLocator(N+1) + if self.logscale: + self.locator = ticker.LogLocator() + else: + self.locator = ticker.MaxNLocator(N+1) locator = self.locator zmax = self.zmax zmin = self.zmin @@ -503,7 +515,10 @@ if zmax >= lev[-1]: lev[-1] += zmargin if zmin <= lev[0]: - lev[0] -= zmargin + if self.logscale: + lev[0] = 0.99 * zmin + else: + lev[0] -= zmargin self._auto = True if self.filled: return lev @@ -589,6 +604,10 @@ raise TypeError("Too many arguments to %s; see help(%s)" % (fn,fn)) self.zmax = ma.maximum(z) self.zmin = ma.minimum(z) + if self.logscale and self.zmin <= 0: + z = ma.masked_where(z <= 0, z) + warnings.warn('Log scale: values of z <=0 have been masked') + self.zmin = z.min() self._auto = False if self.levels is None: if Nargs == 1 or Nargs == 3: This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
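In practice the new behaviour is triggered by passing a LogNorm (or a LogLocator) to contourf; the levels then come from a LogLocator and non-positive z values are masked with a warning. A hedged sketch with made-up data:

    import numpy as npy
    from matplotlib.pyplot import figure, show
    from matplotlib import colors

    x = y = npy.linspace(-3.0, 3.0, 100)
    X, Y = npy.meshgrid(x, y)
    Z = 1000.0 * npy.exp(-X**2 - Y**2) + 1e-3   # strictly positive, spans several decades

    fig = figure()
    ax = fig.add_subplot(111)
    cs = ax.contourf(X, Y, Z, norm=colors.LogNorm())   # auto levels, log-spaced
    fig.colorbar(cs, ax=ax)
    show()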
From: <ds...@us...> - 2007-11-02 16:37:38
Revision: 4096 http://matplotlib.svn.sourceforge.net/matplotlib/?rev=4096&view=rev Author: dsdale Date: 2007-11-02 09:37:37 -0700 (Fri, 02 Nov 2007) Log Message: ----------- commit patch 1599876, fixes to qt4agg backend and qt4 blitting demo. Thanks to Phil Thompson. Modified Paths: -------------- trunk/matplotlib/CHANGELOG trunk/matplotlib/examples/animation_blit_qt4.py trunk/matplotlib/lib/matplotlib/backends/backend_qt4agg.py Modified: trunk/matplotlib/CHANGELOG =================================================================== --- trunk/matplotlib/CHANGELOG 2007-11-02 13:13:40 UTC (rev 4095) +++ trunk/matplotlib/CHANGELOG 2007-11-02 16:37:37 UTC (rev 4096) @@ -1,3 +1,6 @@ +2007-11-02 Commited Phil Thompson's patch 1599876, fixes to Qt4Agg + backend and qt4 blitting demo - DSD + 2007-10-31 Made log color scale easier to use with contourf; automatic level generation now works. - EF Modified: trunk/matplotlib/examples/animation_blit_qt4.py =================================================================== --- trunk/matplotlib/examples/animation_blit_qt4.py 2007-11-02 13:13:40 UTC (rev 4095) +++ trunk/matplotlib/examples/animation_blit_qt4.py 2007-11-02 16:37:37 UTC (rev 4096) @@ -15,10 +15,14 @@ class BlitQT(QtCore.QObject): def __init__(self): - QtCore.QObject.__init__(self, None) - self.ax = p.subplot(111) self.canvas = self.ax.figure.canvas + + # By making this a child of the canvas we make sure that it is + # destroyed first and avoids a possible exception when the user clicks + # on the window's close box. + QtCore.QObject.__init__(self, self.canvas) + self.cnt = 0 # create the initial line @@ -26,9 +30,14 @@ self.line, = p.plot(self.x, npy.sin(self.x), animated=True, lw=2) self.background = None + self.old_size = 0, 0 def timerEvent(self, evt): - if self.background is None: + # See if the size has changed since last time round. + current_size = self.ax.bbox.width(), self.ax.bbox.height() + + if self.old_size != current_size: + self.old_size = current_size self.background = self.canvas.copy_from_bbox(self.ax.bbox) # restore the clean slate background Modified: trunk/matplotlib/lib/matplotlib/backends/backend_qt4agg.py =================================================================== --- trunk/matplotlib/lib/matplotlib/backends/backend_qt4agg.py 2007-11-02 13:13:40 UTC (rev 4095) +++ trunk/matplotlib/lib/matplotlib/backends/backend_qt4agg.py 2007-11-02 16:37:37 UTC (rev 4096) @@ -6,8 +6,6 @@ import os, sys import matplotlib -from matplotlib import verbose -from matplotlib.cbook import enumerate from matplotlib.figure import Figure from backend_agg import FigureCanvasAgg @@ -61,7 +59,7 @@ self.drawRect = False self.rect = [] self.replot = True - self.pixmap = QtGui.QPixmap() + self.setAttribute(QtCore.Qt.WA_OpaquePaintEvent) def resizeEvent( self, e ): FigureCanvasQT.resizeEvent( self, e ) @@ -86,26 +84,25 @@ # only replot data when needed if type(self.replot) is bool: # might be a bbox for blitting - if ( self.replot ): - #stringBuffer = str( self.buffer_rgba(0,0) ) - FigureCanvasAgg.draw( self ) + if self.replot: + FigureCanvasAgg.draw(self) - # matplotlib is in rgba byte order. - # qImage wants to put the bytes into argb format and - # is in a 4 byte unsigned int. little endian system is LSB first - # and expects the bytes in reverse order (bgra). 
- if ( QtCore.QSysInfo.ByteOrder == QtCore.QSysInfo.LittleEndian ): - stringBuffer = self.renderer._renderer.tostring_bgra() - else: - stringBuffer = self.renderer._renderer.tostring_argb() - qImage = QtGui.QImage( stringBuffer, self.renderer.width, - self.renderer.height, - QtGui.QImage.Format_ARGB32) - self.pixmap = self.pixmap.fromImage( qImage ) - p.drawPixmap( QtCore.QPoint( 0, 0 ), self.pixmap ) + # matplotlib is in rgba byte order. QImage wants to put the bytes + # into argb format and is in a 4 byte unsigned int. Little endian + # system is LSB first and expects the bytes in reverse order + # (bgra). + if QtCore.QSysInfo.ByteOrder == QtCore.QSysInfo.LittleEndian: + stringBuffer = self.renderer._renderer.tostring_bgra() + else: + stringBuffer = self.renderer._renderer.tostring_argb() + qImage = QtGui.QImage(stringBuffer, self.renderer.width, + self.renderer.height, + QtGui.QImage.Format_ARGB32) + p.drawPixmap(QtCore.QPoint(0, 0), QtGui.QPixmap.fromImage(qImage)) + # draw the zoom rectangle to the QPainter - if ( self.drawRect ): + if self.drawRect: p.setPen( QtGui.QPen( QtCore.Qt.black, 1, QtCore.Qt.DotLine ) ) p.drawRect( self.rect[0], self.rect[1], self.rect[2], self.rect[3] ) @@ -117,8 +114,8 @@ reg = self.copy_from_bbox(bbox) stringBuffer = reg.to_string() qImage = QtGui.QImage(stringBuffer, w, h, QtGui.QImage.Format_ARGB32) - self.pixmap = self.pixmap.fromImage( qImage ) - p.drawPixmap(QtCore.QPoint(l, self.renderer.height-t), self.pixmap) + pixmap = QtGui.QPixmap.fromImage(qImage) + p.drawPixmap(QtCore.QPoint(l, self.renderer.height-t), pixmap) p.end() self.replot = False This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
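The core of the blitting fix is to re-capture the background whenever the axes bounding box changes size, instead of only on the first timer tick. Restated as a standalone helper below; the function name and the state dict are the editor's, and bbox.width()/bbox.height() appear as method calls because that was the bbox API at the time of the patch.

    def timer_tick(canvas, ax, line, state):
        """state carries 'old_size' and 'background' between calls."""
        current_size = ax.bbox.width(), ax.bbox.height()
        if state.get('old_size') != current_size:    # first call or window resized
            state['old_size'] = current_size
            state['background'] = canvas.copy_from_bbox(ax.bbox)

        canvas.restore_region(state['background'])   # restore the clean slate
        # ... update the animated artist here, e.g. line.set_ydata(new_y) ...
        ax.draw_artist(line)                         # redraw just the animated line
        canvas.blit(ax.bbox)                         # push only the changed region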
From: <md...@us...> - 2007-11-05 17:30:28
Revision: 4110 http://matplotlib.svn.sourceforge.net/matplotlib/?rev=4110&view=rev Author: mdboom Date: 2007-11-05 09:30:08 -0800 (Mon, 05 Nov 2007) Log Message: ----------- Make STIX fonts work. Modified Paths: -------------- trunk/matplotlib/examples/mathtext_examples.py trunk/matplotlib/lib/matplotlib/_mathtext_data.py trunk/matplotlib/lib/matplotlib/config/mplconfig.py trunk/matplotlib/lib/matplotlib/config/rcsetup.py trunk/matplotlib/lib/matplotlib/mathtext.py trunk/matplotlib/lib/matplotlib/rcsetup.py trunk/matplotlib/matplotlibrc.template Modified: trunk/matplotlib/examples/mathtext_examples.py =================================================================== --- trunk/matplotlib/examples/mathtext_examples.py 2007-11-05 16:50:11 UTC (rev 4109) +++ trunk/matplotlib/examples/mathtext_examples.py 2007-11-05 17:30:08 UTC (rev 4110) @@ -48,7 +48,8 @@ r'$\mathcal{H} = \int d \tau \left(\epsilon E^2 + \mu H^2\right)$', r'$\widehat{abc}\widetilde{def}$', r'$\Gamma \Delta \Theta \Lambda \Xi \Pi \Sigma \Upsilon \Phi \Psi \Omega$', - r'$\alpha \beta \gamma \delta \epsilon \zeta \eta \theta \iota \lambda \mu \nu \xi \pi \kappa \rho \sigma \tau \upsilon \phi \chi \psi$' + r'$\alpha \beta \gamma \delta \epsilon \zeta \eta \theta \iota \lambda \mu \nu \xi \pi \kappa \rho \sigma \tau \upsilon \phi \chi \psi$', + ur'Generic symbol: $\u23ce \mathrm{\ue0f2}$' ] from pylab import * Modified: trunk/matplotlib/lib/matplotlib/_mathtext_data.py =================================================================== --- trunk/matplotlib/lib/matplotlib/_mathtext_data.py 2007-11-05 16:50:11 UTC (rev 4109) +++ trunk/matplotlib/lib/matplotlib/_mathtext_data.py 2007-11-05 17:30:08 UTC (rev 4110) @@ -1755,7 +1755,10 @@ uni2type1 = dict([(v,k) for k,v in type12uni.items()]) -tex2uni = {'doteq': 8784, +tex2uni = { +'widehat': 0x0302, +'widetilde': 0x0303, +'doteq': 8784, 'partial': 8706, 'gg': 8811, 'asymp': 8781, Modified: trunk/matplotlib/lib/matplotlib/config/mplconfig.py =================================================================== --- trunk/matplotlib/lib/matplotlib/config/mplconfig.py 2007-11-05 16:50:11 UTC (rev 4109) +++ trunk/matplotlib/lib/matplotlib/config/mplconfig.py 2007-11-05 17:30:08 UTC (rev 4110) @@ -166,7 +166,7 @@ it = T.Trait('serif:oblique' , mplT.FontconfigPatternHandler()) bf = T.Trait('serif:bold' , mplT.FontconfigPatternHandler()) sf = T.Trait('sans' , mplT.FontconfigPatternHandler()) - use_cm = T.true + fontset = T.Trait('cm', 'cm', 'stix', 'custom') fallback_to_cm = T.true class axes(TConfig): @@ -344,7 +344,7 @@ 'mathtext.it' : (self.tconfig.mathtext, 'it'), 'mathtext.bf' : (self.tconfig.mathtext, 'bf'), 'mathtext.sf' : (self.tconfig.mathtext, 'sf'), - 'mathtext.use_cm' : (self.tconfig.mathtext, 'use_cm'), + 'mathtext.fontset' : (self.tconfig.mathtext, 'fontset'), 'mathtext.fallback_to_cm' : (self.tconfig.mathtext, 'fallback_to_cm'), 'image.aspect' : (self.tconfig.image, 'aspect'), Modified: trunk/matplotlib/lib/matplotlib/config/rcsetup.py =================================================================== --- trunk/matplotlib/lib/matplotlib/config/rcsetup.py 2007-11-05 16:50:11 UTC (rev 4109) +++ trunk/matplotlib/lib/matplotlib/config/rcsetup.py 2007-11-05 17:30:08 UTC (rev 4110) @@ -203,6 +203,8 @@ parse_fontconfig_pattern(s) return s +validate_fontset = ValidateInStrings('fontset', ['cm', 'stix', 'custom']) + validate_verbose = ValidateInStrings('verbose',[ 'silent', 'helpful', 'debug', 'debug-annoying', ]) @@ -365,7 +367,7 @@ 'mathtext.it' : ['serif:italic', 
validate_font_properties], 'mathtext.bf' : ['serif:bold', validate_font_properties], 'mathtext.sf' : ['sans\-serif', validate_font_properties], - 'mathtext.use_cm' : [True, validate_bool], + 'mathtext.fontset' : ['cm', validate_fontset], 'mathtext.fallback_to_cm' : [True, validate_bool], 'image.aspect' : ['equal', validate_aspect], # equal, auto, a number Modified: trunk/matplotlib/lib/matplotlib/mathtext.py =================================================================== --- trunk/matplotlib/lib/matplotlib/mathtext.py 2007-11-05 16:50:11 UTC (rev 4109) +++ trunk/matplotlib/lib/matplotlib/mathtext.py 2007-11-05 17:30:08 UTC (rev 4110) @@ -528,7 +528,7 @@ A generic base class for all font setups that use Truetype fonts (through ft2font) """ - basepath = os.path.join( get_data_path(), 'fonts', 'ttf' ) + basepath = os.path.join( get_data_path(), 'fonts' ) class CachedFont: def __init__(self, font): @@ -665,7 +665,7 @@ TruetypeFonts.__init__(self, *args, **kwargs) if not len(self.fontmap): for key, val in self._fontmap.iteritems(): - fullpath = os.path.join(self.basepath, val + ".ttf") + fullpath = os.path.join(self.basepath, 'ttf', val + ".ttf") self.fontmap[key] = fullpath self.fontmap[val] = fullpath @@ -750,7 +750,8 @@ """ fontmap = {} - + use_cmex = True + def __init__(self, *args, **kwargs): # This must come first so the backend's owner is set correctly if rcParams['mathtext.fallback_to_cm']: @@ -772,18 +773,20 @@ def _get_glyph(self, fontname, sym, fontsize): found_symbol = False - uniindex = latex_to_cmex.get(sym) - if uniindex is not None: - fontname = 'ex' - found_symbol = True - else: + if self.use_cmex: + uniindex = latex_to_cmex.get(sym) + if uniindex is not None: + fontname = 'ex' + found_symbol = True + + if not found_symbol: try: uniindex = get_unicode_index(sym) found_symbol = True except ValueError: uniindex = ord('?') warn("No TeX to unicode mapping for '%s'" % - sym.encode('ascii', 'replace'), + sym.encode('ascii', 'backslashreplace'), MathTextWarning) # Only characters in the "Letter" class should be italicized in 'it' @@ -804,7 +807,7 @@ except KeyError: warn("Font '%s' does not have a glyph for '%s'" % (cached_font.font.postscript_name, - sym.encode('ascii', 'replace')), + sym.encode('ascii', 'backslashreplace')), MathTextWarning) found_symbol = False @@ -815,6 +818,7 @@ return self.cm_fallback._get_glyph(fontname, sym, fontsize) else: warn("Substituting with a dummy symbol.", MathTextWarning) + fontname = 'rm' new_fontname = fontname cached_font = self._get_font(fontname) uniindex = 0xA4 # currency character, for lack of anything better @@ -829,6 +833,71 @@ return self.cm_fallback.get_sized_alternatives_for_symbol( fontname, sym) return [(fontname, sym)] + +class StixFonts(UnicodeFonts): + _fontmap = { 'rm' : ('STIXGeneral', 'otf'), + 'tt' : ('VeraMono', 'ttf'), + 'it' : ('STIXGeneralItalic', 'otf'), + 'bf' : ('STIXGeneralBol', 'otf'), + 'sf' : ('Vera', 'ttf'), + 'nonunirm' : ('STIXNonUni', 'otf'), + 'nonuniit' : ('STIXNonUniIta', 'otf'), + 'nonunibf' : ('STIXNonUniBol', 'otf'), + + 0 : ('STIXGeneral', 'otf'), + 1 : ('STIXSiz1Sym', 'otf'), + 2 : ('STIXSiz2Sym', 'otf'), + 3 : ('STIXSiz3Sym', 'otf'), + 4 : ('STIXSiz4Sym', 'otf'), + 5 : ('STIXSiz5Sym', 'otf') + } + fontmap = {} + use_cmex = False + cm_fallback = False + + def __init__(self, *args, **kwargs): + TruetypeFonts.__init__(self, *args, **kwargs) + if not len(self.fontmap): + for key, (name, ext) in self._fontmap.iteritems(): + fullpath = os.path.join(self.basepath, ext, name + "." 
+ ext) + self.fontmap[key] = fullpath + self.fontmap[name] = fullpath + + def _get_glyph(self, fontname, sym, fontsize): + # Handle calligraphic letters + if fontname == 'cal': + if len(sym) != 1 or ord(sym) < ord('A') or ord(sym) > ord('Z'): + raise ValueError(r"Sym '%s' is not available in \mathcal font" % sym) + fontname = 'nonuniit' + sym = unichr(ord(sym) + 0xe22d - ord('A')) + + # Handle private use area glyphs + if (fontname in ('it', 'rm', 'bf') and + len(sym) == 1 and ord(sym) >= 0xe000 and ord(sym) <= 0xf8ff): + fontname = 'nonuni' + fontname + + return UnicodeFonts._get_glyph(self, fontname, sym, fontsize) + + _size_alternatives = {} + def get_sized_alternatives_for_symbol(self, fontname, sym): + alternatives = self._size_alternatives.get(sym) + if alternatives: + return alternatives + + alternatives = [] + try: + uniindex = get_unicode_index(sym) + except ValueError: + return [(fontname, sym)] + + for i in range(6): + cached_font = self._get_font(i) + glyphindex = cached_font.charmap.get(uniindex) + if glyphindex is not None: + alternatives.append((i, unichr(uniindex))) + + self._size_alternatives[sym] = alternatives + return alternatives class StandardPsFonts(Fonts): """ @@ -1091,7 +1160,7 @@ Node.__init__(self) self.c = c self.font_output = state.font_output - assert isinstance(state.font, str) + assert isinstance(state.font, (str, unicode, int)) self.font = state.font self.fontsize = state.fontsize self.dpi = state.dpi @@ -1876,7 +1945,7 @@ ) | Error(r"Expected \hspace{n}")) ).setParseAction(self.customspace).setName('customspace') - symbol =(Regex(r"([a-zA-Z0-9 +\-*/<>=:,.;!'@()])|(\\[%${}\[\]])") + symbol =(Regex(ur"([a-zA-Z0-9 +\-*/<>=:,.;!'@()\u0080-\uffff])|(\\[%${}\[\]])") | Combine( bslash + oneOf(tex2uni.keys()) @@ -2508,10 +2577,15 @@ font_output = StandardPsFonts(prop) else: backend = self._backend_mapping[self._output]() - if rcParams['mathtext.use_cm']: + fontset = rcParams['mathtext.fontset'] + if fontset == 'cm': font_output = BakomaFonts(prop, backend) + elif fontset == 'stix': + font_output = StixFonts(prop, backend) + elif fontset == 'custom': + font_output = UnicodeFonts(prop, backend) else: - font_output = UnicodeFonts(prop, backend) + raise ValueError("mathtext.fontset must be either 'cm', 'stix', or 'custom'") fontsize = prop.get_size_in_points() Modified: trunk/matplotlib/lib/matplotlib/rcsetup.py =================================================================== --- trunk/matplotlib/lib/matplotlib/rcsetup.py 2007-11-05 16:50:11 UTC (rev 4109) +++ trunk/matplotlib/lib/matplotlib/rcsetup.py 2007-11-05 17:30:08 UTC (rev 4110) @@ -202,7 +202,9 @@ def validate_font_properties(s): parse_fontconfig_pattern(s) return s - + +validate_fontset = ValidateInStrings('fontset', ['cm', 'stix', 'custom']) + validate_verbose = ValidateInStrings('verbose',[ 'silent', 'helpful', 'debug', 'debug-annoying', ]) @@ -365,7 +367,7 @@ 'mathtext.it' : ['serif:italic', validate_font_properties], 'mathtext.bf' : ['serif:bold', validate_font_properties], 'mathtext.sf' : ['sans\-serif', validate_font_properties], - 'mathtext.use_cm' : [True, validate_bool], + 'mathtext.fontset' : [True, validate_fontset], 'mathtext.fallback_to_cm' : [True, validate_bool], 'image.aspect' : ['equal', validate_aspect], # equal, auto, a number Modified: trunk/matplotlib/matplotlibrc.template =================================================================== --- trunk/matplotlib/matplotlibrc.template 2007-11-05 16:50:11 UTC (rev 4109) +++ trunk/matplotlib/matplotlibrc.template 2007-11-05 17:30:08 UTC 
(rev 4110) @@ -160,20 +160,18 @@ # processing. # The following settings allow you to select the fonts in math mode. -# They map from a TeX font name to a set of arguments for the FontProperties constructor. -# (See FontProperties for more details). -# These settings are only used if mathtext.use_cm is False, otherwise, the -# Bakoma TeX Computer Modern fonts are used. +# They map from a TeX font name to a fontconfig font pattern. +# These settings are only used if mathtext.fontset is 'custom'. #mathtext.cal : cursive #mathtext.rm : serif #mathtext.tt : monospace #mathtext.it : serif:italic #mathtext.bf : serif:bold #mathtext.sf : sans -#mathtext.use_cm : True +#mathtext.fontset : cm # Should be 'cm' (Computer Modern), 'stix' or 'custom' #mathtext.fallback_to_cm : True # When True, use symbols from the Computer Modern # fonts when a symbol can not be found in one of - # the user-specified math fonts. + # the custom math fonts. ### AXES # default face and edge color, default tick sizes, This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
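Selecting the new font set from user code is a one-line rc change, since mathtext.use_cm has been replaced by mathtext.fontset ('cm', 'stix' or 'custom'). For example:

    from matplotlib import rcParams
    rcParams['mathtext.fontset'] = 'stix'     # replaces mathtext.use_cm

    from matplotlib.pyplot import figure, show

    fig = figure()
    ax = fig.add_subplot(111)
    ax.set_title(r'$\Gamma \Delta \Theta \; \alpha \beta \gamma \sqrt{x^2 + 1}$')
    show()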
From: <ef...@us...> - 2007-11-05 20:13:03
Revision: 4114 http://matplotlib.svn.sourceforge.net/matplotlib/?rev=4114&view=rev Author: efiring Date: 2007-11-05 12:13:00 -0800 (Mon, 05 Nov 2007) Log Message: ----------- Make safezip accept more args; quadmesh_demo cleanup Modified Paths: -------------- trunk/matplotlib/examples/quadmesh_demo.py trunk/matplotlib/lib/matplotlib/cbook.py Modified: trunk/matplotlib/examples/quadmesh_demo.py =================================================================== --- trunk/matplotlib/examples/quadmesh_demo.py 2007-11-05 19:54:49 UTC (rev 4113) +++ trunk/matplotlib/examples/quadmesh_demo.py 2007-11-05 20:13:00 UTC (rev 4114) @@ -2,23 +2,26 @@ """ pcolormesh uses a QuadMesh, a faster generalization of pcolor, but with some restrictions. + +This demo illustrates a bug in quadmesh with masked data. """ -import numpy as nx -from pylab import figure,show +import numpy as npy +from matplotlib.pyplot import figure, show from matplotlib import cm, colors +from matplotlib.numerix import npyma as ma n = 56 -x = nx.linspace(-1.5,1.5,n) -X,Y = nx.meshgrid(x,x); -Qx = nx.cos(Y) - nx.cos(X) -Qz = nx.sin(Y) + nx.sin(X) +x = npy.linspace(-1.5,1.5,n) +X,Y = npy.meshgrid(x,x); +Qx = npy.cos(Y) - npy.cos(X) +Qz = npy.sin(Y) + npy.sin(X) Qx = (Qx + 1.1) -Z = nx.sqrt(X**2 + Y**2)/5; -Z = (Z - nx.amin(Z)) / (nx.amax(Z) - nx.amin(Z)) +Z = npy.sqrt(X**2 + Y**2)/5; +Z = (Z - Z.min()) / (Z.max() - Z.min()) # The color array can include masked values: -Zm = nx.ma.masked_where(nx.fabs(Qz) < 0.5*nx.amax(Qz), Z) +Zm = ma.masked_where(npy.fabs(Qz) < 0.5*npy.amax(Qz), Z) fig = figure() Modified: trunk/matplotlib/lib/matplotlib/cbook.py =================================================================== --- trunk/matplotlib/lib/matplotlib/cbook.py 2007-11-05 19:54:49 UTC (rev 4113) +++ trunk/matplotlib/lib/matplotlib/cbook.py 2007-11-05 20:13:00 UTC (rev 4114) @@ -557,7 +557,7 @@ # expressions. However, this function accounted for almost 30% of # matplotlib startup time, so it is worthy of optimization at all # costs. - + if not s: # includes case of s is None return '' @@ -576,7 +576,7 @@ if unindent is None: unindent = re.compile("\n\r? {0,%d}" % nshift) _dedent_regex[nshift] = unindent - + result = unindent.sub("\n", s).strip() return result @@ -844,15 +844,15 @@ return mem +_safezip_msg = 'In safezip, len(args[0])=%d but len(args[%d])=%d' +def safezip(*args): + 'make sure args are equal len before zipping' + Nx = len(args[0]) + for i, arg in enumerate(args[1:]): + if len(arg) != Nx: + raise ValueError(_safezip_msg % (Nx, i+1, len(arg))) + return zip(*args) -def safezip(x, y): - 'make sure x and y are equal len before zipping' - Nx = len(x) - Ny = len(y) - if Nx!=Ny: - raise RuntimeError('x and y must be equal length; found len(x)=%d and len(y)=%d'% - (Nx, Ny)) - return zip(x, y) class MemoryMonitor: def __init__(self, nmax=20000): self._nmax = nmax This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
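The generalized safezip now accepts any number of sequences and names the offending argument in the error message, raising ValueError rather than the old RuntimeError:

    from matplotlib.cbook import safezip

    print safezip([1, 2, 3], [4, 5, 6], [7, 8, 9])
    # -> [(1, 4, 7), (2, 5, 8), (3, 6, 9)]

    try:
        safezip([1, 2, 3], [4, 5])
    except ValueError, e:
        print e   # In safezip, len(args[0])=3 but len(args[1])=2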
From: <md...@us...> - 2007-11-06 18:32:32
Revision: 4126 http://matplotlib.svn.sourceforge.net/matplotlib/?rev=4126&view=rev Author: mdboom Date: 2007-11-06 10:32:30 -0800 (Tue, 06 Nov 2007) Log Message: ----------- Prevent errors when using OpenType CFF fonts. This means turning off subsetting on backend_pdf, and raising an exception in backend_ps. Modified Paths: -------------- trunk/matplotlib/examples/mathtext_examples.py trunk/matplotlib/lib/matplotlib/backends/backend_pdf.py trunk/matplotlib/lib/matplotlib/backends/backend_ps.py trunk/matplotlib/lib/matplotlib/font_manager.py trunk/matplotlib/lib/matplotlib/mathtext.py Modified: trunk/matplotlib/examples/mathtext_examples.py =================================================================== --- trunk/matplotlib/examples/mathtext_examples.py 2007-11-06 18:23:13 UTC (rev 4125) +++ trunk/matplotlib/examples/mathtext_examples.py 2007-11-06 18:32:30 UTC (rev 4126) @@ -49,7 +49,7 @@ r'$\widehat{abc}\widetilde{def}$', r'$\Gamma \Delta \Theta \Lambda \Xi \Pi \Sigma \Upsilon \Phi \Psi \Omega$', r'$\alpha \beta \gamma \delta \epsilon \zeta \eta \theta \iota \lambda \mu \nu \xi \pi \kappa \rho \sigma \tau \upsilon \phi \chi \psi$', - ur'Generic symbol: $\u23ce \mathrm{\ue0f2}$' + ur'Generic symbol: $\u23ce \mathrm{\ue0f2 \U0001D538}$' ] from pylab import * Modified: trunk/matplotlib/lib/matplotlib/backends/backend_pdf.py =================================================================== --- trunk/matplotlib/lib/matplotlib/backends/backend_pdf.py 2007-11-06 18:23:13 UTC (rev 4125) +++ trunk/matplotlib/lib/matplotlib/backends/backend_pdf.py 2007-11-06 18:32:30 UTC (rev 4126) @@ -9,6 +9,7 @@ import re import sys import time +import warnings import zlib import numpy as npy @@ -25,7 +26,7 @@ FigureManagerBase, FigureCanvasBase from matplotlib.cbook import Bunch, enumerate, is_string_like, reverse_dict, get_realpath_and_stat from matplotlib.figure import Figure -from matplotlib.font_manager import findfont +from matplotlib.font_manager import findfont, is_opentype_cff_font from matplotlib.afm import AFM import matplotlib.type1font as type1font import matplotlib.dviread as dviread @@ -786,7 +787,8 @@ glyph = font.load_char(ccode, flags=LOAD_NO_HINTING) # Why divided by 3.0 ??? Wish I knew... MGD widths.append((ccode, cvt(glyph.horiAdvance) / 3.0)) - cid_to_gid_map[ccode] = unichr(gind) + if ccode < 65536: + cid_to_gid_map[ccode] = unichr(gind) max_ccode = max(ccode, max_ccode) widths.sort() cid_to_gid_map = cid_to_gid_map[:max_ccode + 1] @@ -876,6 +878,15 @@ 'StemV' : 0 # ??? } + # The font subsetting to a Type 3 font does not work for + # OpenType (.otf) that embed a Postscript CFF font, so avoid that -- + # save as a (non-subsetted) Type 42 font instead. + if is_opentype_cff_font(filename): + fonttype = 42 + warnings.warn(("'%s' can not be subsetted into a Type 3 font. " + + "The entire font will be embedded in the output.") % + os.path.basename(filename)) + if fonttype == 3: return embedTTFType3(font, characters, descriptor) elif fonttype == 42: @@ -1134,10 +1145,6 @@ self.truetype_font_cache = {} self.afm_font_cache = {} self.file.used_characters = self.used_characters = {} - if rcParams['pdf.fonttype'] == 3: - self.encode_string = self.encode_string_type3 - else: - self.encode_string = self.encode_string_type42 self.mathtext_parser = MathTextParser("Pdf") self.image_magnification = dpi/72.0 self.tex_font_map = None @@ -1344,7 +1351,7 @@ # When using Type 3 fonts, we can't use character codes higher # than 255, so we use the "Do" command to render those # instead. 
- fonttype = rcParams['pdf.fonttype'] + global_fonttype = rcParams['pdf.fonttype'] # Set up a global transformation matrix for the whole math expression a = angle / 180.0 * pi @@ -1357,6 +1364,11 @@ prev_font = None, None oldx, oldy = 0, 0 for ox, oy, fontname, fontsize, num, symbol_name in glyphs: + if is_opentype_cff_font(fontname): + fonttype = 42 + else: + fonttype = global_fonttype + if fonttype == 42 or num <= 255: self._setup_textpos(ox, oy, 0, oldx, oldy) oldx, oldy = ox, oy @@ -1364,14 +1376,19 @@ self.file.output(self.file.fontName(fontname), fontsize, Op.selectfont) prev_font = fontname, fontsize - self.file.output(self.encode_string(unichr(num)), Op.show) + self.file.output(self.encode_string(unichr(num), fonttype), Op.show) self.file.output(Op.end_text) # If using Type 3 fonts, render all of the two-byte characters # as XObjects using the 'Do' command. - if fonttype == 3: + if global_fonttype == 3: for ox, oy, fontname, fontsize, num, symbol_name in glyphs: - if num > 255: + if is_opentype_cff_font(fontname): + fonttype = 42 + else: + fonttype = global_fonttype + + if fonttype == 3 and num > 255: self.file.output(Op.gsave, 0.001 * fontsize, 0, 0, 0.001 * fontsize, @@ -1471,10 +1488,9 @@ self.draw_polygon(boxgc, gc._rgb, ((x1,y1), (x2,y2), (x3,y3), (x4,y4))) - def encode_string_type3(self, s): - return s.encode('cp1252', 'replace') - - def encode_string_type42(self, s): + def encode_string(self, s, fonttype): + if fonttype == 3: + return s.encode('cp1252', 'replace') return s.encode('utf-16be', 'replace') def draw_text(self, gc, x, y, s, prop, angle, ismath=False): @@ -1500,20 +1516,29 @@ font = self._get_font_afm(prop) l, b, w, h = font.get_str_bbox(s) y -= b * fontsize / 1000 + fonttype = 42 else: font = self._get_font_ttf(prop) self.track_characters(font, s) font.set_text(s, 0.0, flags=LOAD_NO_HINTING) y += font.get_descent() / 64.0 + fonttype = rcParams['pdf.fonttype'] + + # We can't subset all OpenType fonts, so switch to Type 42 + # in that case. + if is_opentype_cff_font(font.fname): + fonttype = 42 + def check_simple_method(s): """Determine if we should use the simple or woven method - to output this text, and chunks the string into 1-bit and - 2-bit sections if necessary.""" + to output this text, and chunks the string into 1-byte and + 2-byte sections if necessary.""" use_simple_method = True chunks = [] - if rcParams['pdf.fonttype'] == 3: - if not isinstance(s, str) and len(s) != 0: + + if not rcParams['pdf.use14corefonts']: + if fonttype == 3 and not isinstance(s, str) and len(s) != 0: # Break the string into chunks where each chunk is either # a string of chars <= 255, or a single character > 255. 
s = unicode(s) @@ -1537,7 +1562,7 @@ prop.get_size_in_points(), Op.selectfont) self._setup_textpos(x, y, angle) - self.file.output(self.encode_string(s), Op.show, Op.end_text) + self.file.output(self.encode_string(s, fonttype), Op.show, Op.end_text) def draw_text_woven(chunks): """Outputs text using the woven method, alternating @@ -1567,7 +1592,7 @@ for chunk_type, chunk in chunks: if mode == 1 and chunk_type == 1: self._setup_textpos(newx, 0, 0, oldx, 0, 0) - self.file.output(self.encode_string(chunk), Op.show) + self.file.output(self.encode_string(chunk, fonttype), Op.show) oldx = newx lastgind = None Modified: trunk/matplotlib/lib/matplotlib/backends/backend_ps.py =================================================================== --- trunk/matplotlib/lib/matplotlib/backends/backend_ps.py 2007-11-06 18:23:13 UTC (rev 4125) +++ trunk/matplotlib/lib/matplotlib/backends/backend_ps.py 2007-11-06 18:32:30 UTC (rev 4126) @@ -18,7 +18,7 @@ from matplotlib.cbook import is_string_like, izip, get_realpath_and_stat from matplotlib.figure import Figure -from matplotlib.font_manager import findfont +from matplotlib.font_manager import findfont, is_opentype_cff_font from matplotlib.ft2font import FT2Font, KERNING_DEFAULT, LOAD_NO_HINTING from matplotlib.ttconv import convert_ttf_to_ps from matplotlib.mathtext import MathTextParser @@ -1030,7 +1030,7 @@ else: self._print_figure(outfile, format, dpi, facecolor, edgecolor, orientation, isLandscape, papertype) - + def _print_figure(self, outfile, format, dpi=72, facecolor='w', edgecolor='w', orientation='portrait', isLandscape=False, papertype=None): """ @@ -1134,7 +1134,15 @@ for c in chars: gind = cmap.get(ord(c)) or 0 glyph_ids.append(gind) - convert_ttf_to_ps(font_filename, fh, rcParams['ps.fonttype'], glyph_ids) + # The ttf to ps (subsetting) support doesn't work for + # OpenType fonts that are Postscript inside (like the + # STIX fonts). This will simply turn that off to avoid + # errors. + if is_opentype_cff_font(font_filename): + raise RuntimeError("OpenType CFF fonts can not be saved using the internal Postscript backend at this time.\nConsider using the Cairo backend.") + else: + fonttype = rcParams['ps.fonttype'] + convert_ttf_to_ps(font_filename, fh, rcParams['ps.fonttype'], glyph_ids) print >>fh, "end" print >>fh, "%%EndProlog" Modified: trunk/matplotlib/lib/matplotlib/font_manager.py =================================================================== --- trunk/matplotlib/lib/matplotlib/font_manager.py 2007-11-06 18:23:13 UTC (rev 4125) +++ trunk/matplotlib/lib/matplotlib/font_manager.py 2007-11-06 18:32:30 UTC (rev 4126) @@ -1059,6 +1059,25 @@ return self.defaultFont return fname + +_is_opentype_cff_font_cache = {} +def is_opentype_cff_font(filename): + """ + Returns True if the given font is a Postscript Compact Font Format + Font embedded in an OpenType wrapper. 
+ """ + if os.path.splitext(filename)[1].lower() == '.otf': + result = _is_opentype_cff_font_cache.get(filename) + if result is None: + fd = open(filename, 'rb') + tag = fd.read(4) + fd.close() + result = (tag == 'OTTO') + _is_opentype_cff_font_cache[filename] = result + return result + return False + + if USE_FONTCONFIG and sys.platform != 'win32': import re Modified: trunk/matplotlib/lib/matplotlib/mathtext.py =================================================================== --- trunk/matplotlib/lib/matplotlib/mathtext.py 2007-11-06 18:23:13 UTC (rev 4125) +++ trunk/matplotlib/lib/matplotlib/mathtext.py 2007-11-06 18:32:30 UTC (rev 4126) @@ -1945,7 +1945,8 @@ ) | Error(r"Expected \hspace{n}")) ).setParseAction(self.customspace).setName('customspace') - symbol =(Regex(ur"([a-zA-Z0-9 +\-*/<>=:,.;!'@()\u0080-\uffff])|(\\[%${}\[\]])") + unicode_range = u"\U00000080-\U0001ffff" + symbol =(Regex(UR"([a-zA-Z0-9 +\-*/<>=:,.;!'@()%s])|(\\[%%${}\[\]])" % unicode_range) | Combine( bslash + oneOf(tex2uni.keys()) This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
From: <ds...@us...> - 2007-11-08 00:09:21
|
Revision: 4155 http://matplotlib.svn.sourceforge.net/matplotlib/?rev=4155&view=rev Author: dsdale Date: 2007-11-07 16:09:17 -0800 (Wed, 07 Nov 2007) Log Message: ----------- move configobj.py to lib/, install in site-packages only if required Modified Paths: -------------- trunk/matplotlib/examples/rc_traits.py trunk/matplotlib/lib/matplotlib/__init__.py trunk/matplotlib/setup.py trunk/matplotlib/setupext.py Added Paths: ----------- trunk/matplotlib/lib/configobj.py Removed Paths: ------------- trunk/matplotlib/lib/matplotlib/config/configobj.py Modified: trunk/matplotlib/examples/rc_traits.py =================================================================== --- trunk/matplotlib/examples/rc_traits.py 2007-11-07 21:20:45 UTC (rev 4154) +++ trunk/matplotlib/examples/rc_traits.py 2007-11-08 00:09:17 UTC (rev 4155) @@ -7,7 +7,7 @@ # below. import sys, os, re -import matplotlib.enthought.traits as traits +import matplotlib.enthought.traits.api as traits from matplotlib.cbook import is_string_like from matplotlib.artist import Artist Added: trunk/matplotlib/lib/configobj.py =================================================================== --- trunk/matplotlib/lib/configobj.py (rev 0) +++ trunk/matplotlib/lib/configobj.py 2007-11-08 00:09:17 UTC (rev 4155) @@ -0,0 +1,2279 @@ +# configobj.py +# A config file reader/writer that supports nested sections in config files. +# Copyright (C) 2005-2006 Michael Foord, Nicola Larosa +# E-mail: fuzzyman AT voidspace DOT org DOT uk +# nico AT tekNico DOT net + +# ConfigObj 4 +# http://www.voidspace.org.uk/python/configobj.html + +# Released subject to the BSD License +# Please see http://www.voidspace.org.uk/python/license.shtml + +# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml +# For information about bugfixes, updates and support, please join the +# ConfigObj mailing list: +# http://lists.sourceforge.net/lists/listinfo/configobj-develop +# Comments, suggestions and bug reports welcome. + +from __future__ import generators + +import sys +INTP_VER = sys.version_info[:2] +if INTP_VER < (2, 2): + raise RuntimeError("Python v.2.2 or later needed") + +import os, re +compiler = None +try: + import compiler +except ImportError: + # for IronPython + pass +from types import StringTypes +from warnings import warn +try: + from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE +except ImportError: + # Python 2.2 does not have these + # UTF-8 + BOM_UTF8 = '\xef\xbb\xbf' + # UTF-16, little endian + BOM_UTF16_LE = '\xff\xfe' + # UTF-16, big endian + BOM_UTF16_BE = '\xfe\xff' + if sys.byteorder == 'little': + # UTF-16, native endianness + BOM_UTF16 = BOM_UTF16_LE + else: + # UTF-16, native endianness + BOM_UTF16 = BOM_UTF16_BE + +# A dictionary mapping BOM to +# the encoding to decode with, and what to set the +# encoding attribute to. +BOMS = { + BOM_UTF8: ('utf_8', None), + BOM_UTF16_BE: ('utf16_be', 'utf_16'), + BOM_UTF16_LE: ('utf16_le', 'utf_16'), + BOM_UTF16: ('utf_16', 'utf_16'), + } +# All legal variants of the BOM codecs. +# TODO: the list of aliases is not meant to be exhaustive, is there a +# better way ? +BOM_LIST = { + 'utf_16': 'utf_16', + 'u16': 'utf_16', + 'utf16': 'utf_16', + 'utf-16': 'utf_16', + 'utf16_be': 'utf16_be', + 'utf_16_be': 'utf16_be', + 'utf-16be': 'utf16_be', + 'utf16_le': 'utf16_le', + 'utf_16_le': 'utf16_le', + 'utf-16le': 'utf16_le', + 'utf_8': 'utf_8', + 'u8': 'utf_8', + 'utf': 'utf_8', + 'utf8': 'utf_8', + 'utf-8': 'utf_8', + } + +# Map of encodings to the BOM to write. 
+BOM_SET = { + 'utf_8': BOM_UTF8, + 'utf_16': BOM_UTF16, + 'utf16_be': BOM_UTF16_BE, + 'utf16_le': BOM_UTF16_LE, + None: BOM_UTF8 + } + +try: + from validate import VdtMissingValue +except ImportError: + VdtMissingValue = None + +try: + enumerate +except NameError: + def enumerate(obj): + """enumerate for Python 2.2.""" + i = -1 + for item in obj: + i += 1 + yield i, item + +try: + True, False +except NameError: + True, False = 1, 0 + + +__version__ = '4.4.0' + +__revision__ = '$Id: configobj.py 156 2006-01-31 14:57:08Z fuzzyman $' + +__docformat__ = "restructuredtext en" + +__all__ = ( + '__version__', + 'DEFAULT_INDENT_TYPE', + 'DEFAULT_INTERPOLATION', + 'ConfigObjError', + 'NestingError', + 'ParseError', + 'DuplicateError', + 'ConfigspecError', + 'ConfigObj', + 'SimpleVal', + 'InterpolationError', + 'InterpolationLoopError', + 'MissingInterpolationOption', + 'RepeatSectionError', + 'UnreprError', + 'UnknownType', + '__docformat__', + 'flatten_errors', +) + +DEFAULT_INTERPOLATION = 'configparser' +DEFAULT_INDENT_TYPE = ' ' +MAX_INTERPOL_DEPTH = 10 + +OPTION_DEFAULTS = { + 'interpolation': True, + 'raise_errors': False, + 'list_values': True, + 'create_empty': False, + 'file_error': False, + 'configspec': None, + 'stringify': True, + # option may be set to one of ('', ' ', '\t') + 'indent_type': None, + 'encoding': None, + 'default_encoding': None, + 'unrepr': False, + 'write_empty_values': False, +} + + +def getObj(s): + s = "a=" + s + if compiler is None: + raise ImportError('compiler module not available') + p = compiler.parse(s) + return p.getChildren()[1].getChildren()[0].getChildren()[1] + +class UnknownType(Exception): + pass + +class Builder: + + def build(self, o): + m = getattr(self, 'build_' + o.__class__.__name__, None) + if m is None: + raise UnknownType(o.__class__.__name__) + return m(o) + + def build_List(self, o): + return map(self.build, o.getChildren()) + + def build_Const(self, o): + return o.value + + def build_Dict(self, o): + d = {} + i = iter(map(self.build, o.getChildren())) + for el in i: + d[el] = i.next() + return d + + def build_Tuple(self, o): + return tuple(self.build_List(o)) + + def build_Name(self, o): + if o.name == 'None': + return None + if o.name == 'True': + return True + if o.name == 'False': + return False + + # An undefinted Name + raise UnknownType('Undefined Name') + + def build_Add(self, o): + real, imag = map(self.build_Const, o.getChildren()) + try: + real = float(real) + except TypeError: + raise UnknownType('Add') + if not isinstance(imag, complex) or imag.real != 0.0: + raise UnknownType('Add') + return real+imag + + def build_Getattr(self, o): + parent = self.build(o.expr) + return getattr(parent, o.attrname) + + def build_UnarySub(self, o): + return -self.build_Const(o.getChildren()[0]) + + def build_UnaryAdd(self, o): + return self.build_Const(o.getChildren()[0]) + +def unrepr(s): + if not s: + return s + return Builder().build(getObj(s)) + +def _splitlines(instring): + """Split a string on lines, without losing line endings or truncating.""" + + +class ConfigObjError(SyntaxError): + """ + This is the base class for all errors that ConfigObj raises. + It is a subclass of SyntaxError. + """ + def __init__(self, message='', line_number=None, line=''): + self.line = line + self.line_number = line_number + self.message = message + SyntaxError.__init__(self, message) + +class NestingError(ConfigObjError): + """ + This error indicates a level of nesting that doesn't match. 
+ """ + +class ParseError(ConfigObjError): + """ + This error indicates that a line is badly written. + It is neither a valid ``key = value`` line, + nor a valid section marker line. + """ + +class DuplicateError(ConfigObjError): + """ + The keyword or section specified already exists. + """ + +class ConfigspecError(ConfigObjError): + """ + An error occured whilst parsing a configspec. + """ + +class InterpolationError(ConfigObjError): + """Base class for the two interpolation errors.""" + +class InterpolationLoopError(InterpolationError): + """Maximum interpolation depth exceeded in string interpolation.""" + + def __init__(self, option): + InterpolationError.__init__( + self, + 'interpolation loop detected in value "%s".' % option) + +class RepeatSectionError(ConfigObjError): + """ + This error indicates additional sections in a section with a + ``__many__`` (repeated) section. + """ + +class MissingInterpolationOption(InterpolationError): + """A value specified for interpolation was missing.""" + + def __init__(self, option): + InterpolationError.__init__( + self, + 'missing option "%s" in interpolation.' % option) + +class UnreprError(ConfigObjError): + """An error parsing in unrepr mode.""" + + +class InterpolationEngine(object): + """ + A helper class to help perform string interpolation. + + This class is an abstract base class; its descendants perform + the actual work. + """ + + # compiled regexp to use in self.interpolate() + _KEYCRE = re.compile(r"%\(([^)]*)\)s") + + def __init__(self, section): + # the Section instance that "owns" this engine + self.section = section + + def interpolate(self, key, value): + def recursive_interpolate(key, value, section, backtrail): + """The function that does the actual work. + + ``value``: the string we're trying to interpolate. + ``section``: the section in which that string was found + ``backtrail``: a dict to keep track of where we've been, + to detect and prevent infinite recursion loops + + This is similar to a depth-first-search algorithm. + """ + # Have we been here already? + if backtrail.has_key((key, section.name)): + # Yes - infinite loop detected + raise InterpolationLoopError(key) + # Place a marker on our backtrail so we won't come back here again + backtrail[(key, section.name)] = 1 + + # Now start the actual work + match = self._KEYCRE.search(value) + while match: + # The actual parsing of the match is implementation-dependent, + # so delegate to our helper function + k, v, s = self._parse_match(match) + if k is None: + # That's the signal that no further interpolation is needed + replacement = v + else: + # Further interpolation may be needed to obtain final value + replacement = recursive_interpolate(k, v, s, backtrail) + # Replace the matched string with its final value + start, end = match.span() + value = ''.join((value[:start], replacement, value[end:])) + new_search_start = start + len(replacement) + # Pick up the next interpolation key, if any, for next time + # through the while loop + match = self._KEYCRE.search(value, new_search_start) + + # Now safe to come back here again; remove marker from backtrail + del backtrail[(key, section.name)] + + return value + + # Back in interpolate(), all we have to do is kick off the recursive + # function with appropriate starting values + value = recursive_interpolate(key, value, self.section, {}) + return value + + def _fetch(self, key): + """Helper function to fetch values from owning section. + + Returns a 2-tuple: the value, and the section where it was found. 
+ """ + # switch off interpolation before we try and fetch anything ! + save_interp = self.section.main.interpolation + self.section.main.interpolation = False + + # Start at section that "owns" this InterpolationEngine + current_section = self.section + while True: + # try the current section first + val = current_section.get(key) + if val is not None: + break + # try "DEFAULT" next + val = current_section.get('DEFAULT', {}).get(key) + if val is not None: + break + # move up to parent and try again + # top-level's parent is itself + if current_section.parent is current_section: + # reached top level, time to give up + break + current_section = current_section.parent + + # restore interpolation to previous value before returning + self.section.main.interpolation = save_interp + if val is None: + raise MissingInterpolationOption(key) + return val, current_section + + def _parse_match(self, match): + """Implementation-dependent helper function. + + Will be passed a match object corresponding to the interpolation + key we just found (e.g., "%(foo)s" or "$foo"). Should look up that + key in the appropriate config file section (using the ``_fetch()`` + helper function) and return a 3-tuple: (key, value, section) + + ``key`` is the name of the key we're looking for + ``value`` is the value found for that key + ``section`` is a reference to the section where it was found + + ``key`` and ``section`` should be None if no further + interpolation should be performed on the resulting value + (e.g., if we interpolated "$$" and returned "$"). + """ + raise NotImplementedError + + +class ConfigParserInterpolation(InterpolationEngine): + """Behaves like ConfigParser.""" + _KEYCRE = re.compile(r"%\(([^)]*)\)s") + + def _parse_match(self, match): + key = match.group(1) + value, section = self._fetch(key) + return key, value, section + + +class TemplateInterpolation(InterpolationEngine): + """Behaves like string.Template.""" + _delimiter = '$' + _KEYCRE = re.compile(r""" + \$(?: + (?P<escaped>\$) | # Two $ signs + (?P<named>[_a-z][_a-z0-9]*) | # $name format + {(?P<braced>[^}]*)} # ${name} format + ) + """, re.IGNORECASE | re.VERBOSE) + + def _parse_match(self, match): + # Valid name (in or out of braces): fetch value from section + key = match.group('named') or match.group('braced') + if key is not None: + value, section = self._fetch(key) + return key, value, section + # Escaped delimiter (e.g., $$): return single delimiter + if match.group('escaped') is not None: + # Return None for key and section to indicate it's time to stop + return None, self._delimiter, None + # Anything else: ignore completely, just return it unchanged + return None, match.group(), None + +interpolation_engines = { + 'configparser': ConfigParserInterpolation, + 'template': TemplateInterpolation, +} + +class Section(dict): + """ + A dictionary-like object that represents a section in a config file. + + It does string interpolation if the 'interpolation' attribute + of the 'main' object is set to True. + + Interpolation is tried first from this object, then from the 'DEFAULT' + section of this object, next from the parent and its 'DEFAULT' section, + and so on until the main object is reached. + + A Section will behave like an ordered dictionary - following the + order of the ``scalars`` and ``sections`` attributes. + You can use this to change the order of members. + + Iteration follows the order: scalars, then sections. 
+ """ + + def __init__(self, parent, depth, main, indict=None, name=None): + """ + * parent is the section above + * depth is the depth level of this section + * main is the main ConfigObj + * indict is a dictionary to initialise the section with + """ + if indict is None: + indict = {} + dict.__init__(self) + # used for nesting level *and* interpolation + self.parent = parent + # used for the interpolation attribute + self.main = main + # level of nesting depth of this Section + self.depth = depth + # the sequence of scalar values in this Section + self.scalars = [] + # the sequence of sections in this Section + self.sections = [] + # purely for information + self.name = name + # for comments :-) + self.comments = {} + self.inline_comments = {} + # for the configspec + self.configspec = {} + self._order = [] + self._configspec_comments = {} + self._configspec_inline_comments = {} + self._cs_section_comments = {} + self._cs_section_inline_comments = {} + # for defaults + self.defaults = [] + # + # we do this explicitly so that __setitem__ is used properly + # (rather than just passing to ``dict.__init__``) + for entry in indict: + self[entry] = indict[entry] + + def _interpolate(self, key, value): + try: + # do we already have an interpolation engine? + engine = self._interpolation_engine + except AttributeError: + # not yet: first time running _interpolate(), so pick the engine + name = self.main.interpolation + if name == True: # note that "if name:" would be incorrect here + # backwards-compatibility: interpolation=True means use default + name = DEFAULT_INTERPOLATION + name = name.lower() # so that "Template", "template", etc. all work + class_ = interpolation_engines.get(name, None) + if class_ is None: + # invalid value for self.main.interpolation + self.main.interpolation = False + return value + else: + # save reference to engine so we don't have to do this again + engine = self._interpolation_engine = class_(self) + # let the engine do the actual work + return engine.interpolate(key, value) + + def __getitem__(self, key): + """Fetch the item and do string interpolation.""" + val = dict.__getitem__(self, key) + if self.main.interpolation and isinstance(val, StringTypes): + return self._interpolate(key, val) + return val + + def __setitem__(self, key, value, unrepr=False): + """ + Correctly set a value. + + Making dictionary values Section instances. + (We have to special case 'Section' instances - which are also dicts) + + Keys must be strings. + Values need only be strings (or lists of strings) if + ``main.stringify`` is set. + + `unrepr`` must be set when setting a value to a dictionary, without + creating a new sub-section. + """ + if not isinstance(key, StringTypes): + raise ValueError, 'The key "%s" is not a string.' 
% key + # add the comment + if not self.comments.has_key(key): + self.comments[key] = [] + self.inline_comments[key] = '' + # remove the entry from defaults + if key in self.defaults: + self.defaults.remove(key) + # + if isinstance(value, Section): + if not self.has_key(key): + self.sections.append(key) + dict.__setitem__(self, key, value) + elif isinstance(value, dict) and not unrepr: + # First create the new depth level, + # then create the section + if not self.has_key(key): + self.sections.append(key) + new_depth = self.depth + 1 + dict.__setitem__( + self, + key, + Section( + self, + new_depth, + self.main, + indict=value, + name=key)) + else: + if not self.has_key(key): + self.scalars.append(key) + if not self.main.stringify: + if isinstance(value, StringTypes): + pass + elif isinstance(value, (list, tuple)): + for entry in value: + if not isinstance(entry, StringTypes): + raise TypeError, ( + 'Value is not a string "%s".' % entry) + else: + raise TypeError, 'Value is not a string "%s".' % value + dict.__setitem__(self, key, value) + + def __delitem__(self, key): + """Remove items from the sequence when deleting.""" + dict. __delitem__(self, key) + if key in self.scalars: + self.scalars.remove(key) + else: + self.sections.remove(key) + del self.comments[key] + del self.inline_comments[key] + + def get(self, key, default=None): + """A version of ``get`` that doesn't bypass string interpolation.""" + try: + return self[key] + except KeyError: + return default + + def update(self, indict): + """ + A version of update that uses our ``__setitem__``. + """ + for entry in indict: + self[entry] = indict[entry] + + def pop(self, key, *args): + """ """ + val = dict.pop(self, key, *args) + if key in self.scalars: + del self.comments[key] + del self.inline_comments[key] + self.scalars.remove(key) + elif key in self.sections: + del self.comments[key] + del self.inline_comments[key] + self.sections.remove(key) + if self.main.interpolation and isinstance(val, StringTypes): + return self._interpolate(key, val) + return val + + def popitem(self): + """Pops the first (key,val)""" + sequence = (self.scalars + self.sections) + if not sequence: + raise KeyError, ": 'popitem(): dictionary is empty'" + key = sequence[0] + val = self[key] + del self[key] + return key, val + + def clear(self): + """ + A version of clear that also affects scalars/sections + Also clears comments and configspec. 
+ + Leaves other attributes alone : + depth/main/parent are not affected + """ + dict.clear(self) + self.scalars = [] + self.sections = [] + self.comments = {} + self.inline_comments = {} + self.configspec = {} + + def setdefault(self, key, default=None): + """A version of setdefault that sets sequence if appropriate.""" + try: + return self[key] + except KeyError: + self[key] = default + return self[key] + + def items(self): + """ """ + return zip((self.scalars + self.sections), self.values()) + + def keys(self): + """ """ + return (self.scalars + self.sections) + + def values(self): + """ """ + return [self[key] for key in (self.scalars + self.sections)] + + def iteritems(self): + """ """ + return iter(self.items()) + + def iterkeys(self): + """ """ + return iter((self.scalars + self.sections)) + + __iter__ = iterkeys + + def itervalues(self): + """ """ + return iter(self.values()) + + def __repr__(self): + return '{%s}' % ', '.join([('%s: %s' % (repr(key), repr(self[key]))) + for key in (self.scalars + self.sections)]) + + __str__ = __repr__ + + # Extra methods - not in a normal dictionary + + def dict(self): + """ + Return a deepcopy of self as a dictionary. + + All members that are ``Section`` instances are recursively turned to + ordinary dictionaries - by calling their ``dict`` method. + + >>> n = a.dict() + >>> n == a + 1 + >>> n is a + 0 + """ + newdict = {} + for entry in self: + this_entry = self[entry] + if isinstance(this_entry, Section): + this_entry = this_entry.dict() + elif isinstance(this_entry, list): + # create a copy rather than a reference + this_entry = list(this_entry) + elif isinstance(this_entry, tuple): + # create a copy rather than a reference + this_entry = tuple(this_entry) + newdict[entry] = this_entry + return newdict + + def merge(self, indict): + """ + A recursive update - useful for merging config files. + + >>> a = '''[section1] + ... option1 = True + ... [[subsection]] + ... more_options = False + ... # end of file'''.splitlines() + >>> b = '''# File is user.ini + ... [section1] + ... option1 = False + ... # end of file'''.splitlines() + >>> c1 = ConfigObj(b) + >>> c2 = ConfigObj(a) + >>> c2.merge(c1) + >>> c2 + {'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}} + """ + for key, val in indict.items(): + if (key in self and isinstance(self[key], dict) and + isinstance(val, dict)): + self[key].merge(val) + else: + self[key] = val + + def rename(self, oldkey, newkey): + """ + Change a keyname to another, without changing position in sequence. + + Implemented so that transformations can be made on keys, + as well as on values. (used by encode and decode) + + Also renames comments. + """ + if oldkey in self.scalars: + the_list = self.scalars + elif oldkey in self.sections: + the_list = self.sections + else: + raise KeyError, 'Key "%s" not found.' % oldkey + pos = the_list.index(oldkey) + # + val = self[oldkey] + dict.__delitem__(self, oldkey) + dict.__setitem__(self, newkey, val) + the_list.remove(oldkey) + the_list.insert(pos, newkey) + comm = self.comments[oldkey] + inline_comment = self.inline_comments[oldkey] + del self.comments[oldkey] + del self.inline_comments[oldkey] + self.comments[newkey] = comm + self.inline_comments[newkey] = inline_comment + + def walk(self, function, raise_errors=True, + call_on_sections=False, **keywargs): + """ + Walk every member and call a function on the keyword and value. 
+ + Return a dictionary of the return values + + If the function raises an exception, raise the errror + unless ``raise_errors=False``, in which case set the return value to + ``False``. + + Any unrecognised keyword arguments you pass to walk, will be pased on + to the function you pass in. + + Note: if ``call_on_sections`` is ``True`` then - on encountering a + subsection, *first* the function is called for the *whole* subsection, + and then recurses into it's members. This means your function must be + able to handle strings, dictionaries and lists. This allows you + to change the key of subsections as well as for ordinary members. The + return value when called on the whole subsection has to be discarded. + + See the encode and decode methods for examples, including functions. + + .. caution:: + + You can use ``walk`` to transform the names of members of a section + but you mustn't add or delete members. + + >>> config = '''[XXXXsection] + ... XXXXkey = XXXXvalue'''.splitlines() + >>> cfg = ConfigObj(config) + >>> cfg + {'XXXXsection': {'XXXXkey': 'XXXXvalue'}} + >>> def transform(section, key): + ... val = section[key] + ... newkey = key.replace('XXXX', 'CLIENT1') + ... section.rename(key, newkey) + ... if isinstance(val, (tuple, list, dict)): + ... pass + ... else: + ... val = val.replace('XXXX', 'CLIENT1') + ... section[newkey] = val + >>> cfg.walk(transform, call_on_sections=True) + {'CLIENT1section': {'CLIENT1key': None}} + >>> cfg + {'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}} + """ + out = {} + # scalars first + for i in range(len(self.scalars)): + entry = self.scalars[i] + try: + val = function(self, entry, **keywargs) + # bound again in case name has changed + entry = self.scalars[i] + out[entry] = val + except Exception: + if raise_errors: + raise + else: + entry = self.scalars[i] + out[entry] = False + # then sections + for i in range(len(self.sections)): + entry = self.sections[i] + if call_on_sections: + try: + function(self, entry, **keywargs) + except Exception: + if raise_errors: + raise + else: + entry = self.sections[i] + out[entry] = False + # bound again in case name has changed + entry = self.sections[i] + # previous result is discarded + out[entry] = self[entry].walk( + function, + raise_errors=raise_errors, + call_on_sections=call_on_sections, + **keywargs) + return out + + def decode(self, encoding): + """ + Decode all strings and values to unicode, using the specified encoding. + + Works with subsections and list values. + + Uses the ``walk`` method. + + Testing ``encode`` and ``decode``. + >>> m = ConfigObj(a) + >>> m.decode('ascii') + >>> def testuni(val): + ... for entry in val: + ... if not isinstance(entry, unicode): + ... print >> sys.stderr, type(entry) + ... raise AssertionError, 'decode failed.' + ... if isinstance(val[entry], dict): + ... testuni(val[entry]) + ... elif not isinstance(val[entry], unicode): + ... raise AssertionError, 'decode failed.' 
+ >>> testuni(m) + >>> m.encode('ascii') + >>> a == m + 1 + """ + warn('use of ``decode`` is deprecated.', DeprecationWarning) + def decode(section, key, encoding=encoding, warn=True): + """ """ + val = section[key] + if isinstance(val, (list, tuple)): + newval = [] + for entry in val: + newval.append(entry.decode(encoding)) + elif isinstance(val, dict): + newval = val + else: + newval = val.decode(encoding) + newkey = key.decode(encoding) + section.rename(key, newkey) + section[newkey] = newval + # using ``call_on_sections`` allows us to modify section names + self.walk(decode, call_on_sections=True) + + def encode(self, encoding): + """ + Encode all strings and values from unicode, + using the specified encoding. + + Works with subsections and list values. + Uses the ``walk`` method. + """ + warn('use of ``encode`` is deprecated.', DeprecationWarning) + def encode(section, key, encoding=encoding): + """ """ + val = section[key] + if isinstance(val, (list, tuple)): + newval = [] + for entry in val: + newval.append(entry.encode(encoding)) + elif isinstance(val, dict): + newval = val + else: + newval = val.encode(encoding) + newkey = key.encode(encoding) + section.rename(key, newkey) + section[newkey] = newval + self.walk(encode, call_on_sections=True) + + def istrue(self, key): + """A deprecated version of ``as_bool``.""" + warn('use of ``istrue`` is deprecated. Use ``as_bool`` method ' + 'instead.', DeprecationWarning) + return self.as_bool(key) + + def as_bool(self, key): + """ + Accepts a key as input. The corresponding value must be a string or + the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to + retain compatibility with Python 2.2. + + If the string is one of ``True``, ``On``, ``Yes``, or ``1`` it returns + ``True``. + + If the string is one of ``False``, ``Off``, ``No``, or ``0`` it returns + ``False``. + + ``as_bool`` is not case sensitive. + + Any other input will raise a ``ValueError``. + + >>> a = ConfigObj() + >>> a['a'] = 'fish' + >>> a.as_bool('a') + Traceback (most recent call last): + ValueError: Value "fish" is neither True nor False + >>> a['b'] = 'True' + >>> a.as_bool('b') + 1 + >>> a['b'] = 'off' + >>> a.as_bool('b') + 0 + """ + val = self[key] + if val == True: + return True + elif val == False: + return False + else: + try: + if not isinstance(val, StringTypes): + raise KeyError + else: + return self.main._bools[val.lower()] + except KeyError: + raise ValueError('Value "%s" is neither True nor False' % val) + + def as_int(self, key): + """ + A convenience method which coerces the specified value to an integer. + + If the value is an invalid literal for ``int``, a ``ValueError`` will + be raised. + + >>> a = ConfigObj() + >>> a['a'] = 'fish' + >>> a.as_int('a') + Traceback (most recent call last): + ValueError: invalid literal for int(): fish + >>> a['b'] = '1' + >>> a.as_int('b') + 1 + >>> a['b'] = '3.2' + >>> a.as_int('b') + Traceback (most recent call last): + ValueError: invalid literal for int(): 3.2 + """ + return int(self[key]) + + def as_float(self, key): + """ + A convenience method which coerces the specified value to a float. + + If the value is an invalid literal for ``float``, a ``ValueError`` will + be raised. 
+ + >>> a = ConfigObj() + >>> a['a'] = 'fish' + >>> a.as_float('a') + Traceback (most recent call last): + ValueError: invalid literal for float(): fish + >>> a['b'] = '1' + >>> a.as_float('b') + 1.0 + >>> a['b'] = '3.2' + >>> a.as_float('b') + 3.2000000000000002 + """ + return float(self[key]) + + +class ConfigObj(Section): + """An object to read, create, and write config files.""" + + _keyword = re.compile(r'''^ # line start + (\s*) # indentation + ( # keyword + (?:".*?")| # double quotes + (?:'.*?')| # single quotes + (?:[^'"=].*?) # no quotes + ) + \s*=\s* # divider + (.*) # value (including list values and comments) + $ # line end + ''', + re.VERBOSE) + + _sectionmarker = re.compile(r'''^ + (\s*) # 1: indentation + ((?:\[\s*)+) # 2: section marker open + ( # 3: section name open + (?:"\s*\S.*?\s*")| # at least one non-space with double quotes + (?:'\s*\S.*?\s*')| # at least one non-space with single quotes + (?:[^'"\s].*?) # at least one non-space unquoted + ) # section name close + ((?:\s*\])+) # 4: section marker close + \s*(\#.*)? # 5: optional comment + $''', + re.VERBOSE) + + # this regexp pulls list values out as a single string + # or single values and comments + # FIXME: this regex adds a '' to the end of comma terminated lists + # workaround in ``_handle_value`` + _valueexp = re.compile(r'''^ + (?: + (?: + ( + (?: + (?: + (?:".*?")| # double quotes + (?:'.*?')| # single quotes + (?:[^'",\#][^,\#]*?) # unquoted + ) + \s*,\s* # comma + )* # match all list items ending in a comma (if any) + ) + ( + (?:".*?")| # double quotes + (?:'.*?')| # single quotes + (?:[^'",\#\s][^,]*?)| # unquoted + (?:(?<!,)) # Empty value + )? # last item in a list - or string value + )| + (,) # alternatively a single comma - empty list + ) + \s*(\#.*)? # optional comment + $''', + re.VERBOSE) + + # use findall to get the members of a list value + _listvalueexp = re.compile(r''' + ( + (?:".*?")| # double quotes + (?:'.*?')| # single quotes + (?:[^'",\#].*?) # unquoted + ) + \s*,\s* # comma + ''', + re.VERBOSE) + + # this regexp is used for the value + # when lists are switched off + _nolistvalue = re.compile(r'''^ + ( + (?:".*?")| # double quotes + (?:'.*?')| # single quotes + (?:[^'"\#].*?)| # unquoted + (?:) # Empty value + ) + \s*(\#.*)? # optional comment + $''', + re.VERBOSE) + + # regexes for finding triple quoted values on one line + _single_line_single = re.compile(r"^'''(.*?)'''\s*(#.*)?$") + _single_line_double = re.compile(r'^"""(.*?)"""\s*(#.*)?$') + _multi_line_single = re.compile(r"^(.*?)'''\s*(#.*)?$") + _multi_line_double = re.compile(r'^(.*?)"""\s*(#.*)?$') + + _triple_quote = { + "'''": (_single_line_single, _multi_line_single), + '"""': (_single_line_double, _multi_line_double), + } + + # Used by the ``istrue`` Section method + _bools = { + 'yes': True, 'no': False, + 'on': True, 'off': False, + '1': True, '0': False, + 'true': True, 'false': False, + } + + def __init__(self, infile=None, options=None, **kwargs): + """ + Parse or create a config file object. + + ``ConfigObj(infile=None, options=None, **kwargs)`` + """ + if infile is None: + infile = [] + if options is None: + options = {} + else: + options = dict(options) + # keyword arguments take precedence over an options dictionary + options.update(kwargs) + # init the superclass + Section.__init__(self, self, 0, self) + # + defaults = OPTION_DEFAULTS.copy() + for entry in options.keys(): + if entry not in defaults.keys(): + raise TypeError, 'Unrecognised option "%s".' % entry + # TODO: check the values too. 
+ # + # Add any explicit options to the defaults + defaults.update(options) + # + # initialise a few variables + self.filename = None + self._errors = [] + self.raise_errors = defaults['raise_errors'] + self.interpolation = defaults['interpolation'] + self.list_values = defaults['list_values'] + self.create_empty = defaults['create_empty'] + self.file_error = defaults['file_error'] + self.stringify = defaults['stringify'] + self.indent_type = defaults['indent_type'] + self.encoding = defaults['encoding'] + self.default_encoding = defaults['default_encoding'] + self.BOM = False + self.newlines = None + self.write_empty_values = defaults['write_empty_values'] + self.unrepr = defaults['unrepr'] + # + self.initial_comment = [] + self.final_comment = [] + # + self._terminated = False + # + if isinstance(infile, StringTypes): + self.filename = infile + if os.path.isfile(infile): + infile = open(infile).read() or [] + elif self.file_error: + # raise an error if the file doesn't exist + raise IOError, 'Config file not found: "%s".' % self.filename + else: + # file doesn't already exist + if self.create_empty: + # this is a good test that the filename specified + # isn't impossible - like on a non existent device + h = open(infile, 'w') + h.write('') + h.close() + infile = [] + elif isinstance(infile, (list, tuple)): + infile = list(infile) + elif isinstance(infile, dict): + # initialise self + # the Section class handles creating subsections + if isinstance(infile, ConfigObj): + # get a copy of our ConfigObj + infile = infile.dict() + for entry in infile: + self[entry] = infile[entry] + del self._errors + if defaults['configspec'] is not None: + self._handle_configspec(defaults['configspec']) + else: + self.configspec = None + return + elif hasattr(infile, 'read'): + # This supports file like objects + infile = infile.read() or [] + # needs splitting into lines - but needs doing *after* decoding + # in case it's not an 8 bit encoding + else: + raise TypeError, ('infile must be a filename,' + ' file like object, or list of lines.') + # + if infile: + # don't do it for the empty ConfigObj + infile = self._handle_bom(infile) + # infile is now *always* a list + # + # Set the newlines attribute (first line ending it finds) + # and strip trailing '\n' or '\r' from lines + for line in infile: + if (not line) or (line[-1] not in ('\r', '\n', '\r\n')): + continue + for end in ('\r\n', '\n', '\r'): + if line.endswith(end): + self.newlines = end + break + break + if infile[-1] and infile[-1] in ('\r', '\n', '\r\n'): + self._terminated = True + infile = [line.rstrip('\r\n') for line in infile] + # + self._parse(infile) + # if we had any errors, now is the time to raise them + if self._errors: + info = "at line %s." % self._errors[0].line_number + if len(self._errors) > 1: + msg = ("Parsing failed with several errors.\nFirst error %s" % + info) + error = ConfigObjError(msg) + else: + error = self._errors[0] + # set the errors attribute; it's a list of tuples: + # (error_type, message, line_number) + error.errors = self._errors + # set the config attribute + error.config = self + raise error + # delete private attributes + del self._errors + # + if defaults['configspec'] is None: + self.configspec = None + else: + self._handle_configspec(defaults['configspec']) + + def __repr__(self): + return 'ConfigObj({%s})' % ', '.join( + [('%s: %s' % (repr(key), repr(self[key]))) for key in + (self.scalars + self.sections)]) + + def _handle_bom(self, infile): + """ + Handle any BOM, and decode if necessary. 
+ + If an encoding is specified, that *must* be used - but the BOM should + still be removed (and the BOM attribute set). + + (If the encoding is wrongly specified, then a BOM for an alternative + encoding won't be discovered or removed.) + + If an encoding is not specified, UTF8 or UTF16 BOM will be detected and + removed. The BOM attribute will be set. UTF16 will be decoded to + unicode. + + NOTE: This method must not be called with an empty ``infile``. + + Specifying the *wrong* encoding is likely to cause a + ``UnicodeDecodeError``. + + ``infile`` must always be returned as a list of lines, but may be + passed in as a single string. + """ + if ((self.encoding is not None) and + (self.encoding.lower() not in BOM_LIST)): + # No need to check for a BOM + # the encoding specified doesn't have one + # just decode + return self._decode(infile, self.encoding) + # + if isinstance(infile, (list, tuple)): + line = infile[0] + else: + line = infile + if self.encoding is not None: + # encoding explicitly supplied + # And it could have an associated BOM + # TODO: if encoding is just UTF16 - we ought to check for both + # TODO: big endian and little endian versions. + enc = BOM_LIST[self.encoding.lower()] + if enc == 'utf_16': + # For UTF16 we try big endian and little endian + for BOM, (encoding, final_encoding) in BOMS.items(): + if not final_encoding: + # skip UTF8 + continue + if infile.startswith(BOM): + ### BOM discovered + ##self.BOM = True + # Don't need to remove BOM + return self._decode(infile, encoding) + # + # If we get this far, will *probably* raise a DecodeError + # As it doesn't appear to start with a BOM + return self._decode(infile, self.encoding) + # + # Must be UTF8 + BOM = BOM_SET[enc] + if not line.startswith(BOM): + return self._decode(infile, self.encoding) + # + newline = line[len(BOM):] + # + # BOM removed + if isinstance(infile, (list, tuple)): + infile[0] = newline + else: + infile = newline + self.BOM = True + return self._decode(infile, self.encoding) + # + # No encoding specified - so we need to check for UTF8/UTF16 + for BOM, (encoding, final_encoding) in BOMS.items(): + if not line.startswith(BOM): + continue + else: + # BOM discovered + self.encoding = final_encoding + if not final_encoding: + self.BOM = True + # UTF8 + # remove BOM + newline = line[len(BOM):] + if isinstance(infile, (list, tuple)): + infile[0] = newline + else: + infile = newline + # UTF8 - don't decode + if isinstance(infile, StringTypes): + return infile.splitlines(True) + else: + return infile + # UTF16 - have to decode + return self._decode(infile, encoding) + # + # No BOM discovered and no encoding specified, just return + if isinstance(infile, StringTypes): + # infile read from a file will be a single string + return infile.splitlines(True) + else: + return infile + + def _a_to_u(self, aString): + """Decode ASCII strings to unicode if a self.encoding is specified.""" + if self.encoding: + return aString.decode('ascii') + else: + return aString + + def _decode(self, infile, encoding): + """ + Decode infile to unicode. Using the specified encoding. + + if is a string, it also needs converting to a list. 
+ """ + if isinstance(infile, StringTypes): + # can't be unicode + # NOTE: Could raise a ``UnicodeDecodeError`` + return infile.decode(encoding).splitlines(True) + for i, line in enumerate(infile): + if not isinstance(line, unicode): + # NOTE: The isinstance test here handles mixed lists of unicode/string + # NOTE: But the decode will break on any non-string values + # NOTE: Or could raise a ``UnicodeDecodeError`` + infile[i] = line.decode(encoding) + return infile + + def _decode_element(self, line): + """Decode element to unicode if necessary.""" + if not self.encoding: + return line + if isinstance(line, str) and self.default_encoding: + return line.decode(self.default_encoding) + return line + + def _str(self, value): + """ + Used by ``stringify`` within validate, to turn non-string values + into strings. + """ + if not isinstance(value, StringTypes): + return str(value) + else: + return value + + def _parse(self, infile): + """Actually parse the config file.""" + temp_list_values = self.list_values + if self.unrepr: + self.list_values = False + comment_list = [] + done_start = False + this_section = self + maxline = len(infile) - 1 + cur_index = -1 + reset_comment = False + while cur_index < maxline: + if reset_comment: + comment_list = [] + cur_index += 1 + line = infile[cur_index] + sline = line.strip() + # do we have anything on the line ? + if not sline or sline.startswith('#'): + reset_comment = False + comment_list.append(line) + continue + if not done_start: + # preserve initial comment + self.initial_comment = comment_list + comment_list = [] + done_start = True + reset_comment = True + # first we check if it's a section marker + mat = self._sectionmarker.match(line) + if mat is not None: + # is a section line + (indent, sect_open, sect_name, sect_close, comment) = ( + mat.groups()) + if indent and (self.indent_type is None): + self.indent_type = indent + cur_depth = sect_open.count('[') + if cur_depth != sect_close.count(']'): + self._handle_error( + "Cannot compute the section depth at line %s.", + NestingError, infile, cur_index) + continue + # + if cur_depth < this_section.depth: + # the new section is dropping back to a previous level + try: + parent = self._match_depth( + this_section, + cur_depth).parent + except SyntaxError: + self._handle_error( + "Cannot compute nesting level at line %s.", + NestingError, infile, cur_index) + continue + elif cur_depth == this_section.depth: + # the new section is a sibling of the current section + parent = this_section.parent + elif cur_depth == this_section.depth + 1: + # the new section is a child the current section + parent = this_section + else: + self._handle_error( + "Section too nested at line %s.", + NestingError, infile, cur_index) + # + sect_name = self._unquote(sect_name) + if parent.has_key(sect_name): + self._handle_error( + 'Duplicate section name at line %s.', + DuplicateError, infile, cur_index) + continue + # create the new section + this_section = Section( + parent, + cur_depth, + self, + name=sect_name) + parent[sect_name] = this_section + parent.inline_comments[sect_name] = comment + parent.comments[sect_name] = comment_list + continue + # + # it's not a section marker, + # so it should be a valid ``key = value`` line + mat = self._keyword.match(line) + if mat is None: + # it neither matched as a keyword + # or a section marker + self._handle_error( + 'Invalid line at line "%s".', + ParseError, infile, cur_index) + else: + # is a keyword value + # value will include any inline comment + (indent, key, value) = 
mat.groups() + if indent and (self.indent_type is None): + self.indent_type = indent + # check for a multiline value + if value[:3] in ['"""', "'''"]: + try: + (value, comment, cur_index) = self._multiline( + value, infile, cur_index, maxline) + except SyntaxError: + self._handle_error( + 'Parse error in value at line %s.', + ParseError, infile, cur_index) + continue + else: + if self.unrepr: + comment = '' + try: + value = unrepr(value) + except Exception, e: + if type(e) == UnknownType: + msg = 'Unknown name or type in value at line %s.' + else: + msg = 'Parse error in value at line %s.' + self._handle_error(msg, UnreprError, infile, + cur_index) + continue + else: + if self.unrepr: + comment = '' + try: + value = unrepr(value) + except Exception, e: + if isinstance(e, UnknownType): + msg = 'Unknown name or type in value at line %s.' + else: + msg = 'Parse error in value at line %s.' + self._handle_error(msg, UnreprError, infile, + cur_index) + continue + else: + # extract comment and lists + try: + (value, comment) = self._handle_value(value) + except SyntaxError: + self._handle_error( + 'Parse error in value at line %s.', + ParseError, infile, cur_index) + continue + # + key = self._unquote(key) + if this_section.has_key(key): + self._handle_error( + 'Duplicate keyword name at line %s.', + DuplicateError, infile, cur_index) + continue + # add the key. + # we set unrepr because if we have got this far we will never + # be creating a new section + this_section.__setitem__(key, value, unrepr=True) + this_section.inline_comments[key] = comment + this_section.comments[key] = comment_list + continue + # + if self.indent_type is None: + # no indentation used, set the type accordingly + self.indent_type = '' + # + if self._terminated: + comment_list.append('') + # preserve the final comment + if not self and not self.initial_comment: + self.initial_comment = comment_list + elif not reset_comment: + self.final_comment = comment_list + self.list_values = temp_list_values + + def _match_depth(self, sect, depth): + """ + Given a section and a depth level, walk back through the sections + parents to see if the depth level matches a previous section. + + Return a reference to the right section, + or raise a SyntaxError. + """ + while depth < sect.depth: + if sect is sect.parent: + # we've reached the top level already + raise SyntaxError + sect = sect.parent + if sect.depth == depth: + return sect + # shouldn't get here + raise SyntaxError + + def _handle_error(self, text, ErrorClass, infile, cur_index): + """ + Handle an error according to the error settings. + + Either raise the error or store it. + The error will have occured at ``cur_index`` + """ + line = infile[cur_index] + cur_index += 1 + message = text % cur_index + error = ErrorClass(message, cur_index, line) + if self.raise_errors: + # raise the error - parsing stops here + raise error + # store the error + # reraise when parsing has finished + self._errors.append(error) + + def _unquote(self, value): + """Return an unquoted version of a value""" + if (value[0] == value[-1]) and (value[0] in ('"', "'")): + value = value[1:-1] + return value + + def _quote(self, value, multiline=True): + """ + Return a safely quoted version of a value. + + Raise a ConfigObjError if the value cannot be safely quoted. + If multiline is ``True`` (default) then use triple quotes + if necessary. + + Don't quote values that don't need it. + Recursively quote members of a list and return a comma joined list. + Multiline is ``False`` for... 
[truncated message content] |
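The archive truncates the rest of the configobj.py listing, but the portion shown above already covers the interface the config package relies on: ConfigObj parses a filename, a file-like object, or a list of lines into nested dict-like Section objects, and Section provides typed accessors such as as_bool, as_int and as_float. A short usage sketch, assuming the vendored configobj module is importable; the option names below are made up for illustration only:

from configobj import ConfigObj

lines = """\
# toy config, for illustration only
backend = Agg
[lines]
linewidth = 1.0
antialiased = True
""".splitlines()

cfg = ConfigObj(lines)
print cfg['backend']                      # 'Agg'
print cfg['lines'].as_float('linewidth')  # 1.0
print cfg['lines'].as_bool('antialiased') # True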
From: <ds...@us...> - 2007-11-08 00:25:00
|
Revision: 4156 http://matplotlib.svn.sourceforge.net/matplotlib/?rev=4156&view=rev Author: dsdale Date: 2007-11-07 16:24:57 -0800 (Wed, 07 Nov 2007) Log Message: ----------- update enthought package to version 2.6b1, stripped of setuptools installed in site-packages, if not already present Modified Paths: -------------- trunk/matplotlib/examples/rc_traits.py trunk/matplotlib/lib/matplotlib/__init__.py trunk/matplotlib/setupext.py Added Paths: ----------- trunk/matplotlib/lib/enthought/ trunk/matplotlib/lib/enthought/__init__.py trunk/matplotlib/lib/enthought/etsconfig/ trunk/matplotlib/lib/enthought/etsconfig/__init__.py trunk/matplotlib/lib/enthought/etsconfig/api.py trunk/matplotlib/lib/enthought/etsconfig/etsconfig.py trunk/matplotlib/lib/enthought/etsconfig/tests/ trunk/matplotlib/lib/enthought/etsconfig/tests/etsconfig_test_case.py trunk/matplotlib/lib/enthought/etsconfig/version.py trunk/matplotlib/lib/enthought/traits/ trunk/matplotlib/lib/enthought/traits/MANIFEST.in trunk/matplotlib/lib/enthought/traits/README.txt trunk/matplotlib/lib/enthought/traits/__init__.py trunk/matplotlib/lib/enthought/traits/__init__.pyc trunk/matplotlib/lib/enthought/traits/api.py trunk/matplotlib/lib/enthought/traits/category.py trunk/matplotlib/lib/enthought/traits/core.py trunk/matplotlib/lib/enthought/traits/core_traits.py trunk/matplotlib/lib/enthought/traits/ctraits.c trunk/matplotlib/lib/enthought/traits/has_dynamic_views.py trunk/matplotlib/lib/enthought/traits/has_traits.py trunk/matplotlib/lib/enthought/traits/images/ trunk/matplotlib/lib/enthought/traits/images/list_editor.gif trunk/matplotlib/lib/enthought/traits/info_traits.py trunk/matplotlib/lib/enthought/traits/plugins/ trunk/matplotlib/lib/enthought/traits/plugins/enthought_traits_vet.py trunk/matplotlib/lib/enthought/traits/standard.py trunk/matplotlib/lib/enthought/traits/tests/ trunk/matplotlib/lib/enthought/traits/tests/__init__.py trunk/matplotlib/lib/enthought/traits/tests/array_test_case.py trunk/matplotlib/lib/enthought/traits/tests/category_test_case.py trunk/matplotlib/lib/enthought/traits/tests/clone_test_case.py trunk/matplotlib/lib/enthought/traits/tests/container_events_test_case.py trunk/matplotlib/lib/enthought/traits/tests/delegate_test_case.py trunk/matplotlib/lib/enthought/traits/tests/images/ trunk/matplotlib/lib/enthought/traits/tests/images/bottom_left_origin.gif trunk/matplotlib/lib/enthought/traits/tests/images/bottom_right_origin.gif trunk/matplotlib/lib/enthought/traits/tests/images/top_left_origin.gif trunk/matplotlib/lib/enthought/traits/tests/images/top_right_origin.gif trunk/matplotlib/lib/enthought/traits/tests/keyword_args_test_case.py trunk/matplotlib/lib/enthought/traits/tests/list_test_case.py trunk/matplotlib/lib/enthought/traits/tests/other.py trunk/matplotlib/lib/enthought/traits/tests/range_test_case.py trunk/matplotlib/lib/enthought/traits/tests/regression/ trunk/matplotlib/lib/enthought/traits/tests/regression/__init__.py trunk/matplotlib/lib/enthought/traits/tests/regression/pickle_validated_dict_test_case.py trunk/matplotlib/lib/enthought/traits/tests/rich_compare_test_case.py trunk/matplotlib/lib/enthought/traits/tests/simple.py trunk/matplotlib/lib/enthought/traits/tests/simple_test_case.py trunk/matplotlib/lib/enthought/traits/tests/test_copy_traits.py trunk/matplotlib/lib/enthought/traits/tests/test_copyable_trait_names.py trunk/matplotlib/lib/enthought/traits/tests/test_event_order.py trunk/matplotlib/lib/enthought/traits/tests/test_events.py 
trunk/matplotlib/lib/enthought/traits/tests/test_listeners.py trunk/matplotlib/lib/enthought/traits/tests/test_property_notifications.py trunk/matplotlib/lib/enthought/traits/tests/test_str_handler.py trunk/matplotlib/lib/enthought/traits/tests/test_timing.py trunk/matplotlib/lib/enthought/traits/tests/test_trait_cycle.py trunk/matplotlib/lib/enthought/traits/tests/test_traits.py trunk/matplotlib/lib/enthought/traits/tests/test_ui.py trunk/matplotlib/lib/enthought/traits/tests/test_ui3.py trunk/matplotlib/lib/enthought/traits/tests/test_ui4.py trunk/matplotlib/lib/enthought/traits/tests/test_ui5.py trunk/matplotlib/lib/enthought/traits/tests/undefined_test_case.py trunk/matplotlib/lib/enthought/traits/trait_base.py trunk/matplotlib/lib/enthought/traits/trait_db.py trunk/matplotlib/lib/enthought/traits/trait_errors.py trunk/matplotlib/lib/enthought/traits/trait_handlers.py trunk/matplotlib/lib/enthought/traits/trait_notifiers.py trunk/matplotlib/lib/enthought/traits/trait_numeric.py trunk/matplotlib/lib/enthought/traits/traits.py trunk/matplotlib/lib/enthought/traits/ui/ trunk/matplotlib/lib/enthought/traits/ui/__init__.py trunk/matplotlib/lib/enthought/traits/ui/__init__.pyc trunk/matplotlib/lib/enthought/traits/ui/api.py trunk/matplotlib/lib/enthought/traits/ui/delegating_handler.py trunk/matplotlib/lib/enthought/traits/ui/dockable_view_element.py trunk/matplotlib/lib/enthought/traits/ui/editor.py trunk/matplotlib/lib/enthought/traits/ui/editor_factory.py trunk/matplotlib/lib/enthought/traits/ui/editors.py trunk/matplotlib/lib/enthought/traits/ui/extras/ trunk/matplotlib/lib/enthought/traits/ui/extras/__init__.py trunk/matplotlib/lib/enthought/traits/ui/extras/checkbox_column.py trunk/matplotlib/lib/enthought/traits/ui/extras/core.py trunk/matplotlib/lib/enthought/traits/ui/group.py trunk/matplotlib/lib/enthought/traits/ui/handler.py trunk/matplotlib/lib/enthought/traits/ui/help.py trunk/matplotlib/lib/enthought/traits/ui/help_template.py trunk/matplotlib/lib/enthought/traits/ui/helper.py trunk/matplotlib/lib/enthought/traits/ui/images/ trunk/matplotlib/lib/enthought/traits/ui/images/array_node.png trunk/matplotlib/lib/enthought/traits/ui/images/bool_node.png trunk/matplotlib/lib/enthought/traits/ui/images/complex_node.png trunk/matplotlib/lib/enthought/traits/ui/images/dict_node.png trunk/matplotlib/lib/enthought/traits/ui/images/float_node.png trunk/matplotlib/lib/enthought/traits/ui/images/int_node.png trunk/matplotlib/lib/enthought/traits/ui/images/list_node.png trunk/matplotlib/lib/enthought/traits/ui/images/none_node.png trunk/matplotlib/lib/enthought/traits/ui/images/object_node.png trunk/matplotlib/lib/enthought/traits/ui/images/other_node.png trunk/matplotlib/lib/enthought/traits/ui/images/string_node.png trunk/matplotlib/lib/enthought/traits/ui/images/traits_node.png trunk/matplotlib/lib/enthought/traits/ui/images/tuple_node.png trunk/matplotlib/lib/enthought/traits/ui/include.py trunk/matplotlib/lib/enthought/traits/ui/instance_choice.py trunk/matplotlib/lib/enthought/traits/ui/item.py trunk/matplotlib/lib/enthought/traits/ui/key_bindings.py trunk/matplotlib/lib/enthought/traits/ui/menu.py trunk/matplotlib/lib/enthought/traits/ui/message.py trunk/matplotlib/lib/enthought/traits/ui/null/ trunk/matplotlib/lib/enthought/traits/ui/null/__init__.py trunk/matplotlib/lib/enthought/traits/ui/null/color_trait.py trunk/matplotlib/lib/enthought/traits/ui/null/font_trait.py trunk/matplotlib/lib/enthought/traits/ui/null/rgb_color_trait.py 
trunk/matplotlib/lib/enthought/traits/ui/null/toolkit.py trunk/matplotlib/lib/enthought/traits/ui/table_column.py trunk/matplotlib/lib/enthought/traits/ui/table_filter.py trunk/matplotlib/lib/enthought/traits/ui/tests/ trunk/matplotlib/lib/enthought/traits/ui/tests/array_editor_test.py trunk/matplotlib/lib/enthought/traits/ui/tests/buttons_test.py trunk/matplotlib/lib/enthought/traits/ui/tests/check_list_editor_test.py trunk/matplotlib/lib/enthought/traits/ui/tests/check_list_editor_test2.py trunk/matplotlib/lib/enthought/traits/ui/tests/code_editor_test.py trunk/matplotlib/lib/enthought/traits/ui/tests/enum_dynamic_test.py trunk/matplotlib/lib/enthought/traits/ui/tests/html_editor_test.py trunk/matplotlib/lib/enthought/traits/ui/tests/instance_drag_test.py trunk/matplotlib/lib/enthought/traits/ui/tests/instance_editor_test.py trunk/matplotlib/lib/enthought/traits/ui/tests/instance_editor_test2.py trunk/matplotlib/lib/enthought/traits/ui/tests/instance_editor_test3.py trunk/matplotlib/lib/enthought/traits/ui/tests/instance_editor_test4.py trunk/matplotlib/lib/enthought/traits/ui/tests/instance_editor_test5.py trunk/matplotlib/lib/enthought/traits/ui/tests/instance_editor_test6.py trunk/matplotlib/lib/enthought/traits/ui/tests/list_traits_ui_test.py trunk/matplotlib/lib/enthought/traits/ui/tests/set_dynamic_test.py trunk/matplotlib/lib/enthought/traits/ui/tests/shell_editor_test.py trunk/matplotlib/lib/enthought/traits/ui/tests/table_editor_color_test.py trunk/matplotlib/lib/enthought/traits/ui/tests/table_editor_focus_bug.py trunk/matplotlib/lib/enthought/traits/ui/tests/table_editor_test.py trunk/matplotlib/lib/enthought/traits/ui/tests/table_editor_test2.py trunk/matplotlib/lib/enthought/traits/ui/tests/table_list_editor_test.py trunk/matplotlib/lib/enthought/traits/ui/tests/tree_editor_test.py trunk/matplotlib/lib/enthought/traits/ui/tk/ trunk/matplotlib/lib/enthought/traits/ui/tk/__init__.py trunk/matplotlib/lib/enthought/traits/ui/tk/boolean_editor.py trunk/matplotlib/lib/enthought/traits/ui/tk/button_editor.py trunk/matplotlib/lib/enthought/traits/ui/tk/check_list_editor.py trunk/matplotlib/lib/enthought/traits/ui/tk/color_editor.py trunk/matplotlib/lib/enthought/traits/ui/tk/compound_editor.py trunk/matplotlib/lib/enthought/traits/ui/tk/constants.py trunk/matplotlib/lib/enthought/traits/ui/tk/directory_editor.py trunk/matplotlib/lib/enthought/traits/ui/tk/editor.py trunk/matplotlib/lib/enthought/traits/ui/tk/editor_factory.py trunk/matplotlib/lib/enthought/traits/ui/tk/enum_editor.py trunk/matplotlib/lib/enthought/traits/ui/tk/file_editor.py trunk/matplotlib/lib/enthought/traits/ui/tk/font_editor.py trunk/matplotlib/lib/enthought/traits/ui/tk/helper.py trunk/matplotlib/lib/enthought/traits/ui/tk/image_control.py trunk/matplotlib/lib/enthought/traits/ui/tk/image_enum_editor.py trunk/matplotlib/lib/enthought/traits/ui/tk/instance_editor.py trunk/matplotlib/lib/enthought/traits/ui/tk/list_editor.py trunk/matplotlib/lib/enthought/traits/ui/tk/menu.py trunk/matplotlib/lib/enthought/traits/ui/tk/range_editor.py trunk/matplotlib/lib/enthought/traits/ui/tk/rgb_color_editor.py trunk/matplotlib/lib/enthought/traits/ui/tk/text_editor.py trunk/matplotlib/lib/enthought/traits/ui/tk/toolkit.py trunk/matplotlib/lib/enthought/traits/ui/tk/ui_modal.py trunk/matplotlib/lib/enthought/traits/ui/tk/ui_nonmodal.py trunk/matplotlib/lib/enthought/traits/ui/tk/ui_panel.py trunk/matplotlib/lib/enthought/traits/ui/tk/ui_wizard.py trunk/matplotlib/lib/enthought/traits/ui/tk/view_application.py 
trunk/matplotlib/lib/enthought/traits/ui/toolkit.py trunk/matplotlib/lib/enthought/traits/ui/traits.py trunk/matplotlib/lib/enthought/traits/ui/tree_node.py trunk/matplotlib/lib/enthought/traits/ui/tuidb.py trunk/matplotlib/lib/enthought/traits/ui/ui.py trunk/matplotlib/lib/enthought/traits/ui/ui_info.py trunk/matplotlib/lib/enthought/traits/ui/ui_traits.py trunk/matplotlib/lib/enthought/traits/ui/undo.py trunk/matplotlib/lib/enthought/traits/ui/value_tree.py trunk/matplotlib/lib/enthought/traits/ui/view.py trunk/matplotlib/lib/enthought/traits/ui/view_element.py trunk/matplotlib/lib/enthought/traits/ui/view_elements.py trunk/matplotlib/lib/enthought/traits/version.py Removed Paths: ------------- trunk/matplotlib/lib/matplotlib/enthought/ Modified: trunk/matplotlib/examples/rc_traits.py =================================================================== --- trunk/matplotlib/examples/rc_traits.py 2007-11-08 00:09:17 UTC (rev 4155) +++ trunk/matplotlib/examples/rc_traits.py 2007-11-08 00:24:57 UTC (rev 4156) @@ -7,7 +7,7 @@ # below. import sys, os, re -import matplotlib.enthought.traits.api as traits +import enthought.traits.api as traits from matplotlib.cbook import is_string_like from matplotlib.artist import Artist Added: trunk/matplotlib/lib/enthought/__init__.py =================================================================== --- trunk/matplotlib/lib/enthought/__init__.py (rev 0) +++ trunk/matplotlib/lib/enthought/__init__.py 2007-11-08 00:24:57 UTC (rev 4156) @@ -0,0 +1,11 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2007 by Enthought, Inc. +# All rights reserved. +#------------------------------------------------------------------------------ + +try: + pass +except: + pass + + Added: trunk/matplotlib/lib/enthought/etsconfig/__init__.py =================================================================== Added: trunk/matplotlib/lib/enthought/etsconfig/api.py =================================================================== --- trunk/matplotlib/lib/enthought/etsconfig/api.py (rev 0) +++ trunk/matplotlib/lib/enthought/etsconfig/api.py 2007-11-08 00:24:57 UTC (rev 4156) @@ -0,0 +1,4 @@ +from enthought.etsconfig.version import version, version as __version__ + + +from etsconfig import ETSConfig Added: trunk/matplotlib/lib/enthought/etsconfig/etsconfig.py =================================================================== --- trunk/matplotlib/lib/enthought/etsconfig/etsconfig.py (rev 0) +++ trunk/matplotlib/lib/enthought/etsconfig/etsconfig.py 2007-11-08 00:24:57 UTC (rev 4156) @@ -0,0 +1,385 @@ +""" Enthought Tool Suite configuration information. """ + + +# Standard library imports. +import sys +import os +from os import path + + +class ETSConfig(object): + """ + Enthought Tool Suite configuration information. + + This class should not use ANY other package in the tool suite so that it + will always work no matter which other packages are present. + + """ + + ########################################################################### + # 'object' interface. + ########################################################################### + + #### operator methods ##################################################### + + def __init__(self): + """ + Constructor. + + Note that this constructor can only ever be called from within this + module, since we don't expose the class. + + """ + + # Shadow attributes for properties. 
+ self._application_data = None + self._application_home = None + self._company = None + self._toolkit = None + self._user_data = None + + return + + + ########################################################################### + # 'ETSConfig' interface. + ########################################################################### + + #### properties ########################################################### + + def _get_application_data(self): + """ + Property getter. + + This is a directory that applications and packages can safely write + non-user accessible data to i.e. configuration information, preferences + etc. + + Do not put anything in here that the user might want to navigate to + e.g. projects, user data files etc. + + The actual location differs between operating systems. + + """ + + if self._application_data is None: + self._application_data = self._initialize_application_data() + + return self._application_data + + + def _set_application_data(self, application_data): + """ + Property setter. + + """ + + self._application_data = application_data + + return + + + application_data = property(_get_application_data, _set_application_data) + + + def _get_application_home(self): + """ + Property getter. + + This is a directory named after the current, running application that + imported this module that applications and packages can safely write + non-user accessible data to i.e. configuration information, preferences + etc. It is a sub-directory of self.application_data, named after the + directory that contains the "main" python script that started the + process. For example, if application foo is started with a script named + "run.py" in a directory named "foo", then the application home would be: + <ETSConfig.application_data>/foo, regardless of if it was launched + with "python <path_to_foo>/run.py" or "cd <path_to_foo>; python run.py" + + This is useful for library modules used in apps that need to store + state, preferences, etc. for the specific app only, and not for all apps + which use that library module. If the library module uses + ETSConfig.application_home, they can store prefs for the app all in + one place and do not need to know the details of where each app might + reside. + + Do not put anything in here that the user might want to navigate to + e.g. projects, user home files etc. + + The actual location differs between operating systems. + + """ + + if self._application_home is None: + self._application_home = path.join(self.application_data, + self._get_application_dirname()) + + return self._application_home + + + def _set_application_home(self, application_home): + """ + Property setter. + + """ + + self._application_home = application_home + + return + + + application_home = property(_get_application_home, _set_application_home) + + + def _get_company(self): + """ + Property getter. + + """ + + if self._company is None: + self._company = self._initialize_company() + + return self._company + + + def _set_company(self, company): + """ + Property setter for the company name. + + """ + + self._company = company + + return + + + company = property(_get_company, _set_company) + + + def _get_toolkit(self): + """ + Property getter for the GUI toolkit. The value returned is, in order + of preference: the value set by the application; the value passed on + the command line using the '-toolkit' option; the value specified by + the 'ETS_TOOLKIT' environment variable; otherwise the empty string. 
+ + """ + + if self._toolkit is None: + self._toolkit = self._initialize_toolkit() + + return self._toolkit + + + def _set_toolkit(self, toolkit): + """ + Property setter for the GUI toolkit. The toolkit can be set more than + once, but only if it is the same one each time. An application that is + written for a particular toolkit can explicitly set it before any other + module that gets the value is imported. + + """ + + if self._toolkit and self._toolkit != toolkit: + raise ValueError, "cannot set toolkit to %s because it has already been set to %s" % (toolkit, self._toolkit) + + self._toolkit = toolkit + + return + + + toolkit = property(_get_toolkit, _set_toolkit) + + + def _get_user_data(self): + """ + Property getter. + + This is a directory that users can safely write user accessible data + to i.e. user-defined functions, edited functions, etc. + + The actual location differs between operating systems. + + """ + + if self._user_data is None: + self._user_data = self._initialize_user_data() + + return self._user_data + + + def _set_user_data(self, user_data): + """ + Property setter. + + """ + + self._user_data = user_data + + return + + + user_data = property(_get_user_data, _set_user_data) + + + #### private methods ##################################################### + + # fixme: In future, these methods could allow the properties to be set + # via the (as yet non-existent) preference/configuration mechanism. This + # would allow configuration via (in order of precedence):- + # + # - a configuration file + # - environment variables + # - the command line + + def _get_application_dirname(self): + """ + Return the name of the directory (not a path) that the "main" + Python script which started this process resides in, or "" if it could + not be determined or is not appropriate. + + For example, if the script that started the current process was named + "run.py" in a directory named "foo", and was launched with "python + run.py", the name "foo" would be returned (this assumes the directory + name is the name of the app, which seems to be as good of an assumption + as any). + + """ + + dirname = "" + + main_mod = sys.modules.get('__main__', None) + if main_mod is not None: + if hasattr(main_mod, '__file__'): + main_mod_file = path.abspath(main_mod.__file__) + dirname = path.basename(path.dirname(main_mod_file)) + + return dirname + + + def _initialize_application_data(self): + """ + Initializes the (default) application data directory. + + """ + + if sys.platform == 'win32': + environment_variable = 'APPDATA' + directory_name = self.company + + else: + environment_variable = 'HOME' + directory_name = '.' + self.company.lower() + + # Lookup the environment variable. + parent_directory = os.environ.get(environment_variable, None) + if parent_directory is None: + raise ValueError( + 'Environment variable "%s" not set' % environment_variable + ) + + application_data = os.path.join(parent_directory, directory_name) + + # If a file already exists with this name then make sure that it is + # a directory! + if os.path.exists(application_data): + if not os.path.isdir(application_data): + raise ValueError('File "%s" already exists' % application_data) + + # Otherwise, create the directory. + else: + os.makedirs(application_data) + + return application_data + + + def _initialize_company(self): + """ + Initializes the (default) company. + + """ + + return 'Enthought' + + + def _initialize_toolkit(self): + """ + Initializes the toolkit. 
+ + """ + + # We handle the command line option even though it doesn't have the + # highest precedence because we always want to remove it from the + # command line. + if '-toolkit' in sys.argv: + opt_idx = sys.argv.index('-toolkit') + + try: + opt_toolkit = sys.argv[opt_idx + 1] + except IndexError: + raise ValueError, "the -toolkit command line argument must be followed by a toolkit name" + + # Remove the option. + del sys.argv[opt_idx:opt_idx + 1] + else: + opt_toolkit = None + + if self._toolkit is not None: + toolkit = self._toolkit + elif opt_toolkit is not None: + toolkit = opt_toolkit + else: + toolkit = os.environ.get('ETS_TOOLKIT', '') + + return toolkit + + + def _initialize_user_data(self): + """ + Initializes the (default) user data directory. + + """ + + # We check what the os.path.expanduser returns + parent_directory = os.path.expanduser('~') + directory_name = self.company + + + if sys.platform == 'win32': + # Check if the usr_dir is C:\\John Doe\\Documents and Settings. + # If yes, then we should modify the usr_dir to be 'My Documents'. + # If no, then the user must have modified the os.environ + # variables and the directory chosen is a desirable one. + desired_dir = os.path.join(parent_directory, 'My Documents') + + if os.path.exists(desired_dir): + parent_directory = desired_dir + + else: + directory_name = directory_name.lower() + + # The final directory. + usr_dir = os.path.join(parent_directory, directory_name) + + # If a file already exists with this name then make sure that it is + # a directory! + if os.path.exists(usr_dir): + if not os.path.isdir(usr_dir): + raise ValueError('File "%s" already exists' % usr_dir) + + # Otherwise, create the directory. + else: + os.makedirs(usr_dir) + + return usr_dir + + + +# We very purposefully only have one object and do not export the class. We +# could have just made everything class methods, but that always seems a bit +# gorpy, especially with properties etc. +ETSConfig = ETSConfig() + + +#### EOF ###################################################################### Added: trunk/matplotlib/lib/enthought/etsconfig/tests/etsconfig_test_case.py =================================================================== --- trunk/matplotlib/lib/enthought/etsconfig/tests/etsconfig_test_case.py (rev 0) +++ trunk/matplotlib/lib/enthought/etsconfig/tests/etsconfig_test_case.py 2007-11-08 00:24:57 UTC (rev 4156) @@ -0,0 +1,241 @@ +""" Tests the 'ETSConfig' configuration object. """ + + +# Standard library imports. +import os, time, unittest + +# Enthought library imports. +from enthought.etsconfig.api import ETSConfig + + +class ETSConfigTestCase(unittest.TestCase): + """ Tests the 'ETSConfig' configuration object. """ + + ########################################################################### + # 'TestCase' interface. + ########################################################################### + + #### public methods ####################################################### + + def setUp(self): + """ + Prepares the test fixture before each test method is called. + + """ + + return + + def tearDown(self): + """ + Called immediately after each test method has been called. + + """ + + return + + + ########################################################################### + # 'ETSConfigTestCase' interface. 
+ ########################################################################### + + #### public methods ####################################################### + + def test_application_data(self): + """ + application data + + """ + + dirname = ETSConfig.application_data + + self.assertEqual(os.path.exists(dirname), True) + self.assertEqual(os.path.isdir(dirname), True) + + return + + def test_set_application_data(self): + """ + set application data + + """ + + old = ETSConfig.application_data + + ETSConfig.application_data = 'foo' + self.assertEqual('foo', ETSConfig.application_data) + + ETSConfig.application_data = old + self.assertEqual(old, ETSConfig.application_data) + + return + + + def test_application_data_is_idempotent(self): + """ + application data is idempotent + + """ + + # Just do the previous test again! + self.test_application_data() + + return + + + def test_write_to_application_data_directory(self): + """ + write to application data directory + + """ + + ETSConfig.company = 'Blah' + dirname = ETSConfig.application_data + + path = os.path.join(dirname, 'dummy.txt') + data = str(time.time()) + + f = file(path, 'w') + f.write(data) + f.close() + + self.assertEqual(os.path.exists(path), True) + + f = file(path) + result = f.read() + f.close() + + os.remove(path) + + self.assertEqual(data, result) + + return + + + def test_default_company(self): + """ + default company + + """ + + self.assertEqual(ETSConfig.company, 'Enthought') + + return + + + def test_set_company(self): + """ + set company + + """ + + old = ETSConfig.company + + ETSConfig.company = 'foo' + self.assertEqual('foo', ETSConfig.company) + + ETSConfig.company = old + self.assertEqual(old, ETSConfig.company) + + return + + + def _test_default_application_home(self): + """ + application home + + """ + + # This test is only valid when run with the 'main' at the end of this + # file: "python app_dat_locator_test_case.py", in which case the + # app_name will be the directory this file is in ('tests'). + app_home = ETSConfig.application_home + (dirname, app_name) = os.path.split(app_home) + + self.assertEqual(dirname, ETSConfig.application_data) + self.assertEqual(app_name, 'tests') + + + def test_user_data(self): + """ + user data + + """ + + dirname = ETSConfig.user_data + + self.assertEqual(os.path.exists(dirname), True) + self.assertEqual(os.path.isdir(dirname), True) + + return + + + def test_set_user_data(self): + """ + set user data + + """ + + old = ETSConfig.user_data + + ETSConfig.user_data = 'foo' + self.assertEqual('foo', ETSConfig.user_data) + + ETSConfig.user_data = old + self.assertEqual(old, ETSConfig.user_data) + + return + + + def test_user_data_is_idempotent(self): + """ + user data is idempotent + + """ + + # Just do the previous test again! + self.test_user_data() + + return + + + def test_write_to_user_data_directory(self): + """ + write to user data directory + + """ + + ETSConfig.company = 'Blah' + dirname = ETSConfig.user_data + + path = os.path.join(dirname, 'dummy.txt') + data = str(time.time()) + + f = file(path, 'w') + f.write(data) + f.close() + + self.assertEqual(os.path.exists(path), True) + + f = file(path) + result = f.read() + f.close() + + os.remove(path) + + self.assertEqual(data, result) + + return + + +# For running as an individual set of tests. +if __name__ == '__main__': + + # Add the non-default test of application_home...non-default because it must + # be run using this module as a script to be valid. 
+ suite = unittest.TestLoader().loadTestsFromTestCase(ETSConfigTestCase) + suite.addTest(ETSConfigTestCase('_test_default_application_home')) + + unittest.TextTestRunner(verbosity=2).run(suite) + + +#### EOF ###################################################################### Added: trunk/matplotlib/lib/enthought/etsconfig/version.py =================================================================== --- trunk/matplotlib/lib/enthought/etsconfig/version.py (rev 0) +++ trunk/matplotlib/lib/enthought/etsconfig/version.py 2007-11-08 00:24:57 UTC (rev 4156) @@ -0,0 +1,10 @@ +# Wrapped in a try/except in those situations where someone hasn't installed +# as an egg. What do we do then? For now, we just punt since we don't want +# to define the version number in two places. +#try: +# import pkg_resources +# version = pkg_resources.require('enthought.etsconfig')[0].version +#except: +# version = '' +version = '2.6b1-mpl' + Added: trunk/matplotlib/lib/enthought/traits/MANIFEST.in =================================================================== --- trunk/matplotlib/lib/enthought/traits/MANIFEST.in (rev 0) +++ trunk/matplotlib/lib/enthought/traits/MANIFEST.in 2007-11-08 00:24:57 UTC (rev 4156) @@ -0,0 +1 @@ +exclude *_test*.py Added: trunk/matplotlib/lib/enthought/traits/README.txt =================================================================== --- trunk/matplotlib/lib/enthought/traits/README.txt (rev 0) +++ trunk/matplotlib/lib/enthought/traits/README.txt 2007-11-08 00:24:57 UTC (rev 4156) @@ -0,0 +1,67 @@ +Introduction +------------ + +'Traits' is a Python package for creating 'manifestly'-typed Python attributes. + +Installation +------------ + +The Traits package is installed using the standard Python 'distutils' package. + +Enter the following command in the 'traits-1.0' directory: + + python setup.py install + +This will perform a normal install of the Traits package into your Python +installation. Refer to the Python 'distutils' documentation for more +installation options. + +Download +-------- + +The Traits package is available as part of the Enthought Tool Suite (ETS), +available from: + + http://code.enthought.com/ets/ + +To install ETS using Enthought's egg-based 'Enstaller', download and run: + + http://code.enthought.com/enstaller/run_enstaller.py + +License +------- + +The 'traits' package is available under a BSD style license. + +Contact +------- + +If you encounter any problems using the 'traits' package, or have any comments +or suggestions about the package, please contact the author: + + David C. Morrill + dmo...@en... + +For discussion of the Traits package, as well as other tools in the Enthought +Tool Suite, use the enthought-dev mailing list: + + https://mail.enthought.com/mailman/listinfo/enthought-dev + + http://dir.gmane.org/gmane.comp.python.enthought.devel + +Prerequisites +------------- + +The base Traits package should work on any platform supporting Python >= 1.5.2. + +The user interface capabilities of the traits package require additional +Python packages to be installed. + +The UI toolkit backend that is actively maintained is wxPython. To use it, +install a version >= 2.3.3.1 (available from: http://www.wxpython.org). + +A UI toolkit backend for Tkinter exists, but is not actively maintained or +tested. 
If you wish to try Traits with Tkinter, you must also install: + + - Tkinter (usually installed as part of your Python distribution) + - PMW (Python MegaWidgets) (available from: http://pmw.sourceforge.net) Added: trunk/matplotlib/lib/enthought/traits/__init__.py =================================================================== --- trunk/matplotlib/lib/enthought/traits/__init__.py (rev 0) +++ trunk/matplotlib/lib/enthought/traits/__init__.py 2007-11-08 00:24:57 UTC (rev 4156) @@ -0,0 +1,26 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2005, Enthought, Inc. +# All rights reserved. +# +# This software is provided without warranty under the terms of the BSD +# license included in enthought/LICENSE.txt and may be redistributed only +# under the conditions described in the aforementioned license. The license +# is also available online at http://www.enthought.com/licenses/BSD.txt +# Thanks for using Enthought open source! +# +# Author: David C. Morrill +# Date: 06/21/2002 +# Description: Define a 'traits' package that allows other classes to easily +# define 'type-checked' and/or 'delegated' traits for their +# instances. +# +# Note: A 'trait' is similar to a 'property', but is used instead +# of the word 'property' to differentiate it from the Python +# language 'property' feature. +#------------------------------------------------------------------------------ + +try: + # if the code is ran from an egg, the namespace must be declared + pass +except: + pass Added: trunk/matplotlib/lib/enthought/traits/__init__.pyc =================================================================== (Binary files differ) Property changes on: trunk/matplotlib/lib/enthought/traits/__init__.pyc ___________________________________________________________________ Name: svn:mime-type + application/octet-stream Added: trunk/matplotlib/lib/enthought/traits/api.py =================================================================== --- trunk/matplotlib/lib/enthought/traits/api.py (rev 0) +++ trunk/matplotlib/lib/enthought/traits/api.py 2007-11-08 00:24:57 UTC (rev 4156) @@ -0,0 +1,159 @@ +#------------------------------------------------------------------------------ +# +# Copyright (c) 2005, Enthought, Inc. +# All rights reserved. +# +# Written by: David C. Morrill +# +# Date: 12/06/2005 +# +#------------------------------------------------------------------------------ + + +""" Pseudo-package for all of the core symbols from Traits and TraitsUI. +Use this module for importing Traits names into your namespace. 
For example:: + + from enthought.traits.api import HasTraits + +""" + +from enthought.traits.version import version, version as __version__ + + +from info_traits \ + import __doc__ + +from trait_base \ + import Undefined, Missing, Self + +from trait_errors \ + import TraitError, TraitNotificationError, DelegationError + +from trait_notifiers \ + import push_exception_handler, pop_exception_handler, \ + TraitChangeNotifyWrapper + +from category \ + import Category + +from trait_db \ + import tdb + +from traits \ + import Event, List, Dict, Tuple, Range, Constant, CTrait, Trait, Delegate, \ + Property, Expression, Button, ToolbarButton, PythonValue, Any, Int, \ + Long, Float, Str, Unicode, Complex, Bool, CInt, CLong, CFloat, \ + CStr, CUnicode, WeakRef + +from traits \ + import CComplex, CBool, false, true, Regex, String, Password, File, \ + Directory, Function, Method, Class, Instance, Module, Type, This, \ + self, Either, Python, Disallow, ReadOnly, undefined, missing, ListInt + +from traits \ + import ListFloat, ListStr, ListUnicode, ListComplex, ListBool, \ + ListFunction, ListMethod, ListClass, ListInstance, ListThis, \ + DictStrAny, DictStrStr, DictStrInt, DictStrLong, DictStrFloat + +from traits \ + import DictStrBool, DictStrList, TraitFactory, Callable, Array, CArray, \ + Enum, Code, HTML, Default, Color, RGBColor, Font + +from has_traits \ + import method, HasTraits, HasStrictTraits, HasPrivateTraits, \ + SingletonHasTraits, SingletonHasStrictTraits, \ + SingletonHasPrivateTraits, MetaHasTraits, Vetoable, VetoableEvent, \ + traits_super + +from trait_handlers \ + import TraitHandler, TraitRange, TraitString, TraitType, TraitCastType, \ + TraitInstance, ThisClass, TraitClass, TraitFunction, TraitEnum, \ + TraitPrefixList, TraitMap, TraitPrefixMap, TraitCompound, \ + TraitList, TraitListEvent, TraitDict, TraitDictEvent, TraitTuple + +from traits \ + import UIDebugger + +################### +# ui imports +if False: + + from ui.handler \ + import Handler, ViewHandler, default_handler + + from ui.view \ + import View + + from ui.group \ + import Group, HGroup, VGroup, VGrid, HFlow, VFlow, HSplit, VSplit, Tabbed + + from ui.ui \ + import UI + + from ui.ui_info \ + import UIInfo + + from ui.help \ + import on_help_call + + from ui.include \ + import Include + + from ui.item \ + import Item, Label, Heading, Spring, spring + + from ui.editor_factory \ + import EditorFactory + + from ui.editor \ + import Editor + + from ui.toolkit \ + import toolkit + + from ui.undo \ + import UndoHistory, AbstractUndoItem, UndoItem, ListUndoItem, \ + UndoHistoryUndoItem + + from ui.view_element \ + import ViewElement, ViewSubElement + + from ui.help_template \ + import help_template + + from ui.message \ + import message, error + + from ui.tree_node \ + import TreeNode, ObjectTreeNode, TreeNodeObject, MultiTreeNode + + from ui.editors \ + import ArrayEditor, BooleanEditor, ButtonEditor, CheckListEditor, \ + CodeEditor, ColorEditor, RGBColorEditor, \ + CompoundEditor, DirectoryEditor, EnumEditor, FileEditor, \ + FontEditor, ImageEnumEditor, InstanceEditor, \ + ListEditor, RangeEditor, TextEditor, TreeEditor, \ + TableEditor, TupleEditor, DropEditor, DNDEditor, CustomEditor + + from ui.editors \ + import ColorTrait, RGBColorTrait, \ + FontTrait, SetEditor, HTMLEditor, KeyBindingEditor, \ + ShellEditor, TitleEditor, ValueEditor, NullEditor + + +import ui.view_elements + +#------------------------------------------------------------------------------- +# Patch the main traits module with the correct definition 
for the ViewElements +# class: +#------------------------------------------------------------------------------- + +import has_traits as has_traits +has_traits.ViewElements = ui.view_elements.ViewElements + +#------------------------------------------------------------------------------- +# Patch the main traits module with the correct definition for the ViewElement +# and ViewSubElement class: +#------------------------------------------------------------------------------- + +has_traits.ViewElement = ui.view_element.ViewElement Added: trunk/matplotlib/lib/enthought/traits/category.py =================================================================== --- trunk/matplotlib/lib/enthought/traits/category.py (rev 0) +++ trunk/matplotlib/lib/enthought/traits/category.py 2007-11-08 00:24:57 UTC (rev 4156) @@ -0,0 +1,105 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2005, Enthought, Inc. +# All rights reserved. +# +# This software is provided without warranty under the terms of the BSD +# license included in enthought/LICENSE.txt and may be redistributed only +# under the conditions described in the aforementioned license. The license +# is also available online at http://www.enthought.com/licenses/BSD.txt +# Thanks for using Enthought open source! +# +# Author: David C. Morrill +# Date: 11/06/2004 +#------------------------------------------------------------------------------ +""" Adds a "category" capability to Traits-based classes, +similar to that provided by the Cocoa (Objective-C) environment for the +Macintosh. + +You can use categories to extend an existing HasTraits class, as an alternative +to subclassing. An advantage of categories over subclassing is that you can +access the added members on instances of the original class, without having to +change them to instances of a subclass. Unlike subclassing, categories do not +allow overriding trait attributes. 
+""" +#------------------------------------------------------------------------------- +# Imports: +#------------------------------------------------------------------------------- + +from has_traits \ + import MetaHasTraits, MetaHasTraitsObject, BaseTraits, ClassTraits, \ + PrefixTraits, ViewTraits + +#------------------------------------------------------------------------------- +# 'MetaCategory' class: +#------------------------------------------------------------------------------- + +class MetaCategory ( MetaHasTraits ): + + def __new__ ( cls, class_name, bases, class_dict ): + + # Make sure the correct usage is being applied: + if len( bases ) > 2: + raise TypeError, \ + "Correct usage is: class FooCategory(Category,Foo):" + + # Process any traits-related information in the class dictionary: + MetaCategoryObject( cls, class_name, bases, class_dict, True ) + + # Move all remaining items in our class dictionary to the base class's + # dictionary: + if len( bases ) == 2: + category_class = bases[1] + for name, value in class_dict.items(): + if not hasattr( category_class, name ): + setattr( category_class, name, value ) + del class_dict[ name ] + + # Finish building the class using the updated class dictionary: + return type.__new__( cls, class_name, bases, class_dict ) + +#------------------------------------------------------------------------------- +# 'MetaCategoryObject' class: +#------------------------------------------------------------------------------- + +class MetaCategoryObject ( MetaHasTraitsObject ): + + #--------------------------------------------------------------------------- + # Adds the traits meta-data to the class: + #--------------------------------------------------------------------------- + + def add_traits_meta_data ( self, bases, class_dict, base_traits, + class_traits, instance_traits, prefix_traits, + view_elements ): + if len( bases ) == 2: + # Update the class and each of the existing subclasses: + bases[1]._add_trait_category( base_traits, class_traits, + instance_traits, prefix_traits, view_elements ) + else: + MetaHasTraitsObject.add_traits_meta_data( self, bases, + class_dict, base_traits, class_traits, instance_traits, + prefix_traits, view_elements ) + +#------------------------------------------------------------------------------- +# 'Category' class: +#------------------------------------------------------------------------------- + +class Category ( object ): + """ Used for defining "category" extensions to existing classes. + + To define a class as a category, specify "Category," followed by the name + of the base class name in the base class list. + + The following example demonstrates defining a category:: + + from enthought.traits.api import HasTraits, Str, Category + + class Base(HasTraits): + x = Str("Base x") + y = Str("Base y") + + class BaseExtra(Category, Base): + z = Str("BaseExtra z") + """ + + __metaclass__ = MetaCategory + Added: trunk/matplotlib/lib/enthought/traits/core.py =================================================================== --- trunk/matplotlib/lib/enthought/traits/core.py (rev 0) +++ trunk/matplotlib/lib/enthought/traits/core.py 2007-11-08 00:24:57 UTC (rev 4156) @@ -0,0 +1,20 @@ +#------------------------------------------------------------------------------ +# +# Copyright (c) 2005, Enthought, Inc. +# All rights reserved. +# +# Written by: David C. 
Morrill +# +# Date: 12/06/2005 +# +#------------------------------------------------------------------------------ +""" Pseudo-package for all of the core symbols from Traits and TraitsUI. +""" +from enthought.traits.api \ + import * + +try: + from enthought.traits.ui.api \ + import * +except: + pass Added: trunk/matplotlib/lib/enthought/traits/core_traits.py =================================================================== --- trunk/matplotlib/lib/enthought/traits/core_traits.py (rev 0) +++ trunk/matplotlib/lib/enthought/traits/core_traits.py 2007-11-08 00:24:57 UTC (rev 4156) @@ -0,0 +1,84 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2005, Enthought, Inc. +# All rights reserved. +# +# This software is provided without warranty under the terms of the BSD +# license included in enthought/LICENSE.txt and may be redistributed only +# under the conditions described in the aforementioned license. The license +# is also available online at http://www.enthought.com/licenses/BSD.txt +# Thanks for using Enthought open source! +# +# Author: David C. Morrill +# Date: 11/26/2004 +# ------------------------------------------------------------------------------ +""" Adds all of the core traits to the Traits database. +""" +if __name__ == '__main__': + + from enthought.traits.api import Event, List, Dict, Any, Int, Long, Float, Str + from enthought.traits.api import Unicode, Complex, Bool, CInt, CLong, CFloat + from enthought.traits.api import CStr, CUnicode, CComplex, CBool, false, true + from enthought.traits.api import String, Password, File, Directory, Function + from enthought.traits.api import Method, Class, Module, Type, This, self, Python + from enthought.traits.api import ReadOnly, ListInt, ListFloat, ListStr + from enthought.traits.api import ListUnicode, ListComplex, ListBool + from enthought.traits.api import ListFunction, ListMethod, ListClass + from enthought.traits.api import ListInstance, ListThis, DictStrAny, DictStrStr + from enthought.traits.api import DictStrInt, DictStrLong, DictStrFloat + from enthought.traits.api import DictStrBool,DictStrList + from enthought.traits.api import tdb + + define = tdb.define + define( 'Event', Event ) + define( 'List', List ) + define( 'Dict', Dict ) + define( 'Any', Any ) + define( 'Int', Int ) + define( 'Long', Long ) + define( 'Float', Float ) + define( 'Str', Str ) + define( 'Unicode', Unicode ) + define( 'Complex', Complex ) + define( 'Bool', Bool ) + define( 'CInt', CInt ) + define( 'CLong', CLong ) + define( 'CFloat', CFloat ) + define( 'CStr', CStr ) + define( 'CUnicode', CUnicode ) + define( 'CComplex', CComplex ) + define( 'CBool', CBool ) + define( 'false', false ) + define( 'true', true ) + define( 'String', String ) + define( 'Password', Password ) + define( 'File', File ) + define( 'Directory', Directory ) +# define( 'Function', Function ) +# define( 'Method', Method ) +# define( 'Class', Class ) +# define( 'Module', Module ) + define( 'Type', Type ) + define( 'This', This ) +# define( 'self', self ) + define( 'Python', Python ) +## define( 'ReadOnly', ReadOnly ) <-- 'Undefined' doesn't have right + # semantics when persisted + define( 'ListInt', ListInt ) + define( 'ListFloat', ListFloat ) + define( 'ListStr', ListStr ) + define( 'ListUnicode', ListUnicode ) + define( 'ListComplex', ListComplex ) + define( 'ListBool', ListBool ) +# define( 'ListFunction', ListFunction ) +# define( 'ListMethod', ListMethod ) +# define( 'ListClass', ListClass ) +# define( 'ListInstance', 
ListInstance ) + define( 'ListThis', ListThis ) + define( 'DictStrAny', DictStrAny ) + define( 'DictStrStr', DictStrStr ) + define( 'DictStrInt', DictStrInt ) + define( 'DictStrLong', DictStrLong ) + define( 'DictStrFloat', DictStrFloat ) + define( 'DictStrBool', DictStrBool ) + define( 'DictStrList', DictStrList ) + Added: trunk/matplotlib/lib/enthought/traits/ctraits.c =================================================================== --- trunk/matplotlib/lib/enthought/traits/ctraits.c (rev 0) +++ trunk/matplotlib/lib/enthought/traits/ctraits.c 2007-11-08 00:24:57 UTC (rev 4156) @@ -0,0 +1,4518 @@ +/****************************************************************************** +* Copyright (c) 2005, Enthought, Inc. +* All rights reserved. +* +* This software is provided without warranty under the terms of the BSD +* license included in enthought/LICENSE.txt and may be redistributed only +* under the conditions described in the aforementioned license. The license +* is also available online at http://www.enthought.com/licenses/BSD.txt +* Thanks for using Enthought open source! +* +* Author: David C. Morrill +* Date: 06/15/2004 +* Description: C based implementation of the Traits package +******************************************************************************/ + +/*----------------------------------------------------------------------------- +| Includes: ++----------------------------------------------------------------------------*/ + +#include "Python.h" +#include "structmember.h" + +/*----------------------------------------------------------------------------- +| Constants: ++----------------------------------------------------------------------------*/ + +static PyObject * class_traits; /* == "__class_traits__" */ +static PyObject * editor_property; /* == "editor" */ +static PyObject * class_prefix; /* == "__prefix__" */ +static PyObject * empty_tuple; /* == () */ +static PyObject * undefined; /* Global 'undefined' value */ +static PyObject * TraitError; /* TraitError exception */ +static PyObject * DelegationError; /* DelegationError exception */ +static PyObject * TraitListObject; /* TraitListObject class */ +static PyObject * TraitDictObject; /* TraitDictObject class */ +static PyTypeObject * ctrait_type; /* Python-level CTrait type reference */ +static PyObject * is_callable; /* Marker for 'callable' value */ +static PyObject * _HasTraits_monitors; /* Object creation monitors. */ + +/*----------------------------------------------------------------------------- +| Macro definitions: ++----------------------------------------------------------------------------*/ + +/* The following macro is automatically defined in Python 2.4 and later: */ +#ifndef Py_VISIT +#define Py_VISIT(op) \ +do { \ + if (op) { \ + int vret = visit((PyObject *)(op), arg); \ + if (vret) return vret; \ + } \ +} while (0) +#endif + +/* The following macro is automatically defined in Python 2.4 and later: */ +#ifndef Py_CLEAR +#define Py_CLEAR(op) \ +do { \ + if (op) { \ + PyObject *tmp = (PyObject *)(op); \ + (op) = NULL; \ + Py_DECREF(tmp); \ + } \ +} while (0) +#endif + +#define DEFERRED_ADDRESS(ADDR) 0 +#define PyTrait_CheckExact(op) ((op)->ob_type == ctrait_type) + +#define PyHasTraits_Check(op) PyObject_TypeCheck(op, &has_traits_type) +#define PyHasTraits_CheckExact(op) ((op)->ob_type == &has_traits_type) + +/* Trait method related: */ + +#define TP_DESCR_GET(t) \ + (PyType_HasFeature(t, Py_TPFLAGS_HAVE_CLASS) ? 
(t)->tp_descr_get : NULL) +#define OFF(x) offsetof(trait_method_object, x) + +/* Field accessors: */ +#define trait_method_GET_NAME(meth) \ + (((trait_method_object *) meth)->tm_name) +#define trait_method_GET_FUNCTION(meth) \ + (((trait_method_object *) meth)->tm_func) +#define trait_method_GET_SELF(meth) \ + (((trait_method_object *) meth)->tm_self) +#define trait_method_GET_TRAITS(meth) \ + (((trait_method_object *) meth)->tm_traits) +#define trait_method_GET_CLASS(meth) \ + (((trait_method_object *) meth)->tm_class) + +/* Python version dependent macros: */ +#if ( (PY_MAJOR_VERSION == 2) && (PY_MINOR_VERSION < 3) ) +#define PyMODINIT_FUNC void +#define PyDoc_VAR(name) static char name[] +#define PyDoc_STRVAR(name,str) PyDoc_VAR(name) = PyDoc_STR(str) +#ifdef WITH_DOC_STRINGS +#define PyDoc_STR(str) str +#else +#define PyDoc_STR(str) "" +#endif +#endif +#if (PY_VERSION_HEX < 0x02050000) +typedef int Py_ssize_t; +#endif + +/*----------------------------------------------------------------------------- +| Forward declarations: ++----------------------------------------------------------------------------*/ + +static PyTypeObject trait_type; +static PyTypeObject trait_method_type; + +/*----------------------------------------------------------------------------- +| 'ctraits' module doc string: ++----------------------------------------------------------------------------*/ + +PyDoc_STRVAR( ctraits__doc__, +"The ctraits module defines the CHasTraits and CTrait C extension types that\n" +"define the core performance oriented portions of the Traits package." ); + +/*----------------------------------------------------------------------------- +| HasTraits behavior modification flags: ++----------------------------------------------------------------------------*/ + +/* Object has been initialized: */ +#define HASTRAITS_INITED 0x00000001 + +/* Do not send notifications when a trait changes value: */ +#define HASTRAITS_NO_NOTIFY 0x00000002 + +/* Requests that no event notifications be sent when this object is assigned to + a trait: */ +#define HASTRAITS_VETO_NOTIFY 0x00000004 + +/*----------------------------------------------------------------------------- +| 'CHasTraits' instance definition: +| +| Note: traits are normally stored in the type's dictionary, but are added to +| the instance's traits dictionary 'trait_dict' when the traits are defined +| dynamically or 'on_trait_change' is called on an instance of the trait. +| +| All 'anytrait_changed' notification handlers are stored in the instance's +| 'notifiers' list. 
++----------------------------------------------------------------------------*/ + +typedef struct { + PyObject_HEAD /* Standard Python object header */ + PyDictObject * ctrait_dict; /* Class traits dictionary */ + PyDictObject * itrait_dict; /* Instance traits dictionary */ + PyListObject * notifiers; /* List of 'any trait changed' notification + handlers */ + int flags; /* Behavior modification flags */ + PyObject * obj_dict; /* Object attribute dictionary ('__dict__') */ + /* NOTE: 'obj_dict' field MUST be last field */ +} has_traits_object; + +static int call_notifiers ( PyListObject *, PyListObject *, + has_traits_object *, PyObject *, PyObject *, + PyObject * new_value ); + +/*----------------------------------------------------------------------------- +| 'CTrait' flag values: ++----------------------------------------------------------------------------*/ + +/* The trait is a Property: */ +#define TRAIT_PROPERTY 0x00000001 + +/* Should the delegate be modified (or the original object)? */ +#define TRAIT_MODIFY_DELEGATE 0x00000002 + +/* Should a simple object identity test be performed (or a rich compare)? */ +#define TRAIT_OBJECT_IDENTITY 0x00000004 + +/*----------------------------------------------------------------------------- +| 'CTrait' instance definition: ++----------------------------------------------------------------------------*/ + +typedef struct _trait_object a_trait_object; +typedef PyObject * (*trait_getattr)( a_trait_object *, has_traits_object *, + PyObject * ); +typedef int (*trait_setattr)( a_trait_object *, a_trait_object *, + has_traits_object *, PyObject *, PyObject * ); +typedef int (*trait_post_setattr)( a_trait_object *, has_traits_object *, + PyObject *, PyObject * ); +typedef PyObject * (*trait_validate)( a_trait_object *, has_traits_object *, + PyObject *, PyObject * ); +typedef PyObject * (*delegate_attr_name_func)( a_trait_object *, + has_traits_object *, PyObject * ); + +typedef struct _trait_object { + PyObject_HEAD /* Standard Python object header */ + int flags; /* Flag bits */ + trait_getattr getattr; /* Get trait value handler */ + trait_setattr setattr; /* Set trait value handler */ + trait_post_setattr post_setattr; /* Optional post 'setattr' handler */ + PyObject * py_post_setattr; /* Python-based post 'setattr' hndlr */ + trait_validate validate; /* Validate trait value handler */ + P... [truncated message content] |
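For context on what the newly bundled package provides: the change to examples/rc_traits.py above means matplotlib code now imports the vendored copy as a top-level enthought package. The following is a minimal sketch, assuming the bundled enthought.traits-2.6b1 exposes the stock Traits 2 API shown in api.py and category.py above; the Point and PointLabel names are hypothetical and are not part of revision 4156::

    # Minimal sketch, assuming the bundled enthought.traits-2.6b1 API.
    # Point and PointLabel are hypothetical names, not part of this commit.
    import enthought.traits.api as traits   # was matplotlib.enthought.traits.api

    class Point(traits.HasTraits):
        # type-checked attributes, as described in the traits README above
        x = traits.Float(0.0)
        y = traits.Float(0.0)

    p = Point()
    p.x = 1.5              # accepted and validated as a float
    try:
        p.y = 'oops'       # rejected by the trait handler
    except traits.TraitError:
        pass

    # Category extension, mirroring the Base/BaseExtra example in category.py:
    # the added trait becomes available on the original class without subclassing.
    class PointLabel(traits.Category, Point):
        label = traits.Str('unnamed')

    print Point().label    # -> 'unnamed'

Because the package is only installed to site-packages when enthought is not already present, the same import works whether the bundled copy or a pre-existing ETS installation is picked up.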
From: <ds...@us...> - 2007-11-08 23:25:53
|
Revision: 4167 http://matplotlib.svn.sourceforge.net/matplotlib/?rev=4167&view=rev Author: dsdale Date: 2007-11-08 15:25:44 -0800 (Thu, 08 Nov 2007) Log Message: ----------- move pyparsing from lib/matplotlib/ to lib/ install pyparsing only if not already available Modified Paths: -------------- trunk/matplotlib/CHANGELOG trunk/matplotlib/lib/matplotlib/fontconfig_pattern.py trunk/matplotlib/lib/matplotlib/mathtext.py trunk/matplotlib/setup.py Added Paths: ----------- trunk/matplotlib/lib/pyparsing.py Removed Paths: ------------- trunk/matplotlib/lib/matplotlib/pyparsing.py Modified: trunk/matplotlib/CHANGELOG =================================================================== --- trunk/matplotlib/CHANGELOG 2007-11-08 22:29:29 UTC (rev 4166) +++ trunk/matplotlib/CHANGELOG 2007-11-08 23:25:44 UTC (rev 4167) @@ -1,3 +1,10 @@ +2007-11-08 If available, use existing pyparsing installation - DSD + +2007-11-07 Removed old enthought.traits from lib/matplotlib, added + Gael Varoquaux's enthought.traits-2.6b1, which is stripped + of setuptools. The package is installed to site-packages + if not already available - DSD + 2007-11-02 Commited Phil Thompson's patch 1599876, fixes to Qt4Agg backend and qt4 blitting demo - DSD Modified: trunk/matplotlib/lib/matplotlib/fontconfig_pattern.py =================================================================== --- trunk/matplotlib/lib/matplotlib/fontconfig_pattern.py 2007-11-08 22:29:29 UTC (rev 4166) +++ trunk/matplotlib/lib/matplotlib/fontconfig_pattern.py 2007-11-08 23:25:44 UTC (rev 4167) @@ -18,8 +18,8 @@ License : matplotlib license (PSF compatible) """ import re -from matplotlib.pyparsing import Literal, OneOrMore, ZeroOrMore, \ - Optional, Regex, StringEnd, ParseException, Suppress +from pyparsing import Literal, OneOrMore, ZeroOrMore, Optional, Regex, \ + StringEnd, ParseException, Suppress family_punc = r'\\\-:,' family_unescape = re.compile(r'\\([%s])' % family_punc).sub Modified: trunk/matplotlib/lib/matplotlib/mathtext.py =================================================================== --- trunk/matplotlib/lib/matplotlib/mathtext.py 2007-11-08 22:29:29 UTC (rev 4166) +++ trunk/matplotlib/lib/matplotlib/mathtext.py 2007-11-08 23:25:44 UTC (rev 4167) @@ -134,11 +134,11 @@ from numpy import inf, isinf from matplotlib import verbose -from matplotlib.pyparsing import Literal, Word, OneOrMore, ZeroOrMore, \ - Combine, Group, Optional, Forward, NotAny, alphas, nums, alphanums, \ - StringStart, StringEnd, ParseFatalException, FollowedBy, Regex, \ - operatorPrecedence, opAssoc, ParseResults, Or, Suppress, oneOf, \ - ParseException, MatchFirst, NoMatch, Empty +from pyparsing import Literal, Word, OneOrMore, ZeroOrMore, Combine, Group, \ + Optional, Forward, NotAny, alphas, nums, alphanums, StringStart, \ + StringEnd, ParseFatalException, FollowedBy, Regex, operatorPrecedence, \ + opAssoc, ParseResults, Or, Suppress, oneOf, ParseException, MatchFirst, \ + NoMatch, Empty from matplotlib.afm import AFM from matplotlib.cbook import enumerate, iterable, Bunch, get_realpath_and_stat, \ Deleted: trunk/matplotlib/lib/matplotlib/pyparsing.py =================================================================== --- trunk/matplotlib/lib/matplotlib/pyparsing.py 2007-11-08 22:29:29 UTC (rev 4166) +++ trunk/matplotlib/lib/matplotlib/pyparsing.py 2007-11-08 23:25:44 UTC (rev 4167) @@ -1,3086 +0,0 @@ -# module pyparsing.py -# -# Copyright (c) 2003-2007 Paul T. 
McGuire -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# -#from __future__ import generators - -__doc__ = \ -""" -pyparsing module - Classes and methods to define and execute parsing grammars - -The pyparsing module is an alternative approach to creating and executing simple grammars, -vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you -don't need to learn a new syntax for defining grammars or matching expressions - the parsing module -provides a library of classes that you use to construct the grammar directly in Python. - -Here is a program to parse "Hello, World!" (or any greeting of the form "<salutation>, <addressee>!"):: - - from pyparsing import Word, alphas - - # define grammar of a greeting - greet = Word( alphas ) + "," + Word( alphas ) + "!" - - hello = "Hello, World!" - print hello, "->", greet.parseString( hello ) - -The program outputs the following:: - - Hello, World! -> ['Hello', ',', 'World', '!'] - -The Python representation of the grammar is quite readable, owing to the self-explanatory -class names, and the use of '+', '|' and '^' operators. - -The parsed results returned from parseString() can be accessed as a nested list, a dictionary, or an -object with named attributes. - -The pyparsing module handles some of the problems that are typically vexing when writing text parsers: - - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.) - - quoted strings - - embedded comments -""" -__version__ = "1.4.6" -__versionTime__ = "11 April 2007 16:41" -__author__ = "Paul McGuire <pt...@us...>" - -import string -from weakref import ref as wkref -import copy,sys -import warnings -import re -import sre_constants -import xml.sax.saxutils -#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) ) - -def _ustr(obj): - """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries - str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It - then < returns the unicode object | encodes it with the default encoding | ... >. - """ - try: - # If this works, then _ustr(obj) has the same behaviour as str(obj), so - # it won't break any existing code. - return str(obj) - - except UnicodeEncodeError, e: - # The Python docs (http://docs.python.org/ref/customization.html#l2h-182) - # state that "The return value must be a string object". 
However, does a - # unicode object (being a subclass of basestring) count as a "string - # object"? - # If so, then return a unicode object: - return unicode(obj) - # Else encode it... but how? There are many choices... :) - # Replace unprintables with escape codes? - #return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors') - # Replace unprintables with question marks? - #return unicode(obj).encode(sys.getdefaultencoding(), 'replace') - # ... - -def _str2dict(strg): - return dict( [(c,0) for c in strg] ) - #~ return set( [c for c in strg] ) - -class _Constants(object): - pass - -alphas = string.lowercase + string.uppercase -nums = string.digits -hexnums = nums + "ABCDEFabcdef" -alphanums = alphas + nums - -class ParseBaseException(Exception): - """base exception class for all parsing runtime exceptions""" - __slots__ = ( "loc","msg","pstr","parserElement" ) - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__( self, pstr, loc=0, msg=None, elem=None ): - self.loc = loc - if msg is None: - self.msg = pstr - self.pstr = "" - else: - self.msg = msg - self.pstr = pstr - self.parserElement = elem - - def __getattr__( self, aname ): - """supported attributes by name are: - - lineno - returns the line number of the exception text - - col - returns the column number of the exception text - - line - returns the line containing the exception text - """ - if( aname == "lineno" ): - return lineno( self.loc, self.pstr ) - elif( aname in ("col", "column") ): - return col( self.loc, self.pstr ) - elif( aname == "line" ): - return line( self.loc, self.pstr ) - else: - raise AttributeError, aname - - def __str__( self ): - return "%s (at char %d), (line:%d, col:%d)" % \ - ( self.msg, self.loc, self.lineno, self.column ) - def __repr__( self ): - return _ustr(self) - def markInputline( self, markerString = ">!<" ): - """Extracts the exception line from the input string, and marks - the location of the exception with a special symbol. - """ - line_str = self.line - line_column = self.column - 1 - if markerString: - line_str = "".join( [line_str[:line_column], - markerString, line_str[line_column:]]) - return line_str.strip() - -class ParseException(ParseBaseException): - """exception thrown when parse expressions don't match class; - supported attributes by name are: - - lineno - returns the line number of the exception text - - col - returns the column number of the exception text - - line - returns the line containing the exception text - """ - pass - -class ParseFatalException(ParseBaseException): - """user-throwable exception thrown when inconsistent parse content - is found; stops all parsing immediately""" - pass - -#~ class ReparseException(ParseBaseException): - #~ """Experimental class - parse actions can raise this exception to cause - #~ pyparsing to reparse the input string: - #~ - with a modified input string, and/or - #~ - with a modified start location - #~ Set the values of the ReparseException in the constructor, and raise the - #~ exception in a parse action to cause pyparsing to use the new string/location. - #~ Setting the values as None causes no change to be made. 
- #~ """ - #~ def __init_( self, newstring, restartLoc ): - #~ self.newParseText = newstring - #~ self.reparseLoc = restartLoc - -class RecursiveGrammarException(Exception): - """exception thrown by validate() if the grammar could be improperly recursive""" - def __init__( self, parseElementList ): - self.parseElementTrace = parseElementList - - def __str__( self ): - return "RecursiveGrammarException: %s" % self.parseElementTrace - -class _ParseResultsWithOffset(object): - def __init__(self,p1,p2): - self.tup = (p1,p2) - def __getitem__(self,i): - return self.tup[i] - def __repr__(self): - return repr(self.tup) - -class ParseResults(object): - """Structured parse results, to provide multiple means of access to the parsed data: - - as a list (len(results)) - - by list index (results[0], results[1], etc.) - - by attribute (results.<resultsName>) - """ - __slots__ = ( "__toklist", "__tokdict", "__doinit", "__name", "__parent", "__accumNames", "__weakref__" ) - def __new__(cls, toklist, name=None, asList=True, modal=True ): - if isinstance(toklist, cls): - return toklist - retobj = object.__new__(cls) - retobj.__doinit = True - return retobj - - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__( self, toklist, name=None, asList=True, modal=True ): - if self.__doinit: - self.__doinit = False - self.__name = None - self.__parent = None - self.__accumNames = {} - if isinstance(toklist, list): - self.__toklist = toklist[:] - else: - self.__toklist = [toklist] - self.__tokdict = dict() - - # this line is related to debugging the asXML bug - #~ asList = False - - if name: - if not modal: - self.__accumNames[name] = 0 - if isinstance(name,int): - name = _ustr(name) # will always return a str, but use _ustr for consistency - self.__name = name - if not toklist in (None,'',[]): - if isinstance(toklist,basestring): - toklist = [ toklist ] - if asList: - if isinstance(toklist,ParseResults): - self[name] = _ParseResultsWithOffset(toklist.copy(),-1) - else: - self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),-1) - self[name].__name = name - else: - try: - self[name] = toklist[0] - except (KeyError,TypeError): - self[name] = toklist - - def __getitem__( self, i ): - if isinstance( i, (int,slice) ): - return self.__toklist[i] - else: - if i not in self.__accumNames: - return self.__tokdict[i][-1][0] - else: - return ParseResults([ v[0] for v in self.__tokdict[i] ]) - - def __setitem__( self, k, v ): - if isinstance(v,_ParseResultsWithOffset): - self.__tokdict[k] = self.__tokdict.get(k,list()) + [v] - sub = v[0] - elif isinstance(k,int): - self.__toklist[k] = v - sub = v - else: - self.__tokdict[k] = self.__tokdict.get(k,list()) + [(v,0)] - sub = v - if isinstance(sub,ParseResults): - sub.__parent = wkref(self) - - def __delitem__( self, i ): - if isinstance(i,(int,slice)): - del self.__toklist[i] - else: - del self._tokdict[i] - - def __contains__( self, k ): - return self.__tokdict.has_key(k) - - def __len__( self ): return len( self.__toklist ) - def __nonzero__( self ): return len( self.__toklist ) > 0 - def __iter__( self ): return iter( self.__toklist ) - def keys( self ): - """Returns all named result keys.""" - return self.__tokdict.keys() - - def items( self ): - """Returns all named result keys and values as a list of tuples.""" - return [(k,self[k]) for k in self.__tokdict.keys()] - - def values( self ): - """Returns all named result values.""" - return [ v[-1][0] for v in self.__tokdict.values() ] - - def 
__getattr__( self, name ): - if name not in self.__slots__: - if self.__tokdict.has_key( name ): - if name not in self.__accumNames: - return self.__tokdict[name][-1][0] - else: - return ParseResults([ v[0] for v in self.__tokdict[name] ]) - else: - return "" - return None - - def __add__( self, other ): - ret = self.copy() - ret += other - return ret - - def __iadd__( self, other ): - if other.__tokdict: - offset = len(self.__toklist) - addoffset = ( lambda a: (a<0 and offset) or (a+offset) ) - otheritems = other.__tokdict.items() - otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) ) - for (k,vlist) in otheritems for v in vlist] - for k,v in otherdictitems: - self[k] = v - if isinstance(v[0],ParseResults): - v[0].__parent = wkref(self) - self.__toklist += other.__toklist - self.__accumNames.update( other.__accumNames ) - del other - return self - - def __repr__( self ): - return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) ) - - def __str__( self ): - out = "[" - sep = "" - for i in self.__toklist: - if isinstance(i, ParseResults): - out += sep + _ustr(i) - else: - out += sep + repr(i) - sep = ", " - out += "]" - return out - - def _asStringList( self, sep='' ): - out = [] - for item in self.__toklist: - if out and sep: - out.append(sep) - if isinstance( item, ParseResults ): - out += item._asStringList() - else: - out.append( _ustr(item) ) - return out - - def asList( self ): - """Returns the parse results as a nested list of matching tokens, all converted to strings.""" - out = [] - for res in self.__toklist: - if isinstance(res,ParseResults): - out.append( res.asList() ) - else: - out.append( res ) - return out - - def asDict( self ): - """Returns the named parse results as dictionary.""" - return dict( self.items() ) - - def copy( self ): - """Returns a new copy of a ParseResults object.""" - ret = ParseResults( self.__toklist ) - ret.__tokdict = self.__tokdict.copy() - ret.__parent = self.__parent - ret.__accumNames.update( self.__accumNames ) - ret.__name = self.__name - return ret - - def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ): - """Returns the parse results as XML. 
Tags are created for tokens and lists that have defined results names.""" - nl = "\n" - out = [] - namedItems = dict( [ (v[1],k) for (k,vlist) in self.__tokdict.items() - for v in vlist ] ) - nextLevelIndent = indent + " " - - # collapse out indents if formatting is not desired - if not formatted: - indent = "" - nextLevelIndent = "" - nl = "" - - selfTag = None - if doctag is not None: - selfTag = doctag - else: - if self.__name: - selfTag = self.__name - - if not selfTag: - if namedItemsOnly: - return "" - else: - selfTag = "ITEM" - - out += [ nl, indent, "<", selfTag, ">" ] - - worklist = self.__toklist - for i,res in enumerate(worklist): - if isinstance(res,ParseResults): - if i in namedItems: - out += [ res.asXML(namedItems[i], - namedItemsOnly and doctag is None, - nextLevelIndent, - formatted)] - else: - out += [ res.asXML(None, - namedItemsOnly and doctag is None, - nextLevelIndent, - formatted)] - else: - # individual token, see if there is a name for it - resTag = None - if i in namedItems: - resTag = namedItems[i] - if not resTag: - if namedItemsOnly: - continue - else: - resTag = "ITEM" - xmlBodyText = xml.sax.saxutils.escape(_ustr(res)) - out += [ nl, nextLevelIndent, "<", resTag, ">", - xmlBodyText, - "</", resTag, ">" ] - - out += [ nl, indent, "</", selfTag, ">" ] - return "".join(out) - - def __lookup(self,sub): - for k,vlist in self.__tokdict.items(): - for v,loc in vlist: - if sub is v: - return k - return None - - def getName(self): - """Returns the results name for this token expression.""" - if self.__name: - return self.__name - elif self.__parent: - par = self.__parent() - if par: - return par.__lookup(self) - else: - return None - elif (len(self) == 1 and - len(self.__tokdict) == 1 and - self.__tokdict.values()[0][0][1] in (0,-1)): - return self.__tokdict.keys()[0] - else: - return None - - def dump(self,indent='',depth=0): - """Diagnostic method for listing out the contents of a ParseResults. - Accepts an optional indent argument so that this string can be embedded - in a nested display of other data.""" - out = [] - out.append( indent+_ustr(self.asList()) ) - keys = self.items() - keys.sort() - for k,v in keys: - if out: - out.append('\n') - out.append( "%s%s- %s: " % (indent,(' '*depth), k) ) - if isinstance(v,ParseResults): - if v.keys(): - #~ out.append('\n') - out.append( v.dump(indent,depth+1) ) - #~ out.append('\n') - else: - out.append(_ustr(v)) - else: - out.append(_ustr(v)) - #~ out.append('\n') - return "".join(out) - - # add support for pickle protocol - def __getstate__(self): - return ( self.__toklist, - ( self.__tokdict.copy(), - self.__parent is not None and self.__parent() or None, - self.__accumNames, - self.__name ) ) - - def __setstate__(self,state): - self.__toklist = state[0] - self.__tokdict, \ - par, \ - inAccumNames, \ - self.__name = state[1] - self.__accumNames = {} - self.__accumNames.update(inAccumNames) - if par is not None: - self.__parent = wkref(par) - else: - self.__parent = None - - -def col (loc,strg): - """Returns current column within a string, counting newlines as line separators. - The first column is number 1. - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information - on parsing strings containing <TAB>s, and suggested methods to maintain a - consistent view of the parsed string, the parse location, and line and column - positions within the parsed string. 
- """ - return (loc<len(strg) and strg[loc] == '\n') and 1 or loc - strg.rfind("\n", 0, loc) - -def lineno(loc,strg): - """Returns current line number within a string, counting newlines as line separators. - The first line is number 1. - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information - on parsing strings containing <TAB>s, and suggested methods to maintain a - consistent view of the parsed string, the parse location, and line and column - positions within the parsed string. - """ - return strg.count("\n",0,loc) + 1 - -def line( loc, strg ): - """Returns the line of text containing loc within a string, counting newlines as line separators. - """ - lastCR = strg.rfind("\n", 0, loc) - nextCR = strg.find("\n", loc) - if nextCR > 0: - return strg[lastCR+1:nextCR] - else: - return strg[lastCR+1:] - -def _defaultStartDebugAction( instring, loc, expr ): - print "Match",_ustr(expr),"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ) - -def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ): - print "Matched",_ustr(expr),"->",toks.asList() - -def _defaultExceptionDebugAction( instring, loc, expr, exc ): - print "Exception raised:", _ustr(exc) - -def nullDebugAction(*args): - """'Do-nothing' debug action, to suppress debugging output during parsing.""" - pass - -class ParserElement(object): - """Abstract base level parser element class.""" - DEFAULT_WHITE_CHARS = " \n\t\r" - - def setDefaultWhitespaceChars( chars ): - """Overrides the default whitespace chars - """ - ParserElement.DEFAULT_WHITE_CHARS = chars - setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars) - - def __init__( self, savelist=False ): - self.parseAction = list() - self.failAction = None - #~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall - self.strRepr = None - self.resultsName = None - self.saveAsList = savelist - self.skipWhitespace = True - self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS - self.copyDefaultWhiteChars = True - self.mayReturnEmpty = False # used when checking for left-recursion - self.keepTabs = False - self.ignoreExprs = list() - self.debug = False - self.streamlined = False - self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index - self.errmsg = "" - self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all) - self.debugActions = ( None, None, None ) #custom debug actions - self.re = None - self.callPreparse = True # used to avoid redundant calls to preParse - self.callDuringTry = False - - def copy( self ): - """Make a copy of this ParserElement. Useful for defining different parse actions - for the same parsing pattern, using copies of the original parse element.""" - cpy = copy.copy( self ) - cpy.parseAction = self.parseAction[:] - cpy.ignoreExprs = self.ignoreExprs[:] - if self.copyDefaultWhiteChars: - cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS - return cpy - - def setName( self, name ): - """Define name for this expression, for use in debugging.""" - self.name = name - self.errmsg = "Expected " + self.name - return self - - def setResultsName( self, name, listAllMatches=False ): - """Define name for referencing matching tokens as a nested attribute - of the returned parse results. 
- NOTE: this returns a *copy* of the original ParserElement object; - this is so that the client can define a basic element, such as an - integer, and reference it in multiple places with different names. - """ - newself = self.copy() - newself.resultsName = name - newself.modalResults = not listAllMatches - return newself - - def normalizeParseActionArgs( f ): - """Internal method used to decorate parse actions that take fewer than 3 arguments, - so that all parse actions can be called as f(s,l,t).""" - STAR_ARGS = 4 - - try: - restore = None - if isinstance(f,type): - restore = f - f = f.__init__ - if f.func_code.co_flags & STAR_ARGS: - return f - numargs = f.func_code.co_argcount - if hasattr(f,"im_self"): - numargs -= 1 - if restore: - f = restore - except AttributeError: - try: - # not a function, must be a callable object, get info from the - # im_func binding of its bound __call__ method - if f.__call__.im_func.func_code.co_flags & STAR_ARGS: - return f - numargs = f.__call__.im_func.func_code.co_argcount - if hasattr(f.__call__,"im_self"): - numargs -= 1 - except AttributeError: - # not a bound method, get info directly from __call__ method - if f.__call__.func_code.co_flags & STAR_ARGS: - return f - numargs = f.__call__.func_code.co_argcount - if hasattr(f.__call__,"im_self"): - numargs -= 1 - - #~ print "adding function %s with %d args" % (f.func_name,numargs) - if numargs == 3: - return f - else: - if numargs == 2: - def tmp(s,l,t): - return f(l,t) - elif numargs == 1: - def tmp(s,l,t): - return f(t) - else: #~ numargs == 0: - def tmp(s,l,t): - return f() - try: - tmp.__name__ = f.__name__ - except AttributeError: - # no need for special handling if attribute doesnt exist - pass - try: - tmp.__doc__ = f.__doc__ - except AttributeError: - # no need for special handling if attribute doesnt exist - pass - try: - tmp.__dict__.update(f.__dict__) - except AttributeError: - # no need for special handling if attribute doesnt exist - pass - return tmp - normalizeParseActionArgs = staticmethod(normalizeParseActionArgs) - - def setParseAction( self, *fns, **kwargs ): - """Define action to perform when successfully matching parse element definition. - Parse action fn is a callable method with 0-3 arguments, called as fn(s,loc,toks), - fn(loc,toks), fn(toks), or just fn(), where: - - s = the original string being parsed (see note below) - - loc = the location of the matching substring - - toks = a list of the matched tokens, packaged as a ParseResults object - If the functions in fns modify the tokens, they can return them as the return - value from fn, and the modified list of tokens will replace the original. - Otherwise, fn does not need to return any value. - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See L{I{parseString}<parseString>} for more information - on parsing strings containing <TAB>s, and suggested methods to maintain a - consistent view of the parsed string, the parse location, and line and column - positions within the parsed string. - """ - self.parseAction = map(self.normalizeParseActionArgs, list(fns)) - self.callDuringTry = ("callDuringTry" in kwargs and kwargs["callDuringTry"]) - return self - - def addParseAction( self, *fns, **kwargs ): - """Add parse action to expression's list of parse actions. 
See L{I{setParseAction}<setParseAction>}.""" - self.parseAction += map(self.normalizeParseActionArgs, list(fns)) - self.callDuringTry = self.callDuringTry or ("callDuringTry" in kwargs and kwargs["callDuringTry"]) - return self - - def setFailAction( self, fn ): - """Define action to perform if parsing fails at this expression. - Fail acton fn is a callable function that takes the arguments - fn(s,loc,expr,err) where: - - s = string being parsed - - loc = location where expression match was attempted and failed - - expr = the parse expression that failed - - err = the exception thrown - The function returns no value. It may throw ParseFatalException - if it is desired to stop parsing immediately.""" - self.failAction = fn - return self - - def skipIgnorables( self, instring, loc ): - exprsFound = True - while exprsFound: - exprsFound = False - for e in self.ignoreExprs: - try: - while 1: - loc,dummy = e._parse( instring, loc ) - exprsFound = True - except ParseException: - pass - return loc - - def preParse( self, instring, loc ): - if self.ignoreExprs: - loc = self.skipIgnorables( instring, loc ) - - if self.skipWhitespace: - wt = self.whiteChars - instrlen = len(instring) - while loc < instrlen and instring[loc] in wt: - loc += 1 - - return loc - - def parseImpl( self, instring, loc, doActions=True ): - return loc, [] - - def postParse( self, instring, loc, tokenlist ): - return tokenlist - - #~ @profile - def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ): - debugging = ( self.debug ) #and doActions ) - - if debugging or self.failAction: - #~ print "Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ) - if (self.debugActions[0] ): - self.debugActions[0]( instring, loc, self ) - if callPreParse and self.callPreparse: - preloc = self.preParse( instring, loc ) - else: - preloc = loc - tokensStart = loc - try: - try: - loc,tokens = self.parseImpl( instring, preloc, doActions ) - except IndexError: - raise ParseException( instring, len(instring), self.errmsg, self ) - except ParseException, err: - #~ print "Exception raised:", err - if self.debugActions[2]: - self.debugActions[2]( instring, tokensStart, self, err ) - if self.failAction: - self.failAction( instring, tokensStart, self, err ) - raise - else: - if callPreParse: - preloc = self.preParse( instring, loc ) - else: - preloc = loc - tokensStart = loc - if self.mayIndexError or loc >= len(instring): - try: - loc,tokens = self.parseImpl( instring, preloc, doActions ) - except IndexError: - raise ParseException( instring, len(instring), self.errmsg, self ) - else: - loc,tokens = self.parseImpl( instring, preloc, doActions ) - - tokens = self.postParse( instring, loc, tokens ) - - retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults ) - if self.parseAction and (doActions or self.callDuringTry): - if debugging: - try: - for fn in self.parseAction: - tokens = fn( instring, tokensStart, retTokens ) - if tokens is not None: - retTokens = ParseResults( tokens, - self.resultsName, - asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), - modal=self.modalResults ) - except ParseException, err: - #~ print "Exception raised in user parse action:", err - if (self.debugActions[2] ): - self.debugActions[2]( instring, tokensStart, self, err ) - raise - else: - for fn in self.parseAction: - tokens = fn( instring, tokensStart, retTokens ) - if tokens is not None: - retTokens = ParseResults( tokens, - self.resultsName, - asList=self.saveAsList 
and isinstance(tokens,(ParseResults,list)), - modal=self.modalResults ) - - if debugging: - #~ print "Matched",self,"->",retTokens.asList() - if (self.debugActions[1] ): - self.debugActions[1]( instring, tokensStart, loc, self, retTokens ) - - return loc, retTokens - - def tryParse( self, instring, loc ): - return self._parse( instring, loc, doActions=False )[0] - - # this method gets repeatedly called during backtracking with the same arguments - - # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression - def _parseCache( self, instring, loc, doActions=True, callPreParse=True ): - #if doActions and self.parseAction: - # return self._parseNoCache( instring, loc, doActions, callPreParse ) - lookup = (self,instring,loc,callPreParse,doActions) - if lookup in ParserElement._exprArgCache: - value = ParserElement._exprArgCache[ lookup ] - if isinstance(value,Exception): - if isinstance(value,ParseBaseException): - value.loc = loc - raise value - return value - else: - try: - ParserElement._exprArgCache[ lookup ] = \ - value = self._parseNoCache( instring, loc, doActions, callPreParse ) - return value - except ParseBaseException, pe: - ParserElement._exprArgCache[ lookup ] = pe - raise - - _parse = _parseNoCache - - # argument cache for optimizing repeated calls when backtracking through recursive expressions - _exprArgCache = {} - def resetCache(): - ParserElement._exprArgCache.clear() - resetCache = staticmethod(resetCache) - - _packratEnabled = False - def enablePackrat(): - """Enables "packrat" parsing, which adds memoizing to the parsing logic. - Repeated parse attempts at the same string location (which happens - often in many complex grammars) can immediately return a cached value, - instead of re-executing parsing/validating code. Memoizing is done of - both valid results and parsing exceptions. - - This speedup may break existing programs that use parse actions that - have side-effects. For this reason, packrat parsing is disabled when - you first import pyparsing. To activate the packrat feature, your - program must call the class method ParserElement.enablePackrat(). If - your program uses psyco to "compile as you go", you must call - enablePackrat before calling psyco.full(). If you do not do this, - Python will crash. For best results, call enablePackrat() immediately - after importing pyparsing. - """ - if not ParserElement._packratEnabled: - ParserElement._packratEnabled = True - ParserElement._parse = ParserElement._parseCache - enablePackrat = staticmethod(enablePackrat) - - def parseString( self, instring ): - """Execute the parse expression with the given string. - This is the main interface to the client code, once the complete - expression has been built. - - Note: parseString implicitly calls expandtabs() on the input string, - in order to report proper column numbers in parse actions. 
- If the input string contains tabs and - the grammar uses parse actions that use the loc argument to index into the - string being parsed, you can ensure you have a consistent view of the input - string by: - - calling parseWithTabs on your grammar before calling parseString - (see L{I{parseWithTabs}<parseWithTabs>}) - - define your parse action using the full (s,loc,toks) signature, and - reference the input string using the parse action's s argument - - explictly expand the tabs in your input string before calling - parseString - """ - ParserElement.resetCache() - if not self.streamlined: - self.streamline() - #~ self.saveAsList = True - for e in self.ignoreExprs: - e.streamline() - if self.keepTabs: - loc, tokens = self._parse( instring, 0 ) - else: - loc, tokens = self._parse( instring.expandtabs(), 0 ) - return tokens - - def scanString( self, instring, maxMatches=sys.maxint ): - """Scan the input string for expression matches. Each match will return the - matching tokens, start location, and end location. May be called with optional - maxMatches argument, to clip scanning after 'n' matches are found. - - Note that the start and end locations are reported relative to the string - being parsed. See L{I{parseString}<parseString>} for more information on parsing - strings with embedded tabs.""" - if not self.streamlined: - self.streamline() - for e in self.ignoreExprs: - e.streamline() - - if not self.keepTabs: - instring = _ustr(instring).expandtabs() - instrlen = len(instring) - loc = 0 - preparseFn = self.preParse - parseFn = self._parse - ParserElement.resetCache() - matches = 0 - while loc <= instrlen and matches < maxMatches: - try: - preloc = preparseFn( instring, loc ) - nextLoc,tokens = parseFn( instring, preloc, callPreParse=False ) - except ParseException: - loc = preloc+1 - else: - matches += 1 - yield tokens, preloc, nextLoc - loc = nextLoc - - def transformString( self, instring ): - """Extension to scanString, to modify matching text with modified tokens that may - be returned from a parse action. To use transformString, define a grammar and - attach a parse action to it that modifies the returned token list. - Invoking transformString() on a target string will then scan for matches, - and replace the matched text patterns according to the logic in the parse - action. transformString() returns the resulting transformed string.""" - out = [] - lastE = 0 - # force preservation of <TAB>s, to minimize unwanted transformation of string, and to - # keep string locs straight between transformString and scanString - self.keepTabs = True - for t,s,e in self.scanString( instring ): - out.append( instring[lastE:s] ) - if t: - if isinstance(t,ParseResults): - out += t.asList() - elif isinstance(t,list): - out += t - else: - out.append(t) - lastE = e - out.append(instring[lastE:]) - return "".join(map(_ustr,out)) - - def searchString( self, instring, maxMatches=sys.maxint ): - """Another extension to scanString, simplifying the access to the tokens found - to match the given parse expression. May be called with optional - maxMatches argument, to clip searching after 'n' matches are found. 
- """ - return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ]) - - def __add__(self, other ): - """Implementation of + operator - returns And""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot add element of type %s to ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return And( [ self, other ] ) - - def __radd__(self, other ): - """Implementation of += operator""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot add element of type %s to ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return other + self - - def __or__(self, other ): - """Implementation of | operator - returns MatchFirst""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot add element of type %s to ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return MatchFirst( [ self, other ] ) - - def __ror__(self, other ): - """Implementation of |= operator""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot add element of type %s to ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return other | self - - def __xor__(self, other ): - """Implementation of ^ operator - returns Or""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot add element of type %s to ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return Or( [ self, other ] ) - - def __rxor__(self, other ): - """Implementation of ^= operator""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot add element of type %s to ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return other ^ self - - def __and__(self, other ): - """Implementation of & operator - returns Each""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot add element of type %s to ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return Each( [ self, other ] ) - - def __rand__(self, other ): - """Implementation of right-& operator""" - if isinstance( other, basestring ): - other = Literal( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot add element of type %s to ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return other & self - - def __invert__( self ): - """Implementation of ~ operator - returns NotAny""" - return NotAny( self ) - - def suppress( self ): - """Suppresses the output of this ParserElement; useful to keep punctuation from - cluttering up returned output. - """ - return Suppress( self ) - - def leaveWhitespace( self ): - """Disables the skipping of whitespace before matching the characters in the - ParserElement's defined pattern. This is normally only used internally by - the pyparsing module, but may be needed in some whitespace-sensitive grammars. 
- """ - self.skipWhitespace = False - return self - - def setWhitespaceChars( self, chars ): - """Overrides the default whitespace chars - """ - self.skipWhitespace = True - self.whiteChars = chars - self.copyDefaultWhiteChars = False - return self - - def parseWithTabs( self ): - """Overrides default behavior to expand <TAB>s to spaces before parsing the input string. - Must be called before parseString when the input grammar contains elements that - match <TAB> characters.""" - self.keepTabs = True - return self - - def ignore( self, other ): - """Define expression to be ignored (e.g., comments) while doing pattern - matching; may be called repeatedly, to define multiple comment or other - ignorable patterns. - """ - if isinstance( other, Suppress ): - if other not in self.ignoreExprs: - self.ignoreExprs.append( other ) - else: - self.ignoreExprs.append( Suppress( other ) ) - return self - - def setDebugActions( self, startAction, successAction, exceptionAction ): - """Enable display of debugging messages while doing pattern matching.""" - self.debugActions = (startAction or _defaultStartDebugAction, - successAction or _defaultSuccessDebugAction, - exceptionAction or _defaultExceptionDebugAction) - self.debug = True - return self - - def setDebug( self, flag=True ): - """Enable display of debugging messages while doing pattern matching.""" - if flag: - self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction ) - else: - self.debug = False - return self - - def __str__( self ): - return self.name - - def __repr__( self ): - return _ustr(self) - - def streamline( self ): - self.streamlined = True - self.strRepr = None - return self - - def checkRecursion( self, parseElementList ): - pass - - def validate( self, validateTrace=[] ): - """Check defined expressions for valid structure, check for infinite recursive definitions.""" - self.checkRecursion( [] ) - - def parseFile( self, file_or_filename ): - """Execute the parse expression on the given file or filename. - If a filename is specified (instead of a file object), - the entire file is opened, read, and closed before parsing. 
- """ - try: - file_contents = file_or_filename.read() - except AttributeError: - f = open(file_or_filename, "rb") - file_contents = f.read() - f.close() - return self.parseString(file_contents) - - -class Token(ParserElement): - """Abstract ParserElement subclass, for defining atomic matching patterns.""" - def __init__( self ): - super(Token,self).__init__( savelist=False ) - self.myException = ParseException("",0,"",self) - - def setName(self, name): - s = super(Token,self).setName(name) - self.errmsg = "Expected " + self.name - s.myException.msg = self.errmsg - return s - - -class Empty(Token): - """An empty token, will always match.""" - def __init__( self ): - super(Empty,self).__init__() - self.name = "Empty" - self.mayReturnEmpty = True - self.mayIndexError = False - - -class NoMatch(Token): - """A token that will never match.""" - def __init__( self ): - super(NoMatch,self).__init__() - self.name = "NoMatch" - self.mayReturnEmpty = True - self.mayIndexError = False - self.errmsg = "Unmatchable token" - self.myException.msg = self.errmsg - - def parseImpl( self, instring, loc, doActions=True ): - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - - -class Literal(Token): - """Token to exactly match a specified string.""" - def __init__( self, matchString ): - super(Literal,self).__init__() - self.match = matchString - self.matchLen = len(matchString) - try: - self.firstMatchChar = matchString[0] - except IndexError: - warnings.warn("null string passed to Literal; use Empty() instead", - SyntaxWarning, stacklevel=2) - self.__class__ = Empty - self.name = '"%s"' % _ustr(self.match) - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = False - self.myException.msg = self.errmsg - self.mayIndexError = False - - # Performance tuning: this routine gets called a *lot* - # if this is a single character match string and the first character matches, - # short-circuit as quickly as possible, and avoid calling startswith - #~ @profile - def parseImpl( self, instring, loc, doActions=True ): - if (instring[loc] == self.firstMatchChar and - (self.matchLen==1 or instring.startswith(self.match,loc)) ): - return loc+self.matchLen, self.match - #~ raise ParseException( instring, loc, self.errmsg ) - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - -class Keyword(Token): - """Token to exactly match a specified string as a keyword, that is, it must be - immediately followed by a non-keyword character. Compare with Literal:: - Literal("if") will match the leading 'if' in 'ifAndOnlyIf'. - Keyword("if") will not; it will only match the leading 'if in 'if x=1', or 'if(y==2)' - Accepts two optional constructor arguments in addition to the keyword string: - identChars is a string of characters that would be valid identifier characters, - defaulting to all alphanumerics + "_" and "$"; caseless allows case-insensitive - matching, default is False. 
- """ - DEFAULT_KEYWORD_CHARS = alphanums+"_$" - - def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ): - super(Keyword,self).__init__() - self.match = matchString - self.matchLen = len(matchString) - try: - self.firstMatchChar = matchString[0] - except IndexError: - warnings.warn("null string passed to Keyword; use Empty() instead", - SyntaxWarning, stacklevel=2) - self.name = '"%s"' % self.match - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = False - self.myException.msg = self.errmsg - self.mayIndexError = False - self.caseless = caseless - if caseless: - self.caselessmatch = matchString.upper() - identChars = identChars.upper() - self.identChars = _str2dict(identChars) - - def parseImpl( self, instring, loc, doActions=True ): - if self.caseless: - if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and - (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and - (loc == 0 or instring[loc-1].upper() not in self.identChars) ): - return loc+self.matchLen, self.match - else: - if (instring[loc] == self.firstMatchChar and - (self.matchLen==1 or instring.startswith(self.match,loc)) and - (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and - (loc == 0 or instring[loc-1] not in self.identChars) ): - return loc+self.matchLen, self.match - #~ raise ParseException( instring, loc, self.errmsg ) - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - - def copy(self): - c = super(Keyword,self).copy() - c.identChars = Keyword.DEFAULT_KEYWORD_CHARS - return c - - def setDefaultKeywordChars( chars ): - """Overrides the default Keyword chars - """ - Keyword.DEFAULT_KEYWORD_CHARS = chars - setDefaultKeywordChars = staticmethod(setDefaultKeywordChars) - - -class CaselessLiteral(Literal): - """Token to match a specified string, ignoring case of letters. - Note: the matched results will always be in the case of the given - match string, NOT the case of the input text. - """ - def __init__( self, matchString ): - super(CaselessLiteral,self).__init__( matchString.upper() ) - # Preserve the defining literal. - self.returnString = matchString - self.name = "'%s'" % self.returnString - self.errmsg = "Expected " + self.name - self.myException.msg = self.errmsg - - def parseImpl( self, instring, loc, doActions=True ): - if instring[ loc:loc+self.matchLen ].upper() == self.match: - return loc+self.matchLen, self.returnString - #~ raise ParseException( instring, loc, self.errmsg ) - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - -class CaselessKeyword(Keyword): - def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ): - super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True ) - - def parseImpl( self, instring, loc, doActions=True ): - if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and - (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ): - return loc+self.matchLen, self.match - #~ raise ParseException( instring, loc, self.errmsg ) - exc = self.myException - exc.loc = loc - exc.pstr = instring - raise exc - -class Word(Token): - """Token for matching words composed of allowed character sets. 
- Defined with string containing all allowed initial characters, - an optional string containing allowed body characters (if omitted, - defaults to the initial character set), and an optional minimum, - maximum, and/or exact length. - """ - def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False ): - super(Word,self).__init__() - self.initCharsOrig = initChars - self.initChars = _str2dict(initChars) - if bodyChars : - self.bodyCharsOrig = bodyChars - self.bodyChars = _str2dict(bodyChars) - else: - self.bodyCharsOrig = initChars - self.bodyChars = _str2dict(initChars) - - self.maxSpecified = max > 0 - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = sys.maxint - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.myException.msg = self.errmsg - self.mayIndexError = False - self.asKeyword = asKeyword - - if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0): - if self.bodyCharsOrig == self.initCharsOrig: - self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig) - elif len(self.bodyCharsOrig) == 1: - self.reString = "%s[%s]*" % \ - (re.escape(self.initCharsOrig), - _escapeRegexRangeChars(self.bodyCharsOrig),) - else: - self.reString = "[%s][%s]*" % \ - (_escapeRegexRangeChars(self.initCharsOrig), - _escapeRegexRangeChars(self.bodyCharsOrig),) - if self.asKeyword: - self.reString = r"\b"+self.reString+r"\b" - try: - self.re = re.compile( self.reString ) - except: - self.re = None - - def parseImpl( self, instring, loc, doActions=True ): - if self.re: - result = self.re.match(instring,loc) - ... [truncated message content] |
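The bundled pyparsing source in the diff above documents Word, setResultsName, parseString, and named-token access on ParseResults. A minimal sketch of that usage, assuming the bundled copy is importable as pyparsing (the grammar and field names below are illustrative only, not part of the commit):

    from pyparsing import Word, alphas, nums

    # grammar for "<name> <age>", e.g. "Alice 42"
    name = Word(alphas).setResultsName("name")
    age = Word(nums).setResultsName("age")
    record = name + age

    tokens = record.parseString("Alice 42")
    print tokens.asList()          # -> ['Alice', '42']
    print tokens.name, tokens.age  # named access via ParseResults
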
From: <jd...@us...> - 2007-11-08 23:29:49
|
Revision: 4168 http://matplotlib.svn.sourceforge.net/matplotlib/?rev=4168&view=rev Author: jdh2358 Date: 2007-11-08 15:29:46 -0800 (Thu, 08 Nov 2007) Log Message: ----------- added recarray utils module Modified Paths: -------------- trunk/matplotlib/API_CHANGES trunk/matplotlib/CHANGELOG trunk/matplotlib/examples/date_index_formatter.py trunk/matplotlib/examples/loadrec.py trunk/matplotlib/examples/mathtext_examples.py trunk/matplotlib/lib/matplotlib/axes.py trunk/matplotlib/lib/matplotlib/cbook.py trunk/matplotlib/lib/matplotlib/mlab.py Modified: trunk/matplotlib/API_CHANGES =================================================================== --- trunk/matplotlib/API_CHANGES 2007-11-08 23:25:44 UTC (rev 4167) +++ trunk/matplotlib/API_CHANGES 2007-11-08 23:29:46 UTC (rev 4168) @@ -1,3 +1,5 @@ + Moved mlab.csv2rec -> recutils.csv2rec + Added ax kwarg to pyplot.colorbar and Figure.colorbar so that one can specify the axes object from which space for the colorbar is to be taken, if one does not want to make the colorbar axes Modified: trunk/matplotlib/CHANGELOG =================================================================== --- trunk/matplotlib/CHANGELOG 2007-11-08 23:25:44 UTC (rev 4167) +++ trunk/matplotlib/CHANGELOG 2007-11-08 23:29:46 UTC (rev 4168) @@ -1,3 +1,6 @@ +2007-11-08 Moved csv2rec to recutils and added other record array + utilities - JDH + 2007-11-08 If available, use existing pyparsing installation - DSD 2007-11-07 Removed old enthought.traits from lib/matplotlib, added Modified: trunk/matplotlib/examples/date_index_formatter.py =================================================================== --- trunk/matplotlib/examples/date_index_formatter.py 2007-11-08 23:25:44 UTC (rev 4167) +++ trunk/matplotlib/examples/date_index_formatter.py 2007-11-08 23:29:46 UTC (rev 4168) @@ -9,7 +9,7 @@ """ import numpy -from matplotlib.mlab import csv2rec +from matplotlib.recutils import csv2rec from pylab import figure, show from matplotlib.ticker import Formatter Modified: trunk/matplotlib/examples/loadrec.py =================================================================== --- trunk/matplotlib/examples/loadrec.py 2007-11-08 23:25:44 UTC (rev 4167) +++ trunk/matplotlib/examples/loadrec.py 2007-11-08 23:29:46 UTC (rev 4168) @@ -1,4 +1,4 @@ -from matplotlib.mlab import csv2rec +from matplotlib.recutils import csv2rec from pylab import figure, show a = csv2rec('data/msft.csv') Modified: trunk/matplotlib/examples/mathtext_examples.py =================================================================== --- trunk/matplotlib/examples/mathtext_examples.py 2007-11-08 23:25:44 UTC (rev 4167) +++ trunk/matplotlib/examples/mathtext_examples.py 2007-11-08 23:29:46 UTC (rev 4168) @@ -49,7 +49,7 @@ r'$\widehat{abc}\widetilde{def}$', r'$\Gamma \Delta \Theta \Lambda \Xi \Pi \Sigma \Upsilon \Phi \Psi \Omega$', r'$\alpha \beta \gamma \delta \epsilon \zeta \eta \theta \iota \lambda \mu \nu \xi \pi \kappa \rho \sigma \tau \upsilon \phi \chi \psi$', - ur'Generic symbol: $\u23ce \mathrm{\ue0f2 \U0001D538}$' + #ur'Generic symbol: $\u23ce \mathrm{\ue0f2 \U0001D538}$' ] from pylab import * @@ -63,12 +63,13 @@ axis([0, 3, -len(tests), 0]) yticks(arange(len(tests)) * -1) for i, s in enumerate(tests): - print "%02d: %s" % (i, s) + print (i, s) text(0.1, -i, s, fontsize=20) - savefig('mathtext_example') - close('all') - + #savefig('mathtext_example') + #close('all') + show() + if '--latex' in sys.argv: fd = open("mathtext_examples.ltx", "w") fd.write("\\documentclass{article}\n") Modified: 
trunk/matplotlib/lib/matplotlib/axes.py =================================================================== --- trunk/matplotlib/lib/matplotlib/axes.py 2007-11-08 23:25:44 UTC (rev 4167) +++ trunk/matplotlib/lib/matplotlib/axes.py 2007-11-08 23:29:46 UTC (rev 4168) @@ -4068,6 +4068,8 @@ Optional kwargs control the PatchCollection properties: %(PatchCollection)s + + A Collection instance is returned """ if not self._hold: self.cla() Modified: trunk/matplotlib/lib/matplotlib/cbook.py =================================================================== --- trunk/matplotlib/lib/matplotlib/cbook.py 2007-11-08 23:25:44 UTC (rev 4167) +++ trunk/matplotlib/lib/matplotlib/cbook.py 2007-11-08 23:29:46 UTC (rev 4168) @@ -853,6 +853,7 @@ raise ValueError(_safezip_msg % (Nx, i+1, len(arg))) return zip(*args) + class MemoryMonitor: def __init__(self, nmax=20000): self._nmax = nmax @@ -895,12 +896,14 @@ x = npy.arange(i0, self._n, isub) return x, self._mem[i0:self._n:isub] - def plot(self, i0=0, isub=1): - from pylab import figure, show - fig = figure() + def plot(self, i0=0, isub=1, fig=None): + if fig is None: + from pylab import figure, show + fig = figure() + ax = fig.add_subplot(111) ax.plot(*self.xy(i0, isub)) - show() + fig.canvas.draw() def print_cycles(objects, outstream=sys.stdout, show_progress=False): Modified: trunk/matplotlib/lib/matplotlib/mlab.py =================================================================== --- trunk/matplotlib/lib/matplotlib/mlab.py 2007-11-08 23:25:44 UTC (rev 4167) +++ trunk/matplotlib/lib/matplotlib/mlab.py 2007-11-08 23:29:46 UTC (rev 4168) @@ -1257,415 +1257,7 @@ if unpack: return X.transpose() else: return X -def csv2rec(fname, comments='#', skiprows=0, checkrows=5, delimiter=',', - converterd=None, names=None, missing=None): - """ - Load data from comma/space/tab delimited file in fname into a - numpy record array and return the record array. - If names is None, a header row is required to automatically assign - the recarray names. The headers will be lower cased, spaces will - be converted to underscores, and illegal attribute name characters - removed. If names is not None, it is a sequence of names to use - for the column names. In this case, it is assumed there is no header row. - - - fname - can be a filename or a file handle. Support for gzipped - files is automatic, if the filename ends in .gz - - comments - the character used to indicate the start of a comment - in the file - - skiprows - is the number of rows from the top to skip - - checkrows - is the number of rows to check to validate the column - data type. When set to zero all rows are validated. - - converterd, if not None, is a dictionary mapping column number or - munged column name to a converter function - - names, if not None, is a list of header names. In this case, no - header will be read from the file - - if no rows are found, None is returned See examples/loadrec.py - """ - - if converterd is None: - converterd = dict() - - import dateutil.parser - parsedate = dateutil.parser.parse - - - fh = cbook.to_filehandle(fname) - - - class FH: - """ - for space delimited files, we want different behavior than - comma or tab. Generally, we want multiple spaces to be - treated as a single separator, whereas with comma and tab we - want multiple commas to return multiple (empty) fields. 
The - join/strip trick below effects this - """ - def __init__(self, fh): - self.fh = fh - - def close(self): - self.fh.close() - - def seek(self, arg): - self.fh.seek(arg) - - def fix(self, s): - return ' '.join(s.split()) - - - def next(self): - return self.fix(self.fh.next()) - - def __iter__(self): - for line in self.fh: - yield self.fix(line) - - if delimiter==' ': - fh = FH(fh) - - reader = csv.reader(fh, delimiter=delimiter) - def process_skiprows(reader): - if skiprows: - for i, row in enumerate(reader): - if i>=(skiprows-1): break - - return fh, reader - - process_skiprows(reader) - - - def myfloat(x): - if x==missing: - return npy.nan - else: - return float(x) - - def get_func(item, func): - # promote functions in this order - funcmap = {int:myfloat, myfloat:dateutil.parser.parse, dateutil.parser.parse:str} - try: func(item) - except: - if func==str: - raise ValueError('Could not find a working conversion function') - else: return get_func(item, funcmap[func]) # recurse - else: return func - - - # map column names that clash with builtins -- TODO - extend this list - itemd = { - 'return' : 'return_', - 'file' : 'file_', - 'print' : 'print_', - } - - def get_converters(reader): - - converters = None - for i, row in enumerate(reader): - if i==0: - converters = [int]*len(row) - if checkrows and i>checkrows: - break - #print i, len(names), len(row) - #print 'converters', zip(converters, row) - for j, (name, item) in enumerate(zip(names, row)): - func = converterd.get(j) - if func is None: - func = converterd.get(name) - if func is None: - if not item.strip(): continue - func = converters[j] - if len(item.strip()): - func = get_func(item, func) - converters[j] = func - return converters - - # Get header and remove invalid characters - needheader = names is None - if needheader: - headers = reader.next() - # remove these chars - delete = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""") - delete.add('"') - - names = [] - seen = dict() - for i, item in enumerate(headers): - item = item.strip().lower().replace(' ', '_') - item = ''.join([c for c in item if c not in delete]) - if not len(item): - item = 'column%d'%i - - item = itemd.get(item, item) - cnt = seen.get(item, 0) - if cnt>0: - names.append(item + '%d'%cnt) - else: - names.append(item) - seen[item] = cnt+1 - - # get the converter functions by inspecting checkrows - converters = get_converters(reader) - if converters is None: - raise ValueError('Could not find any valid data in CSV file') - - # reset the reader and start over - fh.seek(0) - process_skiprows(reader) - if needheader: - skipheader = reader.next() - - # iterate over the remaining rows and convert the data to date - # objects, ints, or floats as approriate - rows = [] - for i, row in enumerate(reader): - if not len(row): continue - if row[0].startswith(comments): continue - rows.append([func(val) for func, val in zip(converters, row)]) - fh.close() - - if not len(rows): - return None - r = npy.rec.fromrecords(rows, names=names) - return r - - -def rec2csv(r, fname, delimiter=','): - """ - Save the data from numpy record array r into a comma/space/tab - delimited file. The record array dtype names will be used for - column headers. - - - fname - can be a filename or a file handle. 
Support for gzipped - files is automatic, if the filename ends in .gz - """ - fh = cbook.to_filehandle(fname, 'w') - writer = csv.writer(fh, delimiter=delimiter, quoting=csv.QUOTE_NONNUMERIC) - header = r.dtype.names - writer.writerow(header) - for row in r: - writer.writerow(map(str, row)) - fh.close() - -try: - import pyExcelerator as excel -except ImportError: - pass -else: - - class Format: - xlstyle = None - def convert(self, x): - return x - - class FormatFloat(Format): - def __init__(self, precision=4): - self.xlstyle = excel.XFStyle() - zeros = ''.join(['0']*precision) - self.xlstyle.num_format_str = '#,##0.%s;[RED]-#,##0.%s'%(zeros, zeros) - - class FormatInt(Format): - convert = int - def __init__(self): - - self.xlstyle = excel.XFStyle() - self.xlstyle.num_format_str = '#,##;[RED]-#,##' - - class FormatPercent(Format): - def __init__(self, precision=4): - self.xlstyle = excel.XFStyle() - zeros = ''.join(['0']*precision) - self.xlstyle.num_format_str = '0.%s%;[RED]-0.%s%'%(zeros, zeros) - - class FormatThousands(FormatFloat): - def __init__(self, precision=1): - FormatFloat.__init__(self, precision) - - def convert(self, x): - return x/1e3 - - class FormatMillions(FormatFloat): - def __init__(self, precision=1): - FormatFloat.__init__(self, precision) - - def convert(self, x): - return x/1e6 - - class FormatDate(Format): - def __init__(self, fmt='%Y-%m-%d'): - self.fmt = fmt - - def convert(self, val): - return val.strftime(self.fmt) - - class FormatDatetime(Format): - def __init__(self, fmt='%Y-%m-%d %H:%M:%S'): - self.fmt = fmt - - def convert(self, val): - return val.strftime(self.fmt) - - class FormatObject(Format): - - def convert(self, x): - return str(x) - - def rec2excel(ws, r, formatd=None, rownum=0): - """ - save record array r to excel pyExcelerator worksheet ws - starting at rownum - - formatd is a dictionary mapping dtype name -> Format instances - """ - - if formatd is None: - formatd = dict() - - formats = [] - for i, name in enumerate(r.dtype.names): - dt = r.dtype[name] - format = formatd.get(name) - if format is None: - format = rec2excel.formatd.get(dt.type, FormatObject()) - - ws.write(rownum, i, name) - formats.append(format) - - rownum+=1 - - ind = npy.arange(len(r.dtype.names)) - for row in r: - for i in ind: - val = row[i] - format = formats[i] - val = format.convert(val) - if format.xlstyle is None: - ws.write(rownum, i, val) - else: - ws.write(rownum, i, val, format.xlstyle) - rownum += 1 - rec2excel.formatd = { - npy.int16 : FormatInt(), - npy.int32 : FormatInt(), - npy.int64 : FormatInt(), - npy.float32 : FormatFloat(), - npy.float64 : FormatFloat(), - npy.object_ : FormatObject(), - npy.string_ : Format(), - } - - - -# some record array helpers -def rec_append_field(rec, name, arr, dtype=None): - 'return a new record array with field name populated with data from array arr' - arr = npy.asarray(arr) - if dtype is None: - dtype = arr.dtype - newdtype = npy.dtype(rec.dtype.descr + [(name, dtype)]) - newrec = npy.empty(rec.shape, dtype=newdtype) - for field in rec.dtype.fields: - newrec[field] = rec[field] - newrec[name] = arr - return newrec.view(npy.recarray) - - -def rec_drop_fields(rec, names): - 'return a new numpy record array with fields in names dropped' - - names = set(names) - Nr = len(rec) - - newdtype = npy.dtype([(name, rec.dtype[name]) for name in rec.dtype.names - if name not in names]) - - newrec = npy.empty(Nr, dtype=newdtype) - for field in newdtype.names: - newrec[field] = rec[field] - - return newrec.view(npy.recarray) - - -def 
rec_join(key, r1, r2): - """ - join record arrays r1 and r2 on key; key is a tuple of field - names. if r1 and r2 have equal values on all the keys in the key - tuple, then their fields will be merged into a new record array - containing the union of the fields of r1 and r2 - """ - - for name in key: - if name not in r1.dtype.names: - raise ValueError('r1 does not have key field %s'%name) - if name not in r2.dtype.names: - raise ValueError('r2 does not have key field %s'%name) - - def makekey(row): - return tuple([row[name] for name in key]) - - - names = list(r1.dtype.names) + [name for name in r2.dtype.names if name not in set(r1.dtype.names)] - - - - r1d = dict([(makekey(row),i) for i,row in enumerate(r1)]) - r2d = dict([(makekey(row),i) for i,row in enumerate(r2)]) - - r1keys = set(r1d.keys()) - r2keys = set(r2d.keys()) - - keys = r1keys & r2keys - - r1ind = [r1d[k] for k in keys] - r2ind = [r2d[k] for k in keys] - - - r1 = r1[r1ind] - r2 = r2[r2ind] - - r2 = rec_drop_fields(r2, r1.dtype.names) - - - def key_desc(name): - 'if name is a string key, use the larger size of r1 or r2 before merging' - dt1 = r1.dtype[name] - if dt1.type != npy.string_: - return (name, dt1.descr[0][1]) - - dt2 = r1.dtype[name] - assert dt2==dt1 - if dt1.num>dt2.num: - return (name, dt1.descr[0][1]) - else: - return (name, dt2.descr[0][1]) - - - - keydesc = [key_desc(name) for name in key] - - newdtype = npy.dtype(keydesc + - [desc for desc in r1.dtype.descr if desc[0] not in key ] + - [desc for desc in r2.dtype.descr if desc[0] not in key ] ) - - - newrec = npy.empty(len(r1), dtype=newdtype) - for field in r1.dtype.names: - newrec[field] = r1[field] - - for field in r2.dtype.names: - newrec[field] = r2[field] - - return newrec.view(npy.recarray) - def slopes(x,y): """ SLOPES calculate the slope y'(x) Given data vectors X and Y SLOPES This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
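Revision 4168 above moves csv2rec and the record-array helpers (rec_append_field, rec_drop_fields, rec_join) out of mlab, and the updated examples import from matplotlib.recutils. A minimal sketch of the helpers under that assumed import path (the symbols, prices, and volumes are made-up illustration data):

    import numpy as npy
    from matplotlib.recutils import rec_join, rec_append_field, rec_drop_fields

    r1 = npy.rec.fromrecords([('msft', 21.0), ('ibm', 105.5)],
                             names='symbol,price')
    r2 = npy.rec.fromrecords([('msft', 3.1e9), ('ibm', 1.5e9)],
                             names='symbol,volume')

    # join on the common 'symbol' field; the result carries the union of fields
    joined = rec_join(('symbol',), r1, r2)

    # append a derived column, then drop one that is no longer needed
    joined = rec_append_field(joined, 'notional', joined.price * joined.volume)
    joined = rec_drop_fields(joined, ['volume'])

    print joined.dtype.names   # expected: ('symbol', 'price', 'notional')
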
From: <ds...@us...> - 2007-11-09 00:19:52
|
Revision: 4169 http://matplotlib.svn.sourceforge.net/matplotlib/?rev=4169&view=rev Author: dsdale Date: 2007-11-08 16:19:45 -0800 (Thu, 08 Nov 2007) Log Message: ----------- updated pyparsing to version 1.4.8 Modified Paths: -------------- trunk/matplotlib/CHANGELOG trunk/matplotlib/lib/pyparsing.py Modified: trunk/matplotlib/CHANGELOG =================================================================== --- trunk/matplotlib/CHANGELOG 2007-11-08 23:29:46 UTC (rev 4168) +++ trunk/matplotlib/CHANGELOG 2007-11-09 00:19:45 UTC (rev 4169) @@ -1,3 +1,5 @@ +2007-11-08 Update pyparsing to version 1.4.8 - DSD + 2007-11-08 Moved csv2rec to recutils and added other record array utilities - JDH Modified: trunk/matplotlib/lib/pyparsing.py =================================================================== --- trunk/matplotlib/lib/pyparsing.py 2007-11-08 23:29:46 UTC (rev 4168) +++ trunk/matplotlib/lib/pyparsing.py 2007-11-09 00:19:45 UTC (rev 4169) @@ -57,8 +57,9 @@ - quoted strings - embedded comments """ -__version__ = "1.4.6" -__versionTime__ = "11 April 2007 16:41" + +__version__ = "1.4.8" +__versionTime__ = "7 October 2007 00:25" __author__ = "Paul McGuire <pt...@us...>" import string @@ -273,13 +274,14 @@ if isinstance(i,(int,slice)): del self.__toklist[i] else: - del self._tokdict[i] + del self.__tokdict[i] def __contains__( self, k ): return self.__tokdict.has_key(k) def __len__( self ): return len( self.__toklist ) - def __nonzero__( self ): return len( self.__toklist ) > 0 + def __bool__(self): return len( self.__toklist ) > 0 + def __nonzero__( self ): return self.__bool__() def __iter__( self ): return iter( self.__toklist ) def keys( self ): """Returns all named result keys.""" @@ -598,6 +600,8 @@ """Define name for this expression, for use in debugging.""" self.name = name self.errmsg = "Expected " + self.name + if hasattr(self,"exception"): + self.exception.msg = self.errmsg return self def setResultsName( self, name, listAllMatches=False ): @@ -612,6 +616,24 @@ newself.modalResults = not listAllMatches return newself + def setBreak(self,breakFlag = True): + """Method to invoke the Python pdb debugger when this element is + about to be parsed. Set breakFlag to True to enable, False to + disable. 
+ """ + if breakFlag: + _parseMethod = self._parse + def breaker(instring, loc, doActions=True, callPreParse=True): + import pdb + pdb.set_trace() + _parseMethod( instring, loc, doActions, callPreParse ) + breaker._originalParseMethod = _parseMethod + self._parse = breaker + else: + if hasattr(self._parse,"_originalParseMethod"): + self._parse = self._parse._originalParseMethod + return self + def normalizeParseActionArgs( f ): """Internal method used to decorate parse actions that take fewer than 3 arguments, so that all parse actions can be called as f(s,l,t).""" @@ -774,7 +796,7 @@ self.failAction( instring, tokensStart, self, err ) raise else: - if callPreParse: + if callPreParse and self.callPreparse: preloc = self.preParse( instring, loc ) else: preloc = loc @@ -827,8 +849,6 @@ # this method gets repeatedly called during backtracking with the same arguments - # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression def _parseCache( self, instring, loc, doActions=True, callPreParse=True ): - #if doActions and self.parseAction: - # return self._parseNoCache( instring, loc, doActions, callPreParse ) lookup = (self,instring,loc,callPreParse,doActions) if lookup in ParserElement._exprArgCache: value = ParserElement._exprArgCache[ lookup ] @@ -836,11 +856,11 @@ if isinstance(value,ParseBaseException): value.loc = loc raise value - return value + return (value[0],value[1].copy()) else: try: - ParserElement._exprArgCache[ lookup ] = \ - value = self._parseNoCache( instring, loc, doActions, callPreParse ) + value = self._parseNoCache( instring, loc, doActions, callPreParse ) + ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy()) return value except ParseBaseException, pe: ParserElement._exprArgCache[ lookup ] = pe @@ -1046,6 +1066,14 @@ """Implementation of ~ operator - returns NotAny""" return NotAny( self ) + def __call__(self, name): + """Shortcut for setResultsName, with listAllMatches=default:: + userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno") + could be written as:: + userdata = Word(alphas)("name") + Word(nums+"-")("socsecno") + """ + return self.setResultsName(name) + def suppress( self ): """Suppresses the output of this ParserElement; useful to keep punctuation from cluttering up returned output. @@ -1096,7 +1124,8 @@ return self def setDebug( self, flag=True ): - """Enable display of debugging messages while doing pattern matching.""" + """Enable display of debugging messages while doing pattern matching. 
+ Set flag to True to enable, False to disable.""" if flag: self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction ) else: @@ -1134,20 +1163,29 @@ f.close() return self.parseString(file_contents) + def getException(self): + return ParseException("",0,self.errmsg,self) + + def __getattr__(self,aname): + if aname == "myException": + self.myException = ret = self.getException(); + return ret; + else: + raise AttributeError, "no such attribute " + aname class Token(ParserElement): """Abstract ParserElement subclass, for defining atomic matching patterns.""" def __init__( self ): super(Token,self).__init__( savelist=False ) - self.myException = ParseException("",0,"",self) + #self.myException = ParseException("",0,"",self) def setName(self, name): s = super(Token,self).setName(name) self.errmsg = "Expected " + self.name - s.myException.msg = self.errmsg + #s.myException.msg = self.errmsg return s + - class Empty(Token): """An empty token, will always match.""" def __init__( self ): @@ -1165,7 +1203,7 @@ self.mayReturnEmpty = True self.mayIndexError = False self.errmsg = "Unmatchable token" - self.myException.msg = self.errmsg + #self.myException.msg = self.errmsg def parseImpl( self, instring, loc, doActions=True ): exc = self.myException @@ -1189,7 +1227,7 @@ self.name = '"%s"' % _ustr(self.match) self.errmsg = "Expected " + self.name self.mayReturnEmpty = False - self.myException.msg = self.errmsg + #self.myException.msg = self.errmsg self.mayIndexError = False # Performance tuning: this routine gets called a *lot* @@ -1230,7 +1268,7 @@ self.name = '"%s"' % self.match self.errmsg = "Expected " + self.name self.mayReturnEmpty = False - self.myException.msg = self.errmsg + #self.myException.msg = self.errmsg self.mayIndexError = False self.caseless = caseless if caseless: @@ -1279,7 +1317,7 @@ self.returnString = matchString self.name = "'%s'" % self.returnString self.errmsg = "Expected " + self.name - self.myException.msg = self.errmsg + #self.myException.msg = self.errmsg def parseImpl( self, instring, loc, doActions=True ): if instring[ loc:loc+self.matchLen ].upper() == self.match: @@ -1309,7 +1347,9 @@ Defined with string containing all allowed initial characters, an optional string containing allowed body characters (if omitted, defaults to the initial character set), and an optional minimum, - maximum, and/or exact length. + maximum, and/or exact length. The default value for min is 1 (a + minimum value < 1 is not valid); the default values for max and exact + are 0, meaning no maximum or exact length restriction. 
""" def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False ): super(Word,self).__init__() @@ -1323,6 +1363,9 @@ self.bodyChars = _str2dict(initChars) self.maxSpecified = max > 0 + + if min < 1: + raise ValueError, "cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted" self.minLen = min @@ -1337,7 +1380,7 @@ self.name = _ustr(self) self.errmsg = "Expected " + self.name - self.myException.msg = self.errmsg + #self.myException.msg = self.errmsg self.mayIndexError = False self.asKeyword = asKeyword @@ -1452,7 +1495,7 @@ self.name = _ustr(self) self.errmsg = "Expected " + self.name - self.myException.msg = self.errmsg + #self.myException.msg = self.errmsg self.mayIndexError = False self.mayReturnEmpty = True @@ -1557,7 +1600,7 @@ self.name = _ustr(self) self.errmsg = "Expected " + self.name - self.myException.msg = self.errmsg + #self.myException.msg = self.errmsg self.mayIndexError = False self.mayReturnEmpty = True @@ -1603,13 +1646,18 @@ class CharsNotIn(Token): """Token for matching words composed of characters *not* in a given set. Defined with string containing all disallowed characters, and an optional - minimum, maximum, and/or exact length. + minimum, maximum, and/or exact length. The default value for min is 1 (a + minimum value < 1 is not valid); the default values for max and exact + are 0, meaning no maximum or exact length restriction. """ def __init__( self, notChars, min=1, max=0, exact=0 ): super(CharsNotIn,self).__init__() self.skipWhitespace = False self.notChars = notChars + if min < 1: + raise ValueError, "cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted" + self.minLen = min if max > 0: @@ -1624,7 +1672,7 @@ self.name = _ustr(self) self.errmsg = "Expected " + self.name self.mayReturnEmpty = ( self.minLen == 0 ) - self.myException.msg = self.errmsg + #self.myException.msg = self.errmsg self.mayIndexError = False def parseImpl( self, instring, loc, doActions=True ): @@ -1687,7 +1735,7 @@ self.name = ("".join([White.whiteStrs[c] for c in self.matchWhite])) self.mayReturnEmpty = True self.errmsg = "Expected " + self.name - self.myException.msg = self.errmsg + #self.myException.msg = self.errmsg self.minLen = min @@ -1760,7 +1808,7 @@ super(LineStart,self).__init__() self.setWhitespaceChars( " \t" ) self.errmsg = "Expected start of line" - self.myException.msg = self.errmsg + #self.myException.msg = self.errmsg def preParse( self, instring, loc ): preloc = super(LineStart,self).preParse(instring,loc) @@ -1785,7 +1833,7 @@ super(LineEnd,self).__init__() self.setWhitespaceChars( " \t" ) self.errmsg = "Expected end of line" - self.myException.msg = self.errmsg + #self.myException.msg = self.errmsg def parseImpl( self, instring, loc, doActions=True ): if loc<len(instring): @@ -1810,7 +1858,7 @@ def __init__( self ): super(StringStart,self).__init__() self.errmsg = "Expected start of text" - self.myException.msg = self.errmsg + #self.myException.msg = self.errmsg def parseImpl( self, instring, loc, doActions=True ): if loc != 0: @@ -1828,7 +1876,7 @@ def __init__( self ): super(StringEnd,self).__init__() self.errmsg = "Expected end of text" - self.myException.msg = self.errmsg + #self.myException.msg = self.errmsg def parseImpl( self, instring, loc, doActions=True ): if loc < len(instring): @@ -1839,6 +1887,8 @@ raise exc elif loc == len(instring): return loc+1, [] + elif loc > len(instring): + return loc, [] else: exc = self.myException exc.loc = loc @@ -2104,6 
+2154,7 @@ for e in self.exprs: e.checkRecursion( subRecCheckList ) + class Each(ParseExpression): """Requires all given ParseExpressions to be found, but in any order. Expressions may be separated by whitespace. @@ -2198,6 +2249,7 @@ self.setWhitespaceChars( expr.whiteChars ) self.skipWhitespace = expr.skipWhitespace self.saveAsList = expr.saveAsList + self.callPreparse = expr.callPreparse def parseImpl( self, instring, loc, doActions=True ): if self.expr is not None: @@ -2280,7 +2332,7 @@ self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs self.mayReturnEmpty = True self.errmsg = "Found unwanted token, "+_ustr(self.expr) - self.myException = ParseException("",0,self.errmsg,self) + #self.myException = ParseException("",0,self.errmsg,self) def parseImpl( self, instring, loc, doActions=True ): try: @@ -2431,7 +2483,7 @@ self.includeMatch = include self.asList = False self.errmsg = "No match found for "+_ustr(self.expr) - self.myException = ParseException("",0,self.errmsg,self) + #self.myException = ParseException("",0,self.errmsg,self) def parseImpl( self, instring, loc, doActions=True ): startLoc = loc @@ -2601,7 +2653,11 @@ def postParse( self, instring, loc, tokenlist ): for i,tok in enumerate(tokenlist): - ikey = _ustr(tok[0]).strip() + if len(tok) == 0: + continue + ikey = tok[0] + if isinstance(ikey,int): + ikey = _ustr(tok[0]).strip() if len(tok)==1: tokenlist[ikey] = _ParseResultsWithOffset("",i) elif len(tok)==2 and not isinstance(tok[1],ParseResults): @@ -2940,7 +2996,7 @@ tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack) openTag = Suppress("<") + tagStr + \ Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \ - Suppress("=") + tagAttrValue ))) + \ + Optional( Suppress("=") + tagAttrValue ) ))) + \ Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") closeTag = Combine("</" + tagStr + ">") @@ -2957,10 +3013,47 @@ """Helper to construct opening and closing tag expressions for XML, given a tag name""" return _makeTags( tagStr, True ) +def withAttribute(*args,**attrDict): + """Helper to create a validating parse action to be used with start tags created + with makeXMLTags or makeHTMLTags. Use withAttribute to qualify a starting tag + with a required attribute value, to avoid false matches on common tags such as + <TD> or <DIV>. + + Call withAttribute with a series of attribute names and values. Specify the list + of filter attributes names and values as: + - keyword arguments, as in (class="Customer",align="right"), or + - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") ) + For attribute names with a namespace prefix, you must use the second form. Attribute + names are matched insensitive to upper/lower case. 
+ """ + if args: + attrs = args[:] + else: + attrs = attrDict.items() + attrs = [(k.lower(),v) for k,v in attrs] + def pa(s,l,tokens): + for attrName,attrValue in attrs: + if attrName not in tokens: + raise ParseException(s,l,"no matching attribute " + attrName) + if tokens[attrName] != attrValue: + raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" % + (attrName, tokens[attrName], attrValue)) + return pa + opAssoc = _Constants() opAssoc.LEFT = object() opAssoc.RIGHT = object() +def _flattenOpPrecTokens(tokens): + if isinstance(tokens,ParseResults): + if len(tokens)==1: + if isinstance(tokens[0],ParseResults): + return _flattenOpPrecTokens(tokens[0]) + else: + return tokens[0] + return map(_flattenOpPrecTokens,tokens) + return tokens + def operatorPrecedence( baseExpr, opList ): """Helper method for constructing grammars of expressions made up of operators working in a precedence hierarchy. Operators may be unary or @@ -2969,7 +3062,8 @@ Parameters: - baseExpr - expression representing the most basic element for the nested - - opList - list of tuples, one for each operator precedence level in the expression grammar; each tuple is of the form + - opList - list of tuples, one for each operator precedence level in the + expression grammar; each tuple is of the form (opExpr, numTerms, rightLeftAssoc, parseAction), where: - opExpr is the pyparsing expression for the operator; may also be a string, which will be converted to a Literal @@ -2986,12 +3080,12 @@ lastExpr = baseExpr | ( Suppress('(') + ret + Suppress(')') ) for i,operDef in enumerate(opList): opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4] - thisExpr = Forward().setName("expr%d" % i) + thisExpr = Forward()#.setName("expr%d" % i) if rightLeftAssoc == opAssoc.LEFT: if arity == 1: - matchExpr = Group( lastExpr + opExpr ) + matchExpr = Group( lastExpr + ZeroOrMore( opExpr ) ) elif arity == 2: - matchExpr = Group( lastExpr + OneOrMore( opExpr + lastExpr ) ) + matchExpr = Group( lastExpr + ZeroOrMore( opExpr + lastExpr ) ) else: raise ValueError, "operator must be unary (1) or binary (2)" elif rightLeftAssoc == opAssoc.RIGHT: @@ -3000,26 +3094,63 @@ if not isinstance(opExpr, Optional): opExpr = Optional(opExpr) matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr ) + matchExpr |= lastExpr elif arity == 2: - matchExpr = Group( lastExpr + OneOrMore( opExpr + thisExpr ) ) + matchExpr = Group( lastExpr + ZeroOrMore( opExpr + thisExpr ) ) else: raise ValueError, "operator must be unary (1) or binary (2)" else: raise ValueError, "operator must indicate right or left associativity" if pa: matchExpr.setParseAction( pa ) - thisExpr << ( matchExpr | lastExpr ) + thisExpr << ( matchExpr ) lastExpr = thisExpr ret << lastExpr + ret.setParseAction(_flattenOpPrecTokens) + return Group(ret) + +dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\.))*"').setName("string enclosed in double quotes") +sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\.))*'").setName("string enclosed in single quotes") +quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\.))*')''').setName("quotedString using single or double quotes") + +def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString): + """Helper method for defining nested lists enclosed in opening and closing + delimiters ("(" and ")" are the default). 
+ + Parameters: + - opener - opening character for a nested list (default="("); can also be a pyparsing expression + - closer - closing character for a nested list (default=")"); can also be a pyparsing expression + - content - expression for items within the nested lists (default=None) + - ignoreExpr - expression for ignoring opening and closing delimiters (default=quotedString) + + If an expression is not provided for the content argument, the nested + expression will capture all whitespace-delimited content between delimiters + as a list of separate values. + + Use the ignoreExpr argument to define expressions that may contain + opening or closing characters that should not be treated as opening + or closing characters for nesting, such as quotedString or a comment + expression. Specify multiple expressions using an Or or MatchFirst. + The default is quotedString, but if no expressions are to be ignored, + then pass None for this argument. + """ + if opener == closer: + raise ValueError("opening and closing strings cannot be the same") + if content is None: + if isinstance(opener,basestring) and isinstance(closer,basestring): + content = (empty+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS).setParseAction(lambda t:t[0].strip())) + else: + raise ValueError("opening and closing arguments must be strings if no content expression is given") + ret = Forward() + if ignoreExpr is not None: + ret << ZeroOrMore( ignoreExpr | content | Group( Suppress(opener) + ret + Suppress(closer) ) ) + else: + ret << ZeroOrMore( content | Group( Suppress(opener) + ret + Suppress(closer) ) ) return ret alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") -dblQuotedString = Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\.))*"').setName("string enclosed in double quotes") -sglQuotedString = Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\.))*'").setName("string enclosed in single quotes") -quotedString = Regex(r'''(?:"(?:[^"\n\r\\]|(?:"")|(?:\\.))*")|(?:'(?:[^'\n\r\\]|(?:'')|(?:\\.))*')''').setName("quotedString using single or double quotes") - anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:")) commonHTMLEntity = Combine("&" + oneOf("gt lt amp nbsp quot").setResultsName("entity") +";") _htmlEntityMap = dict(zip("gt lt amp nbsp quot".split(),"><& '")) This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
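Among the 1.4.8 additions visible in the diff above are the __call__ shortcut on ParserElement (so that expr("name") is equivalent to expr.setResultsName("name")), the withAttribute helper for qualifying HTML/XML start tags, and the nestedExpr helper for delimited nested lists. A short sketch of the __call__ shortcut follows; the toy grammar and input string are invented for illustration and are not part of matplotlib.

    from pyparsing import Word, alphas, nums

    # expr("name") is the new 1.4.8 shorthand for expr.setResultsName("name")
    userdata = Word(alphas)("name") + Word(nums + "-")("socsecno")

    result = userdata.parseString("Houston 555-12-3456")
    print(result["name"])       # expected: Houston
    print(result["socsecno"])   # expected: 555-12-3456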
From: <ds...@us...> - 2007-11-09 00:22:08
|
Revision: 4170 http://matplotlib.svn.sourceforge.net/matplotlib/?rev=4170&view=rev Author: dsdale Date: 2007-11-08 16:22:03 -0800 (Thu, 08 Nov 2007) Log Message: ----------- added checks that also print version numbers for pyparsing, pytz, dateutil, configobj Modified Paths: -------------- trunk/matplotlib/setup.py trunk/matplotlib/setupext.py Modified: trunk/matplotlib/setup.py =================================================================== --- trunk/matplotlib/setup.py 2007-11-09 00:19:45 UTC (rev 4169) +++ trunk/matplotlib/setup.py 2007-11-09 00:22:03 UTC (rev 4170) @@ -80,9 +80,10 @@ build_ft2font, build_image, build_windowing, build_transforms, \ build_contour, build_nxutils, build_traits, build_swigagg, build_gdk, \ build_subprocess, build_ttconv, print_line, print_status, print_message, \ - print_raw, check_for_freetype, check_for_libpng, check_for_gtk, check_for_tk, \ - check_for_wx, check_for_numpy, check_for_qt, check_for_qt4, check_for_cairo, \ - check_for_traits + print_raw, check_for_freetype, check_for_libpng, check_for_gtk, \ + check_for_tk, check_for_wx, check_for_numpy, check_for_qt, check_for_qt4, \ + check_for_cairo, check_for_traits, check_for_pytz, check_for_dateutil, \ + check_for_pyparsing, check_for_configobj #import distutils.sysconfig # jdh @@ -183,14 +184,16 @@ build_contour(ext_modules, packages) build_nxutils(ext_modules, packages) +if not check_for_pyparsing(): py_modules.append('pyparsing') + print_raw("") print_raw("OPTIONAL DEPENDENCIES") try: import datetime -except ImportError: havedate = False -else: havedate = True +except ImportError: hasdatetime = False +else: hasdatetime = True -if havedate: # dates require python23 datetime +if hasdatetime: # dates require python23 datetime # only install pytz and dateutil if the user hasn't got them def add_pytz(): packages.append('pytz') @@ -202,32 +205,23 @@ def add_dateutil(): packages.append('dateutil') + haspytz = check_for_pytz() + hasdateutil = check_for_dateutil() + if sys.platform=='win32': # always add these to the win32 installer add_pytz() add_dateutil() else: # only add them if we need them + if not haspytz: add_pytz() + if not hasdateutil: add_dateutil() - try: - import pytz - except ImportError: - add_pytz() - - try: - import dateutil - except ImportError: - add_dateutil() - build_swigagg(ext_modules, packages) build_transforms(ext_modules, packages) -try: import pyparsing -except ImportError: py_modules.append('pyparsing') - # for the traited config package: -try: import configobj -except ImportError: py_modules.append('configobj') +if not check_for_configobj(): py_modules.append('configobj') if not check_for_traits(): build_traits(ext_modules, packages) Modified: trunk/matplotlib/setupext.py =================================================================== --- trunk/matplotlib/setupext.py 2007-11-09 00:19:45 UTC (rev 4169) +++ trunk/matplotlib/setupext.py 2007-11-09 00:22:03 UTC (rev 4170) @@ -323,7 +323,51 @@ return False else: print_status("Cairo", cairo.version) + return True +def check_for_pyparsing(): + try: + import pyparsing + except ImportError: + print_status("pyparsing", "mpl-provided") + return False + else: + print_status("pyparsing", pyparsing.__version__) + return True + +def check_for_pytz(): + try: + import pytz + except ImportError: + print_status("pytz", "mpl-provided") + return False + else: + print_status("pytz", pytz.__version__) + return True + +def check_for_dateutil(): + try: + import dateutil + except ImportError: + print_status("dateutil", "mpl-provided") + return 
False + else: + try: + print_status("dateutil", dateutil.__version) + except AttributeError: + print_status("dateutil", "present, version unknown") + return True + +def check_for_configobj(): + try: + import configobj + except ImportError: + print_status("configobj", "mpl-provided") + return False + else: + print_status("configobj", configobj.__version__) + return True + def check_for_traits(): gotit = False try: This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
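The check_for_* helpers above all share one shape: attempt the import, report "mpl-provided" and return False on ImportError (so setup.py falls back to the bundled copy), otherwise print the detected version and return True. Note that check_for_dateutil reads dateutil.__version rather than __version__, so in practice it takes the AttributeError branch and reports "present, version unknown". A hedged sketch of how a further optional check could follow the same convention inside setupext.py (where print_status is defined); "somepackage" is a hypothetical placeholder, not a real matplotlib dependency.

    def check_for_somepackage():
        try:
            import somepackage
        except ImportError:
            # the bundled dependencies above report "mpl-provided" here instead
            print_status("somepackage", "not found")
            return False
        else:
            # not every package exposes __version__, so degrade gracefully,
            # in the spirit of check_for_dateutil above
            version = getattr(somepackage, '__version__',
                              'present, version unknown')
            print_status("somepackage", version)
            return True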
From: <ds...@us...> - 2007-11-09 01:37:04
|
Revision: 4172 http://matplotlib.svn.sourceforge.net/matplotlib/?rev=4172&view=rev Author: dsdale Date: 2007-11-08 17:37:02 -0800 (Thu, 08 Nov 2007) Log Message: ----------- remove old version of pytz Modified Paths: -------------- trunk/matplotlib/lib/dateutil/__init__.py trunk/matplotlib/setup.py Removed Paths: ------------- trunk/matplotlib/lib/pytz/ Modified: trunk/matplotlib/lib/dateutil/__init__.py =================================================================== --- trunk/matplotlib/lib/dateutil/__init__.py 2007-11-09 00:31:48 UTC (rev 4171) +++ trunk/matplotlib/lib/dateutil/__init__.py 2007-11-09 01:37:02 UTC (rev 4172) @@ -6,3 +6,4 @@ """ __author__ = "Gustavo Niemeyer <gu...@ni...>" __license__ = "PSF License" +__version__ = "1.2" Modified: trunk/matplotlib/setup.py =================================================================== --- trunk/matplotlib/setup.py 2007-11-09 00:31:48 UTC (rev 4171) +++ trunk/matplotlib/setup.py 2007-11-09 01:37:02 UTC (rev 4172) @@ -197,13 +197,23 @@ # only install pytz and dateutil if the user hasn't got them def add_pytz(): packages.append('pytz') + resources = ['zone.tab', 'locales/pytz.pot'] # install pytz subdirs - for dirpath, dirname, filenames in os.walk(os.path.join('lib', 'pytz','zoneinfo')): + for dirpath, dirname, filenames in os.walk(os.path.join('lib', 'pytz', + 'zoneinfo')): if '.svn' not in dirpath: - packages.append('/'.join(dirpath.split(os.sep)[1:])) + # remove the 'lib/pytz' part of the path + basepath = dirpath.split(os.path.sep, 2)[2] + resources.extend([os.path.join(basepath, filename) + for filename in filenames]) + package_data['pytz'] = resources + assert len(resources) > 10, 'pytz zoneinfo files not found!' +# packages.append('/'.join(dirpath.split(os.sep)[1:])) def add_dateutil(): packages.append('dateutil') + packages.append('dateutil/zoneinfo') + package_data['dateutil'] = ['zoneinfo/zoneinfo*.tar.*'] haspytz = check_for_pytz() hasdateutil = check_for_dateutil() This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
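The packaging change above stops treating the pytz zoneinfo directories as Python packages and ships them as package_data instead, and likewise ships dateutil's zoneinfo tarball via a glob pattern. A hedged sketch of the distutils mechanism this relies on; matplotlib's real setup.py assembles the package and data lists incrementally (the pytz resource list collected by add_pytz() runs to several hundred files), so the call below is only an illustration, with package names written in the standard dotted form.

    from distutils.core import setup

    setup(name='matplotlib',
          packages=['pytz', 'dateutil', 'dateutil.zoneinfo'],
          package_data={
              # zone.tab, the locale template, plus one per-zone binary file
              # standing in for the full zoneinfo tree collected by add_pytz()
              'pytz': ['zone.tab', 'locales/pytz.pot', 'zoneinfo/US/Eastern'],
              # dateutil ships its zone data as a tarball, matched by a glob
              'dateutil': ['zoneinfo/zoneinfo*.tar.*'],
          })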
From: <ds...@us...> - 2007-11-09 01:40:56
|
Revision: 4173 http://matplotlib.svn.sourceforge.net/matplotlib/?rev=4173&view=rev Author: dsdale Date: 2007-11-08 17:40:54 -0800 (Thu, 08 Nov 2007) Log Message: ----------- added pytz-2007g Modified Paths: -------------- trunk/matplotlib/CHANGELOG Added Paths: ----------- trunk/matplotlib/lib/pytz/ trunk/matplotlib/lib/pytz/CHANGES.txt trunk/matplotlib/lib/pytz/LICENSE.txt trunk/matplotlib/lib/pytz/README.txt trunk/matplotlib/lib/pytz/__init__.py trunk/matplotlib/lib/pytz/reference.py trunk/matplotlib/lib/pytz/tzfile.py trunk/matplotlib/lib/pytz/tzinfo.py trunk/matplotlib/lib/pytz/zoneinfo/ trunk/matplotlib/lib/pytz/zoneinfo/Africa/ trunk/matplotlib/lib/pytz/zoneinfo/Africa/Abidjan trunk/matplotlib/lib/pytz/zoneinfo/Africa/Accra trunk/matplotlib/lib/pytz/zoneinfo/Africa/Addis_Ababa trunk/matplotlib/lib/pytz/zoneinfo/Africa/Algiers trunk/matplotlib/lib/pytz/zoneinfo/Africa/Asmara trunk/matplotlib/lib/pytz/zoneinfo/Africa/Asmera trunk/matplotlib/lib/pytz/zoneinfo/Africa/Bamako trunk/matplotlib/lib/pytz/zoneinfo/Africa/Bangui trunk/matplotlib/lib/pytz/zoneinfo/Africa/Banjul trunk/matplotlib/lib/pytz/zoneinfo/Africa/Bissau trunk/matplotlib/lib/pytz/zoneinfo/Africa/Blantyre trunk/matplotlib/lib/pytz/zoneinfo/Africa/Brazzaville trunk/matplotlib/lib/pytz/zoneinfo/Africa/Bujumbura trunk/matplotlib/lib/pytz/zoneinfo/Africa/Cairo trunk/matplotlib/lib/pytz/zoneinfo/Africa/Casablanca trunk/matplotlib/lib/pytz/zoneinfo/Africa/Ceuta trunk/matplotlib/lib/pytz/zoneinfo/Africa/Conakry trunk/matplotlib/lib/pytz/zoneinfo/Africa/Dakar trunk/matplotlib/lib/pytz/zoneinfo/Africa/Dar_es_Salaam trunk/matplotlib/lib/pytz/zoneinfo/Africa/Djibouti trunk/matplotlib/lib/pytz/zoneinfo/Africa/Douala trunk/matplotlib/lib/pytz/zoneinfo/Africa/El_Aaiun trunk/matplotlib/lib/pytz/zoneinfo/Africa/Freetown trunk/matplotlib/lib/pytz/zoneinfo/Africa/Gaborone trunk/matplotlib/lib/pytz/zoneinfo/Africa/Harare trunk/matplotlib/lib/pytz/zoneinfo/Africa/Johannesburg trunk/matplotlib/lib/pytz/zoneinfo/Africa/Kampala trunk/matplotlib/lib/pytz/zoneinfo/Africa/Khartoum trunk/matplotlib/lib/pytz/zoneinfo/Africa/Kigali trunk/matplotlib/lib/pytz/zoneinfo/Africa/Kinshasa trunk/matplotlib/lib/pytz/zoneinfo/Africa/Lagos trunk/matplotlib/lib/pytz/zoneinfo/Africa/Libreville trunk/matplotlib/lib/pytz/zoneinfo/Africa/Lome trunk/matplotlib/lib/pytz/zoneinfo/Africa/Luanda trunk/matplotlib/lib/pytz/zoneinfo/Africa/Lubumbashi trunk/matplotlib/lib/pytz/zoneinfo/Africa/Lusaka trunk/matplotlib/lib/pytz/zoneinfo/Africa/Malabo trunk/matplotlib/lib/pytz/zoneinfo/Africa/Maputo trunk/matplotlib/lib/pytz/zoneinfo/Africa/Maseru trunk/matplotlib/lib/pytz/zoneinfo/Africa/Mbabane trunk/matplotlib/lib/pytz/zoneinfo/Africa/Mogadishu trunk/matplotlib/lib/pytz/zoneinfo/Africa/Monrovia trunk/matplotlib/lib/pytz/zoneinfo/Africa/Nairobi trunk/matplotlib/lib/pytz/zoneinfo/Africa/Ndjamena trunk/matplotlib/lib/pytz/zoneinfo/Africa/Niamey trunk/matplotlib/lib/pytz/zoneinfo/Africa/Nouakchott trunk/matplotlib/lib/pytz/zoneinfo/Africa/Ouagadougou trunk/matplotlib/lib/pytz/zoneinfo/Africa/Porto-Novo trunk/matplotlib/lib/pytz/zoneinfo/Africa/Sao_Tome trunk/matplotlib/lib/pytz/zoneinfo/Africa/Timbuktu trunk/matplotlib/lib/pytz/zoneinfo/Africa/Tripoli trunk/matplotlib/lib/pytz/zoneinfo/Africa/Tunis trunk/matplotlib/lib/pytz/zoneinfo/Africa/Windhoek trunk/matplotlib/lib/pytz/zoneinfo/America/ trunk/matplotlib/lib/pytz/zoneinfo/America/Adak trunk/matplotlib/lib/pytz/zoneinfo/America/Anchorage trunk/matplotlib/lib/pytz/zoneinfo/America/Anguilla 
trunk/matplotlib/lib/pytz/zoneinfo/America/Antigua trunk/matplotlib/lib/pytz/zoneinfo/America/Araguaina trunk/matplotlib/lib/pytz/zoneinfo/America/Argentina/ trunk/matplotlib/lib/pytz/zoneinfo/America/Argentina/Buenos_Aires trunk/matplotlib/lib/pytz/zoneinfo/America/Argentina/Catamarca trunk/matplotlib/lib/pytz/zoneinfo/America/Argentina/ComodRivadavia trunk/matplotlib/lib/pytz/zoneinfo/America/Argentina/Cordoba trunk/matplotlib/lib/pytz/zoneinfo/America/Argentina/Jujuy trunk/matplotlib/lib/pytz/zoneinfo/America/Argentina/La_Rioja trunk/matplotlib/lib/pytz/zoneinfo/America/Argentina/Mendoza trunk/matplotlib/lib/pytz/zoneinfo/America/Argentina/Rio_Gallegos trunk/matplotlib/lib/pytz/zoneinfo/America/Argentina/San_Juan trunk/matplotlib/lib/pytz/zoneinfo/America/Argentina/Tucuman trunk/matplotlib/lib/pytz/zoneinfo/America/Argentina/Ushuaia trunk/matplotlib/lib/pytz/zoneinfo/America/Aruba trunk/matplotlib/lib/pytz/zoneinfo/America/Asuncion trunk/matplotlib/lib/pytz/zoneinfo/America/Atikokan trunk/matplotlib/lib/pytz/zoneinfo/America/Atka trunk/matplotlib/lib/pytz/zoneinfo/America/Bahia trunk/matplotlib/lib/pytz/zoneinfo/America/Barbados trunk/matplotlib/lib/pytz/zoneinfo/America/Belem trunk/matplotlib/lib/pytz/zoneinfo/America/Belize trunk/matplotlib/lib/pytz/zoneinfo/America/Blanc-Sablon trunk/matplotlib/lib/pytz/zoneinfo/America/Boa_Vista trunk/matplotlib/lib/pytz/zoneinfo/America/Bogota trunk/matplotlib/lib/pytz/zoneinfo/America/Boise trunk/matplotlib/lib/pytz/zoneinfo/America/Buenos_Aires trunk/matplotlib/lib/pytz/zoneinfo/America/Cambridge_Bay trunk/matplotlib/lib/pytz/zoneinfo/America/Campo_Grande trunk/matplotlib/lib/pytz/zoneinfo/America/Cancun trunk/matplotlib/lib/pytz/zoneinfo/America/Caracas trunk/matplotlib/lib/pytz/zoneinfo/America/Catamarca trunk/matplotlib/lib/pytz/zoneinfo/America/Cayenne trunk/matplotlib/lib/pytz/zoneinfo/America/Cayman trunk/matplotlib/lib/pytz/zoneinfo/America/Chicago trunk/matplotlib/lib/pytz/zoneinfo/America/Chihuahua trunk/matplotlib/lib/pytz/zoneinfo/America/Coral_Harbour trunk/matplotlib/lib/pytz/zoneinfo/America/Cordoba trunk/matplotlib/lib/pytz/zoneinfo/America/Costa_Rica trunk/matplotlib/lib/pytz/zoneinfo/America/Cuiaba trunk/matplotlib/lib/pytz/zoneinfo/America/Curacao trunk/matplotlib/lib/pytz/zoneinfo/America/Danmarkshavn trunk/matplotlib/lib/pytz/zoneinfo/America/Dawson trunk/matplotlib/lib/pytz/zoneinfo/America/Dawson_Creek trunk/matplotlib/lib/pytz/zoneinfo/America/Denver trunk/matplotlib/lib/pytz/zoneinfo/America/Detroit trunk/matplotlib/lib/pytz/zoneinfo/America/Dominica trunk/matplotlib/lib/pytz/zoneinfo/America/Edmonton trunk/matplotlib/lib/pytz/zoneinfo/America/Eirunepe trunk/matplotlib/lib/pytz/zoneinfo/America/El_Salvador trunk/matplotlib/lib/pytz/zoneinfo/America/Ensenada trunk/matplotlib/lib/pytz/zoneinfo/America/Fort_Wayne trunk/matplotlib/lib/pytz/zoneinfo/America/Fortaleza trunk/matplotlib/lib/pytz/zoneinfo/America/Glace_Bay trunk/matplotlib/lib/pytz/zoneinfo/America/Godthab trunk/matplotlib/lib/pytz/zoneinfo/America/Goose_Bay trunk/matplotlib/lib/pytz/zoneinfo/America/Grand_Turk trunk/matplotlib/lib/pytz/zoneinfo/America/Grenada trunk/matplotlib/lib/pytz/zoneinfo/America/Guadeloupe trunk/matplotlib/lib/pytz/zoneinfo/America/Guatemala trunk/matplotlib/lib/pytz/zoneinfo/America/Guayaquil trunk/matplotlib/lib/pytz/zoneinfo/America/Guyana trunk/matplotlib/lib/pytz/zoneinfo/America/Halifax trunk/matplotlib/lib/pytz/zoneinfo/America/Havana trunk/matplotlib/lib/pytz/zoneinfo/America/Hermosillo 
trunk/matplotlib/lib/pytz/zoneinfo/America/Indiana/ trunk/matplotlib/lib/pytz/zoneinfo/America/Indiana/Indianapolis trunk/matplotlib/lib/pytz/zoneinfo/America/Indiana/Knox trunk/matplotlib/lib/pytz/zoneinfo/America/Indiana/Marengo trunk/matplotlib/lib/pytz/zoneinfo/America/Indiana/Petersburg trunk/matplotlib/lib/pytz/zoneinfo/America/Indiana/Tell_City trunk/matplotlib/lib/pytz/zoneinfo/America/Indiana/Vevay trunk/matplotlib/lib/pytz/zoneinfo/America/Indiana/Vincennes trunk/matplotlib/lib/pytz/zoneinfo/America/Indiana/Winamac trunk/matplotlib/lib/pytz/zoneinfo/America/Indianapolis trunk/matplotlib/lib/pytz/zoneinfo/America/Inuvik trunk/matplotlib/lib/pytz/zoneinfo/America/Iqaluit trunk/matplotlib/lib/pytz/zoneinfo/America/Jamaica trunk/matplotlib/lib/pytz/zoneinfo/America/Jujuy trunk/matplotlib/lib/pytz/zoneinfo/America/Juneau trunk/matplotlib/lib/pytz/zoneinfo/America/Kentucky/ trunk/matplotlib/lib/pytz/zoneinfo/America/Kentucky/Louisville trunk/matplotlib/lib/pytz/zoneinfo/America/Kentucky/Monticello trunk/matplotlib/lib/pytz/zoneinfo/America/Knox_IN trunk/matplotlib/lib/pytz/zoneinfo/America/La_Paz trunk/matplotlib/lib/pytz/zoneinfo/America/Lima trunk/matplotlib/lib/pytz/zoneinfo/America/Los_Angeles trunk/matplotlib/lib/pytz/zoneinfo/America/Louisville trunk/matplotlib/lib/pytz/zoneinfo/America/Maceio trunk/matplotlib/lib/pytz/zoneinfo/America/Managua trunk/matplotlib/lib/pytz/zoneinfo/America/Manaus trunk/matplotlib/lib/pytz/zoneinfo/America/Martinique trunk/matplotlib/lib/pytz/zoneinfo/America/Mazatlan trunk/matplotlib/lib/pytz/zoneinfo/America/Mendoza trunk/matplotlib/lib/pytz/zoneinfo/America/Menominee trunk/matplotlib/lib/pytz/zoneinfo/America/Merida trunk/matplotlib/lib/pytz/zoneinfo/America/Mexico_City trunk/matplotlib/lib/pytz/zoneinfo/America/Miquelon trunk/matplotlib/lib/pytz/zoneinfo/America/Moncton trunk/matplotlib/lib/pytz/zoneinfo/America/Monterrey trunk/matplotlib/lib/pytz/zoneinfo/America/Montevideo trunk/matplotlib/lib/pytz/zoneinfo/America/Montreal trunk/matplotlib/lib/pytz/zoneinfo/America/Montserrat trunk/matplotlib/lib/pytz/zoneinfo/America/Nassau trunk/matplotlib/lib/pytz/zoneinfo/America/New_York trunk/matplotlib/lib/pytz/zoneinfo/America/Nipigon trunk/matplotlib/lib/pytz/zoneinfo/America/Nome trunk/matplotlib/lib/pytz/zoneinfo/America/Noronha trunk/matplotlib/lib/pytz/zoneinfo/America/North_Dakota/ trunk/matplotlib/lib/pytz/zoneinfo/America/North_Dakota/Center trunk/matplotlib/lib/pytz/zoneinfo/America/North_Dakota/New_Salem trunk/matplotlib/lib/pytz/zoneinfo/America/Panama trunk/matplotlib/lib/pytz/zoneinfo/America/Pangnirtung trunk/matplotlib/lib/pytz/zoneinfo/America/Paramaribo trunk/matplotlib/lib/pytz/zoneinfo/America/Phoenix trunk/matplotlib/lib/pytz/zoneinfo/America/Port-au-Prince trunk/matplotlib/lib/pytz/zoneinfo/America/Port_of_Spain trunk/matplotlib/lib/pytz/zoneinfo/America/Porto_Acre trunk/matplotlib/lib/pytz/zoneinfo/America/Porto_Velho trunk/matplotlib/lib/pytz/zoneinfo/America/Puerto_Rico trunk/matplotlib/lib/pytz/zoneinfo/America/Rainy_River trunk/matplotlib/lib/pytz/zoneinfo/America/Rankin_Inlet trunk/matplotlib/lib/pytz/zoneinfo/America/Recife trunk/matplotlib/lib/pytz/zoneinfo/America/Regina trunk/matplotlib/lib/pytz/zoneinfo/America/Resolute trunk/matplotlib/lib/pytz/zoneinfo/America/Rio_Branco trunk/matplotlib/lib/pytz/zoneinfo/America/Rosario trunk/matplotlib/lib/pytz/zoneinfo/America/Santiago trunk/matplotlib/lib/pytz/zoneinfo/America/Santo_Domingo trunk/matplotlib/lib/pytz/zoneinfo/America/Sao_Paulo 
trunk/matplotlib/lib/pytz/zoneinfo/America/Scoresbysund trunk/matplotlib/lib/pytz/zoneinfo/America/Shiprock trunk/matplotlib/lib/pytz/zoneinfo/America/St_Johns trunk/matplotlib/lib/pytz/zoneinfo/America/St_Kitts trunk/matplotlib/lib/pytz/zoneinfo/America/St_Lucia trunk/matplotlib/lib/pytz/zoneinfo/America/St_Thomas trunk/matplotlib/lib/pytz/zoneinfo/America/St_Vincent trunk/matplotlib/lib/pytz/zoneinfo/America/Swift_Current trunk/matplotlib/lib/pytz/zoneinfo/America/Tegucigalpa trunk/matplotlib/lib/pytz/zoneinfo/America/Thule trunk/matplotlib/lib/pytz/zoneinfo/America/Thunder_Bay trunk/matplotlib/lib/pytz/zoneinfo/America/Tijuana trunk/matplotlib/lib/pytz/zoneinfo/America/Toronto trunk/matplotlib/lib/pytz/zoneinfo/America/Tortola trunk/matplotlib/lib/pytz/zoneinfo/America/Vancouver trunk/matplotlib/lib/pytz/zoneinfo/America/Virgin trunk/matplotlib/lib/pytz/zoneinfo/America/Whitehorse trunk/matplotlib/lib/pytz/zoneinfo/America/Winnipeg trunk/matplotlib/lib/pytz/zoneinfo/America/Yakutat trunk/matplotlib/lib/pytz/zoneinfo/America/Yellowknife trunk/matplotlib/lib/pytz/zoneinfo/Antarctica/ trunk/matplotlib/lib/pytz/zoneinfo/Antarctica/Casey trunk/matplotlib/lib/pytz/zoneinfo/Antarctica/Davis trunk/matplotlib/lib/pytz/zoneinfo/Antarctica/DumontDUrville trunk/matplotlib/lib/pytz/zoneinfo/Antarctica/Mawson trunk/matplotlib/lib/pytz/zoneinfo/Antarctica/McMurdo trunk/matplotlib/lib/pytz/zoneinfo/Antarctica/Palmer trunk/matplotlib/lib/pytz/zoneinfo/Antarctica/Rothera trunk/matplotlib/lib/pytz/zoneinfo/Antarctica/South_Pole trunk/matplotlib/lib/pytz/zoneinfo/Antarctica/Syowa trunk/matplotlib/lib/pytz/zoneinfo/Antarctica/Vostok trunk/matplotlib/lib/pytz/zoneinfo/Arctic/ trunk/matplotlib/lib/pytz/zoneinfo/Arctic/Longyearbyen trunk/matplotlib/lib/pytz/zoneinfo/Asia/ trunk/matplotlib/lib/pytz/zoneinfo/Asia/Aden trunk/matplotlib/lib/pytz/zoneinfo/Asia/Almaty trunk/matplotlib/lib/pytz/zoneinfo/Asia/Amman trunk/matplotlib/lib/pytz/zoneinfo/Asia/Anadyr trunk/matplotlib/lib/pytz/zoneinfo/Asia/Aqtau trunk/matplotlib/lib/pytz/zoneinfo/Asia/Aqtobe trunk/matplotlib/lib/pytz/zoneinfo/Asia/Ashgabat trunk/matplotlib/lib/pytz/zoneinfo/Asia/Ashkhabad trunk/matplotlib/lib/pytz/zoneinfo/Asia/Baghdad trunk/matplotlib/lib/pytz/zoneinfo/Asia/Bahrain trunk/matplotlib/lib/pytz/zoneinfo/Asia/Baku trunk/matplotlib/lib/pytz/zoneinfo/Asia/Bangkok trunk/matplotlib/lib/pytz/zoneinfo/Asia/Beirut trunk/matplotlib/lib/pytz/zoneinfo/Asia/Bishkek trunk/matplotlib/lib/pytz/zoneinfo/Asia/Brunei trunk/matplotlib/lib/pytz/zoneinfo/Asia/Calcutta trunk/matplotlib/lib/pytz/zoneinfo/Asia/Choibalsan trunk/matplotlib/lib/pytz/zoneinfo/Asia/Chongqing trunk/matplotlib/lib/pytz/zoneinfo/Asia/Chungking trunk/matplotlib/lib/pytz/zoneinfo/Asia/Colombo trunk/matplotlib/lib/pytz/zoneinfo/Asia/Dacca trunk/matplotlib/lib/pytz/zoneinfo/Asia/Damascus trunk/matplotlib/lib/pytz/zoneinfo/Asia/Dhaka trunk/matplotlib/lib/pytz/zoneinfo/Asia/Dili trunk/matplotlib/lib/pytz/zoneinfo/Asia/Dubai trunk/matplotlib/lib/pytz/zoneinfo/Asia/Dushanbe trunk/matplotlib/lib/pytz/zoneinfo/Asia/Gaza trunk/matplotlib/lib/pytz/zoneinfo/Asia/Harbin trunk/matplotlib/lib/pytz/zoneinfo/Asia/Hong_Kong trunk/matplotlib/lib/pytz/zoneinfo/Asia/Hovd trunk/matplotlib/lib/pytz/zoneinfo/Asia/Irkutsk trunk/matplotlib/lib/pytz/zoneinfo/Asia/Istanbul trunk/matplotlib/lib/pytz/zoneinfo/Asia/Jakarta trunk/matplotlib/lib/pytz/zoneinfo/Asia/Jayapura trunk/matplotlib/lib/pytz/zoneinfo/Asia/Jerusalem trunk/matplotlib/lib/pytz/zoneinfo/Asia/Kabul trunk/matplotlib/lib/pytz/zoneinfo/Asia/Kamchatka 
trunk/matplotlib/lib/pytz/zoneinfo/Asia/Karachi trunk/matplotlib/lib/pytz/zoneinfo/Asia/Kashgar trunk/matplotlib/lib/pytz/zoneinfo/Asia/Katmandu trunk/matplotlib/lib/pytz/zoneinfo/Asia/Krasnoyarsk trunk/matplotlib/lib/pytz/zoneinfo/Asia/Kuala_Lumpur trunk/matplotlib/lib/pytz/zoneinfo/Asia/Kuching trunk/matplotlib/lib/pytz/zoneinfo/Asia/Kuwait trunk/matplotlib/lib/pytz/zoneinfo/Asia/Macao trunk/matplotlib/lib/pytz/zoneinfo/Asia/Macau trunk/matplotlib/lib/pytz/zoneinfo/Asia/Magadan trunk/matplotlib/lib/pytz/zoneinfo/Asia/Makassar trunk/matplotlib/lib/pytz/zoneinfo/Asia/Manila trunk/matplotlib/lib/pytz/zoneinfo/Asia/Muscat trunk/matplotlib/lib/pytz/zoneinfo/Asia/Nicosia trunk/matplotlib/lib/pytz/zoneinfo/Asia/Novosibirsk trunk/matplotlib/lib/pytz/zoneinfo/Asia/Omsk trunk/matplotlib/lib/pytz/zoneinfo/Asia/Oral trunk/matplotlib/lib/pytz/zoneinfo/Asia/Phnom_Penh trunk/matplotlib/lib/pytz/zoneinfo/Asia/Pontianak trunk/matplotlib/lib/pytz/zoneinfo/Asia/Pyongyang trunk/matplotlib/lib/pytz/zoneinfo/Asia/Qatar trunk/matplotlib/lib/pytz/zoneinfo/Asia/Qyzylorda trunk/matplotlib/lib/pytz/zoneinfo/Asia/Rangoon trunk/matplotlib/lib/pytz/zoneinfo/Asia/Riyadh trunk/matplotlib/lib/pytz/zoneinfo/Asia/Riyadh87 trunk/matplotlib/lib/pytz/zoneinfo/Asia/Riyadh88 trunk/matplotlib/lib/pytz/zoneinfo/Asia/Riyadh89 trunk/matplotlib/lib/pytz/zoneinfo/Asia/Saigon trunk/matplotlib/lib/pytz/zoneinfo/Asia/Sakhalin trunk/matplotlib/lib/pytz/zoneinfo/Asia/Samarkand trunk/matplotlib/lib/pytz/zoneinfo/Asia/Seoul trunk/matplotlib/lib/pytz/zoneinfo/Asia/Shanghai trunk/matplotlib/lib/pytz/zoneinfo/Asia/Singapore trunk/matplotlib/lib/pytz/zoneinfo/Asia/Taipei trunk/matplotlib/lib/pytz/zoneinfo/Asia/Tashkent trunk/matplotlib/lib/pytz/zoneinfo/Asia/Tbilisi trunk/matplotlib/lib/pytz/zoneinfo/Asia/Tehran trunk/matplotlib/lib/pytz/zoneinfo/Asia/Tel_Aviv trunk/matplotlib/lib/pytz/zoneinfo/Asia/Thimbu trunk/matplotlib/lib/pytz/zoneinfo/Asia/Thimphu trunk/matplotlib/lib/pytz/zoneinfo/Asia/Tokyo trunk/matplotlib/lib/pytz/zoneinfo/Asia/Ujung_Pandang trunk/matplotlib/lib/pytz/zoneinfo/Asia/Ulaanbaatar trunk/matplotlib/lib/pytz/zoneinfo/Asia/Ulan_Bator trunk/matplotlib/lib/pytz/zoneinfo/Asia/Urumqi trunk/matplotlib/lib/pytz/zoneinfo/Asia/Vientiane trunk/matplotlib/lib/pytz/zoneinfo/Asia/Vladivostok trunk/matplotlib/lib/pytz/zoneinfo/Asia/Yakutsk trunk/matplotlib/lib/pytz/zoneinfo/Asia/Yekaterinburg trunk/matplotlib/lib/pytz/zoneinfo/Asia/Yerevan trunk/matplotlib/lib/pytz/zoneinfo/Atlantic/ trunk/matplotlib/lib/pytz/zoneinfo/Atlantic/Azores trunk/matplotlib/lib/pytz/zoneinfo/Atlantic/Bermuda trunk/matplotlib/lib/pytz/zoneinfo/Atlantic/Canary trunk/matplotlib/lib/pytz/zoneinfo/Atlantic/Cape_Verde trunk/matplotlib/lib/pytz/zoneinfo/Atlantic/Faeroe trunk/matplotlib/lib/pytz/zoneinfo/Atlantic/Faroe trunk/matplotlib/lib/pytz/zoneinfo/Atlantic/Jan_Mayen trunk/matplotlib/lib/pytz/zoneinfo/Atlantic/Madeira trunk/matplotlib/lib/pytz/zoneinfo/Atlantic/Reykjavik trunk/matplotlib/lib/pytz/zoneinfo/Atlantic/South_Georgia trunk/matplotlib/lib/pytz/zoneinfo/Atlantic/St_Helena trunk/matplotlib/lib/pytz/zoneinfo/Atlantic/Stanley trunk/matplotlib/lib/pytz/zoneinfo/Australia/ trunk/matplotlib/lib/pytz/zoneinfo/Australia/ACT trunk/matplotlib/lib/pytz/zoneinfo/Australia/Adelaide trunk/matplotlib/lib/pytz/zoneinfo/Australia/Brisbane trunk/matplotlib/lib/pytz/zoneinfo/Australia/Broken_Hill trunk/matplotlib/lib/pytz/zoneinfo/Australia/Canberra trunk/matplotlib/lib/pytz/zoneinfo/Australia/Currie trunk/matplotlib/lib/pytz/zoneinfo/Australia/Darwin 
trunk/matplotlib/lib/pytz/zoneinfo/Australia/Eucla trunk/matplotlib/lib/pytz/zoneinfo/Australia/Hobart trunk/matplotlib/lib/pytz/zoneinfo/Australia/LHI trunk/matplotlib/lib/pytz/zoneinfo/Australia/Lindeman trunk/matplotlib/lib/pytz/zoneinfo/Australia/Lord_Howe trunk/matplotlib/lib/pytz/zoneinfo/Australia/Melbourne trunk/matplotlib/lib/pytz/zoneinfo/Australia/NSW trunk/matplotlib/lib/pytz/zoneinfo/Australia/North trunk/matplotlib/lib/pytz/zoneinfo/Australia/Perth trunk/matplotlib/lib/pytz/zoneinfo/Australia/Queensland trunk/matplotlib/lib/pytz/zoneinfo/Australia/South trunk/matplotlib/lib/pytz/zoneinfo/Australia/Sydney trunk/matplotlib/lib/pytz/zoneinfo/Australia/Tasmania trunk/matplotlib/lib/pytz/zoneinfo/Australia/Victoria trunk/matplotlib/lib/pytz/zoneinfo/Australia/West trunk/matplotlib/lib/pytz/zoneinfo/Australia/Yancowinna trunk/matplotlib/lib/pytz/zoneinfo/Brazil/ trunk/matplotlib/lib/pytz/zoneinfo/Brazil/Acre trunk/matplotlib/lib/pytz/zoneinfo/Brazil/DeNoronha trunk/matplotlib/lib/pytz/zoneinfo/Brazil/East trunk/matplotlib/lib/pytz/zoneinfo/Brazil/West trunk/matplotlib/lib/pytz/zoneinfo/CET trunk/matplotlib/lib/pytz/zoneinfo/CST6CDT trunk/matplotlib/lib/pytz/zoneinfo/Canada/ trunk/matplotlib/lib/pytz/zoneinfo/Canada/Atlantic trunk/matplotlib/lib/pytz/zoneinfo/Canada/Central trunk/matplotlib/lib/pytz/zoneinfo/Canada/East-Saskatchewan trunk/matplotlib/lib/pytz/zoneinfo/Canada/Eastern trunk/matplotlib/lib/pytz/zoneinfo/Canada/Mountain trunk/matplotlib/lib/pytz/zoneinfo/Canada/Newfoundland trunk/matplotlib/lib/pytz/zoneinfo/Canada/Pacific trunk/matplotlib/lib/pytz/zoneinfo/Canada/Saskatchewan trunk/matplotlib/lib/pytz/zoneinfo/Canada/Yukon trunk/matplotlib/lib/pytz/zoneinfo/Chile/ trunk/matplotlib/lib/pytz/zoneinfo/Chile/Continental trunk/matplotlib/lib/pytz/zoneinfo/Chile/EasterIsland trunk/matplotlib/lib/pytz/zoneinfo/Cuba trunk/matplotlib/lib/pytz/zoneinfo/EET trunk/matplotlib/lib/pytz/zoneinfo/EST trunk/matplotlib/lib/pytz/zoneinfo/EST5EDT trunk/matplotlib/lib/pytz/zoneinfo/Egypt trunk/matplotlib/lib/pytz/zoneinfo/Eire trunk/matplotlib/lib/pytz/zoneinfo/Etc/ trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT+0 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT+1 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT+10 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT+11 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT+12 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT+2 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT+3 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT+4 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT+5 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT+6 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT+7 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT+8 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT+9 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT-0 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT-1 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT-10 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT-11 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT-12 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT-13 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT-14 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT-2 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT-3 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT-4 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT-5 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT-6 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT-7 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT-8 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT-9 trunk/matplotlib/lib/pytz/zoneinfo/Etc/GMT0 trunk/matplotlib/lib/pytz/zoneinfo/Etc/Greenwich trunk/matplotlib/lib/pytz/zoneinfo/Etc/UCT 
trunk/matplotlib/lib/pytz/zoneinfo/Etc/UTC trunk/matplotlib/lib/pytz/zoneinfo/Etc/Universal trunk/matplotlib/lib/pytz/zoneinfo/Etc/Zulu trunk/matplotlib/lib/pytz/zoneinfo/Europe/ trunk/matplotlib/lib/pytz/zoneinfo/Europe/Amsterdam trunk/matplotlib/lib/pytz/zoneinfo/Europe/Andorra trunk/matplotlib/lib/pytz/zoneinfo/Europe/Athens trunk/matplotlib/lib/pytz/zoneinfo/Europe/Belfast trunk/matplotlib/lib/pytz/zoneinfo/Europe/Belgrade trunk/matplotlib/lib/pytz/zoneinfo/Europe/Berlin trunk/matplotlib/lib/pytz/zoneinfo/Europe/Bratislava trunk/matplotlib/lib/pytz/zoneinfo/Europe/Brussels trunk/matplotlib/lib/pytz/zoneinfo/Europe/Bucharest trunk/matplotlib/lib/pytz/zoneinfo/Europe/Budapest trunk/matplotlib/lib/pytz/zoneinfo/Europe/Chisinau trunk/matplotlib/lib/pytz/zoneinfo/Europe/Copenhagen trunk/matplotlib/lib/pytz/zoneinfo/Europe/Dublin trunk/matplotlib/lib/pytz/zoneinfo/Europe/Gibraltar trunk/matplotlib/lib/pytz/zoneinfo/Europe/Guernsey trunk/matplotlib/lib/pytz/zoneinfo/Europe/Helsinki trunk/matplotlib/lib/pytz/zoneinfo/Europe/Isle_of_Man trunk/matplotlib/lib/pytz/zoneinfo/Europe/Istanbul trunk/matplotlib/lib/pytz/zoneinfo/Europe/Jersey trunk/matplotlib/lib/pytz/zoneinfo/Europe/Kaliningrad trunk/matplotlib/lib/pytz/zoneinfo/Europe/Kiev trunk/matplotlib/lib/pytz/zoneinfo/Europe/Lisbon trunk/matplotlib/lib/pytz/zoneinfo/Europe/Ljubljana trunk/matplotlib/lib/pytz/zoneinfo/Europe/London trunk/matplotlib/lib/pytz/zoneinfo/Europe/Luxembourg trunk/matplotlib/lib/pytz/zoneinfo/Europe/Madrid trunk/matplotlib/lib/pytz/zoneinfo/Europe/Malta trunk/matplotlib/lib/pytz/zoneinfo/Europe/Mariehamn trunk/matplotlib/lib/pytz/zoneinfo/Europe/Minsk trunk/matplotlib/lib/pytz/zoneinfo/Europe/Monaco trunk/matplotlib/lib/pytz/zoneinfo/Europe/Moscow trunk/matplotlib/lib/pytz/zoneinfo/Europe/Nicosia trunk/matplotlib/lib/pytz/zoneinfo/Europe/Oslo trunk/matplotlib/lib/pytz/zoneinfo/Europe/Paris trunk/matplotlib/lib/pytz/zoneinfo/Europe/Podgorica trunk/matplotlib/lib/pytz/zoneinfo/Europe/Prague trunk/matplotlib/lib/pytz/zoneinfo/Europe/Riga trunk/matplotlib/lib/pytz/zoneinfo/Europe/Rome trunk/matplotlib/lib/pytz/zoneinfo/Europe/Samara trunk/matplotlib/lib/pytz/zoneinfo/Europe/San_Marino trunk/matplotlib/lib/pytz/zoneinfo/Europe/Sarajevo trunk/matplotlib/lib/pytz/zoneinfo/Europe/Simferopol trunk/matplotlib/lib/pytz/zoneinfo/Europe/Skopje trunk/matplotlib/lib/pytz/zoneinfo/Europe/Sofia trunk/matplotlib/lib/pytz/zoneinfo/Europe/Stockholm trunk/matplotlib/lib/pytz/zoneinfo/Europe/Tallinn trunk/matplotlib/lib/pytz/zoneinfo/Europe/Tirane trunk/matplotlib/lib/pytz/zoneinfo/Europe/Tiraspol trunk/matplotlib/lib/pytz/zoneinfo/Europe/Uzhgorod trunk/matplotlib/lib/pytz/zoneinfo/Europe/Vaduz trunk/matplotlib/lib/pytz/zoneinfo/Europe/Vatican trunk/matplotlib/lib/pytz/zoneinfo/Europe/Vienna trunk/matplotlib/lib/pytz/zoneinfo/Europe/Vilnius trunk/matplotlib/lib/pytz/zoneinfo/Europe/Volgograd trunk/matplotlib/lib/pytz/zoneinfo/Europe/Warsaw trunk/matplotlib/lib/pytz/zoneinfo/Europe/Zagreb trunk/matplotlib/lib/pytz/zoneinfo/Europe/Zaporozhye trunk/matplotlib/lib/pytz/zoneinfo/Europe/Zurich trunk/matplotlib/lib/pytz/zoneinfo/Factory trunk/matplotlib/lib/pytz/zoneinfo/GB trunk/matplotlib/lib/pytz/zoneinfo/GB-Eire trunk/matplotlib/lib/pytz/zoneinfo/GMT trunk/matplotlib/lib/pytz/zoneinfo/GMT+0 trunk/matplotlib/lib/pytz/zoneinfo/GMT-0 trunk/matplotlib/lib/pytz/zoneinfo/GMT0 trunk/matplotlib/lib/pytz/zoneinfo/Greenwich trunk/matplotlib/lib/pytz/zoneinfo/HST trunk/matplotlib/lib/pytz/zoneinfo/Hongkong trunk/matplotlib/lib/pytz/zoneinfo/Iceland 
trunk/matplotlib/lib/pytz/zoneinfo/Indian/ trunk/matplotlib/lib/pytz/zoneinfo/Indian/Antananarivo trunk/matplotlib/lib/pytz/zoneinfo/Indian/Chagos trunk/matplotlib/lib/pytz/zoneinfo/Indian/Christmas trunk/matplotlib/lib/pytz/zoneinfo/Indian/Cocos trunk/matplotlib/lib/pytz/zoneinfo/Indian/Comoro trunk/matplotlib/lib/pytz/zoneinfo/Indian/Kerguelen trunk/matplotlib/lib/pytz/zoneinfo/Indian/Mahe trunk/matplotlib/lib/pytz/zoneinfo/Indian/Maldives trunk/matplotlib/lib/pytz/zoneinfo/Indian/Mauritius trunk/matplotlib/lib/pytz/zoneinfo/Indian/Mayotte trunk/matplotlib/lib/pytz/zoneinfo/Indian/Reunion trunk/matplotlib/lib/pytz/zoneinfo/Iran trunk/matplotlib/lib/pytz/zoneinfo/Israel trunk/matplotlib/lib/pytz/zoneinfo/Jamaica trunk/matplotlib/lib/pytz/zoneinfo/Japan trunk/matplotlib/lib/pytz/zoneinfo/Kwajalein trunk/matplotlib/lib/pytz/zoneinfo/Libya trunk/matplotlib/lib/pytz/zoneinfo/MET trunk/matplotlib/lib/pytz/zoneinfo/MST trunk/matplotlib/lib/pytz/zoneinfo/MST7MDT trunk/matplotlib/lib/pytz/zoneinfo/Mexico/ trunk/matplotlib/lib/pytz/zoneinfo/Mexico/BajaNorte trunk/matplotlib/lib/pytz/zoneinfo/Mexico/BajaSur trunk/matplotlib/lib/pytz/zoneinfo/Mexico/General trunk/matplotlib/lib/pytz/zoneinfo/Mideast/ trunk/matplotlib/lib/pytz/zoneinfo/Mideast/Riyadh87 trunk/matplotlib/lib/pytz/zoneinfo/Mideast/Riyadh88 trunk/matplotlib/lib/pytz/zoneinfo/Mideast/Riyadh89 trunk/matplotlib/lib/pytz/zoneinfo/NZ trunk/matplotlib/lib/pytz/zoneinfo/NZ-CHAT trunk/matplotlib/lib/pytz/zoneinfo/Navajo trunk/matplotlib/lib/pytz/zoneinfo/PRC trunk/matplotlib/lib/pytz/zoneinfo/PST8PDT trunk/matplotlib/lib/pytz/zoneinfo/Pacific/ trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Apia trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Auckland trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Chatham trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Easter trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Efate trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Enderbury trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Fakaofo trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Fiji trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Funafuti trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Galapagos trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Gambier trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Guadalcanal trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Guam trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Honolulu trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Johnston trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Kiritimati trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Kosrae trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Kwajalein trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Majuro trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Marquesas trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Midway trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Nauru trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Niue trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Norfolk trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Noumea trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Pago_Pago trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Palau trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Pitcairn trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Ponape trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Port_Moresby trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Rarotonga trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Saipan trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Samoa trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Tahiti trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Tarawa trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Tongatapu trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Truk trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Wake 
trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Wallis trunk/matplotlib/lib/pytz/zoneinfo/Pacific/Yap trunk/matplotlib/lib/pytz/zoneinfo/Poland trunk/matplotlib/lib/pytz/zoneinfo/Portugal trunk/matplotlib/lib/pytz/zoneinfo/ROC trunk/matplotlib/lib/pytz/zoneinfo/ROK trunk/matplotlib/lib/pytz/zoneinfo/Singapore trunk/matplotlib/lib/pytz/zoneinfo/Turkey trunk/matplotlib/lib/pytz/zoneinfo/UCT trunk/matplotlib/lib/pytz/zoneinfo/US/ trunk/matplotlib/lib/pytz/zoneinfo/US/Alaska trunk/matplotlib/lib/pytz/zoneinfo/US/Aleutian trunk/matplotlib/lib/pytz/zoneinfo/US/Arizona trunk/matplotlib/lib/pytz/zoneinfo/US/Central trunk/matplotlib/lib/pytz/zoneinfo/US/East-Indiana trunk/matplotlib/lib/pytz/zoneinfo/US/Eastern trunk/matplotlib/lib/pytz/zoneinfo/US/Hawaii trunk/matplotlib/lib/pytz/zoneinfo/US/Indiana-Starke trunk/matplotlib/lib/pytz/zoneinfo/US/Michigan trunk/matplotlib/lib/pytz/zoneinfo/US/Mountain trunk/matplotlib/lib/pytz/zoneinfo/US/Pacific trunk/matplotlib/lib/pytz/zoneinfo/US/Pacific-New trunk/matplotlib/lib/pytz/zoneinfo/US/Samoa trunk/matplotlib/lib/pytz/zoneinfo/UTC trunk/matplotlib/lib/pytz/zoneinfo/Universal trunk/matplotlib/lib/pytz/zoneinfo/W-SU trunk/matplotlib/lib/pytz/zoneinfo/WET trunk/matplotlib/lib/pytz/zoneinfo/Zulu trunk/matplotlib/lib/pytz/zoneinfo/iso3166.tab trunk/matplotlib/lib/pytz/zoneinfo/localtime trunk/matplotlib/lib/pytz/zoneinfo/posixrules trunk/matplotlib/lib/pytz/zoneinfo/zone.tab Modified: trunk/matplotlib/CHANGELOG =================================================================== --- trunk/matplotlib/CHANGELOG 2007-11-09 01:37:02 UTC (rev 4172) +++ trunk/matplotlib/CHANGELOG 2007-11-09 01:40:54 UTC (rev 4173) @@ -1,5 +1,7 @@ -2007-11-08 Update pyparsing to version 1.4.8 - DSD +2007-11-08 Updated pytz to version 2007g - DSD +2007-11-08 Updated pyparsing to version 1.4.8 - DSD + 2007-11-08 Moved csv2rec to recutils and added other record array utilities - JDH Added: trunk/matplotlib/lib/pytz/CHANGES.txt =================================================================== --- trunk/matplotlib/lib/pytz/CHANGES.txt (rev 0) +++ trunk/matplotlib/lib/pytz/CHANGES.txt 2007-11-09 01:40:54 UTC (rev 4173) @@ -0,0 +1,46 @@ +2004-07-25 + + - Improved localtime handling, and added a localize() method enabling + correct creation of local times. + +2005-02-16 + + - Made available under the Zope Public Licence 2.1 (ZPL) and checked + into the Zope3 project. pytz may now be used and redistributed + under either the original MIT license or the ZPL 2.1. + +2005-05-13 + + - Move UTC into the top level pytz module and provide special + case pickle support for this singleton. + +2005-08-14 + + - Ensure all tzinfo instances are efficiently picklable. + +2005-12-31 + + - Add fixed offset timezone classes required by Zope 3 + - Generate and distribute a PO template file listing all timezone + names. Translations are not yet available. + +2007-03-03 + + - Import work by James Henstridge, making pytz load timezone + information from zic compiled binaries at runtime rather than + processing them into Python classes. + +2007-03-26 + + - Update database to version 2007d + - Fix windows incompatibilities, working around limitations on that + platform. + - Fix 2.3 incompatibilities. Installation now requires distutils. + - Passing an invalid timezone name to timezone() now raises an + UnknownTimezoneError, which is a KeyError subclass for backwards + compatibility. 
+ +2007-03-27 + + - Ensure API can accept Unicode strings (Bug #96957) + Added: trunk/matplotlib/lib/pytz/LICENSE.txt =================================================================== --- trunk/matplotlib/lib/pytz/LICENSE.txt (rev 0) +++ trunk/matplotlib/lib/pytz/LICENSE.txt 2007-11-09 01:40:54 UTC (rev 4173) @@ -0,0 +1,19 @@ +Copyright (c) 2003-2007 Stuart Bishop <st...@st...> + +Permission is hereby granted, free of charge, to any person obtaining a +copy of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. Added: trunk/matplotlib/lib/pytz/README.txt =================================================================== --- trunk/matplotlib/lib/pytz/README.txt (rev 0) +++ trunk/matplotlib/lib/pytz/README.txt 2007-11-09 01:40:54 UTC (rev 4173) @@ -0,0 +1,326 @@ +pytz - World Timezone Definitions for Python +============================================ + +:Author: Stuart Bishop <st...@st...> + +Introduction +~~~~~~~~~~~~ + +pytz brings the Olson tz database into Python. This library allows +accurate and cross platform timezone calculations using Python 2.3 +or higher. It also solves the issue of ambiguous times at the end +of daylight savings, which you can read more about in the Python +Library Reference (datetime.tzinfo). + +Amost all (over 540) of the Olson timezones are supported [*]_. + +Note that if you perform date arithmetic on local times that cross +DST boundaries, the results may be in an incorrect timezone (ie. +subtract 1 minute from 2002-10-27 1:00 EST and you get 2002-10-27 +0:59 EST instead of the correct 2002-10-27 1:59 EDT). This cannot +be resolved without modifying the Python datetime implementation. +However, these tzinfo classes provide a normalize() method which +allows you to correct these values. + + +Installation +~~~~~~~~~~~~ + +This is a standard Python distutils distribution. To install the +package, run the following command as an administrative user:: + + python setup.py install + + +Example & Usage +~~~~~~~~~~~~~~~ + +>>> from datetime import datetime, timedelta +>>> from pytz import timezone +>>> import pytz +>>> utc = pytz.utc +>>> utc.zone +'UTC' +>>> eastern = timezone('US/Eastern') +>>> eastern.zone +'US/Eastern' +>>> fmt = '%Y-%m-%d %H:%M:%S %Z%z' + +The preferred way of dealing with times is to always work in UTC, +converting to localtime only when generating output to be read +by humans. 
+ +>>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc) +>>> loc_dt = utc_dt.astimezone(eastern) +>>> loc_dt.strftime(fmt) +'2002-10-27 01:00:00 EST-0500' + +This library also allows you to do date arithmetic using local +times, although it is more complicated than working in UTC as you +need to use the `normalize` method to handle daylight savings time +and other timezone transitions. In this example, `loc_dt` is set +to the instant when daylight savings time ends in the US/Eastern +timezone. + +>>> before = loc_dt - timedelta(minutes=10) +>>> before.strftime(fmt) +'2002-10-27 00:50:00 EST-0500' +>>> eastern.normalize(before).strftime(fmt) +'2002-10-27 01:50:00 EDT-0400' +>>> after = eastern.normalize(before + timedelta(minutes=20)) +>>> after.strftime(fmt) +'2002-10-27 01:10:00 EST-0500' + +Creating localtimes is also tricky, and the reason why working with +local times is not recommended. Unfortunately, you cannot just pass +a 'tzinfo' argument when constructing a datetime (see the next section +for more details) + +>>> dt = datetime(2002, 10, 27, 1, 30, 0) +>>> dt1 = eastern.localize(dt, is_dst=True) +>>> dt1.strftime(fmt) +'2002-10-27 01:30:00 EDT-0400' +>>> dt2 = eastern.localize(dt, is_dst=False) +>>> dt2.strftime(fmt) +'2002-10-27 01:30:00 EST-0500' + +Converting between timezones also needs special attention. This also needs +to use the normalize method to ensure the conversion is correct. + +>>> utc_dt = utc.localize(datetime.utcfromtimestamp(1143408899)) +>>> utc_dt.strftime(fmt) +'2006-03-26 21:34:59 UTC+0000' +>>> au_tz = timezone('Australia/Sydney') +>>> au_dt = au_tz.normalize(utc_dt.astimezone(au_tz)) +>>> au_dt.strftime(fmt) +'2006-03-27 08:34:59 EST+1100' +>>> utc_dt2 = utc.normalize(au_dt.astimezone(utc)) +>>> utc_dt2.strftime(fmt) +'2006-03-26 21:34:59 UTC+0000' + +You can also take shortcuts when dealing with the UTC side of timezone +conversions. Normalize and localize are not really necessary because there +are no daylight savings time transitions to deal with. + +>>> utc_dt = datetime.utcfromtimestamp(1143408899).replace(tzinfo=utc) +>>> utc_dt.strftime(fmt) +'2006-03-26 21:34:59 UTC+0000' +>>> au_tz = timezone('Australia/Sydney') +>>> au_dt = au_tz.normalize(utc_dt.astimezone(au_tz)) +>>> au_dt.strftime(fmt) +'2006-03-27 08:34:59 EST+1100' +>>> utc_dt2 = au_dt.astimezone(utc) +>>> utc_dt2.strftime(fmt) +'2006-03-26 21:34:59 UTC+0000' + + +Problems with Localtime +~~~~~~~~~~~~~~~~~~~~~~~ + +The major problem we have to deal with is that certain datetimes +may occur twice in a year. For example, in the US/Eastern timezone +on the last Sunday morning in October, the following sequence +happens: + + - 01:00 EDT occurs + - 1 hour later, instead of 2:00am the clock is turned back 1 hour + and 01:00 happens again (this time 01:00 EST) + +In fact, every instant between 01:00 and 02:00 occurs twice. This means +that if you try and create a time in the US/Eastern timezone using +the standard datetime syntax, there is no way to specify if you meant +before of after the end-of-daylight-savings-time transition. + +>>> loc_dt = datetime(2002, 10, 27, 1, 30, 00, tzinfo=eastern) +>>> loc_dt.strftime(fmt) +'2002-10-27 01:30:00 EST-0500' + +As you can see, the system has chosen one for you and there is a 50% +chance of it being out by one hour. For some applications, this does +not matter. However, if you are trying to schedule meetings with people +in different timezones or analyze log files it is not acceptable. + +The best and simplest solution is to stick with using UTC. 
The pytz package +encourages using UTC for internal timezone representation by including a +special UTC implementation based on the standard Python reference +implementation in the Python documentation. This timezone unpickles to be +the same instance, and pickles to a relatively small size. The UTC +implementation can be obtained as pytz.utc, pytz.UTC, or +pytz.timezone('UTC'). Note that this instance is not the same +instance (or implementation) as other timezones with the same meaning +(GMT, Greenwich, Universal, etc.). + +>>> import pickle, pytz +>>> dt = datetime(2005, 3, 1, 14, 13, 21, tzinfo=utc) +>>> naive = dt.replace(tzinfo=None) +>>> p = pickle.dumps(dt, 1) +>>> naive_p = pickle.dumps(naive, 1) +>>> len(p), len(naive_p), len(p) - len(naive_p) +(60, 43, 17) +>>> new = pickle.loads(p) +>>> new == dt +True +>>> new is dt +False +>>> new.tzinfo is dt.tzinfo +True +>>> pytz.utc is pytz.UTC is pytz.timezone('UTC') +True +>>> utc is pytz.timezone('GMT') +False + +If you insist on working with local times, this library provides a +facility for constructing them almost unambiguously. + +>>> loc_dt = datetime(2002, 10, 27, 1, 30, 00) +>>> est_dt = eastern.localize(loc_dt, is_dst=True) +>>> edt_dt = eastern.localize(loc_dt, is_dst=False) +>>> print est_dt.strftime(fmt), '/', edt_dt.strftime(fmt) +2002-10-27 01:30:00 EDT-0400 / 2002-10-27 01:30:00 EST-0500 + +Note that although this handles many cases, it is still not possible +to handle all. In cases where countries change their timezone definitions, +cases like the end-of-daylight-savings-time occur with no way of resolving +the ambiguity. For example, in 1915 Warsaw switched from Warsaw time to +Central European time. So at the stroke of midnight on August 4th 1915 +the clocks were wound back 24 minutes creating a ambiguous time period +that cannot be specified without referring to the timezone abbreviation +or the actual UTC offset. + +The 'Standard' Python way of handling all these ambiguities is not to, +such as demonstrated in this example using the US/Eastern timezone +definition from the Python documentation (Note that this implementation +only works for dates between 1987 and 2006 - it is included for tests only!): + +>>> from pytz.reference import Eastern # pytz.reference only for tests +>>> dt = datetime(2002, 10, 27, 0, 30, tzinfo=Eastern) +>>> str(dt) +'2002-10-27 00:30:00-04:00' +>>> str(dt + timedelta(hours=1)) +'2002-10-27 01:30:00-05:00' +>>> str(dt + timedelta(hours=2)) +'2002-10-27 02:30:00-05:00' +>>> str(dt + timedelta(hours=3)) +'2002-10-27 03:30:00-05:00' + +Notice the first two results? At first glance you might think they are +correct, but taking the UTC offset into account you find that they are +actually two hours appart instead of the 1 hour we asked for. + +>>> from pytz.reference import UTC # pytz.reference only for tests +>>> str(dt.astimezone(UTC)) +'2002-10-27 04:30:00+00:00' +>>> str((dt + timedelta(hours=1)).astimezone(UTC)) +'2002-10-27 06:30:00+00:00' + + +What is UTC +~~~~~~~~~~~ + +`UTC` is Universal Time, formerly known as Greenwich Mean Time or GMT. +All other timezones are given as offsets from UTC. No daylight savings +time occurs in UTC, making it a useful timezone to perform date arithmetic +without worrying about the confusion and ambiguities caused by daylight +savings time transitions, your country changing its timezone, or mobile +computers that move roam through multiple timezones. + + +Helpers +~~~~~~~ + +There are two lists of timezones provided. 
+ +`all_timezones` is the exhaustive list of the timezone names that can be used. + +>>> from pytz import all_timezones +>>> len(all_timezones) >= 500 +True +>>> 'Etc/Greenwich' in all_timezones +True + +`common_timezones` is a list of useful, current timezones. It doesn't +contain deprecated zones or historical zones. It is also a sequence of +strings. + +>>> from pytz import common_timezones +>>> len(common_timezones) < len(all_timezones) +True +>>> 'Etc/Greenwich' in common_timezones +False + +You can also retrieve lists of timezones used by particular countries +using the `country_timezones()` method. It requires an ISO-3166 two letter +country code. + +>>> from pytz import country_timezones +>>> country_timezones('ch') +['Europe/Zurich'] +>>> country_timezones('CH') +['Europe/Zurich'] + +License +~~~~~~~ + +MIT license. + +This code is also available as part of Zope 3 under the Zope Public +License, Version 2.1 (ZPL). + +I'm happy to relicense this code if necessary for inclusion in other +open source projects. + +Latest Versions +~~~~~~~~~~~~~~~ + +This package will be updated after releases of the Olson timezone database. +The latest version can be downloaded from the Python Cheeseshop_ or +Sourceforge_. The code that is used to generate this distribution is +available using the Bazaar_ revision control system using:: + + bzr branch http://bazaar.launchpad.net/~stub/pytz/devel + +.. _Cheeseshop: http://cheeseshop.python.org/pypi/pytz/ +.. _Sourceforge: http://sourceforge.net/projects/pytz/ +.. _Bazaar: http://bazaar-vcs.org/ + +Bugs, Feature Requests & Patches +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Bugs can be reported using Launchpad at +https://bugs.launchpad.net/products/pytz + +Issues & Limitations +~~~~~~~~~~~~~~~~~~~~ + +- Offsets from UTC are rounded to the nearest whole minute, so timezones + such as Europe/Amsterdam pre 1937 will be up to 30 seconds out. This is + a limitation of the Python datetime library. + +- If you think a timezone definition is incorrect, I probably can't fix + it. pytz is a direct translation of the Olson timezone database, and + changes to the timezone definitions need to be made to this source. + If you find errors they should be reported to the time zone mailing + list, linked from http://www.twinsun.com/tz/tz-link.htm + +Further Reading +~~~~~~~~~~~~~~~ + +More info than you want to know about timezones: +http://www.twinsun.com/tz/tz-link.htm + + +Contact +~~~~~~~ + +Stuart Bishop <st...@st...> + +.. [*] The missing few are for Riyadh Solar Time in 1987, 1988 and 1989. + As Saudi Arabia gave up trying to cope with their timezone + definition, I see no reason to complicate my code further + to cope with them. (I understand the intention was to set + sunset to 0:00 local time, the start of the Islamic day. + In the best case caused the DST offset to change daily and + worst case caused the DST offset to change each instant + depending on how you interpreted the ruling.) 
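Pulling the usage and helper sections of this README together, the sketch below shows the store-in-UTC, convert-only-for-display pattern it recommends. It is not part of the pytz distribution itself; the 'us' country code and the format string are arbitrary choices for illustration.

    from datetime import datetime
    import pytz

    fmt = '%Y-%m-%d %H:%M:%S %Z%z'
    utc_now = pytz.utc.localize(datetime.utcnow())   # keep internal times in UTC
    for name in pytz.country_timezones('us'):        # display zones for one country
        zone = pytz.timezone(name)
        # normalize after astimezone, as the README advises for conversions
        print name, zone.normalize(utc_now.astimezone(zone)).strftime(fmt)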
+ + Property changes on: trunk/matplotlib/lib/pytz/README.txt ___________________________________________________________________ Name: svn:eol-style + CRLF Added: trunk/matplotlib/lib/pytz/__init__.py =================================================================== --- trunk/matplotlib/lib/pytz/__init__.py (rev 0) +++ trunk/matplotlib/lib/pytz/__init__.py 2007-11-09 01:40:54 UTC (rev 4173) @@ -0,0 +1,1396 @@ +''' +datetime.tzinfo timezone definitions generated from the +Olson timezone database: + + ftp://elsie.nci.nih.gov/pub/tz*.tar.gz + +See the datetime section of the Python Library Reference for information +on how to use these modules. +''' + +# The Olson database has historically been updated about 4 times a year +OLSON_VERSION = '2007g' +VERSION = OLSON_VERSION +#VERSION = OLSON_VERSION + '.2' +__version__ = OLSON_VERSION + +OLSEN_VERSION = OLSON_VERSION # Old releases had this misspelling + +__all__ = [ + 'timezone', 'utc', 'country_timezones', + 'AmbiguousTimeError', 'UnknownTimeZoneError', + 'all_timezones', 'all_timezones_set', + 'common_timezones', 'common_timezones_set', + ] + +import sys, datetime, os.path, gettext + +try: + from pkg_resources import resource_stream +except ImportError: + resource_stream = None + +from tzinfo import AmbiguousTimeError, unpickler +from tzfile import build_tzinfo + +# Use 2.3 sets module implementation if set builtin is not available +try: + set +except NameError: + from sets import Set as set + + +def open_resource(name): + """Open a resource from the zoneinfo subdir for reading. + + Uses the pkg_resources module if available. + """ + if resource_stream is not None: + return resource_stream(__name__, 'zoneinfo/' + name) + else: + name_parts = name.lstrip('/').split('/') + for part in name_parts: + if part == os.path.pardir or os.path.sep in part: + raise ValueError('Bad path segment: %r' % part) + filename = os.path.join(os.path.dirname(__file__), + 'zoneinfo', *name_parts) + return open(filename, 'rb') + + +# Enable this when we get some translations? +# We want an i18n API that is useful to programs using Python's gettext +# module, as well as the Zope3 i18n package. Perhaps we should just provide +# the POT file and translations, and leave it up to callers to make use +# of them. +# +# t = gettext.translation( +# 'pytz', os.path.join(os.path.dirname(__file__), 'locales'), +# fallback=True +# ) +# def _(timezone_name): +# """Translate a timezone name using the current locale, returning Unicode""" +# return t.ugettext(timezone_name) + + +class UnknownTimeZoneError(KeyError): + '''Exception raised when pytz is passed an unknown timezone. + + >>> isinstance(UnknownTimeZoneError(), LookupError) + True + + This class is actually a subclass of KeyError to provide backwards + compatibility with code relying on the undocumented behavior of earlier + pytz releases. 
+ + >>> isinstance(UnknownTimeZoneError(), KeyError) + True + ''' + pass + + +_tzinfo_cache = {} + +def timezone(zone): + r''' Return a datetime.tzinfo implementation for the given timezone + + >>> from datetime import datetime, timedelta + >>> utc = timezone('UTC') + >>> eastern = timezone('US/Eastern') + >>> eastern.zone + 'US/Eastern' + >>> timezone(u'US/Eastern') is eastern + True + >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc) + >>> loc_dt = utc_dt.astimezone(eastern) + >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)' + >>> loc_dt.strftime(fmt) + '2002-10-27 01:00:00 EST (-0500)' + >>> (loc_dt - timedelta(minutes=10)).strftime(fmt) + '2002-10-27 00:50:00 EST (-0500)' + >>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt) + '2002-10-27 01:50:00 EDT (-0400)' + >>> (loc_dt + timedelta(minutes=10)).strftime(fmt) + '2002-10-27 01:10:00 EST (-0500)' + + Raises UnknownTimeZoneError if passed an unknown zone. + + >>> timezone('Asia/Shangri-La') + Traceback (most recent call last): + ... + UnknownTimeZoneError: 'Asia/Shangri-La' + + >>> timezone(u'\N{TRADE MARK SIGN}') + Traceback (most recent call last): + ... + UnknownTimeZoneError: u'\u2122' + ''' + if zone.upper() == 'UTC': + return utc + + try: + zone = zone.encode('US-ASCII') + except UnicodeEncodeError: + # All valid timezones are ASCII + raise UnknownTimeZoneError(zone) + + zone = _unmunge_zone(zone) + if zone not in _tzinfo_cache: + if zone in all_timezones_set: + _tzinfo_cache[zone] = build_tzinfo(zone, open_resource(zone)) + else: + raise UnknownTimeZoneError(zone) + + return _tzinfo_cache[zone] + + +def _unmunge_zone(zone): + """Undo the time zone name munging done by older versions of pytz.""" + return zone.replace('_plus_', '+').replace('_minus_', '-') + + +ZERO = datetime.timedelta(0) +HOUR = datetime.timedelta(hours=1) + + +class UTC(datetime.tzinfo): + """UTC + + Identical to the reference UTC implementation given in Python docs except + that it unpickles using the single module global instance defined beneath + this class declaration. + + Also contains extra attributes and methods to match other pytz tzinfo + instances. + """ + zone = "UTC" + + def utcoffset(self, dt): + return ZERO + + def tzname(self, dt): + return "UTC" + + def dst(self, dt): + return ZERO + + def __reduce__(self): + return _UTC, () + + def localize(self, dt, is_dst=False): + '''Convert naive time to local time''' + if dt.tzinfo is not None: + raise ValueError, 'Not naive datetime (tzinfo is already set)' + return dt.replace(tzinfo=self) + + def normalize(self, dt, is_dst=False): + '''Correct the timezone information on the given datetime''' + if dt.tzinfo is None: + raise ValueError, 'Naive time - no tzinfo set' + return dt.replace(tzinfo=self) + + def __repr__(self): + return "<UTC>" + + def __str__(self): + return "UTC" + + +UTC = utc = UTC() # UTC is a singleton + + +def _UTC(): + """Factory function for utc unpickling. + + Makes sure that unpickling a utc instance always returns the same + module global. + + These examples belong in the UTC class above, but it is obscured; or in + the README.txt, but we are not depending on Python 2.4 so integrating + the README.txt examples with the unit tests is not trivial. 
+ + >>> import datetime, pickle + >>> dt = datetime.datetime(2005, 3, 1, 14, 13, 21, tzinfo=utc) + >>> naive = dt.replace(tzinfo=None) + >>> p = pickle.dumps(dt, 1) + >>> naive_p = pickle.dumps(naive, 1) + >>> len(p), len(naive_p), len(p) - len(naive_p) + (60, 43, 17) + >>> new = pickle.loads(p) + >>> new == dt + True + >>> new is dt + False + >>> new.tzinfo is dt.tzinfo + True + >>> utc is UTC is timezone('UTC') + True + >>> utc is timezone('GMT') + False + """ + return utc +_UTC.__safe_for_unpickling__ = True + + +def _p(*args): + """Factory function for unpickling pytz tzinfo instances. + + Just a wrapper around tzinfo.unpickler to save a few bytes in each pickle + by shortening the path. + """ + return unpickler(*args) +_p.__safe_for_unpickling__ = True + +_country_timezones_cache = {} + +def country_timezones(iso3166_code): + """Return a list of timezones used in a particular country. + + iso3166_code is the two letter code used to identify the country. + + >>> country_timezones('ch') + ['Europe/Zurich'] + >>> country_timezones('CH') + ['Europe/Zurich'] + >>> country_timezones(u'ch') + ['Europe/Zurich'] + >>> country_timezones('XXX') + Traceback (most recent call last): + ... + KeyError: 'XXX' + """ + iso3166_code = iso3166_code.upper() + if not _country_timezones_cache: + zone_tab = open_resource('zone.tab') + for line in zone_tab: + if line.startswith('#'): + continue + code, coordinates, zone = line.split(None, 4)[:3] + try: + _country_timezones_cache[code].append(zone) + except KeyError: + _country_timezones_cache[code] = [zone] + return _country_timezones_cache[iso3166_code] + + +# Time-zone info based solely on fixed offsets + +class _FixedOffset(datetime.tzinfo): + + zone = None # to match the standard pytz API + + def __init__(self, minutes): + if abs(minutes) >= 1440: + raise ValueError("absolute offset is too large", minutes) + self._minutes = minutes + self._offset = datetime.timedelta(minutes=minutes) + + def utcoffset(self, dt): + return self._offset + + def __reduce__(self): + return FixedOffset, (self._minutes, ) + + def dst(self, dt): + return None + + def tzname(self, dt): + return None + + def __repr__(self): + return 'pytz.FixedOffset(%d)' % self._minutes + + def localize(self, dt, is_dst=False): + '''Convert naive time to local time''' + if dt.tzinfo is not None: + raise ValueError, 'Not naive datetime (tzinfo is already set)' + return dt.replace(tzinfo=self) + + def normalize(self, dt, is_dst=False): + '''Correct the timezone information on the given datetime''' + if dt.tzinfo is None: + raise ValueError, 'Naive time - no tzinfo set' + return dt.replace(tzinfo=self) + + +def FixedOffset(offset, _tzinfos = {}): + """return a fixed-offset timezone based off a number of minutes. + + >>> one = FixedOffset(-330) + >>> one + pytz.FixedOffset(-330) + >>> one.utcoffset(datetime.datetime.now()) + datetime.timedelta(-1, 66600) + + >>> two = FixedOffset(1380) + >>> two + pytz.FixedOffset(1380) + >>> two.utcoffset(datetime.datetime.now()) + datetime.timedelta(0, 82800) + + The datetime.timedelta must be between the range of -1 and 1 day, + non-inclusive. + + >>> FixedOffset(1440) + Traceback (most recent call last): + ... + ValueError: ('absolute offset is too large', 1440) + + >>> FixedOffset(-1440) + Traceback (most recent call last): + ... + ValueError: ('absolute offset is too large', -1440) + + An offset of 0 is special-cased to return UTC. + + >>> FixedOffset(0) is UTC + True + + There should always be only one instance of a FixedOffset per timedelta. 
+ This should be true for multiple creation calls. + + >>> FixedOffset(-330) is one + True + >>> FixedOffset(1380) is two + True + + It should also be true for pickling. + + >>> import pickle + >>> pickle.loads(pickle.dumps(one)) is one + True + >>> pickle.loads(pickle.dumps(two)) is two + True + """ + if offset == 0: + return UTC + + info = _tzinfos.get(offset) + if info is None: + # We haven't seen this one before. we need to save it. + + # Use setdefault to avoid a race condition and make sure we have + # only one + info = _tzinfos.setdefault(offset, _FixedOffset(offset)) + + return info + +FixedOffset.__safe_for_unpickling__ = True + + +def _test(): + import doctest, os, sys + sys.path.insert(0, os.pardir) + import pytz + return doctest.testmod(pytz) + +if __name__ == '__main__': + _test() + +common_timezones = \ +['Africa/Abidjan', + 'Africa/Accra', + 'Africa/Addis_Ababa', + 'Africa/Algiers', + 'Africa/Asmara', + 'Africa/Asmera', + 'Africa/Bamako', + 'Africa/Bangui', + 'Africa/Banjul', + 'Africa/Bissau', + 'Africa/Blantyre', + 'Africa/Brazzaville', + 'Africa/Bujumbura', + 'Africa/Cairo', + 'Africa/Casablanca', + 'Africa/Ceuta', + 'Africa/Conakry', + 'Africa/Dakar', + 'Africa/Dar_es_Salaam', + 'Africa/Djibouti', + 'Africa/Douala', + 'Africa/El_Aaiun', + 'Africa/Freetown', + 'Africa/Gaborone', + 'Africa/Harare', + 'Africa/Johannesburg', + 'Africa/Kampala', + 'Africa/Khartoum', + 'Africa/Kigali', + 'Africa/Kinshasa', + 'Africa/Lagos', + 'Africa/Libreville', + 'Africa/Lome', + 'Africa/Luanda', + 'Africa/Lubumbashi', + 'Africa/Lusaka', + 'Africa/Malabo', + 'Africa/Maputo', + 'Africa/Maseru', + 'Africa/Mbabane', + 'Africa/Mogadishu', + 'Africa/Monrovia', + 'Africa/Nairobi', + 'Africa/Ndjamena', + 'Africa/Niamey', + 'Africa/Nouakchott', + 'Africa/Ouagadougou', + 'Africa/Porto-Novo', + 'Africa/Sao_Tome', + 'Africa/Timbuktu', + 'Africa/Tripoli', + 'Africa/Tunis', + 'Africa/Windhoek', + 'America/Adak', + 'America/Anchorage', + 'America/Anguilla', + 'America/Antigua', + 'America/Araguaina', + 'America/Aruba', + 'America/Asuncion', + 'America/Atikokan', + 'America/Atka', + 'America/Bahia', + 'America/Barbados', + 'America/Belem', + 'America/B... [truncated message content] |
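The FixedOffset factory in the hunk above interns its instances in a dictionary keyed by the minute offset, so repeated calls (and unpickling through __reduce__) hand back the same object. Below is a standalone sketch of that idiom with hypothetical names, not the pytz classes themselves:

    class _Offset(object):
        def __init__(self, minutes):
            self.minutes = minutes

    def get_offset(minutes, _cache={}):
        # try a plain lookup first; fall back to setdefault so two callers
        # racing on the same key still end up sharing the stored instance
        obj = _cache.get(minutes)
        if obj is None:
            obj = _cache.setdefault(minutes, _Offset(minutes))
        return obj

    assert get_offset(-330) is get_offset(-330)   # one instance per offset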
From: <jd...@us...> - 2007-11-09 02:56:05
|
Revision: 4175 http://matplotlib.svn.sourceforge.net/matplotlib/?rev=4175&view=rev Author: jdh2358 Date: 2007-11-08 18:55:47 -0800 (Thu, 08 Nov 2007) Log Message: ----------- reverted rec utils to mlab Modified Paths: -------------- trunk/matplotlib/CHANGELOG trunk/matplotlib/examples/date_index_formatter.py trunk/matplotlib/examples/loadrec.py trunk/matplotlib/lib/matplotlib/mlab.py Modified: trunk/matplotlib/CHANGELOG =================================================================== --- trunk/matplotlib/CHANGELOG 2007-11-09 01:43:18 UTC (rev 4174) +++ trunk/matplotlib/CHANGELOG 2007-11-09 02:55:47 UTC (rev 4175) @@ -1,3 +1,6 @@ +2007-11-08 Added additional record array utilites to mlab (rec2excel, + rec2gtk, rec_join, rec_append_field, rec_drop_field) - JDH + 2007-11-08 Updated pytz to version 2007g - DSD 2007-11-08 Updated pyparsing to version 1.4.8 - DSD Modified: trunk/matplotlib/examples/date_index_formatter.py =================================================================== --- trunk/matplotlib/examples/date_index_formatter.py 2007-11-09 01:43:18 UTC (rev 4174) +++ trunk/matplotlib/examples/date_index_formatter.py 2007-11-09 02:55:47 UTC (rev 4175) @@ -9,7 +9,7 @@ """ import numpy -from matplotlib.recutils import csv2rec +from matplotlib.mlab import csv2rec from pylab import figure, show from matplotlib.ticker import Formatter Modified: trunk/matplotlib/examples/loadrec.py =================================================================== --- trunk/matplotlib/examples/loadrec.py 2007-11-09 01:43:18 UTC (rev 4174) +++ trunk/matplotlib/examples/loadrec.py 2007-11-09 02:55:47 UTC (rev 4175) @@ -1,7 +1,7 @@ -from matplotlib.recutils import csv2rec +from matplotlib import mlab from pylab import figure, show -a = csv2rec('data/msft.csv') +a = mlab.csv2rec('data/msft.csv') print a.dtype fig = figure() Modified: trunk/matplotlib/lib/matplotlib/mlab.py =================================================================== --- trunk/matplotlib/lib/matplotlib/mlab.py 2007-11-09 01:43:18 UTC (rev 4174) +++ trunk/matplotlib/lib/matplotlib/mlab.py 2007-11-09 02:55:47 UTC (rev 4175) @@ -44,18 +44,47 @@ compute it for a lot of pairs. This function is optimized to do this efficiently by caching the direct FFTs. -Credits: += record array helper functions = - Unless otherwise noted, these functions were written by - Author: John D. Hunter <jdh...@ac...> + rec2csv : store record array in CSV file + rec2excel : store record array in excel worksheet - required pyExcelerator + rec2gtk : put record array in GTK treeview - requires gtk + csv2rec : import record array from CSV file with type inspection + rec_append_field : add a field/array to record array + rec_drop_fields : drop fields from record array + rec_join : join two record arrays on sequence of fields - Some others are from the Numeric documentation, or imported from - MLab or other Numeric packages +For the rec viewer clases (rec2csv, rec2excel and rec2gtk), there are +a bunch of Format objects you can pass into the functions that will do +things like color negative values red, set percent formatting and +scaling, etc. 
+ +Example usage: + + r = csv2rec('somefile.csv', checkrows=0) + + formatd = dict( + weight = FormatFloat(2), + change = FormatPercent(2), + cost = FormatThousands(2), + ) + + + rec2excel(r, 'test.xls', formatd=formatd) + rec2csv(r, 'test.csv', formatd=formatd) + scroll = rec2gtk(r, formatd=formatd) + + win = gtk.Window() + win.set_size_request(600,800) + win.add(scroll) + win.show_all() + gtk.main() + """ from __future__ import division -import sys, datetime, csv, warnings +import sys, datetime, csv, warnings, copy import numpy as npy @@ -1907,3 +1936,762 @@ return x ### end mlab2 functions + +#Classes for manipulating and viewing numpy record arrays + + + + + +def safe_isnan(x): + 'isnan for arbitrary types' + try: b = npy.isnan(x) + except NotImplementedError: return False + else: return b + + +def rec_append_field(rec, name, arr, dtype=None): + 'return a new record array with field name populated with data from array arr' + arr = npy.asarray(arr) + if dtype is None: + dtype = arr.dtype + newdtype = npy.dtype(rec.dtype.descr + [(name, dtype)]) + newrec = npy.empty(rec.shape, dtype=newdtype) + for field in rec.dtype.fields: + newrec[field] = rec[field] + newrec[name] = arr + return newrec.view(npy.recarray) + + +def rec_drop_fields(rec, names): + 'return a new numpy record array with fields in names dropped' + + names = set(names) + Nr = len(rec) + + newdtype = npy.dtype([(name, rec.dtype[name]) for name in rec.dtype.names + if name not in names]) + + newrec = npy.empty(Nr, dtype=newdtype) + for field in newdtype.names: + newrec[field] = rec[field] + + return newrec.view(npy.recarray) + + +def rec_join(key, r1, r2): + """ + join record arrays r1 and r2 on key; key is a tuple of field + names. if r1 and r2 have equal values on all the keys in the key + tuple, then their fields will be merged into a new record array + containing the union of the fields of r1 and r2 + """ + + for name in key: + if name not in r1.dtype.names: + raise ValueError('r1 does not have key field %s'%name) + if name not in r2.dtype.names: + raise ValueError('r2 does not have key field %s'%name) + + def makekey(row): + return tuple([row[name] for name in key]) + + + names = list(r1.dtype.names) + [name for name in r2.dtype.names if name not in set(r1.dtype.names)] + + + + r1d = dict([(makekey(row),i) for i,row in enumerate(r1)]) + r2d = dict([(makekey(row),i) for i,row in enumerate(r2)]) + + r1keys = set(r1d.keys()) + r2keys = set(r2d.keys()) + + keys = r1keys & r2keys + + r1ind = [r1d[k] for k in keys] + r2ind = [r2d[k] for k in keys] + + + r1 = r1[r1ind] + r2 = r2[r2ind] + + r2 = rec_drop_fields(r2, r1.dtype.names) + + + def key_desc(name): + 'if name is a string key, use the larger size of r1 or r2 before merging' + dt1 = r1.dtype[name] + if dt1.type != npy.string_: + return (name, dt1.descr[0][1]) + + dt2 = r1.dtype[name] + assert dt2==dt1 + if dt1.num>dt2.num: + return (name, dt1.descr[0][1]) + else: + return (name, dt2.descr[0][1]) + + + + keydesc = [key_desc(name) for name in key] + + newdtype = npy.dtype(keydesc + + [desc for desc in r1.dtype.descr if desc[0] not in key ] + + [desc for desc in r2.dtype.descr if desc[0] not in key ] ) + + + newrec = npy.empty(len(r1), dtype=newdtype) + for field in r1.dtype.names: + newrec[field] = r1[field] + + for field in r2.dtype.names: + newrec[field] = r2[field] + + return newrec.view(npy.recarray) + + +def csv2rec(fname, comments='#', skiprows=0, checkrows=5, delimiter=',', + converterd=None, names=None, missing=None): + """ + Load data from comma/space/tab delimited 
file in fname into a + numpy record array and return the record array. + + If names is None, a header row is required to automatically assign + the recarray names. The headers will be lower cased, spaces will + be converted to underscores, and illegal attribute name characters + removed. If names is not None, it is a sequence of names to use + for the column names. In this case, it is assumed there is no header row. + + + fname - can be a filename or a file handle. Support for gzipped + files is automatic, if the filename ends in .gz + + comments - the character used to indicate the start of a comment + in the file + + skiprows - is the number of rows from the top to skip + + checkrows - is the number of rows to check to validate the column + data type. When set to zero all rows are validated. + + converterd, if not None, is a dictionary mapping column number or + munged column name to a converter function + + names, if not None, is a list of header names. In this case, no + header will be read from the file + + if no rows are found, None is returned See examples/loadrec.py + """ + + if converterd is None: + converterd = dict() + + import dateutil.parser + parsedate = dateutil.parser.parse + + + fh = cbook.to_filehandle(fname) + + + class FH: + """ + for space delimited files, we want different behavior than + comma or tab. Generally, we want multiple spaces to be + treated as a single separator, whereas with comma and tab we + want multiple commas to return multiple (empty) fields. The + join/strip trick below effects this + """ + def __init__(self, fh): + self.fh = fh + + def close(self): + self.fh.close() + + def seek(self, arg): + self.fh.seek(arg) + + def fix(self, s): + return ' '.join(s.split()) + + + def next(self): + return self.fix(self.fh.next()) + + def __iter__(self): + for line in self.fh: + yield self.fix(line) + + if delimiter==' ': + fh = FH(fh) + + reader = csv.reader(fh, delimiter=delimiter) + def process_skiprows(reader): + if skiprows: + for i, row in enumerate(reader): + if i>=(skiprows-1): break + + return fh, reader + + process_skiprows(reader) + + + def myfloat(x): + if x==missing: + return npy.nan + else: + return float(x) + + def get_func(item, func): + # promote functions in this order + funcmap = {int:myfloat, myfloat:dateutil.parser.parse, dateutil.parser.parse:str} + try: func(item) + except: + if func==str: + raise ValueError('Could not find a working conversion function') + else: return get_func(item, funcmap[func]) # recurse + else: return func + + + # map column names that clash with builtins -- TODO - extend this list + itemd = { + 'return' : 'return_', + 'file' : 'file_', + 'print' : 'print_', + } + + def get_converters(reader): + + converters = None + for i, row in enumerate(reader): + if i==0: + converters = [int]*len(row) + if checkrows and i>checkrows: + break + #print i, len(names), len(row) + #print 'converters', zip(converters, row) + for j, (name, item) in enumerate(zip(names, row)): + func = converterd.get(j) + if func is None: + func = converterd.get(name) + if func is None: + if not item.strip(): continue + func = converters[j] + if len(item.strip()): + func = get_func(item, func) + converters[j] = func + return converters + + # Get header and remove invalid characters + needheader = names is None + if needheader: + headers = reader.next() + # remove these chars + delete = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""") + delete.add('"') + + names = [] + seen = dict() + for i, item in enumerate(headers): + item = item.strip().lower().replace(' ', 
'_') + item = ''.join([c for c in item if c not in delete]) + if not len(item): + item = 'column%d'%i + + item = itemd.get(item, item) + cnt = seen.get(item, 0) + if cnt>0: + names.append(item + '%d'%cnt) + else: + names.append(item) + seen[item] = cnt+1 + + # get the converter functions by inspecting checkrows + converters = get_converters(reader) + if converters is None: + raise ValueError('Could not find any valid data in CSV file') + + # reset the reader and start over + fh.seek(0) + process_skiprows(reader) + if needheader: + skipheader = reader.next() + + # iterate over the remaining rows and convert the data to date + # objects, ints, or floats as approriate + rows = [] + for i, row in enumerate(reader): + if not len(row): continue + if row[0].startswith(comments): continue + rows.append([func(val) for func, val in zip(converters, row)]) + fh.close() + + if not len(rows): + return None + r = npy.rec.fromrecords(rows, names=names) + return r + + +# a series of classes for describing the format intentions of various rec views +class FormatObj: + def tostr(self, x): + return str(self.toval(x)) + + def toval(self, x): + return x + + +class FormatString(FormatObj): + def tostr(self, x): + return '"%s"'%self.toval(x) + + +class FormatFormatStr(FormatObj): + def __init__(self, fmt): + self.fmt = fmt + + def tostr(self, x): + if x is None: return 'None' + return self.fmt%self.toval(x) + +class FormatFloat(FormatFormatStr): + def __init__(self, precision=4, scale=1.): + FormatFormatStr.__init__(self, '%%1.%df'%precision) + self.precision = precision + self.scale = scale + + def toval(self, x): + if x is not None: + x = x * self.scale + return x + +class FormatInt(FormatObj): + pass + +class FormatPercent(FormatFloat): + def __init__(self, precision=4): + FormatFloat.__init__(self, precision, scale=100.) + +class FormatThousands(FormatFloat): + def __init__(self, precision=4): + FormatFloat.__init__(self, precision, scale=1e-3) + +class FormatMillions(FormatFloat): + def __init__(self, precision=4): + FormatFloat.__init__(self, precision, scale=1e-6) + + +class FormatDate(FormatString): + def __init__(self, fmt): + self.fmt = fmt + + def toval(self, x): + if x is None: return 'None' + return x.strftime(self.fmt) + +class FormatDatetime(FormatDate): + def __init__(self, fmt='%Y-%m-%d %H:%M:%S'): + FormatDate.__init__(self, fmt) + + +defaultformatd = { + npy.int16 : FormatInt(), + npy.int32 : FormatInt(), + npy.int64 : FormatInt(), + npy.float32 : FormatFloat(), + npy.float64 : FormatFloat(), + npy.object_ : FormatObj(), + npy.string_ : FormatString(), + } + +def get_formatd(r, formatd=None): + 'build a formatd guaranteed to have a key for every dtype name' + if formatd is None: + formatd = dict() + + for i, name in enumerate(r.dtype.names): + dt = r.dtype[name] + format = formatd.get(name) + if format is None: + format = defaultformatd.get(dt.type, FormatObj()) + formatd[name] = format + return formatd + +def csvformat_factory(format): + format = copy.deepcopy(format) + if isinstance(format, FormatFloat): + format.scale = 1. # override scaling for storage + format.fmt = '%g' # maximal precision + return format + +def rec2csv(r, fname, delimiter=',', formatd=None): + """ + Save the data from numpy record array r into a comma/space/tab + delimited file. The record array dtype names will be used for + column headers. + + + fname - can be a filename or a file handle. 
Support for gzipped + files is automatic, if the filename ends in .gz + """ + formatd = get_formatd(r, formatd) + funcs = [] + for i, name in enumerate(r.dtype.names): + funcs.append(csvformat_factory(formatd[name]).tostr) + + fh = cbook.to_filehandle(fname, 'w') + writer = csv.writer(fh, delimiter=delimiter) + header = r.dtype.names + writer.writerow(header) + for row in r: + writer.writerow([func(val) for func, val in zip(funcs, row)]) + fh.close() + +# if pyExcelerator is installed, provide an excel view +try: + import pyExcelerator as excel +except ImportError: + pass +else: + + def xlformat_factory(format): + """ + copy the format, perform any overrides, and attach an xlstyle instance + copied format is returned + """ + format = copy.deepcopy(format) + + + + xlstyle = excel.XFStyle() + if isinstance(format, FormatFloat): + zeros = ''.join(['0']*format.precision) + xlstyle.num_format_str = '#,##0.%s;[RED]-#,##0.%s'%(zeros, zeros) + elif isinstance(format, FormatInt): + xlstyle.num_format_str = '#,##;[RED]-#,##' + elif isinstance(format, FormatPercent): + zeros = ''.join(['0']*format.precision) + xlstyle.num_format_str = '0.%s%;[RED]-0.%s%'%(zeros, zeros) + format.scale = 1. + else: + xlstyle = None + + format.xlstyle = xlstyle + + return format + + def rec2excel(r, ws, formatd=None, rownum=0): + """ + save record array r to excel pyExcelerator worksheet ws + starting at rownum. if ws is string like, assume it is a + filename and save to it + + formatd is a dictionary mapping dtype name -> FormatXL instances + + The next rownum after writing is returned + """ + + autosave = False + if cbook.is_string_like(ws): + filename = ws + wb = excel.Workbook() + ws = wb.add_sheet('worksheet') + autosave = True + + + if formatd is None: + formatd = dict() + + formats = [] + for i, name in enumerate(r.dtype.names): + dt = r.dtype[name] + format = formatd.get(name) + if format is None: + format = defaultformatd.get(dt.type, FormatObj()) + + format = xlformat_factory(format) + ws.write(rownum, i, name) + formats.append(format) + + rownum+=1 + + + ind = npy.arange(len(r.dtype.names)) + for row in r: + for i in ind: + val = row[i] + format = formats[i] + val = format.toval(val) + if format.xlstyle is None: + ws.write(rownum, i, val) + else: + if safe_isnan(val): + ws.write(rownum, i, 'NaN') + else: + ws.write(rownum, i, val, format.xlstyle) + rownum += 1 + + if autosave: + wb.save(filename) + return rownum + + + + +# if gtk is installed, provide a gtk view +try: + import gtk, gobject +except ImportError: + pass +except RuntimeError: + pass +else: + + + def gtkformat_factory(format, colnum): + """ + copy the format, perform any overrides, and attach an gtk style attrs + + + xalign = 0. + cell = None + + """ + + format = copy.copy(format) + format.xalign = 0. + format.cell = None + + def negative_red_cell(column, cell, model, thisiter): + val = model.get_value(thisiter, colnum) + try: val = float(val) + except: cell.set_property('foreground', 'black') + else: + if val<0: + cell.set_property('foreground', 'red') + else: + cell.set_property('foreground', 'black') + + + if isinstance(format, FormatFloat) or isinstance(format, FormatInt): + format.cell = negative_red_cell + format.xalign = 1. + elif isinstance(format, FormatDate): + format.xalign = 1. + return format + + + + class SortedStringsScrolledWindow(gtk.ScrolledWindow): + """ + A simple treeview/liststore assuming all columns are strings. 
+ Supports ascending/descending sort by clicking on column header + """ + + def __init__(self, colheaders, formatterd=None): + """ + xalignd if not None, is a dict mapping col header to xalignent (default 1) + + formatterd if not None, is a dict mapping col header to a ColumnFormatter + """ + + + gtk.ScrolledWindow.__init__(self) + self.colheaders = colheaders + self.seq = None # not initialized with accts + self.set_shadow_type(gtk.SHADOW_ETCHED_IN) + self.set_policy(gtk.POLICY_AUTOMATIC, + gtk.POLICY_AUTOMATIC) + + types = [gobject.TYPE_STRING] * len(colheaders) + model = self.model = gtk.ListStore(*types) + + + treeview = gtk.TreeView(self.model) + treeview.show() + treeview.get_selection().set_mode(gtk.SELECTION_MULTIPLE) + treeview.set_rules_hint(True) + + + class Clicked: + def __init__(self, parent, i): + self.parent = parent + self.i = i + self.num = 0 + + def __call__(self, column): + ind = [] + dsu = [] + for rownum, thisiter in enumerate(self.parent.iters): + val = model.get_value(thisiter, self.i) + try: val = float(val.strip().rstrip('%')) + except ValueError: pass + if npy.isnan(val): val = npy.inf # force nan to sort uniquely + dsu.append((val, rownum)) + dsu.sort() + if not self.num%2: dsu.reverse() + + vals, otherind = zip(*dsu) + ind.extend(otherind) + + self.parent.model.reorder(ind) + newiters = [] + for i in ind: + newiters.append(self.parent.iters[i]) + self.parent.iters = newiters[:] + for i, thisiter in enumerate(self.parent.iters): + key = tuple([self.parent.model.get_value(thisiter, j) for j in range(len(colheaders))]) + self.parent.rownumd[i] = key + + self.num+=1 + + + if formatterd is None: + formatterd = dict() + + formatterd = formatterd.copy() + + for i, header in enumerate(colheaders): + renderer = gtk.CellRendererText() + if header not in formatterd: + formatterd[header] = ColumnFormatter() + formatter = formatterd[header] + + column = gtk.TreeViewColumn(header, renderer, text=i) + renderer.set_property('xalign', formatter.xalign) + column.connect('clicked', Clicked(self, i)) + column.set_property('clickable', True) + + if formatter.cell is not None: + column.set_cell_data_func(renderer, formatter.cell) + + treeview.append_column(column) + + + + self.formatterd = formatterd + self.lastcol = column + self.add(treeview) + self.treeview = treeview + self.clear() + + def clear(self): + self.iterd = dict() + self.iters = [] # an ordered list of iters + self.rownumd = dict() # a map from rownum -> symbol + self.model.clear() + self.datad = dict() + + + def flat(self, row): + seq = [] + for i,val in enumerate(row): + formatter = self.formatterd.get(self.colheaders[i]) + seq.extend([i,formatter.tostr(val)]) + return seq + + def __delete_selected(self, *unused): # untested + + + keyd = dict([(thisiter, key) for key, thisiter in self.iterd.values()]) + for row in self.get_selected(): + key = tuple(row) + thisiter = self.iterd[key] + self.model.remove(thisiter) + del self.datad[key] + del self.iterd[key] + self.iters.remove(thisiter) + + for i, thisiter in enumerate(self.iters): + self.rownumd[i] = keyd[thisiter] + + + + def delete_row(self, row): + key = tuple(row) + thisiter = self.iterd[key] + self.model.remove(thisiter) + + + del self.datad[key] + del self.iterd[key] + self.rownumd[len(self.iters)] = key + self.iters.remove(thisiter) + + for rownum, thiskey in self.rownumd.items(): + if thiskey==key: del self.rownumd[rownum] + + def add_row(self, row): + thisiter = self.model.append() + self.model.set(thisiter, *self.flat(row)) + key = tuple(row) + 
self.datad[key] = row + self.iterd[key] = thisiter + self.rownumd[len(self.iters)] = key + self.iters.append(thisiter) + + def update_row(self, rownum, newrow): + key = self.rownumd[rownum] + thisiter = self.iterd[key] + newkey = tuple(newrow) + + self.rownumd[rownum] = newkey + del self.datad[key] + del self.iterd[key] + self.datad[newkey] = newrow + self.iterd[newkey] = thisiter + + + self.model.set(thisiter, *self.flat(newrow)) + + def get_row(self, rownum): + key = self.rownumd[rownum] + return self.datad[key] + + def get_selected(self): + selected = [] + def foreach(model, path, iter, selected): + selected.append(model.get_value(iter, 0)) + + self.treeview.get_selection().selected_foreach(foreach, selected) + return selected + + + + def rec2gtk(r, formatd=None, rownum=0): + """ + save record array r to excel pyExcelerator worksheet ws + starting at rownum. if ws is string like, assume it is a + filename and save to it + + formatd is a dictionary mapping dtype name -> FormatXL instances + + The next rownum after writing is returned + """ + + + + if formatd is None: + formatd = dict() + + formats = [] + for i, name in enumerate(r.dtype.names): + dt = r.dtype[name] + format = formatd.get(name) + if format is None: + format = defaultformatd.get(dt.type, FormatObj()) + #print 'gtk fmt factory', i, name, format, type(format) + format = gtkformat_factory(format, i) + formatd[name] = format + + + colheaders = r.dtype.names + scroll = SortedStringsScrolledWindow(colheaders, formatd) + + ind = npy.arange(len(r.dtype.names)) + for row in r: + scroll.add_row(row) + + return scroll + + This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
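For orientation, here is a small round-trip sketch using the record array helpers this commit moves into mlab. The CSV file and the weight/change column names are hypothetical (they echo the docstring example above), and only functions visible in the diff are used:

    from matplotlib import mlab

    r = mlab.csv2rec('somefile.csv', checkrows=0)    # hypothetical input file
    lbs = r.weight * 2.2                             # derive a new column
    r = mlab.rec_append_field(r, 'weight_lb', lbs)
    r = mlab.rec_drop_fields(r, ['weight'])
    mlab.rec2csv(r, 'out.csv',
                 formatd={'change': mlab.FormatPercent(2)})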
From: <ef...@us...> - 2007-11-09 07:25:10
|
Revision: 4176 http://matplotlib.svn.sourceforge.net/matplotlib/?rev=4176&view=rev Author: efiring Date: 2007-11-08 23:25:08 -0800 (Thu, 08 Nov 2007) Log Message: ----------- Pylab uses numpy instead of oldnumeric Modified Paths: -------------- trunk/matplotlib/CHANGELOG trunk/matplotlib/examples/image_demo2.py trunk/matplotlib/examples/mathtext_demo.py trunk/matplotlib/examples/mri_with_eeg.py trunk/matplotlib/lib/matplotlib/pylab.py Modified: trunk/matplotlib/CHANGELOG =================================================================== --- trunk/matplotlib/CHANGELOG 2007-11-09 02:55:47 UTC (rev 4175) +++ trunk/matplotlib/CHANGELOG 2007-11-09 07:25:08 UTC (rev 4176) @@ -1,3 +1,6 @@ +2007-11-08 Made pylab use straight numpy instead of oldnumeric + by default - EF + 2007-11-08 Added additional record array utilites to mlab (rec2excel, rec2gtk, rec_join, rec_append_field, rec_drop_field) - JDH @@ -8,16 +11,22 @@ 2007-11-08 Moved csv2rec to recutils and added other record array utilities - JDH -2007-11-08 If available, use existing pyparsing installation - DSD +2007-11-08 If available, use existing pyparsing installation - DSD -2007-11-07 Removed old enthought.traits from lib/matplotlib, added - Gael Varoquaux's enthought.traits-2.6b1, which is stripped - of setuptools. The package is installed to site-packages +2007-11-07 Removed old enthought.traits from lib/matplotlib, added + Gael Varoquaux's enthought.traits-2.6b1, which is stripped + of setuptools. The package is installed to site-packages if not already available - DSD -2007-11-02 Commited Phil Thompson's patch 1599876, fixes to Qt4Agg - backend and qt4 blitting demo - DSD +2007-11-05 Added easy access to minor tick properties; slight mod + of patch by Pierre G-M - EF +2007-11-02 Commited Phil Thompson's patch 1599876, fixes to Qt4Agg + backend and qt4 blitting demo - DSD + +2007-11-02 Commited Phil Thompson's patch 1599876, fixes to Qt4Agg + backend and qt4 blitting demo - DSD + 2007-10-31 Made log color scale easier to use with contourf; automatic level generation now works. - EF @@ -41,7 +50,7 @@ generator expressions are not supported by python-2.3 - DSD 2007-10-01 Made matplotlib.use() raise an exception if called after - backends has been imported. + backends has been imported. - EF 2007-09-30 Modified update* methods of Bbox and Interval so they work with reversed axes. 
Prior to this, trying to @@ -201,7 +210,7 @@ 2007-07-19 completed numpification of most trivial cases - NN -2007-07-19 converted non-numpy relicts troughout the code - NN +2007-07-19 converted non-numpy relicts throughout the code - NN 2007-07-19 replaced the Python code in numerix/ by a minimal wrapper around numpy that explicitly mentions all symbols that need to be Modified: trunk/matplotlib/examples/image_demo2.py =================================================================== --- trunk/matplotlib/examples/image_demo2.py 2007-11-09 02:55:47 UTC (rev 4175) +++ trunk/matplotlib/examples/image_demo2.py 2007-11-09 07:25:08 UTC (rev 4176) @@ -3,7 +3,7 @@ w, h = 512, 512 s = file('data/ct.raw', 'rb').read() -A = fromstring(s, UInt16).astype(Float) +A = fromstring(s, uint16).astype(float) A *= 1.0/max(A) A.shape = w, h Modified: trunk/matplotlib/examples/mathtext_demo.py =================================================================== --- trunk/matplotlib/examples/mathtext_demo.py 2007-11-09 02:55:47 UTC (rev 4175) +++ trunk/matplotlib/examples/mathtext_demo.py 2007-11-09 07:25:08 UTC (rev 4176) @@ -4,7 +4,8 @@ latex rendering, see the text.usetex option """ import numpy as npy -from pylab import figure, show +from matplotlib.pyplot import figure, show + fig = figure() fig.subplots_adjust(bottom=0.2) @@ -21,7 +22,8 @@ ax.legend(("Foo", "Testing $x^2$")) -#title(r'$\Delta_i^j \hspace{0.4} \rm{versus} \hspace{0.4} \Delta_{i+1}^j$', fontsize=20) -fig.savefig('mathtext_demo') +ax.set_title(r'$\Delta_i^j \hspace{0.4} \rm{versus} \hspace{0.4} \Delta_{i+1}^j$', fontsize=20) +#fig.savefig('mathtext_demo') show() + Modified: trunk/matplotlib/examples/mri_with_eeg.py =================================================================== --- trunk/matplotlib/examples/mri_with_eeg.py 2007-11-09 02:55:47 UTC (rev 4175) +++ trunk/matplotlib/examples/mri_with_eeg.py 2007-11-09 07:25:08 UTC (rev 4176) @@ -15,7 +15,7 @@ if 1: # load the data # data are 256x256 16 bit integers dfile = 'data/s1045.ima' - im = fromstring(file(dfile, 'rb').read(), UInt16).astype(Float) + im = fromstring(file(dfile, 'rb').read(), uint16).astype(float) im.shape = 256, 256 if 1: # plot the MRI in pcolor @@ -37,7 +37,7 @@ if 1: # plot the EEG # load the data numSamples, numRows = 800,4 - data = fromstring(file('data/eeg.dat', 'rb').read(), Float) + data = fromstring(file('data/eeg.dat', 'rb').read(), float) data.shape = numSamples, numRows t = arange(numSamples)/float(numSamples)*10.0 ticklocs = [] Modified: trunk/matplotlib/lib/matplotlib/pylab.py =================================================================== --- trunk/matplotlib/lib/matplotlib/pylab.py 2007-11-09 02:55:47 UTC (rev 4175) +++ trunk/matplotlib/lib/matplotlib/pylab.py 2007-11-09 07:25:08 UTC (rev 4176) @@ -230,38 +230,6 @@ # bring all the symbols in so folks can import them from # pylab in one fell swoop -from numpy.oldnumeric import array, zeros, shape, rank, size, fromstring,\ - take, put, putmask, reshape, repeat, choose, searchsorted,\ - cumsum, product, cumproduct, alltrue, sometrue, allclose,\ - arrayrange, arange, asarray, convolve, swapaxes, concatenate,\ - transpose, sort, argsort, argmax, argmin, innerproduct, dot,\ - outerproduct, resize, indices, fromfunction, diagonal, trace,\ - ravel, nonzero, shape, where, compress, clip, zeros, ones,\ - identity, add, logical_or, exp, subtract, logical_xor,\ - log, multiply, logical_not, log10, divide, maximum, sin,\ - minimum, sinh, conjugate, bitwise_and, sqrt, power, bitwise_or,\ - tan, absolute, 
bitwise_xor, tanh, negative, ceil, greater, fabs,\ - greater_equal, floor, less, arccos, arctan2, less_equal, arcsin,\ - fmod, equal, arctan, hypot, not_equal, cos, around, logical_and,\ - cosh, arccosh, arcsinh, arctanh, cross_correlate,\ - pi, ArrayType, matrixmultiply - -from numpy.oldnumeric import sum as asum - -from numpy.oldnumeric import Int8, UInt8, Int16, UInt16, Int32, UInt32, Float32,\ - Float64, Complex32, Complex64, Float, Int, Complex - -from numpy.fft import fft # Why just fft? -from numpy.linalg import inv as inverse -from numpy.oldnumeric.linear_algebra import eigenvectors - # not quite the same as linalg.eig - - -pymin, pymax = min, max -from numpy.oldnumeric.mlab import * -min, max = pymin, pymax -from numpy import amin, amax - from matplotlib.mlab import window_hanning, window_none,\ conv, detrend, detrend_mean, detrend_none, detrend_linear,\ polyfit, polyval, entropy, normpdf,\ @@ -271,24 +239,43 @@ diagonal_matrix, base_repr, binary_repr, log2, ispower2,\ bivariate_normal, load, save, stineman_interp -from numpy import meshgrid, linspace, logspace, corrcoef, vander +from numpy import * +from numpy.fft import * +from numpy.random import * +from numpy.linalg import * -""" -problem syms - - cross_correlate - getting from convolve -average -sarray -dump -dumps -load -loads -divide_safe -invert -left_shift -right_shift -sign -""" +# old style--if True, override standard numpy with oldnumeric +if False: + from numpy.oldnumeric import array, zeros, shape, rank, size, fromstring,\ + take, put, putmask, reshape, repeat, choose, searchsorted,\ + cumsum, product, cumproduct, alltrue, sometrue, allclose,\ + arrayrange, arange, asarray, convolve, swapaxes, concatenate,\ + transpose, sort, argsort, argmax, argmin, innerproduct, dot,\ + outerproduct, resize, indices, fromfunction, diagonal, trace,\ + ravel, nonzero, shape, where, compress, clip, zeros, ones,\ + identity, add, logical_or, exp, subtract, logical_xor,\ + log, multiply, logical_not, log10, divide, maximum, sin,\ + minimum, sinh, conjugate, bitwise_and, sqrt, power, bitwise_or,\ + tan, absolute, bitwise_xor, tanh, negative, ceil, greater, fabs,\ + greater_equal, floor, less, arccos, arctan2, less_equal, arcsin,\ + fmod, equal, arctan, hypot, not_equal, cos, around, logical_and,\ + cosh, arccosh, arcsinh, arctanh, cross_correlate,\ + pi, ArrayType, matrixmultiply + from numpy.oldnumeric import sum as asum + + from numpy.oldnumeric import Int8, UInt8, Int16, UInt16, Int32, UInt32, Float32,\ + Float64, Complex32, Complex64, Float, Int, Complex + + pymin, pymax = min, max + from numpy.oldnumeric.mlab import * + min, max = pymin, pymax + from numpy import amin, amax + from numpy.oldnumeric.linear_algebra import eigenvectors + # not quite the same as linalg.eig + from numpy.linalg import inv as inverse + + from matplotlib.pyplot import * This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
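In practical terms, scripts that do `from pylab import *` now receive the plain numpy names (plus numpy.fft, numpy.random and numpy.linalg), and the capitalized oldnumeric spellings are only available if the disabled compatibility block is switched back on. A before/after sketch mirroring the image_demo2.py hunk; the raw file name here is made up:

    from pylab import *

    s = open('data.raw', 'rb').read()   # hypothetical 16-bit raw image data
    # oldnumeric spelling, no longer exported by default:
    #   A = fromstring(s, UInt16).astype(Float)
    A = fromstring(s, uint16).astype(float)
    A *= 1.0 / A.max()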
From: <ds...@us...> - 2007-11-09 13:08:49
|
Revision: 4177 http://matplotlib.svn.sourceforge.net/matplotlib/?rev=4177&view=rev Author: dsdale Date: 2007-11-09 05:08:48 -0800 (Fri, 09 Nov 2007) Log Message: ----------- move pyparsing back into mpl namespace Modified Paths: -------------- trunk/matplotlib/CHANGELOG trunk/matplotlib/lib/matplotlib/fontconfig_pattern.py trunk/matplotlib/lib/matplotlib/mathtext.py trunk/matplotlib/setup.py trunk/matplotlib/setupext.py Added Paths: ----------- trunk/matplotlib/lib/matplotlib/pyparsing.py Removed Paths: ------------- trunk/matplotlib/lib/pyparsing.py Modified: trunk/matplotlib/CHANGELOG =================================================================== --- trunk/matplotlib/CHANGELOG 2007-11-09 07:25:08 UTC (rev 4176) +++ trunk/matplotlib/CHANGELOG 2007-11-09 13:08:48 UTC (rev 4177) @@ -1,3 +1,7 @@ +2007-11-09 Moved pyparsing back into matplotlib namespace. Don't use + system pyparsing, API is too variable from one release + to the next - DSD + 2007-11-08 Made pylab use straight numpy instead of oldnumeric by default - EF Modified: trunk/matplotlib/lib/matplotlib/fontconfig_pattern.py =================================================================== --- trunk/matplotlib/lib/matplotlib/fontconfig_pattern.py 2007-11-09 07:25:08 UTC (rev 4176) +++ trunk/matplotlib/lib/matplotlib/fontconfig_pattern.py 2007-11-09 13:08:48 UTC (rev 4177) @@ -18,7 +18,7 @@ License : matplotlib license (PSF compatible) """ import re -from pyparsing import Literal, OneOrMore, ZeroOrMore, Optional, Regex, \ +from matplotlib.pyparsing import Literal, OneOrMore, ZeroOrMore, Optional, Regex, \ StringEnd, ParseException, Suppress family_punc = r'\\\-:,' Modified: trunk/matplotlib/lib/matplotlib/mathtext.py =================================================================== --- trunk/matplotlib/lib/matplotlib/mathtext.py 2007-11-09 07:25:08 UTC (rev 4176) +++ trunk/matplotlib/lib/matplotlib/mathtext.py 2007-11-09 13:08:48 UTC (rev 4177) @@ -134,7 +134,7 @@ from numpy import inf, isinf from matplotlib import verbose -from pyparsing import Literal, Word, OneOrMore, ZeroOrMore, Combine, Group, \ +from matplotlib.pyparsing import Literal, Word, OneOrMore, ZeroOrMore, Combine, Group, \ Optional, Forward, NotAny, alphas, nums, alphanums, StringStart, \ StringEnd, ParseFatalException, FollowedBy, Regex, operatorPrecedence, \ opAssoc, ParseResults, Or, Suppress, oneOf, ParseException, MatchFirst, \ Copied: trunk/matplotlib/lib/matplotlib/pyparsing.py (from rev 4176, trunk/matplotlib/lib/pyparsing.py) =================================================================== --- trunk/matplotlib/lib/matplotlib/pyparsing.py (rev 0) +++ trunk/matplotlib/lib/matplotlib/pyparsing.py 2007-11-09 13:08:48 UTC (rev 4177) @@ -0,0 +1,3217 @@ +# module pyparsing.py +# +# Copyright (c) 2003-2007 Paul T. McGuire +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# +#from __future__ import generators + +__doc__ = \ +""" +pyparsing module - Classes and methods to define and execute parsing grammars + +The pyparsing module is an alternative approach to creating and executing simple grammars, +vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you +don't need to learn a new syntax for defining grammars or matching expressions - the parsing module +provides a library of classes that you use to construct the grammar directly in Python. + +Here is a program to parse "Hello, World!" (or any greeting of the form "<salutation>, <addressee>!"):: + + from pyparsing import Word, alphas + + # define grammar of a greeting + greet = Word( alphas ) + "," + Word( alphas ) + "!" + + hello = "Hello, World!" + print hello, "->", greet.parseString( hello ) + +The program outputs the following:: + + Hello, World! -> ['Hello', ',', 'World', '!'] + +The Python representation of the grammar is quite readable, owing to the self-explanatory +class names, and the use of '+', '|' and '^' operators. + +The parsed results returned from parseString() can be accessed as a nested list, a dictionary, or an +object with named attributes. + +The pyparsing module handles some of the problems that are typically vexing when writing text parsers: + - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.) + - quoted strings + - embedded comments +""" + +__version__ = "1.4.8" +__versionTime__ = "7 October 2007 00:25" +__author__ = "Paul McGuire <pt...@us...>" + +import string +from weakref import ref as wkref +import copy,sys +import warnings +import re +import sre_constants +import xml.sax.saxutils +#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) ) + +def _ustr(obj): + """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries + str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It + then < returns the unicode object | encodes it with the default encoding | ... >. + """ + try: + # If this works, then _ustr(obj) has the same behaviour as str(obj), so + # it won't break any existing code. + return str(obj) + + except UnicodeEncodeError, e: + # The Python docs (http://docs.python.org/ref/customization.html#l2h-182) + # state that "The return value must be a string object". However, does a + # unicode object (being a subclass of basestring) count as a "string + # object"? + # If so, then return a unicode object: + return unicode(obj) + # Else encode it... but how? There are many choices... :) + # Replace unprintables with escape codes? + #return unicode(obj).encode(sys.getdefaultencoding(), 'backslashreplace_errors') + # Replace unprintables with question marks? + #return unicode(obj).encode(sys.getdefaultencoding(), 'replace') + # ... 
+ +def _str2dict(strg): + return dict( [(c,0) for c in strg] ) + #~ return set( [c for c in strg] ) + +class _Constants(object): + pass + +alphas = string.lowercase + string.uppercase +nums = string.digits +hexnums = nums + "ABCDEFabcdef" +alphanums = alphas + nums + +class ParseBaseException(Exception): + """base exception class for all parsing runtime exceptions""" + __slots__ = ( "loc","msg","pstr","parserElement" ) + # Performance tuning: we construct a *lot* of these, so keep this + # constructor as small and fast as possible + def __init__( self, pstr, loc=0, msg=None, elem=None ): + self.loc = loc + if msg is None: + self.msg = pstr + self.pstr = "" + else: + self.msg = msg + self.pstr = pstr + self.parserElement = elem + + def __getattr__( self, aname ): + """supported attributes by name are: + - lineno - returns the line number of the exception text + - col - returns the column number of the exception text + - line - returns the line containing the exception text + """ + if( aname == "lineno" ): + return lineno( self.loc, self.pstr ) + elif( aname in ("col", "column") ): + return col( self.loc, self.pstr ) + elif( aname == "line" ): + return line( self.loc, self.pstr ) + else: + raise AttributeError, aname + + def __str__( self ): + return "%s (at char %d), (line:%d, col:%d)" % \ + ( self.msg, self.loc, self.lineno, self.column ) + def __repr__( self ): + return _ustr(self) + def markInputline( self, markerString = ">!<" ): + """Extracts the exception line from the input string, and marks + the location of the exception with a special symbol. + """ + line_str = self.line + line_column = self.column - 1 + if markerString: + line_str = "".join( [line_str[:line_column], + markerString, line_str[line_column:]]) + return line_str.strip() + +class ParseException(ParseBaseException): + """exception thrown when parse expressions don't match class; + supported attributes by name are: + - lineno - returns the line number of the exception text + - col - returns the column number of the exception text + - line - returns the line containing the exception text + """ + pass + +class ParseFatalException(ParseBaseException): + """user-throwable exception thrown when inconsistent parse content + is found; stops all parsing immediately""" + pass + +#~ class ReparseException(ParseBaseException): + #~ """Experimental class - parse actions can raise this exception to cause + #~ pyparsing to reparse the input string: + #~ - with a modified input string, and/or + #~ - with a modified start location + #~ Set the values of the ReparseException in the constructor, and raise the + #~ exception in a parse action to cause pyparsing to use the new string/location. + #~ Setting the values as None causes no change to be made. 
+ #~ """ + #~ def __init_( self, newstring, restartLoc ): + #~ self.newParseText = newstring + #~ self.reparseLoc = restartLoc + +class RecursiveGrammarException(Exception): + """exception thrown by validate() if the grammar could be improperly recursive""" + def __init__( self, parseElementList ): + self.parseElementTrace = parseElementList + + def __str__( self ): + return "RecursiveGrammarException: %s" % self.parseElementTrace + +class _ParseResultsWithOffset(object): + def __init__(self,p1,p2): + self.tup = (p1,p2) + def __getitem__(self,i): + return self.tup[i] + def __repr__(self): + return repr(self.tup) + +class ParseResults(object): + """Structured parse results, to provide multiple means of access to the parsed data: + - as a list (len(results)) + - by list index (results[0], results[1], etc.) + - by attribute (results.<resultsName>) + """ + __slots__ = ( "__toklist", "__tokdict", "__doinit", "__name", "__parent", "__accumNames", "__weakref__" ) + def __new__(cls, toklist, name=None, asList=True, modal=True ): + if isinstance(toklist, cls): + return toklist + retobj = object.__new__(cls) + retobj.__doinit = True + return retobj + + # Performance tuning: we construct a *lot* of these, so keep this + # constructor as small and fast as possible + def __init__( self, toklist, name=None, asList=True, modal=True ): + if self.__doinit: + self.__doinit = False + self.__name = None + self.__parent = None + self.__accumNames = {} + if isinstance(toklist, list): + self.__toklist = toklist[:] + else: + self.__toklist = [toklist] + self.__tokdict = dict() + + # this line is related to debugging the asXML bug + #~ asList = False + + if name: + if not modal: + self.__accumNames[name] = 0 + if isinstance(name,int): + name = _ustr(name) # will always return a str, but use _ustr for consistency + self.__name = name + if not toklist in (None,'',[]): + if isinstance(toklist,basestring): + toklist = [ toklist ] + if asList: + if isinstance(toklist,ParseResults): + self[name] = _ParseResultsWithOffset(toklist.copy(),-1) + else: + self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),-1) + self[name].__name = name + else: + try: + self[name] = toklist[0] + except (KeyError,TypeError): + self[name] = toklist + + def __getitem__( self, i ): + if isinstance( i, (int,slice) ): + return self.__toklist[i] + else: + if i not in self.__accumNames: + return self.__tokdict[i][-1][0] + else: + return ParseResults([ v[0] for v in self.__tokdict[i] ]) + + def __setitem__( self, k, v ): + if isinstance(v,_ParseResultsWithOffset): + self.__tokdict[k] = self.__tokdict.get(k,list()) + [v] + sub = v[0] + elif isinstance(k,int): + self.__toklist[k] = v + sub = v + else: + self.__tokdict[k] = self.__tokdict.get(k,list()) + [(v,0)] + sub = v + if isinstance(sub,ParseResults): + sub.__parent = wkref(self) + + def __delitem__( self, i ): + if isinstance(i,(int,slice)): + del self.__toklist[i] + else: + del self.__tokdict[i] + + def __contains__( self, k ): + return self.__tokdict.has_key(k) + + def __len__( self ): return len( self.__toklist ) + def __bool__(self): return len( self.__toklist ) > 0 + def __nonzero__( self ): return self.__bool__() + def __iter__( self ): return iter( self.__toklist ) + def keys( self ): + """Returns all named result keys.""" + return self.__tokdict.keys() + + def items( self ): + """Returns all named result keys and values as a list of tuples.""" + return [(k,self[k]) for k in self.__tokdict.keys()] + + def values( self ): + """Returns all named result values.""" + return [ v[-1][0] 
for v in self.__tokdict.values() ] + + def __getattr__( self, name ): + if name not in self.__slots__: + if self.__tokdict.has_key( name ): + if name not in self.__accumNames: + return self.__tokdict[name][-1][0] + else: + return ParseResults([ v[0] for v in self.__tokdict[name] ]) + else: + return "" + return None + + def __add__( self, other ): + ret = self.copy() + ret += other + return ret + + def __iadd__( self, other ): + if other.__tokdict: + offset = len(self.__toklist) + addoffset = ( lambda a: (a<0 and offset) or (a+offset) ) + otheritems = other.__tokdict.items() + otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) ) + for (k,vlist) in otheritems for v in vlist] + for k,v in otherdictitems: + self[k] = v + if isinstance(v[0],ParseResults): + v[0].__parent = wkref(self) + self.__toklist += other.__toklist + self.__accumNames.update( other.__accumNames ) + del other + return self + + def __repr__( self ): + return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) ) + + def __str__( self ): + out = "[" + sep = "" + for i in self.__toklist: + if isinstance(i, ParseResults): + out += sep + _ustr(i) + else: + out += sep + repr(i) + sep = ", " + out += "]" + return out + + def _asStringList( self, sep='' ): + out = [] + for item in self.__toklist: + if out and sep: + out.append(sep) + if isinstance( item, ParseResults ): + out += item._asStringList() + else: + out.append( _ustr(item) ) + return out + + def asList( self ): + """Returns the parse results as a nested list of matching tokens, all converted to strings.""" + out = [] + for res in self.__toklist: + if isinstance(res,ParseResults): + out.append( res.asList() ) + else: + out.append( res ) + return out + + def asDict( self ): + """Returns the named parse results as dictionary.""" + return dict( self.items() ) + + def copy( self ): + """Returns a new copy of a ParseResults object.""" + ret = ParseResults( self.__toklist ) + ret.__tokdict = self.__tokdict.copy() + ret.__parent = self.__parent + ret.__accumNames.update( self.__accumNames ) + ret.__name = self.__name + return ret + + def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ): + """Returns the parse results as XML. 
Tags are created for tokens and lists that have defined results names.""" + nl = "\n" + out = [] + namedItems = dict( [ (v[1],k) for (k,vlist) in self.__tokdict.items() + for v in vlist ] ) + nextLevelIndent = indent + " " + + # collapse out indents if formatting is not desired + if not formatted: + indent = "" + nextLevelIndent = "" + nl = "" + + selfTag = None + if doctag is not None: + selfTag = doctag + else: + if self.__name: + selfTag = self.__name + + if not selfTag: + if namedItemsOnly: + return "" + else: + selfTag = "ITEM" + + out += [ nl, indent, "<", selfTag, ">" ] + + worklist = self.__toklist + for i,res in enumerate(worklist): + if isinstance(res,ParseResults): + if i in namedItems: + out += [ res.asXML(namedItems[i], + namedItemsOnly and doctag is None, + nextLevelIndent, + formatted)] + else: + out += [ res.asXML(None, + namedItemsOnly and doctag is None, + nextLevelIndent, + formatted)] + else: + # individual token, see if there is a name for it + resTag = None + if i in namedItems: + resTag = namedItems[i] + if not resTag: + if namedItemsOnly: + continue + else: + resTag = "ITEM" + xmlBodyText = xml.sax.saxutils.escape(_ustr(res)) + out += [ nl, nextLevelIndent, "<", resTag, ">", + xmlBodyText, + "</", resTag, ">" ] + + out += [ nl, indent, "</", selfTag, ">" ] + return "".join(out) + + def __lookup(self,sub): + for k,vlist in self.__tokdict.items(): + for v,loc in vlist: + if sub is v: + return k + return None + + def getName(self): + """Returns the results name for this token expression.""" + if self.__name: + return self.__name + elif self.__parent: + par = self.__parent() + if par: + return par.__lookup(self) + else: + return None + elif (len(self) == 1 and + len(self.__tokdict) == 1 and + self.__tokdict.values()[0][0][1] in (0,-1)): + return self.__tokdict.keys()[0] + else: + return None + + def dump(self,indent='',depth=0): + """Diagnostic method for listing out the contents of a ParseResults. + Accepts an optional indent argument so that this string can be embedded + in a nested display of other data.""" + out = [] + out.append( indent+_ustr(self.asList()) ) + keys = self.items() + keys.sort() + for k,v in keys: + if out: + out.append('\n') + out.append( "%s%s- %s: " % (indent,(' '*depth), k) ) + if isinstance(v,ParseResults): + if v.keys(): + #~ out.append('\n') + out.append( v.dump(indent,depth+1) ) + #~ out.append('\n') + else: + out.append(_ustr(v)) + else: + out.append(_ustr(v)) + #~ out.append('\n') + return "".join(out) + + # add support for pickle protocol + def __getstate__(self): + return ( self.__toklist, + ( self.__tokdict.copy(), + self.__parent is not None and self.__parent() or None, + self.__accumNames, + self.__name ) ) + + def __setstate__(self,state): + self.__toklist = state[0] + self.__tokdict, \ + par, \ + inAccumNames, \ + self.__name = state[1] + self.__accumNames = {} + self.__accumNames.update(inAccumNames) + if par is not None: + self.__parent = wkref(par) + else: + self.__parent = None + + +def col (loc,strg): + """Returns current column within a string, counting newlines as line separators. + The first column is number 1. + + Note: the default parsing behavior is to expand tabs in the input string + before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information + on parsing strings containing <TAB>s, and suggested methods to maintain a + consistent view of the parsed string, the parse location, and line and column + positions within the parsed string. 
+ """ + return (loc<len(strg) and strg[loc] == '\n') and 1 or loc - strg.rfind("\n", 0, loc) + +def lineno(loc,strg): + """Returns current line number within a string, counting newlines as line separators. + The first line is number 1. + + Note: the default parsing behavior is to expand tabs in the input string + before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information + on parsing strings containing <TAB>s, and suggested methods to maintain a + consistent view of the parsed string, the parse location, and line and column + positions within the parsed string. + """ + return strg.count("\n",0,loc) + 1 + +def line( loc, strg ): + """Returns the line of text containing loc within a string, counting newlines as line separators. + """ + lastCR = strg.rfind("\n", 0, loc) + nextCR = strg.find("\n", loc) + if nextCR > 0: + return strg[lastCR+1:nextCR] + else: + return strg[lastCR+1:] + +def _defaultStartDebugAction( instring, loc, expr ): + print "Match",_ustr(expr),"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ) + +def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ): + print "Matched",_ustr(expr),"->",toks.asList() + +def _defaultExceptionDebugAction( instring, loc, expr, exc ): + print "Exception raised:", _ustr(exc) + +def nullDebugAction(*args): + """'Do-nothing' debug action, to suppress debugging output during parsing.""" + pass + +class ParserElement(object): + """Abstract base level parser element class.""" + DEFAULT_WHITE_CHARS = " \n\t\r" + + def setDefaultWhitespaceChars( chars ): + """Overrides the default whitespace chars + """ + ParserElement.DEFAULT_WHITE_CHARS = chars + setDefaultWhitespaceChars = staticmethod(setDefaultWhitespaceChars) + + def __init__( self, savelist=False ): + self.parseAction = list() + self.failAction = None + #~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall + self.strRepr = None + self.resultsName = None + self.saveAsList = savelist + self.skipWhitespace = True + self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS + self.copyDefaultWhiteChars = True + self.mayReturnEmpty = False # used when checking for left-recursion + self.keepTabs = False + self.ignoreExprs = list() + self.debug = False + self.streamlined = False + self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index + self.errmsg = "" + self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all) + self.debugActions = ( None, None, None ) #custom debug actions + self.re = None + self.callPreparse = True # used to avoid redundant calls to preParse + self.callDuringTry = False + + def copy( self ): + """Make a copy of this ParserElement. Useful for defining different parse actions + for the same parsing pattern, using copies of the original parse element.""" + cpy = copy.copy( self ) + cpy.parseAction = self.parseAction[:] + cpy.ignoreExprs = self.ignoreExprs[:] + if self.copyDefaultWhiteChars: + cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS + return cpy + + def setName( self, name ): + """Define name for this expression, for use in debugging.""" + self.name = name + self.errmsg = "Expected " + self.name + if hasattr(self,"exception"): + self.exception.msg = self.errmsg + return self + + def setResultsName( self, name, listAllMatches=False ): + """Define name for referencing matching tokens as a nested attribute + of the returned parse results. 
+ NOTE: this returns a *copy* of the original ParserElement object; + this is so that the client can define a basic element, such as an + integer, and reference it in multiple places with different names. + """ + newself = self.copy() + newself.resultsName = name + newself.modalResults = not listAllMatches + return newself + + def setBreak(self,breakFlag = True): + """Method to invoke the Python pdb debugger when this element is + about to be parsed. Set breakFlag to True to enable, False to + disable. + """ + if breakFlag: + _parseMethod = self._parse + def breaker(instring, loc, doActions=True, callPreParse=True): + import pdb + pdb.set_trace() + _parseMethod( instring, loc, doActions, callPreParse ) + breaker._originalParseMethod = _parseMethod + self._parse = breaker + else: + if hasattr(self._parse,"_originalParseMethod"): + self._parse = self._parse._originalParseMethod + return self + + def normalizeParseActionArgs( f ): + """Internal method used to decorate parse actions that take fewer than 3 arguments, + so that all parse actions can be called as f(s,l,t).""" + STAR_ARGS = 4 + + try: + restore = None + if isinstance(f,type): + restore = f + f = f.__init__ + if f.func_code.co_flags & STAR_ARGS: + return f + numargs = f.func_code.co_argcount + if hasattr(f,"im_self"): + numargs -= 1 + if restore: + f = restore + except AttributeError: + try: + # not a function, must be a callable object, get info from the + # im_func binding of its bound __call__ method + if f.__call__.im_func.func_code.co_flags & STAR_ARGS: + return f + numargs = f.__call__.im_func.func_code.co_argcount + if hasattr(f.__call__,"im_self"): + numargs -= 1 + except AttributeError: + # not a bound method, get info directly from __call__ method + if f.__call__.func_code.co_flags & STAR_ARGS: + return f + numargs = f.__call__.func_code.co_argcount + if hasattr(f.__call__,"im_self"): + numargs -= 1 + + #~ print "adding function %s with %d args" % (f.func_name,numargs) + if numargs == 3: + return f + else: + if numargs == 2: + def tmp(s,l,t): + return f(l,t) + elif numargs == 1: + def tmp(s,l,t): + return f(t) + else: #~ numargs == 0: + def tmp(s,l,t): + return f() + try: + tmp.__name__ = f.__name__ + except AttributeError: + # no need for special handling if attribute doesnt exist + pass + try: + tmp.__doc__ = f.__doc__ + except AttributeError: + # no need for special handling if attribute doesnt exist + pass + try: + tmp.__dict__.update(f.__dict__) + except AttributeError: + # no need for special handling if attribute doesnt exist + pass + return tmp + normalizeParseActionArgs = staticmethod(normalizeParseActionArgs) + + def setParseAction( self, *fns, **kwargs ): + """Define action to perform when successfully matching parse element definition. + Parse action fn is a callable method with 0-3 arguments, called as fn(s,loc,toks), + fn(loc,toks), fn(toks), or just fn(), where: + - s = the original string being parsed (see note below) + - loc = the location of the matching substring + - toks = a list of the matched tokens, packaged as a ParseResults object + If the functions in fns modify the tokens, they can return them as the return + value from fn, and the modified list of tokens will replace the original. + Otherwise, fn does not need to return any value. + + Note: the default parsing behavior is to expand tabs in the input string + before starting the parsing process. 
See L{I{parseString}<parseString>} for more information + on parsing strings containing <TAB>s, and suggested methods to maintain a + consistent view of the parsed string, the parse location, and line and column + positions within the parsed string. + """ + self.parseAction = map(self.normalizeParseActionArgs, list(fns)) + self.callDuringTry = ("callDuringTry" in kwargs and kwargs["callDuringTry"]) + return self + + def addParseAction( self, *fns, **kwargs ): + """Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.""" + self.parseAction += map(self.normalizeParseActionArgs, list(fns)) + self.callDuringTry = self.callDuringTry or ("callDuringTry" in kwargs and kwargs["callDuringTry"]) + return self + + def setFailAction( self, fn ): + """Define action to perform if parsing fails at this expression. + Fail acton fn is a callable function that takes the arguments + fn(s,loc,expr,err) where: + - s = string being parsed + - loc = location where expression match was attempted and failed + - expr = the parse expression that failed + - err = the exception thrown + The function returns no value. It may throw ParseFatalException + if it is desired to stop parsing immediately.""" + self.failAction = fn + return self + + def skipIgnorables( self, instring, loc ): + exprsFound = True + while exprsFound: + exprsFound = False + for e in self.ignoreExprs: + try: + while 1: + loc,dummy = e._parse( instring, loc ) + exprsFound = True + except ParseException: + pass + return loc + + def preParse( self, instring, loc ): + if self.ignoreExprs: + loc = self.skipIgnorables( instring, loc ) + + if self.skipWhitespace: + wt = self.whiteChars + instrlen = len(instring) + while loc < instrlen and instring[loc] in wt: + loc += 1 + + return loc + + def parseImpl( self, instring, loc, doActions=True ): + return loc, [] + + def postParse( self, instring, loc, tokenlist ): + return tokenlist + + #~ @profile + def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ): + debugging = ( self.debug ) #and doActions ) + + if debugging or self.failAction: + #~ print "Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ) + if (self.debugActions[0] ): + self.debugActions[0]( instring, loc, self ) + if callPreParse and self.callPreparse: + preloc = self.preParse( instring, loc ) + else: + preloc = loc + tokensStart = loc + try: + try: + loc,tokens = self.parseImpl( instring, preloc, doActions ) + except IndexError: + raise ParseException( instring, len(instring), self.errmsg, self ) + except ParseException, err: + #~ print "Exception raised:", err + if self.debugActions[2]: + self.debugActions[2]( instring, tokensStart, self, err ) + if self.failAction: + self.failAction( instring, tokensStart, self, err ) + raise + else: + if callPreParse and self.callPreparse: + preloc = self.preParse( instring, loc ) + else: + preloc = loc + tokensStart = loc + if self.mayIndexError or loc >= len(instring): + try: + loc,tokens = self.parseImpl( instring, preloc, doActions ) + except IndexError: + raise ParseException( instring, len(instring), self.errmsg, self ) + else: + loc,tokens = self.parseImpl( instring, preloc, doActions ) + + tokens = self.postParse( instring, loc, tokens ) + + retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults ) + if self.parseAction and (doActions or self.callDuringTry): + if debugging: + try: + for fn in self.parseAction: + tokens = fn( instring, tokensStart, retTokens ) + if 
tokens is not None: + retTokens = ParseResults( tokens, + self.resultsName, + asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), + modal=self.modalResults ) + except ParseException, err: + #~ print "Exception raised in user parse action:", err + if (self.debugActions[2] ): + self.debugActions[2]( instring, tokensStart, self, err ) + raise + else: + for fn in self.parseAction: + tokens = fn( instring, tokensStart, retTokens ) + if tokens is not None: + retTokens = ParseResults( tokens, + self.resultsName, + asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), + modal=self.modalResults ) + + if debugging: + #~ print "Matched",self,"->",retTokens.asList() + if (self.debugActions[1] ): + self.debugActions[1]( instring, tokensStart, loc, self, retTokens ) + + return loc, retTokens + + def tryParse( self, instring, loc ): + return self._parse( instring, loc, doActions=False )[0] + + # this method gets repeatedly called during backtracking with the same arguments - + # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression + def _parseCache( self, instring, loc, doActions=True, callPreParse=True ): + lookup = (self,instring,loc,callPreParse,doActions) + if lookup in ParserElement._exprArgCache: + value = ParserElement._exprArgCache[ lookup ] + if isinstance(value,Exception): + if isinstance(value,ParseBaseException): + value.loc = loc + raise value + return (value[0],value[1].copy()) + else: + try: + value = self._parseNoCache( instring, loc, doActions, callPreParse ) + ParserElement._exprArgCache[ lookup ] = (value[0],value[1].copy()) + return value + except ParseBaseException, pe: + ParserElement._exprArgCache[ lookup ] = pe + raise + + _parse = _parseNoCache + + # argument cache for optimizing repeated calls when backtracking through recursive expressions + _exprArgCache = {} + def resetCache(): + ParserElement._exprArgCache.clear() + resetCache = staticmethod(resetCache) + + _packratEnabled = False + def enablePackrat(): + """Enables "packrat" parsing, which adds memoizing to the parsing logic. + Repeated parse attempts at the same string location (which happens + often in many complex grammars) can immediately return a cached value, + instead of re-executing parsing/validating code. Memoizing is done of + both valid results and parsing exceptions. + + This speedup may break existing programs that use parse actions that + have side-effects. For this reason, packrat parsing is disabled when + you first import pyparsing. To activate the packrat feature, your + program must call the class method ParserElement.enablePackrat(). If + your program uses psyco to "compile as you go", you must call + enablePackrat before calling psyco.full(). If you do not do this, + Python will crash. For best results, call enablePackrat() immediately + after importing pyparsing. + """ + if not ParserElement._packratEnabled: + ParserElement._packratEnabled = True + ParserElement._parse = ParserElement._parseCache + enablePackrat = staticmethod(enablePackrat) + + def parseString( self, instring ): + """Execute the parse expression with the given string. + This is the main interface to the client code, once the complete + expression has been built. + + Note: parseString implicitly calls expandtabs() on the input string, + in order to report proper column numbers in parse actions. 
+ If the input string contains tabs and + the grammar uses parse actions that use the loc argument to index into the + string being parsed, you can ensure you have a consistent view of the input + string by: + - calling parseWithTabs on your grammar before calling parseString + (see L{I{parseWithTabs}<parseWithTabs>}) + - define your parse action using the full (s,loc,toks) signature, and + reference the input string using the parse action's s argument + - explictly expand the tabs in your input string before calling + parseString + """ + ParserElement.resetCache() + if not self.streamlined: + self.streamline() + #~ self.saveAsList = True + for e in self.ignoreExprs: + e.streamline() + if self.keepTabs: + loc, tokens = self._parse( instring, 0 ) + else: + loc, tokens = self._parse( instring.expandtabs(), 0 ) + return tokens + + def scanString( self, instring, maxMatches=sys.maxint ): + """Scan the input string for expression matches. Each match will return the + matching tokens, start location, and end location. May be called with optional + maxMatches argument, to clip scanning after 'n' matches are found. + + Note that the start and end locations are reported relative to the string + being parsed. See L{I{parseString}<parseString>} for more information on parsing + strings with embedded tabs.""" + if not self.streamlined: + self.streamline() + for e in self.ignoreExprs: + e.streamline() + + if not self.keepTabs: + instring = _ustr(instring).expandtabs() + instrlen = len(instring) + loc = 0 + preparseFn = self.preParse + parseFn = self._parse + ParserElement.resetCache() + matches = 0 + while loc <= instrlen and matches < maxMatches: + try: + preloc = preparseFn( instring, loc ) + nextLoc,tokens = parseFn( instring, preloc, callPreParse=False ) + except ParseException: + loc = preloc+1 + else: + matches += 1 + yield tokens, preloc, nextLoc + loc = nextLoc + + def transformString( self, instring ): + """Extension to scanString, to modify matching text with modified tokens that may + be returned from a parse action. To use transformString, define a grammar and + attach a parse action to it that modifies the returned token list. + Invoking transformString() on a target string will then scan for matches, + and replace the matched text patterns according to the logic in the parse + action. transformString() returns the resulting transformed string.""" + out = [] + lastE = 0 + # force preservation of <TAB>s, to minimize unwanted transformation of string, and to + # keep string locs straight between transformString and scanString + self.keepTabs = True + for t,s,e in self.scanString( instring ): + out.append( instring[lastE:s] ) + if t: + if isinstance(t,ParseResults): + out += t.asList() + elif isinstance(t,list): + out += t + else: + out.append(t) + lastE = e + out.append(instring[lastE:]) + return "".join(map(_ustr,out)) + + def searchString( self, instring, maxMatches=sys.maxint ): + """Another extension to scanString, simplifying the access to the tokens found + to match the given parse expression. May be called with optional + maxMatches argument, to clip searching after 'n' matches are found. 
+ """ + return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ]) + + def __add__(self, other ): + """Implementation of + operator - returns And""" + if isinstance( other, basestring ): + other = Literal( other ) + if not isinstance( other, ParserElement ): + warnings.warn("Cannot add element of type %s to ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return And( [ self, other ] ) + + def __radd__(self, other ): + """Implementation of += operator""" + if isinstance( other, basestring ): + other = Literal( other ) + if not isinstance( other, ParserElement ): + warnings.warn("Cannot add element of type %s to ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return other + self + + def __or__(self, other ): + """Implementation of | operator - returns MatchFirst""" + if isinstance( other, basestring ): + other = Literal( other ) + if not isinstance( other, ParserElement ): + warnings.warn("Cannot add element of type %s to ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return MatchFirst( [ self, other ] ) + + def __ror__(self, other ): + """Implementation of |= operator""" + if isinstance( other, basestring ): + other = Literal( other ) + if not isinstance( other, ParserElement ): + warnings.warn("Cannot add element of type %s to ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return other | self + + def __xor__(self, other ): + """Implementation of ^ operator - returns Or""" + if isinstance( other, basestring ): + other = Literal( other ) + if not isinstance( other, ParserElement ): + warnings.warn("Cannot add element of type %s to ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return Or( [ self, other ] ) + + def __rxor__(self, other ): + """Implementation of ^= operator""" + if isinstance( other, basestring ): + other = Literal( other ) + if not isinstance( other, ParserElement ): + warnings.warn("Cannot add element of type %s to ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return other ^ self + + def __and__(self, other ): + """Implementation of & operator - returns Each""" + if isinstance( other, basestring ): + other = Literal( other ) + if not isinstance( other, ParserElement ): + warnings.warn("Cannot add element of type %s to ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return Each( [ self, other ] ) + + def __rand__(self, other ): + """Implementation of right-& operator""" + if isinstance( other, basestring ): + other = Literal( other ) + if not isinstance( other, ParserElement ): + warnings.warn("Cannot add element of type %s to ParserElement" % type(other), + SyntaxWarning, stacklevel=2) + return other & self + + def __invert__( self ): + """Implementation of ~ operator - returns NotAny""" + return NotAny( self ) + + def __call__(self, name): + """Shortcut for setResultsName, with listAllMatches=default:: + userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno") + could be written as:: + userdata = Word(alphas)("name") + Word(nums+"-")("socsecno") + """ + return self.setResultsName(name) + + def suppress( self ): + """Suppresses the output of this ParserElement; useful to keep punctuation from + cluttering up returned output. + """ + return Suppress( self ) + + def leaveWhitespace( self ): + """Disables the skipping of whitespace before matching the characters in the + ParserElement's defined pattern. 
This is normally only used internally by + the pyparsing module, but may be needed in some whitespace-sensitive grammars. + """ + self.skipWhitespace = False + return self + + def setWhitespaceChars( self, chars ): + """Overrides the default whitespace chars + """ + self.skipWhitespace = True + self.whiteChars = chars + self.copyDefaultWhiteChars = False + return self + + def parseWithTabs( self ): + """Overrides default behavior to expand <TAB>s to spaces before parsing the input string. + Must be called before parseString when the input grammar contains elements that + match <TAB> characters.""" + self.keepTabs = True + return self + + def ignore( self, other ): + """Define expression to be ignored (e.g., comments) while doing pattern + matching; may be called repeatedly, to define multiple comment or other + ignorable patterns. + """ + if isinstance( other, Suppress ): + if other not in self.ignoreExprs: + self.ignoreExprs.append( other ) + else: + self.ignoreExprs.append( Suppress( other ) ) + return self + + def setDebugActions( self, startAction, successAction, exceptionAction ): + """Enable display of debugging messages while doing pattern matching.""" + self.debugActions = (startAction or _defaultStartDebugAction, + successAction or _defaultSuccessDebugAction, + exceptionAction or _defaultExceptionDebugAction) + self.debug = True + return self + + def setDebug( self, flag=True ): + """Enable display of debugging messages while doing pattern matching. + Set flag to True to enable, False to disable.""" + if flag: + self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction ) + else: + self.debug = False + return self + + def __str__( self ): + return self.name + + def __repr__( self ): + return _ustr(self) + + def streamline( self ): + self.streamlined = True + self.strRepr = None + return self + + def checkRecursion( self, parseElementList ): + pass + + def validate( self, validateTrace=[] ): + """Check defined expressions for valid structure, check for infinite recursive definitions.""" + self.checkRecursion( [] ) + + def parseFile( self, file_or_filename ): + """Execute the parse expression on the given file or filename. + If a filename is specified (instead of a file object), + the entire file is opened, read, and closed before parsing. 
+ """ + try: + file_contents = file_or_filename.read() + except AttributeError: + f = open(file_or_filename, "rb") + file_contents = f.read() + f.close() + return self.parseString(file_contents) + + def getException(self): + return ParseException("",0,self.errmsg,self) + + def __getattr__(self,aname): + if aname == "myException": + self.myException = ret = self.getException(); + return ret; + else: + raise AttributeError, "no such attribute " + aname + +class Token(ParserElement): + """Abstract ParserElement subclass, for defining atomic matching patterns.""" + def __init__( self ): + super(Token,self).__init__( savelist=False ) + #self.myException = ParseException("",0,"",self) + + def setName(self, name): + s = super(Token,self).setName(name) + self.errmsg = "Expected " + self.name + #s.myException.msg = self.errmsg + return s + + +class Empty(Token): + """An empty token, will always match.""" + def __init__( self ): + super(Empty,self).__init__() + self.name = "Empty" + self.mayReturnEmpty = True + self.mayIndexError = False + + +class NoMatch(Token): + """A token that will never match.""" + def __init__( self ): + super(NoMatch,self).__init__() + self.name = "NoMatch" + self.mayReturnEmpty = True + self.mayIndexError = False + self.errmsg = "Unmatchable token" + #self.myException.msg = self.errmsg + + def parseImpl( self, instring, loc, doActions=True ): + exc = self.myException + exc.loc = loc + exc.pstr = instring + raise exc + + +class Literal(Token): + """Token to exactly match a specified string.""" + def __init__( self, matchString ): + super(Literal,self).__init__() + self.match = matchString + self.matchLen = len(matchString) + try: + self.firstMatchChar = matchString[0] + except IndexError: + warnings.warn("null string passed to Literal; use Empty() instead", + SyntaxWarning, stacklevel=2) + self.__class__ = Empty + self.name = '"%s"' % _ustr(self.match) + self.errmsg = "Expected " + self.name + self.mayReturnEmpty = False + #self.myException.msg = self.errmsg + self.mayIndexError = False + + # Performance tuning: this routine gets called a *lot* + # if this is a single character match string and the first character matches, + # short-circuit as quickly as possible, and avoid calling startswith + #~ @profile + def parseImpl( self, instring, loc, doActions=True ): + if (instring[loc] == self.firstMatchChar and + (self.matchLen==1 or instring.startswith(self.match,loc)) ): + return loc+self.matchLen, self.match + #~ raise ParseException( instring, loc, self.errmsg ) + exc = self.myException + exc.loc = loc + exc.pstr = instring + raise exc + +class Keyword(Token): + """Token to exactly match a specified string as a keyword, that is, it must be + immediately followed by a non-keyword character. Compare with Literal:: + Literal("if") will match the leading 'if' in 'ifAndOnlyIf'. + Keyword("if") will not; it will only match the leading 'if in 'if x=1', or 'if(y==2)' + Accepts two optional constructor arguments in addition to the keyword string: + identChars is a string of characters that would be valid identifier characters, + defaulting to all alphanumerics + "_" and "$"; caseless allows case-insensitive + matching, default is False. 
+ """ + DEFAULT_KEYWORD_CHARS = alphanums+"_$" + + def __init__( self, matchString, identChars=DEFAULT_KEYWORD_CHARS, caseless=False ): + super(Keyword,self).__init__() + self.match = matchString + self.matchLen = len(matchString) + try: + self.firstMatchChar = matchString[0] + except IndexError: + warnings.warn("null string passed to Keyword; use Empty() instead", + SyntaxWarning, stacklevel=2) + self.name = '"%s"' % self.match + self.errmsg = "Expected " + self.name + self.mayReturnEmpty = False + #self.myException.msg = self.errmsg + self.mayIndexError = False + self.caseless = caseless + if caseless: + self.caselessmatch = matchString.upper() + identChars = identChars.upper() + self.identChars = _str2dict(identChars) + + def parseImpl( self, instring, loc, doActions=True ): + if self.caseless: + if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and + (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and + (loc == 0 or instring[loc-1].upper() not in self.identChars) ): + return loc+self.matchLen, self.match + else: + if (instring[loc] == self.firstMatchChar and + (self.matchLen==1 or instring.startswith(self.match,loc)) and + (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and + (loc == 0 or instring[loc-1] not in self.identChars) ): + return loc+self.matchLen, self.match + #~ raise ParseException( instring, loc, self.errmsg ) + exc = self.myException + exc.loc = loc + exc.pstr = instring + raise exc + + def copy(self): + c = super(Keyword,self).copy() + c.identChars = Keyword.DEFAULT_KEYWORD_CHARS + return c + + def setDefaultKeywordChars( chars ): + """Overrides the default Keyword chars + """ + Keyword.DEFAULT_KEYWORD_CHARS = chars + setDefaultKeywordChars = staticmethod(setDefaultKeywordChars) + + +class CaselessLiteral(Literal): + """Token to match a specified string, ignoring case of letters. + Note: the matched results will always be in the case of the given + match string, NOT the case of the input text. + """ + def __init__( self, matchString ): + super(CaselessLiteral,self).__init__( matchString.upper() ) + # Preserve the defining literal. + self.returnString = matchString + self.name = "'%s'" % self.returnString + self.errmsg = "Expected " + self.name + #self.myException.msg = self.errmsg + + def parseImpl( self, instring, loc, doActions=True ): + if instring[ loc:loc+self.matchLen ].upper() == self.match: + return loc+self.matchLen, self.returnString + #~ raise ParseException( instring, loc, self.errmsg ) + exc = self.myException + exc.loc = loc + exc.pstr = instring + raise exc + +class CaselessKeyword(Keyword): + def __init__( self, matchString, identChars=Keyword.DEFAULT_KEYWORD_CHARS ): + super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True ) + + def parseImpl( self, instring, loc, doActions=True ): + if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and + (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ): + return loc+self.matchLen, self.match + #~ raise ParseException( instring, loc, self.errmsg ) + exc = self.myException + exc.loc = loc + exc.pstr = instring + raise exc + +class Word(Token): + """Token for matching words composed of allowed character sets. 
+ Defined with string containing all allowed initial characters, + an optional string containing allowed body characters (if omitted, + defaults to the initial character set), and an optional minimum, + maximum, and/or exact length. The default value for min is 1 (a + minimum value < 1 is not valid); the default values for max and exact + are 0, meaning no maximum or exact length restriction. + """ + def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False ): + super(Word,self).__init__() + self.initCharsOrig = initChars + self.initChars = _str2dict(initChars) + if bodyChars : + self.bodyCharsOrig = bodyChars + self.bodyChars = _str2dict(bodyChars) + else: + self.bodyCharsOrig = initChars + self.bodyChars = _str2dict(initChars) + + self.maxSpecified = max > 0 + + if min < 1: + raise ValueError, "cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted" + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = sys.maxint + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + self.name = _ustr(self)... [truncated message content] |
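For code inside matplotlib that uses pyparsing, the practical effect of this revision is a one-line import change: grammars are now built against the vendored copy under the matplotlib namespace instead of a system-wide install. A minimal sketch, assuming a source tree at or after r4177, reusing the "Hello, World!" grammar from the module docstring quoted above:

    from matplotlib.pyparsing import Word, alphas

    # grammar of a greeting: <salutation>, <addressee>!
    greet = Word(alphas) + "," + Word(alphas) + "!"

    hello = "Hello, World!"
    print hello, "->", greet.parseString(hello)
    # prints: Hello, World! -> ['Hello', ',', 'World', '!']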
From: <ds...@us...> - 2007-11-09 13:20:12
|
Revision: 4180
          http://matplotlib.svn.sourceforge.net/matplotlib/?rev=4180&view=rev
Author:   dsdale
Date:     2007-11-09 05:20:10 -0800 (Fri, 09 Nov 2007)

Log Message:
-----------
committed Martin Teichmann's patch 1828813 to fix qt4 error messages related to QPainter

Modified Paths:
--------------
    trunk/matplotlib/CHANGELOG
    trunk/matplotlib/lib/matplotlib/backends/backend_qt4agg.py

Modified: trunk/matplotlib/CHANGELOG
===================================================================
--- trunk/matplotlib/CHANGELOG	2007-11-09 13:19:38 UTC (rev 4179)
+++ trunk/matplotlib/CHANGELOG	2007-11-09 13:20:10 UTC (rev 4180)
@@ -1,3 +1,9 @@
+2007-11-09 Applied Martin Teichmann's patch 1828813: a QPainter is used in
+           paintEvent, which has to be destroyed using the method end(). If
+           matplotlib raises an exception before the call to end - and it
+           does if you feed it with bad data - this method end() is never
+           called and Qt4 will start spitting error messages - DSD
+
 2007-11-09 Moved pyparsing back into matplotlib namespace. Don't use
            system pyparsing, API is too variable from one release
            to the next - DSD

Modified: trunk/matplotlib/lib/matplotlib/backends/backend_qt4agg.py
===================================================================
--- trunk/matplotlib/lib/matplotlib/backends/backend_qt4agg.py	2007-11-09 13:19:38 UTC (rev 4179)
+++ trunk/matplotlib/lib/matplotlib/backends/backend_qt4agg.py	2007-11-09 13:20:10 UTC (rev 4180)
@@ -116,6 +116,7 @@
         qImage = QtGui.QImage(stringBuffer, w, h, QtGui.QImage.Format_ARGB32)
         p = QtGui.QPainter(self)
         pixmap = QtGui.QPixmap.fromImage(qImage)
+        p = QtGui.QPainter( self )
         p.drawPixmap(QtCore.QPoint(l, self.renderer.height-t), pixmap)
         p.end()
         self.replot = False

This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
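The CHANGELOG entry above describes the underlying problem: a QPainter opened in paintEvent must be closed with end(), and an exception raised mid-draw can skip that call, after which Qt4 prints warnings on every repaint. The sketch below illustrates the general try/finally pattern for guaranteeing end() runs; it is not the committed patch, and the PaintSafeWidget class and draw_figure helper are hypothetical stand-ins for the real backend code.

    from PyQt4 import QtGui

    class PaintSafeWidget(QtGui.QWidget):
        def paintEvent(self, event):
            p = QtGui.QPainter(self)
            try:
                # hypothetical drawing step; with bad data this can raise,
                # just like the case described in the CHANGELOG entry
                self.draw_figure(p)
            finally:
                # always end the painter, so Qt4 does not complain about a
                # painter that was never ended
                p.end()

        def draw_figure(self, painter):
            painter.drawText(10, 20, "hello")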
From: <ds...@us...> - 2007-11-09 13:49:55
|
Revision: 4182
          http://matplotlib.svn.sourceforge.net/matplotlib/?rev=4182&view=rev
Author:   dsdale
Date:     2007-11-09 05:49:54 -0800 (Fri, 09 Nov 2007)

Log Message:
-----------
updated CHANGELOG and API_CHANGES

Modified Paths:
--------------
    trunk/matplotlib/API_CHANGES
    trunk/matplotlib/CHANGELOG

Modified: trunk/matplotlib/API_CHANGES
===================================================================
--- trunk/matplotlib/API_CHANGES	2007-11-09 13:47:40 UTC (rev 4181)
+++ trunk/matplotlib/API_CHANGES	2007-11-09 13:49:54 UTC (rev 4182)
@@ -1,7 +1,3 @@
-    Removed matplotlib.pyparsing. We now use the system's pyparsing
-    if it is available, and if not, we install pyparsing directly into
-    site-packages
-
     Moved mlab.csv2rec -> recutils.csv2rec

     Added ax kwarg to pyplot.colorbar and Figure.colorbar so that

Modified: trunk/matplotlib/CHANGELOG
===================================================================
--- trunk/matplotlib/CHANGELOG	2007-11-09 13:47:40 UTC (rev 4181)
+++ trunk/matplotlib/CHANGELOG	2007-11-09 13:49:54 UTC (rev 4182)
@@ -2,7 +2,7 @@
            paintEvent, which has to be destroyed using the method end(). If
            matplotlib raises an exception before the call to end - and it
            does if you feed it with bad data - this method end() is never
-           called and Qt4 will start spitting error messages - DSD
+           called and Qt4 will start spitting error messages
 2007-11-09 Moved pyparsing back into matplotlib namespace. Don't use
            system pyparsing, API is too variable from one release
            to the next - DSD

This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site.
|
From: <ds...@us...> - 2007-11-09 14:24:44
|
Revision: 4183 http://matplotlib.svn.sourceforge.net/matplotlib/?rev=4183&view=rev Author: dsdale Date: 2007-11-09 06:24:41 -0800 (Fri, 09 Nov 2007) Log Message: ----------- updated dependency report during build process Modified Paths: -------------- trunk/matplotlib/lib/matplotlib/__init__.py trunk/matplotlib/lib/matplotlib/config/checkdep.py trunk/matplotlib/setup.py trunk/matplotlib/setupext.py Modified: trunk/matplotlib/lib/matplotlib/__init__.py =================================================================== --- trunk/matplotlib/lib/matplotlib/__init__.py 2007-11-09 13:49:54 UTC (rev 4182) +++ trunk/matplotlib/lib/matplotlib/__init__.py 2007-11-09 14:24:41 UTC (rev 4183) @@ -256,7 +256,7 @@ v = match.group(0) float(v) return v - except (IndexError, ValueError): + except (IndexError, ValueError, AttributeError): return None def checkdep_pdftops(): Modified: trunk/matplotlib/lib/matplotlib/config/checkdep.py =================================================================== --- trunk/matplotlib/lib/matplotlib/config/checkdep.py 2007-11-09 13:49:54 UTC (rev 4182) +++ trunk/matplotlib/lib/matplotlib/config/checkdep.py 2007-11-09 14:24:41 UTC (rev 4183) @@ -35,7 +35,7 @@ pattern = '3\.1\d+' match = re.search(pattern, line) return match.group(0) - except (IndexError, ValueError): + except (IndexError, ValueError, AttributeError): return None def pdftops(): Modified: trunk/matplotlib/setup.py =================================================================== --- trunk/matplotlib/setup.py 2007-11-09 13:49:54 UTC (rev 4182) +++ trunk/matplotlib/setup.py 2007-11-09 14:24:41 UTC (rev 4183) @@ -83,7 +83,8 @@ print_raw, check_for_freetype, check_for_libpng, check_for_gtk, \ check_for_tk, check_for_wx, check_for_numpy, check_for_qt, check_for_qt4, \ check_for_cairo, check_for_traits, check_for_pytz, check_for_dateutil, \ - check_for_configobj + check_for_configobj, check_for_dvipng, check_for_ghostscript, \ + check_for_latex, check_for_pdftops, check_for_datetime #import distutils.sysconfig # jdh @@ -184,55 +185,12 @@ build_contour(ext_modules, packages) build_nxutils(ext_modules, packages) -print_raw("") -print_raw("OPTIONAL DEPENDENCIES") - -try: import datetime -except ImportError: hasdatetime = False -else: hasdatetime = True - -if hasdatetime: # dates require python23 datetime - # only install pytz and dateutil if the user hasn't got them - def add_pytz(): - packages.append('pytz') - resources = ['zone.tab', 'locales/pytz.pot'] - # install pytz subdirs - for dirpath, dirname, filenames in os.walk(os.path.join('lib', 'pytz', - 'zoneinfo')): - if '.svn' not in dirpath: - # remove the 'lib/pytz' part of the path - basepath = dirpath.split(os.path.sep, 2)[2] - resources.extend([os.path.join(basepath, filename) - for filename in filenames]) - package_data['pytz'] = resources - assert len(resources) > 10, 'pytz zoneinfo files not found!' 
-# packages.append('/'.join(dirpath.split(os.sep)[1:])) - - def add_dateutil(): - packages.append('dateutil') - packages.append('dateutil/zoneinfo') - package_data['dateutil'] = ['zoneinfo/zoneinfo*.tar.*'] - - haspytz = check_for_pytz() - hasdateutil = check_for_dateutil() - - if sys.platform=='win32': - # always add these to the win32 installer - add_pytz() - add_dateutil() - else: - # only add them if we need them - if not haspytz: add_pytz() - if not hasdateutil: add_dateutil() - build_swigagg(ext_modules, packages) build_transforms(ext_modules, packages) -# for the traited config package: -if not check_for_configobj(): py_modules.append('configobj') +print_raw("") +print_raw("OPTIONAL BACKEND DEPENDENCIES") -if not check_for_traits(): build_traits(ext_modules, packages) - if check_for_gtk() and (BUILD_GTK or BUILD_GTKAGG): if BUILD_GTK: build_gdk(ext_modules, packages) @@ -279,6 +237,58 @@ mod.extra_compile_args.append('-DVERBOSE') print_raw("") +print_raw("OPTIONAL DATE/TIMEZONE DEPENDENCIES") + +hasdatetime = check_for_datetime() +hasdateutil = check_for_dateutil(hasdatetime) +haspytz = check_for_pytz(hasdatetime) + +if hasdatetime: # dates require python23 datetime + # only install pytz and dateutil if the user hasn't got them + + def add_pytz(): + packages.append('pytz') + resources = ['zone.tab', 'locales/pytz.pot'] + # install pytz subdirs + for dirpath, dirname, filenames in os.walk(os.path.join('lib', 'pytz', + 'zoneinfo')): + if '.svn' not in dirpath: + # remove the 'lib/pytz' part of the path + basepath = dirpath.split(os.path.sep, 2)[2] + resources.extend([os.path.join(basepath, filename) + for filename in filenames]) + package_data['pytz'] = resources + assert len(resources) > 10, 'pytz zoneinfo files not found!' +# packages.append('/'.join(dirpath.split(os.sep)[1:])) + + def add_dateutil(): + packages.append('dateutil') + packages.append('dateutil/zoneinfo') + package_data['dateutil'] = ['zoneinfo/zoneinfo*.tar.*'] + + if sys.platform=='win32': + # always add these to the win32 installer + add_pytz() + add_dateutil() + else: + # only add them if we need them + if not haspytz: add_pytz() + if not hasdateutil: add_dateutil() + +print_raw("") +print_raw("OPTIONAL USETEX DEPENDENCIES") +check_for_dvipng() +check_for_ghostscript() +check_for_latex() +check_for_pdftops() + +# TODO: comment out for mpl release: +print_raw("") +print_raw("EXPERIMENTAL CONFIG PACKAGE DEPENDENCIES") +if not check_for_configobj(): py_modules.append('configobj') +if not check_for_traits(): build_traits(ext_modules, packages) + +print_raw("") print_raw("[Edit setup.cfg to suppress the above messages]") print_line() Modified: trunk/matplotlib/setupext.py =================================================================== --- trunk/matplotlib/setupext.py 2007-11-09 13:49:54 UTC (rev 4182) +++ trunk/matplotlib/setupext.py 2007-11-09 14:24:41 UTC (rev 4183) @@ -42,6 +42,7 @@ """ import os +import re basedir = { @@ -325,21 +326,33 @@ print_status("Cairo", cairo.version) return True -def check_for_pytz(): +def check_for_datetime(): try: + import datetime + except ImportError: + print_status("datetime", "no") + return False + else: + print_status("datetime", "present, version unknown") + return True + +def check_for_pytz(hasdatetime=True): + try: import pytz except ImportError: - print_status("pytz", "mpl-provided") + if hasdatetime: print_status("pytz", "mpl-provided") + else: print_status("pytz", "no") return False else: print_status("pytz", pytz.__version__) return True -def check_for_dateutil(): +def 
check_for_dateutil(hasdatetime=True): try: import dateutil except ImportError: - print_status("dateutil", "mpl-provided") + if hasdatetime: print_status("dateutil", "mpl-provided") + else: print_status("dateutil", "no") return False else: try: @@ -375,6 +388,51 @@ print_status("enthought.traits", "no") return gotit +def check_for_dvipng(): + try: + stdin, stdout = os.popen4('dvipng -version') + print_status("dvipng", stdout.readlines()[1].split()[-1]) + return True + except (IndexError, ValueError): + print_status("dvipng", "no") + return False + +def check_for_ghostscript(): + try: + if sys.platform == 'win32': + command = 'gswin32c --version' + else: + command = 'gs --version' + stdin, stdout = os.popen4(command) + print_status("ghostscript", stdout.read()[:-1]) + return True + except (IndexError, ValueError): + print_status("ghostscript", "no") + return False + +def check_for_latex(): + try: + stdin, stdout = os.popen4('latex -version') + line = stdout.readlines()[0] + pattern = '3\.1\d+' + match = re.search(pattern, line) + print_status("latex", match.group(0)) + return True + except (IndexError, ValueError, AttributeError): + print_status("latex", "no") + return False + +def check_for_pdftops(): + try: + stdin, stdout = os.popen4('pdftops -v') + for line in stdout.readlines(): + if 'version' in line: + print_status("pdftops", line.split()[-1]) + return True + except (IndexError, ValueError): + print_status("pdftops", "no") + return False + def check_for_numpy(): gotit = False try: This was sent by the SourceForge.net collaborative development platform, the world's largest Open Source development site. |
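The new check_for_dvipng, check_for_ghostscript, check_for_latex and check_for_pdftops helpers all follow the same shape: run the tool through os.popen4, pull a version string out of its output, and report "no" when that fails. A stripped-down sketch of that pattern for a hypothetical tool ('sometool' and its --version flag are placeholders, not part of the commit):

    import os
    import re

    def probe_version(command='sometool --version'):
        # run the command and look for something version-like in the first
        # line of output; None means the tool is missing or unparseable
        stdin, stdout = os.popen4(command)
        line = stdout.readline()
        match = re.search(r'\d+\.\d+(\.\d+)?', line)
        if match:
            return match.group(0)
        return None

    print 'sometool:', probe_version() or 'no'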
From: <ef...@us...> - 2007-11-09 19:37:20
|
Revision: 4191 http://matplotlib.svn.sourceforge.net/matplotlib/?rev=4191&view=rev Author: efiring Date: 2007-11-09 11:37:15 -0800 (Fri, 09 Nov 2007) Log Message: ----------- Remove numerix as nx from pylab Modified Paths: -------------- trunk/matplotlib/examples/animation_blit_fltk.py trunk/matplotlib/examples/annotation_demo.py trunk/matplotlib/examples/barcode_demo.py trunk/matplotlib/examples/broken_barh.py trunk/matplotlib/examples/clippath_test.py trunk/matplotlib/examples/custom_figure_class.py trunk/matplotlib/examples/date_demo_convert.py trunk/matplotlib/examples/dynamic_collection.py trunk/matplotlib/examples/fill_demo2.py trunk/matplotlib/examples/gradient_bar.py trunk/matplotlib/examples/interp_demo.py trunk/matplotlib/examples/lasso_demo.py trunk/matplotlib/examples/pick_event_demo.py trunk/matplotlib/examples/scatter_custom_symbol.py trunk/matplotlib/examples/scatter_star_poly.py trunk/matplotlib/examples/spy_demos.py trunk/matplotlib/examples/xcorr_demo.py trunk/matplotlib/examples/zoom_window.py trunk/matplotlib/lib/matplotlib/pylab.py Modified: trunk/matplotlib/examples/animation_blit_fltk.py =================================================================== --- trunk/matplotlib/examples/animation_blit_fltk.py 2007-11-09 19:23:42 UTC (rev 4190) +++ trunk/matplotlib/examples/animation_blit_fltk.py 2007-11-09 19:37:15 UTC (rev 4191) @@ -3,7 +3,7 @@ import matplotlib matplotlib.use('FltkAgg') import pylab as p -import numpy as nx +import numpy as npy import time @@ -29,7 +29,7 @@ self.background = self.canvas.copy_from_bbox(self.ax.bbox) self.canvas.restore_region(self.background) # update the data - line.set_ydata(nx.sin(x+self.cnt/10.0)) + line.set_ydata(npy.sin(x+self.cnt/10.0)) # just draw the animated artist self.ax.draw_artist(line) # just redraw the axes rectangle @@ -45,8 +45,8 @@ p.subplots_adjust(left=0.3, bottom=0.3) # check for flipy bugs p.grid() # to ensure proper background restore # create the initial line -x = nx.arange(0,2*nx.pi,0.01) -line, = p.plot(x, nx.sin(x), animated=True) +x = npy.arange(0,2*npy.pi,0.01) +line, = p.plot(x, npy.sin(x), animated=True) p.draw() anim=animator(ax) Modified: trunk/matplotlib/examples/annotation_demo.py =================================================================== --- trunk/matplotlib/examples/annotation_demo.py 2007-11-09 19:23:42 UTC (rev 4190) +++ trunk/matplotlib/examples/annotation_demo.py 2007-11-09 19:37:15 UTC (rev 4191) @@ -32,17 +32,19 @@ """ -from pylab import figure, show, nx +from matplotlib.pyplot import figure, show from matplotlib.patches import Ellipse +import numpy as npy + if 1: # if only one location is given, the text and xypoint being # annotated are assumed to be the same fig = figure() ax = fig.add_subplot(111, autoscale_on=False, xlim=(-1,5), ylim=(-3,5)) - t = nx.arange(0.0, 5.0, 0.01) - s = nx.cos(2*nx.pi*t) + t = npy.arange(0.0, 5.0, 0.01) + s = npy.cos(2*npy.pi*t) line, = ax.plot(t, s, lw=3, color='purple') ax.annotate('axes center', xy=(.5, .5), xycoords='axes fraction', @@ -85,8 +87,8 @@ # respected fig = figure() ax = fig.add_subplot(111, polar=True) - r = nx.arange(0,1,0.001) - theta = 2*2*nx.pi*r + r = npy.arange(0,1,0.001) + theta = 2*2*npy.pi*r line, = ax.plot(theta, r, color='#ee8d18', lw=3) ind = 800 @@ -115,8 +117,8 @@ ax.add_artist(el) el.set_clip_box(ax.bbox) ax.annotate('the top', - xy=(nx.pi/2., 10.), # theta, radius - xytext=(nx.pi/3, 20.), # theta, radius + xy=(npy.pi/2., 10.), # theta, radius + xytext=(npy.pi/3, 20.), # theta, radius xycoords='polar', textcoords='polar', 
arrowprops=dict(facecolor='black', shrink=0.05), Modified: trunk/matplotlib/examples/barcode_demo.py =================================================================== --- trunk/matplotlib/examples/barcode_demo.py 2007-11-09 19:23:42 UTC (rev 4190) +++ trunk/matplotlib/examples/barcode_demo.py 2007-11-09 19:37:15 UTC (rev 4191) @@ -1,14 +1,16 @@ -from pylab import figure, show, cm, nx +from matplotlib.pyplot import figure, show, cm +from numpy import where +from numpy.random import rand # the bar -x = nx.where(nx.mlab.rand(500)>0.7, 1.0, 0.0) +x = where(rand(500)>0.7, 1.0, 0.0) axprops = dict(xticks=[], yticks=[]) barprops = dict(aspect='auto', cmap=cm.binary, interpolation='nearest') fig = figure() -# a vertical barcode +# a vertical barcode -- this is broken at present x.shape = len(x), 1 ax = fig.add_axes([0.1, 0.3, 0.1, 0.6], **axprops) ax.imshow(x, **barprops) Modified: trunk/matplotlib/examples/broken_barh.py =================================================================== --- trunk/matplotlib/examples/broken_barh.py 2007-11-09 19:23:42 UTC (rev 4190) +++ trunk/matplotlib/examples/broken_barh.py 2007-11-09 19:37:15 UTC (rev 4191) @@ -2,7 +2,7 @@ """ Make a "broken" horizontal bar plot, ie one with gaps """ -from pylab import figure, show, nx +from matplotlib.pyplot import figure, show fig = figure() ax = fig.add_subplot(111) Modified: trunk/matplotlib/examples/clippath_test.py =================================================================== --- trunk/matplotlib/examples/clippath_test.py 2007-11-09 19:23:42 UTC (rev 4190) +++ trunk/matplotlib/examples/clippath_test.py 2007-11-09 19:37:15 UTC (rev 4191) @@ -1,9 +1,10 @@ -from pylab import figure, show, nx +from matplotlib.pyplot import figure, show import matplotlib.transforms as transforms from matplotlib.patches import RegularPolygon import matplotlib.agg as agg +from numpy import arange, sin, pi +from numpy.random import rand - class ClipWindow: def __init__(self, ax, line): self.ax = ax @@ -58,9 +59,9 @@ fig = figure(figsize=(8,8)) ax = fig.add_subplot(111) -t = nx.arange(0.0, 4.0, 0.01) -s = 2*nx.sin(2*nx.pi*8*t) +t = arange(0.0, 4.0, 0.01) +s = 2*sin(2*pi*8*t) -line, = ax.plot(t, 2*(nx.mlab.rand(len(t))-0.5), 'b-') +line, = ax.plot(t, 2*(rand(len(t))-0.5), 'b-') clipwin = ClipWindow(ax, line) show() Modified: trunk/matplotlib/examples/custom_figure_class.py =================================================================== --- trunk/matplotlib/examples/custom_figure_class.py 2007-11-09 19:23:42 UTC (rev 4190) +++ trunk/matplotlib/examples/custom_figure_class.py 2007-11-09 19:37:15 UTC (rev 4191) @@ -1,7 +1,7 @@ """ You can pass a custom Figure constructor to figure if youy want to derive from the default Figure. 
This simple example creates a figure with a figure title """ -from pylab import figure, show, nx +from matplotlib.pyplot import figure, show from matplotlib.figure import Figure class MyFigure(Figure): Modified: trunk/matplotlib/examples/date_demo_convert.py =================================================================== --- trunk/matplotlib/examples/date_demo_convert.py 2007-11-09 19:23:42 UTC (rev 4190) +++ trunk/matplotlib/examples/date_demo_convert.py 2007-11-09 19:37:15 UTC (rev 4191) @@ -1,16 +1,16 @@ #!/usr/bin/env python import datetime -from pylab import figure, show, nx +from matplotlib.pyplot import figure, show from matplotlib.dates import DayLocator, HourLocator, DateFormatter, drange +from numpy import arange - date1 = datetime.datetime( 2000, 3, 2) date2 = datetime.datetime( 2000, 3, 6) delta = datetime.timedelta(hours=6) dates = drange(date1, date2, delta) -y = nx.arange( len(dates)*1.0) +y = arange( len(dates)*1.0) fig = figure() ax = fig.add_subplot(111) @@ -25,7 +25,7 @@ # tick, not the base multiple ax.xaxis.set_major_locator( DayLocator() ) -ax.xaxis.set_minor_locator( HourLocator(nx.arange(0,25,6)) ) +ax.xaxis.set_minor_locator( HourLocator(arange(0,25,6)) ) ax.xaxis.set_major_formatter( DateFormatter('%Y-%m-%d') ) ax.fmt_xdata = DateFormatter('%Y-%m-%d %H:%M:%S') Modified: trunk/matplotlib/examples/dynamic_collection.py =================================================================== --- trunk/matplotlib/examples/dynamic_collection.py 2007-11-09 19:23:42 UTC (rev 4190) +++ trunk/matplotlib/examples/dynamic_collection.py 2007-11-09 19:37:15 UTC (rev 4191) @@ -1,7 +1,8 @@ import random from matplotlib.collections import RegularPolyCollection import matplotlib.cm as cm -from pylab import figure, show, nx +from matplotlib.pyplot import figure, show +from numpy.random import rand fig = figure() ax = fig.add_subplot(111, xlim=(0,1), ylim=(0,1), autoscale_on=False) @@ -29,8 +30,8 @@ press 'a' to add a random point from the collection, 'd' to delete one """ if event.key=='a': - x,y = nx.mlab.rand(2) - color = cm.jet(nx.mlab.rand()) + x,y = rand(2) + color = cm.jet(rand()) offsets.append((x,y)) facecolors.append(color) fig.canvas.draw() Modified: trunk/matplotlib/examples/fill_demo2.py =================================================================== --- trunk/matplotlib/examples/fill_demo2.py 2007-11-09 19:23:42 UTC (rev 4190) +++ trunk/matplotlib/examples/fill_demo2.py 2007-11-09 19:37:15 UTC (rev 4191) @@ -1,8 +1,10 @@ -from pylab import figure, nx, show +from matplotlib.pyplot import figure, show +from numpy import arange, sin, pi + fig = figure() ax = fig.add_subplot(111) -t = nx.arange(0.0,3.01,0.01) -s = nx.sin(2*nx.pi*t) -c = nx.sin(4*nx.pi*t) +t = arange(0.0,3.01,0.01) +s = sin(2*pi*t) +c = sin(4*pi*t) ax.fill(t, s, 'b', t, c, 'g', alpha=0.2) show() Modified: trunk/matplotlib/examples/gradient_bar.py =================================================================== --- trunk/matplotlib/examples/gradient_bar.py 2007-11-09 19:23:42 UTC (rev 4190) +++ trunk/matplotlib/examples/gradient_bar.py 2007-11-09 19:37:15 UTC (rev 4191) @@ -1,4 +1,6 @@ -from pylab import figure, show, nx, cm +from matplotlib.pyplot import figure, show, cm +from numpy import arange +from numpy.random import rand def gbar(ax, x, y, width=0.5, bottom=0): X = [[.6, .6],[.7,.7]] @@ -19,8 +21,8 @@ extent=(xmin, xmax, ymin, ymax), alpha=1) N = 10 -x = nx.arange(N)+0.25 -y = nx.mlab.rand(N) +x = arange(N)+0.25 +y = rand(N) gbar(ax, x, y, width=0.7) ax.set_aspect('normal') show() Modified: 
trunk/matplotlib/examples/interp_demo.py =================================================================== --- trunk/matplotlib/examples/interp_demo.py 2007-11-09 19:23:42 UTC (rev 4190) +++ trunk/matplotlib/examples/interp_demo.py 2007-11-09 19:37:15 UTC (rev 4191) @@ -1,6 +1,9 @@ -from pylab import figure, show, nx, linspace, stineman_interp -x = linspace(0,2*nx.pi,20); -y = nx.sin(x); yp = None +from matplotlib.pyplot import figure, show +from numpy import pi, sin, linspace +from matplotlib.mlab import stineman_interp + +x = linspace(0,2*pi,20); +y = sin(x); yp = None xi = linspace(x[0],x[-1],100); yi = stineman_interp(xi,x,y,yp); Modified: trunk/matplotlib/examples/lasso_demo.py =================================================================== --- trunk/matplotlib/examples/lasso_demo.py 2007-11-09 19:23:42 UTC (rev 4190) +++ trunk/matplotlib/examples/lasso_demo.py 2007-11-09 19:37:15 UTC (rev 4191) @@ -13,7 +13,9 @@ from matplotlib.colors import colorConverter from matplotlib.collections import RegularPolyCollection -from pylab import figure, show, nx +from matplotlib.pyplot import figure, show +from numpy import nonzero +from numpy.random import rand class Datum: colorin = colorConverter.to_rgba('red') @@ -47,9 +49,7 @@ self.cid = self.canvas.mpl_connect('button_press_event', self.onpress) def callback(self, verts): - #print 'all done', verts - #ind = matplotlib.mlab._inside_poly_deprecated(self.xys, verts) - ind = nx.nonzero(points_inside_poly(self.xys, verts)) + ind = nonzero(points_inside_poly(self.xys, verts))[0] for i in range(self.Nxy): if i in ind: self.facecolors[i] = Datum.colorin @@ -66,7 +66,7 @@ # acquire a lock on the widget drawing self.canvas.widgetlock(self.lasso) -data = [Datum(*xy) for xy in nx.mlab.rand(100, 2)] +data = [Datum(*xy) for xy in rand(100, 2)] fig = figure() ax = fig.add_subplot(111, xlim=(0,1), ylim=(0,1), autoscale_on=False) Modified: trunk/matplotlib/examples/pick_event_demo.py =================================================================== --- trunk/matplotlib/examples/pick_event_demo.py 2007-11-09 19:23:42 UTC (rev 4190) +++ trunk/matplotlib/examples/pick_event_demo.py 2007-11-09 19:37:15 UTC (rev 4191) @@ -63,23 +63,25 @@ The examples below illustrate each of these methods. 
""" -from pylab import figure, show, nx +from matplotlib.pyplot import figure, show from matplotlib.lines import Line2D from matplotlib.patches import Patch, Rectangle from matplotlib.text import Text from matplotlib.image import AxesImage +import numpy as npy +from numpy.random import rand if 1: # simple picking, lines, rectangles and text fig = figure() ax1 = fig.add_subplot(211) ax1.set_title('click on points, rectangles or text', picker=True) ax1.set_ylabel('ylabel', picker=True, bbox=dict(facecolor='red')) - line, = ax1.plot(nx.mlab.rand(100), 'o', picker=5) # 5 points tolerance + line, = ax1.plot(rand(100), 'o', picker=5) # 5 points tolerance # pick the rectangle ax2 = fig.add_subplot(212) - bars = ax2.bar(range(10), nx.mlab.rand(10), picker=True) + bars = ax2.bar(range(10), rand(10), picker=True) for label in ax2.get_xticklabels(): # make the xtick labels pickable label.set_picker(True) @@ -90,7 +92,7 @@ xdata = thisline.get_xdata() ydata = thisline.get_ydata() ind = event.ind - print 'onpick1 line:', zip(nx.take(xdata, ind), nx.take(ydata, ind)) + print 'onpick1 line:', zip(npy.take(xdata, ind), npy.take(ydata, ind)) elif isinstance(event.artist, Rectangle): patch = event.artist print 'onpick1 patch:', patch.get_verts() @@ -122,12 +124,12 @@ xdata = line.get_xdata() ydata = line.get_ydata() maxd = 0.05 - d = nx.sqrt((xdata-mouseevent.xdata)**2. + (ydata-mouseevent.ydata)**2.) + d = npy.sqrt((xdata-mouseevent.xdata)**2. + (ydata-mouseevent.ydata)**2.) - ind = nx.nonzero(nx.less_equal(d, maxd)) + ind = npy.nonzero(npy.less_equal(d, maxd)) if len(ind): - pickx = nx.take(xdata, ind) - picky = nx.take(ydata, ind) + pickx = npy.take(xdata, ind) + picky = npy.take(ydata, ind) props = dict(ind=ind, pickx=pickx, picky=picky) return True, props else: @@ -139,16 +141,16 @@ fig = figure() ax1 = fig.add_subplot(111) ax1.set_title('custom picker for line data') - line, = ax1.plot(nx.mlab.rand(100), nx.mlab.rand(100), 'o', picker=line_picker) + line, = ax1.plot(rand(100), rand(100), 'o', picker=line_picker) fig.canvas.mpl_connect('pick_event', onpick2) if 1: # picking on a scatter plot (matplotlib.collections.RegularPolyCollection) - x, y, c, s = nx.mlab.rand(4, 100) + x, y, c, s = rand(4, 100) def onpick3(event): ind = event.ind - print 'onpick3 scatter:', ind, nx.take(x, ind), nx.take(y, ind) + print 'onpick3 scatter:', ind, npy.take(x, ind), npy.take(y, ind) fig = figure() ax1 = fig.add_subplot(111) @@ -159,10 +161,10 @@ if 1: # picking images (matplotlib.image.AxesImage) fig = figure() ax1 = fig.add_subplot(111) - im1 = ax1.imshow(nx.rand(10,5), extent=(1,2,1,2), picker=True) - im2 = ax1.imshow(nx.rand(5,10), extent=(3,4,1,2), picker=True) - im3 = ax1.imshow(nx.rand(20,25), extent=(1,2,3,4), picker=True) - im4 = ax1.imshow(nx.rand(30,12), extent=(3,4,3,4), picker=True) + im1 = ax1.imshow(rand(10,5), extent=(1,2,1,2), picker=True) + im2 = ax1.imshow(rand(5,10), extent=(3,4,1,2), picker=True) + im3 = ax1.imshow(rand(20,25), extent=(1,2,3,4), picker=True) + im4 = ax1.imshow(rand(30,12), extent=(3,4,3,4), picker=True) ax1.axis([0,5,0,5]) def onpick4(event): Modified: trunk/matplotlib/examples/scatter_custom_symbol.py =================================================================== --- trunk/matplotlib/examples/scatter_custom_symbol.py 2007-11-09 19:23:42 UTC (rev 4190) +++ trunk/matplotlib/examples/scatter_custom_symbol.py 2007-11-09 19:37:15 UTC (rev 4191) @@ -1,12 +1,14 @@ -from pylab import figure, nx, show +from matplotlib.pyplot import figure, show +from numpy import arange, pi, cos, sin, 
pi +from numpy.random import rand # unit area ellipse rx, ry = 3., 1. -area = rx * ry * nx.pi -theta = nx.arange(0, 2*nx.pi+0.01, 0.1) -verts = zip(rx/area*nx.cos(theta), ry/area*nx.sin(theta)) +area = rx * ry * pi +theta = arange(0, 2*pi+0.01, 0.1) +verts = zip(rx/area*cos(theta), ry/area*sin(theta)) -x,y,s,c = nx.mlab.rand(4, 30) +x,y,s,c = rand(4, 30) s*= 10**2. fig = figure() Modified: trunk/matplotlib/examples/scatter_star_poly.py =================================================================== --- trunk/matplotlib/examples/scatter_star_poly.py 2007-11-09 19:23:42 UTC (rev 4190) +++ trunk/matplotlib/examples/scatter_star_poly.py 2007-11-09 19:37:15 UTC (rev 4191) @@ -1,7 +1,7 @@ import pylab -x = pylab.nx.mlab.rand(10) -y = pylab.nx.mlab.rand(10) +x = pylab.rand(10) +y = pylab.rand(10) pylab.subplot(321) pylab.scatter(x,y,s=80,marker=">") Modified: trunk/matplotlib/examples/spy_demos.py =================================================================== --- trunk/matplotlib/examples/spy_demos.py 2007-11-09 19:23:42 UTC (rev 4190) +++ trunk/matplotlib/examples/spy_demos.py 2007-11-09 19:37:15 UTC (rev 4191) @@ -2,7 +2,8 @@ Plot the sparsity pattern of arrays """ -from pylab import figure, show, nx +from matplotlib.pyplot import figure, show +import numpy fig = figure() ax1 = fig.add_subplot(221) @@ -10,7 +11,7 @@ ax3 = fig.add_subplot(223) ax4 = fig.add_subplot(224) -x = nx.mlab.randn(20,20) +x = numpy.random.randn(20,20) x[5] = 0. x[:,12] = 0. Modified: trunk/matplotlib/examples/xcorr_demo.py =================================================================== --- trunk/matplotlib/examples/xcorr_demo.py 2007-11-09 19:23:42 UTC (rev 4190) +++ trunk/matplotlib/examples/xcorr_demo.py 2007-11-09 19:37:15 UTC (rev 4191) @@ -1,6 +1,7 @@ -from pylab import figure, show, nx +from matplotlib.pylab import figure, show +import numpy -x,y = nx.mlab.randn(2,100) +x,y = numpy.random.randn(2,100) fig = figure() ax1 = fig.add_subplot(211) ax1.xcorr(x, y, usevlines=True, maxlags=50, normed=True) @@ -13,3 +14,4 @@ ax2.axhline(0, color='black', lw=2) show() + Modified: trunk/matplotlib/examples/zoom_window.py =================================================================== --- trunk/matplotlib/examples/zoom_window.py 2007-11-09 19:23:42 UTC (rev 4190) +++ trunk/matplotlib/examples/zoom_window.py 2007-11-09 19:37:15 UTC (rev 4191) @@ -9,15 +9,17 @@ Note the diameter of the circles in the scatter are defined in points**2, so their size is independent of the zoom """ -from pylab import figure, show, nx +from matplotlib.pyplot import figure, show +import numpy figsrc = figure() figzoom = figure() axsrc = figsrc.add_subplot(111, xlim=(0,1), ylim=(0,1), autoscale_on=False) -axzoom = figzoom.add_subplot(111, xlim=(0.45,0.55), ylim=(0.4,.6), autoscale_on=False) +axzoom = figzoom.add_subplot(111, xlim=(0.45,0.55), ylim=(0.4,.6), + autoscale_on=False) axsrc.set_title('Click to zoom') axzoom.set_title('zoom window') -x,y,s,c = nx.mlab.rand(4,200) +x,y,s,c = numpy.random.rand(4,200) s *= 200 Modified: trunk/matplotlib/lib/matplotlib/pylab.py =================================================================== --- trunk/matplotlib/lib/matplotlib/pylab.py 2007-11-09 19:23:42 UTC (rev 4190) +++ trunk/matplotlib/lib/matplotlib/pylab.py 2007-11-09 19:37:15 UTC (rev 4191) @@ -202,8 +202,13 @@ from cbook import flatten, is_string_like, exception_to_str, popd, \ silent_list, iterable, enumerate, dedent -import matplotlib.numerix as nx import numpy as npy +# The masked array namespace is brought in as ma; getting +# 
this from numerix allows one to select either numpy.ma or +# Pierre G-M's maskedarray implementation, which may +# replace the present numpy.ma implementation in a future +# numpy release. +from matplotlib.numerix import npyma as ma from matplotlib import mpl # pulls in most modules
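
For reference, the example diffs above all apply the same substitution: the numerix namespace that pylab exported as nx is replaced by direct matplotlib.pyplot and numpy imports, with helpers such as nx.mlab.rand moving to numpy.random. The sketch below shows the before/after shape of that change on a toy script; the variable names and plotted data are invented for illustration and are not taken from any of the modified example files.

    # Old style (before this changeset): everything came through pylab,
    # including the numerix namespace bound to the name nx.
    #
    #   from pylab import figure, show, nx
    #   x = nx.arange(0.0, 2*nx.pi, 0.01)
    #   y = nx.sin(x) + 0.1*nx.mlab.rand(len(x))

    # New style, following the pattern of this changeset: pyplot supplies
    # the figure machinery, numpy supplies the numerics, and random
    # helpers come from numpy.random instead of nx.mlab.
    from matplotlib.pyplot import figure, show
    import numpy as npy
    from numpy.random import rand

    x = npy.arange(0.0, 2*npy.pi, 0.01)   # sample points
    y = npy.sin(x) + 0.1*rand(len(x))     # noisy sine, purely illustrative

    fig = figure()
    ax = fig.add_subplot(111)
    ax.plot(x, y, lw=2)
    show()

Code that relies on masked arrays can continue to import them through pylab: as the new comment in the pylab.py hunk above notes, matplotlib.numerix.npyma is bound to the name ma so that either numpy.ma or the external maskedarray implementation can be selected behind the scenes.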