From: <fer...@us...> - 2008-10-19 08:35:19
Revision: 6271
http://matplotlib.svn.sourceforge.net/matplotlib/?rev=6271&view=rev
Author: fer_perez
Date: 2008-10-19 08:30:58 +0000 (Sun, 19 Oct 2008)
Log Message:
-----------
Update more examples. Also remove some figures that can be
auto-generated and don't need to clutter the repo.
Added Paths:
-----------
trunk/py4science/examples/iterators.py
trunk/py4science/examples/iterators_example.py
trunk/py4science/examples/numpy_wrap/f2py/example3/test_example.py
trunk/py4science/examples/numpy_wrap/f2py/example3/test_fib.py
trunk/py4science/examples/numpytemps.py
trunk/py4science/examples/recarray/
trunk/py4science/examples/recarray/recarr_simple.py
trunk/py4science/examples/recarray/recarr_simple_data.txt
trunk/py4science/examples/soln/
trunk/py4science/examples/txt_data_load.py
trunk/py4science/examples/txt_data_sample.txt
Removed Paths:
-------------
trunk/py4science/examples/numpy-blitz_1000.png
trunk/py4science/examples/numpy-blitz_300.png
trunk/py4science/examples/numpy-blitz_500.png
trunk/py4science/examples/weave_blitz_comp.png
Added: trunk/py4science/examples/iterators.py
===================================================================
--- trunk/py4science/examples/iterators.py (rev 0)
+++ trunk/py4science/examples/iterators.py 2008-10-19 08:30:58 UTC (rev 6271)
@@ -0,0 +1,82 @@
+import numpy as np
+from scipy.weave import inline
+from numpy.testing import assert_array_almost_equal, assert_almost_equal
+
+def prodsum(a, b, axis=None):
+ assert a.shape == b.shape, "cannot take prodsum of different size arrays"
+ nd = len(a.shape)
+ if axis is not None:
+        caxis = axis if axis >= 0 else nd + axis
+        assert caxis < nd, "cannot perform operation on axis: %d" % axis
+ dims = list(a.shape)
+ dims.pop(axis)
+ c = np.zeros(tuple(dims), np.float64)
+ else:
+ caxis = -1
+ c = np.array([0.0])
+
+ xtra = \
+"""
+double prodsum(double *d1, double *d2, int stride, int size)
+{
+ double sum = 0.0;
+
+ while(size--) {
+ sum += (*d1) * (*d2);
+ d1 += stride;
+ d2 += stride;
+ }
+ return sum;
+}
+"""
+
+ code = \
+"""
+double *d1, *d2, *d3;
+int sumall = caxis < 0 ? 1 : 0;
+PyArrayIterObject *itr1, *itr2, *itr3;
+
+itr1 = (PyArrayIterObject *) PyArray_IterAllButAxis(py_a, &caxis);
+itr2 = (PyArrayIterObject *) PyArray_IterAllButAxis(py_b, &caxis);
+if (!sumall) itr3 = (PyArrayIterObject *) PyArray_IterNew(py_c);
+
+//...... more definitions here
+
+// make use of auto defined arrays, be careful to use "axis" AFTER
+// creating iterators, in case it gets chosen for you
+int stride = Sa[caxis]/sizeof(double);
+int size = Na[caxis];
+while( PyArray_ITER_NOTDONE(itr1) ) {
+
+ //...... iter loop here
+ d1 = (double *) itr1->dataptr;
+ d2 = (double *) itr2->dataptr;
+ if(sumall) {
+ d3 = c;
+ } else {
+ d3 = (double *) itr3->dataptr;
+ PyArray_ITER_NEXT(itr3);
+ }
+ *d3 += prodsum(d1, d2, stride, size);
+ PyArray_ITER_NEXT(itr1);
+ PyArray_ITER_NEXT(itr2);
+}
+"""
+ inline(code, ['a', 'b', 'c', 'caxis'], compiler='gcc',
+ support_code=xtra)
+ return c[0] if axis is None else c
+
+
+def tests():
+ a = np.random.rand(4,2,9)
+ b = np.ones_like(a)
+
+ assert_almost_equal(prodsum(a,b), a.sum())
+ assert_array_almost_equal(prodsum(a,b,axis=-1), a.sum(axis=-1))
+ assert_array_almost_equal(prodsum(a[:2,:,1::2], b[:2,:,1::2], axis=0),
+ a[:2,:,1::2].sum(axis=0))
+ assert_array_almost_equal(prodsum(a[:,:,::-1], b[:,:,::-1], axis=-1),
+ a[:,:,::-1].sum(axis=-1))
+
+if __name__ == '__main__':
+ tests()
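
For comparison, the inline routine above is equivalent to a one-line NumPy
expression; a minimal pure-NumPy reference (a sketch, not part of this
changeset) can be used to sanity-check prodsum():

    import numpy as np

    def prodsum_ref(a, b, axis=None):
        # elementwise product followed by a sum over the requested axis,
        # matching what the weave version computes
        return np.multiply(a, b).sum(axis=axis)

    # e.g. prodsum_ref(a, b, axis=-1) should agree with prodsum(a, b, axis=-1)
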
Added: trunk/py4science/examples/iterators_example.py
===================================================================
--- trunk/py4science/examples/iterators_example.py (rev 0)
+++ trunk/py4science/examples/iterators_example.py 2008-10-19 08:30:58 UTC (rev 6271)
@@ -0,0 +1,229 @@
+#!/usr/bin/env python
+"""Blitz conversion is terrific, but sometimes you don't have fixed array sizes
+in your problem. Fortunately numpy iterators still make writing inline weave
+code very, very simple."""
+
+import sys
+
+import numpy as np
+from numpy.testing.utils import jiffies
+
+from matplotlib import pyplot as plt
+
+from scipy.weave import inline, converters, blitz
+
+# A little timing utility taken from the old scipy.testing
+def measure(code_str,times=1):
+ """ Return elapsed time for executing code_str in the
+ namespace of the caller for given times.
+ """
+ frame = sys._getframe(1)
+ locs,globs = frame.f_locals,frame.f_globals
+ code = compile(code_str,'<Timing code>','exec')
+ i = 0
+ elapsed = jiffies()
+ while i<times:
+ i += 1
+ exec code in globs,locs
+ elapsed = jiffies() - elapsed
+ return 0.01*elapsed
+
+
+def multi_iter_example():
+ # This is a very simple example of multi dimensional iterators, and
+ # their power to "broadcast" arrays of compatible shapes. It shows that
+ # the very same code that is entirely ignorant of dimensionality can
+ # achieve completely different computations based on the rules of
+ # broadcasting.
+
+ # it is important to know that the weave array conversion of "a"
+ # gives you access in C++ to:
+ # py_a -- PyObject *
+ # a_array -- PyArrayObject *
+    # a       -- a_array->data cast to the proper data type
+
+ a = np.ones((4,4), np.float64)
+ # for the sake of driving home the "dynamic code" approach...
+ dtype2ctype = {
+ np.dtype(np.float64): 'double',
+ np.dtype(np.float32): 'float',
+ np.dtype(np.int32): 'int',
+ np.dtype(np.int16): 'short',
+ }
+ dt = dtype2ctype.get(a.dtype)
+
+ # this code does a = a*b inplace, broadcasting b to fit the shape of a
+ code = \
+"""
+%s *p1, *p2;
+PyObject *itr;
+itr = PyArray_MultiIterNew(2, a_array, b_array);
+while(PyArray_MultiIter_NOTDONE(itr)) {
+ p1 = (%s *) PyArray_MultiIter_DATA(itr, 0);
+ p2 = (%s *) PyArray_MultiIter_DATA(itr, 1);
+ *p1 = (*p1) * (*p2);
+ PyArray_MultiIter_NEXT(itr);
+}
+""" % (dt, dt, dt)
+
+ b = np.arange(4, dtype=a.dtype)
+ print '\n A B '
+ print a, b
+    # this reshaping is redundant; it would be the default broadcast anyway
+ b.shape = (1,4)
+ inline(code, ['a', 'b'])
+ print "\ninline version of a*b,"
+ print a
+ a = np.ones((4,4), np.float64)
+ b.shape = (4,1)
+ inline(code, ['a', 'b'])
+ print "\ninline version of a*b[:,None],"
+ print a
+
+def data_casting_test():
+ # In my MR application, raw data is stored as a file with one or more
+ # (block-hdr, block-data) pairs. Block data is one or more
+ # rows of Npt complex samples in big-endian integer pairs (real, imag).
+ #
+ # At the block level, I encounter three different raw data layouts--
+ # 1) one plane, or slice: Y rows by 2*Npt samples
+ # 2) one volume: Z slices * Y rows by 2*Npt samples
+ # 3) one row sliced across the z-axis: Z slices by 2*Npt samples
+ #
+ # The task is to tease out one volume at a time from any given layout,
+ # and cast the integer precision data into a complex64 array.
+ # Given that contiguity is not guaranteed, and the number of dimensions
+ # can vary, Numpy iterators are useful to provide a single code that can
+ # carry out the conversion.
+ #
+ # Other solutions include:
+ # 1) working entirely with the string data from file.read() with string
+ # manipulations (simulated below).
+ # 2) letting numpy handle automatic byteorder/dtype conversion
+
+ nsl, nline, npt = (20,64,64)
+ hdr_dt = np.dtype('>V28')
+ # example 1: a block is one slice of complex samples in short integer pairs
+ blk_dt1 = np.dtype(('>i2', nline*npt*2))
+ dat_dt = np.dtype({'names': ['hdr', 'data'], 'formats': [hdr_dt, blk_dt1]})
+ # create an empty volume-- nsl contiguous blocks
+ vol = np.empty((nsl,), dat_dt)
+ t = time_casting(vol[:]['data'])
+ plt.plot(100*t/t.max(), 'b--', label='vol=20 contiguous blocks')
+ plt.plot(100*t/t.max(), 'bo')
+ # example 2: a block is one entire volume
+ blk_dt2 = np.dtype(('>i2', nsl*nline*npt*2))
+ dat_dt = np.dtype({'names': ['hdr', 'data'], 'formats': [hdr_dt, blk_dt2]})
+ # create an empty volume-- 1 block
+ vol = np.empty((1,), dat_dt)
+ t = time_casting(vol[0]['data'])
+ plt.plot(100*t/t.max(), 'g--', label='vol=1 contiguous block')
+ plt.plot(100*t/t.max(), 'go')
+ # example 3: a block slices across the z dimension, long integer precision
+ # ALSO--a given volume is sliced discontiguously
+ blk_dt3 = np.dtype(('>i4', nsl*npt*2))
+ dat_dt = np.dtype({'names': ['hdr', 'data'], 'formats': [hdr_dt, blk_dt3]})
+ # a real data set has volumes interleaved, so create two volumes here
+ vols = np.empty((2*nline,), dat_dt)
+ # and work on casting the first volume
+ t = time_casting(vols[0::2]['data'])
+ plt.plot(100*t/t.max(), 'r--', label='vol=64 discontiguous blocks')
+ plt.plot(100*t/t.max(), 'ro')
+ plt.xticks([0,1,2], ('strings', 'numpy auto', 'inline'))
+ plt.gca().set_xlim((-0.25, 2.25))
+ plt.gca().set_ylim((0, 110))
+ plt.gca().set_ylabel(r"% of slowest time")
+ plt.legend(loc=8)
+ plt.title('Casting raw file data to an MR volume')
+ plt.show()
+
+
+def time_casting(int_data):
+ nblk = 1 if len(int_data.shape) < 2 else int_data.shape[0]
+ bias = (np.random.rand(nblk) + \
+ 1j*np.random.rand(nblk)).astype(np.complex64)
+ dstr = int_data.tostring()
+ dt = np.int16 if int_data.dtype.itemsize == 2 else np.int32
+ fshape = list(int_data.shape)
+ fshape[-1] = fshape[-1]/2
+ float_data = np.empty(fshape, np.complex64)
+ # method 1: string conversion
+ float_data.shape = (np.product(fshape),)
+ tstr = measure("float_data[:] = complex_fromstring(dstr, dt)", times=25)
+ float_data.shape = fshape
+ print "to-/from- string: ", tstr, "shape=",float_data.shape
+
+ # method 2: numpy dtype magic
+ sl = [None, slice(None)] if len(fshape)<2 else [slice(None)]*len(fshape)
+ # need to loop since int_data need not be contiguous
+ tnpy = measure("""
+for fline, iline, b in zip(float_data[sl], int_data[sl], bias):
+ cast_to_complex_npy(fline, iline, bias=b)""", times=25)
+    print "numpy automagic: ", tnpy
+
+ # method 3: plain inline brute force!
+ twv = measure("cast_to_complex(float_data, int_data, bias=bias)",
+ times=25)
+    print "inline casting: ", twv
+ return np.array([tstr, tnpy, twv], np.float64)
+
+def complex_fromstring(data, numtype):
+ if sys.byteorder == "little":
+ return np.fromstring(
+ np.fromstring(data,numtype).byteswap().astype(np.float32).tostring(),
+ np.complex64)
+ else:
+ return np.fromstring(
+ np.fromstring(data,numtype).astype(np.float32).tostring(),
+ np.complex64)
+
+def cast_to_complex(cplx_float, cplx_integer, bias=None):
+    # build the substitution list; it must be a list (not a tuple) because the
+    # big-endian branch below mutates two entries in place.  "int" rather than
+    # "long" keeps the 4-byte case correct on 64-bit platforms.
+    if cplx_integer.dtype.itemsize == 4:
+        replacements = ["l", "int", "SWAPLONG", "l"]*2
+    else:
+        replacements = ["s", "short", "SWAPSHORT", "s"]*2
+    if sys.byteorder == "big":
+        replacements[-2] = replacements[-6] = "NOP"
+
+ cast_code = """
+ #define SWAPSHORT(x) ((short) ((x >> 8) | (x << 8)) )
+ #define SWAPLONG(x) ((long) ((x >> 24) | (x << 24) | ((x & 0x00ff0000) >> 8) | ((x & 0x0000ff00) << 8)) )
+ #define NOP(x) x
+
+ unsigned short *s;
+    unsigned int *l;
+ float repart, impart;
+ PyObject *itr;
+ itr = PyArray_IterNew(py_cplx_integer);
+ while(PyArray_ITER_NOTDONE(itr)) {
+
+ // get real part
+ %s = (unsigned %s *) PyArray_ITER_DATA(itr);
+ repart = %s(*%s);
+ PyArray_ITER_NEXT(itr);
+ // get imag part
+ %s = (unsigned %s *) PyArray_ITER_DATA(itr);
+ impart = %s(*%s);
+ PyArray_ITER_NEXT(itr);
+ *(cplx_float++) = std::complex<float>(repart, impart);
+
+ }
+    """ % tuple(replacements)
+
+ inline(cast_code, ['cplx_float', 'cplx_integer'])
+ if bias is not None:
+ if len(cplx_float.shape) > 1:
+ bsl = [slice(None)]*(len(cplx_float.shape)-1) + [None]
+ else:
+ bsl = slice(None)
+ np.subtract(cplx_float, bias[bsl], cplx_float)
+
+def cast_to_complex_npy(cplx_float, cplx_integer, bias=None):
+ cplx_float.real[:] = cplx_integer[0::2]
+ cplx_float.imag[:] = cplx_integer[1::2]
+ if bias is not None:
+ np.subtract(cplx_float, bias, cplx_float)
+
+if __name__=="__main__":
+ data_casting_test()
+ multi_iter_example()
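
The broadcasting that multi_iter_example() drives from C++ can be previewed
directly at the NumPy level; a short sketch (not part of this changeset) of
the two cases exercised above:

    import numpy as np

    a = np.ones((4, 4))
    b = np.arange(4, dtype=a.dtype)

    print a * b.reshape(1, 4)    # row-wise broadcast, like the first inline() call
    print a * b.reshape(4, 1)    # column-wise broadcast, like the second one
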
Property changes on: trunk/py4science/examples/iterators_example.py
___________________________________________________________________
Added: svn:executable
+ *
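
The "numpy automagic" method timed in data_casting_test() ultimately relies on
assignment handling the byte-order and dtype conversion; a minimal standalone
sketch of that idea (not part of this changeset):

    import numpy as np

    raw = np.arange(8, dtype='>i2')        # big-endian (real, imag) int16 pairs
    cplx = np.empty(4, np.complex64)
    cplx.real[:] = raw[0::2]               # byteswap and cast happen on assignment
    cplx.imag[:] = raw[1::2]
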
Deleted: trunk/py4science/examples/numpy-blitz_1000.png
===================================================================
(Binary files differ)
Deleted: trunk/py4science/examples/numpy-blitz_300.png
===================================================================
(Binary files differ)
Deleted: trunk/py4science/examples/numpy-blitz_500.png
===================================================================
(Binary files differ)
Added: trunk/py4science/examples/numpy_wrap/f2py/example3/test_example.py
===================================================================
--- trunk/py4science/examples/numpy_wrap/f2py/example3/test_example.py (rev 0)
+++ trunk/py4science/examples/numpy_wrap/f2py/example3/test_example.py 2008-10-19 08:30:58 UTC (rev 6271)
@@ -0,0 +1,9 @@
+import example
+import numpy
+
+x = numpy.arange(10.)
+
+yf = example.cumsum(x)
+yn = numpy.cumsum(x)
+
+numpy.testing.assert_array_almost_equal(yf, yn)
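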
Added: trunk/py4science/examples/numpy_wrap/f2py/example3/test_fib.py
===================================================================
--- trunk/py4science/examples/numpy_wrap/f2py/example3/test_fib.py (rev 0)
+++ trunk/py4science/examples/numpy_wrap/f2py/example3/test_fib.py 2008-10-19 08:30:58 UTC (rev 6271)
@@ -0,0 +1,13 @@
+import numpy as N
+
+import example
+
+n = 10
+fn = example.fib(n)
+
+print 'Fibonacci numbers:'
+print fn
+
+# Check validity
+assert N.alltrue(fn[2:]== fn[1:-1]+fn[:-2]), "Fibonacci mismatch"
+
Added: trunk/py4science/examples/numpytemps.py
===================================================================
--- trunk/py4science/examples/numpytemps.py (rev 0)
+++ trunk/py4science/examples/numpytemps.py 2008-10-19 08:30:58 UTC (rev 6271)
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+"""Demonstration of temporaries in Numpy.
+"""
+
+import numpy as np
+import numpy.testing as nptest
+
+import nose
+
+# convenience global names
+from numpy import (pi, sin, cos, add, subtract, multiply, power)
+
+def test1():
+ """Verify an expression using temporaries.
+ """
+ x = np.linspace(0,2*pi,100)
+
+ # We compute a simple mathematical expression using algebra and functions
+ # of x. This uses a lot of temporaries that are implicitly created. In
+    # total, the implicit temporary count per term is:
+    #   sin(x):           1
+    #   sin(2*x):         2
+    #   4.5*cos(3*x**2):  4
+    # Combining the terms creates one more intermediate (the sum of the first
+    # two terms) before the final subtraction produces y, for a total of
+    # 1 array for the result and 8 temporaries.
+ y = sin(x) + sin(2*x) - 4.5*cos(3*x**2)
+
+ # Now we do it again, but here, we control the temporary creation
+ # ourselves. We use the output argument of all numpy functional forms of
+ # the operators.
+
+ # Initialize the array that will hold the output, empty
+ z = np.empty_like(x)
+ # This version in total uses 1 array for the result and one temporary.
+ tmp = np.empty_like(x)
+
+ # Now, we compute each term of the expression above. Each time, we either
+ # store the output back into the temporary or we accumulate it in z.
+
+ # sin(x)
+ sin(x,z)
+
+ # + sin(2*x)
+ add(z,sin(multiply(2,x,tmp),tmp),z)
+
+ # - 4.5*cos(3*x**2)
+ power(x,2,tmp)
+ multiply(3,tmp,tmp)
+ cos(tmp,tmp)
+ multiply(4.5,tmp,tmp)
+ subtract(z,tmp,z)
+
+ # Verify that the two forms match to 13 digits
+ nptest.assert_almost_equal(y,z,13)
+
+
+def test2():
+ """Compute the same expression, using in-place operations
+ """
+ x = np.linspace(0,2*pi,100)
+
+ y = sin(x) + sin(2*x) - 4.5*cos(3*x**2)
+
+    # This version of the code uses in-place operators, which makes it a bit
+    # more readable while still avoiding hidden temporaries (only the single
+    # preallocated tmp buffer is reused)
+ tmp = np.empty_like(x)
+
+ # sin(x)
+ z = sin(x)
+
+ # + sin(2*x)
+ z += sin(multiply(2,x,tmp),tmp)
+
+ # - 4.5*cos(3*x**2)
+ power(x,2,tmp)
+ tmp *= 3
+ cos(tmp,tmp)
+ tmp *= 4.5
+ z -= tmp
+
+ # Verify that the two forms match to 13 digits
+ nptest.assert_almost_equal(y,z,13)
+
+if __name__ == '__main__':
+ nose.runmodule(exit=False)
Property changes on: trunk/py4science/examples/numpytemps.py
___________________________________________________________________
Added: svn:executable
+ *
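
The positional output argument used throughout numpytemps.py is the classic
spelling; with a sufficiently recent NumPy the ufuncs also accept out= as a
keyword, which some readers find clearer. A sketch of the first two terms
under that assumption (not part of this changeset):

    import numpy as np

    x = np.linspace(0, 2*np.pi, 100)
    z = np.empty_like(x)
    tmp = np.empty_like(x)

    np.sin(x, out=z)              # z = sin(x), no hidden temporary
    np.multiply(2, x, out=tmp)    # tmp = 2*x
    np.sin(tmp, out=tmp)          # tmp = sin(2*x), reusing the same buffer
    np.add(z, tmp, out=z)         # z = sin(x) + sin(2*x)
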
Added: trunk/py4science/examples/recarray/recarr_simple.py
===================================================================
--- trunk/py4science/examples/recarray/recarr_simple.py (rev 0)
+++ trunk/py4science/examples/recarray/recarr_simple.py 2008-10-19 08:30:58 UTC (rev 6271)
@@ -0,0 +1,43 @@
+"""Utility module to load measurements into Numpy record arrays.
+
+Loading measurement files with the format:
+
+#Station Lat Long Elev
+BIRA 26.4840 87.2670 0.0120
+BUNG 27.8771 85.8909 1.1910
+etc...
+"""
+
+import numpy as np
+import pylab as plt
+
+# Simple example of usage
+
+# Data descriptor to make a proper array.
+dt = [('station','S4'),('lat',np.float32),('lon',np.float32),
+ ('elev',np.float32)]
+# This is an alternate and fully equivalent form:
+dt = dict(names = ('station','lat','lon','elev'),
+ formats = ('S4',np.float32,np.float32,np.float32) )
+
+# For more on dtypes, see:
+# http://mentat.za.net/numpy/refguide/arrays.recarray.xhtml
+import math
+
+def tlog(s):
+    # converter demo: column 1 (latitude) is stored as its natural log
+    return np.float32(math.log(float(s)))
+
+tab = np.loadtxt('recarr_simple_data.txt', dt,
+                 converters={1: tlog})
+
+print 'Stations:',tab['station']
+print 'Elevations:',tab['elev']
+print 'First station:',tab[0]
+print 'Mean latitude:',tab['lat'].mean()
+
+plt.figure()
+plt.scatter(tab['lat'],tab['lon'],30*tab['elev'],
+ c=tab['elev'],
+ cmap=plt.cm.bone,
+ )
+plt.show()
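
Structured arrays loaded this way can also be viewed as a recarray for
attribute-style field access; a small sketch (not part of this changeset),
assuming tab was loaded as above:

    rec = tab.view(np.recarray)
    print rec.station          # same data as tab['station']
    print rec.elev.mean()      # fields become attributes
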
Added: trunk/py4science/examples/recarray/recarr_simple_data.txt
===================================================================
--- trunk/py4science/examples/recarray/recarr_simple_data.txt (rev 0)
+++ trunk/py4science/examples/recarray/recarr_simple_data.txt 2008-10-19 08:30:58 UTC (rev 6271)
@@ -0,0 +1,29 @@
+BIRA 26.4840 87.2670 0.0120
+BUNG 27.8771 85.8909 1.1910
+GAIG 26.8380 86.6318 0.1660
+HILE 27.0482 87.3242 2.0880
+ILAM 26.9102 87.9227 1.1810
+JIRI 27.6342 86.2303 1.8660
+NAMC 27.8027 86.7146 3.5230
+PHAP 27.5150 86.5842 2.4880
+PHID 27.1501 87.7645 1.1760
+RUMJ 27.3038 86.5482 1.3190
+SIND 27.2107 85.9088 0.4650
+THAK 27.5996 85.5566 1.5510
+TUML 27.3208 87.1950 0.3600
+LAZE 29.1403 87.5922 4.0110
+SAJA 28.9093 88.0209 4.3510
+ONRN 29.3020 87.2440 4.3500
+SSAN 29.4238 86.7290 4.5850
+SAGA 29.3292 85.2321 4.5240
+DINX 28.6646 87.1157 4.3740
+RBSH 28.1955 86.8280 5.1000
+NAIL 28.6597 86.4126 4.3780
+MNBU 28.7558 86.1610 4.5000
+NLMU 28.1548 85.9777 3.8890
+YALA 28.4043 86.1133 4.4340
+XIXI 28.7409 85.6904 4.6600
+RC14 29.4972 86.4373 4.7560
+MAZA 28.6713 87.8553 4.3670
+JANA 26.7106 85.9242 0.0770
+SUKT 27.7057 85.7611 0.7450
Added: trunk/py4science/examples/txt_data_load.py
===================================================================
--- trunk/py4science/examples/txt_data_load.py (rev 0)
+++ trunk/py4science/examples/txt_data_load.py 2008-10-19 08:30:58 UTC (rev 6271)
@@ -0,0 +1,98 @@
+#!/usr/bin/env python
+"""Read files from Maribeth's format into arrays."""
+
+# Needed modules from the standard library
+import re
+import sys
+
+# Third party modules
+import numpy as N
+
+# Code begins
+def mwread(fname):
+ """Read mw file and return dict with arrays.
+
+ The input data is assumed to be in a file whose format is:
+
+ r:
+ 0.250029, 0.249549, 0.25019, 0.250232
+
+ A:
+ 0.399973, 0.199979, 0.200005, 0.200014
+ 0.199992, 0.400235, 0.200033, 0.200102
+
+ B:
+ 0.428502, 0.142868, 0.142897, 0.142838
+ 0.142884, 0.57165, 0.143053, 0.285911
+
+ The output is a dict whose keys are the letter labels ('r','A','B', etc)
+ and whose values are one-dimensional NumPy arrays with the numbers, in
+ double precision.
+
+ :Parameters:
+ fname : string
+ Name of the input file."""
+
+ fobj = open(fname)
+
+ # Regular expression to match array labels
+    label_re = re.compile('^([a-zA-Z]+):')
+
+ # Initialize output dict
+ dct = {}
+ # Start the state machine in 'scan' mode and switch to data reading mode
+ # ('read') whenever we find what looks like a label.
+ mode = 'scan'
+ for line in fobj:
+ if mode == 'scan':
+ match = label_re.match(line)
+ if match:
+ # Switch modes
+ mode = 'read'
+ # Prepare state for read mode
+ name = match.group(1)
+ data = []
+ elif mode == 'read':
+ if line.isspace():
+ # Pure whitespace lines force a mode switch back to
+ # scanning for variables
+ mode = 'scan'
+ # Store the data that we'd been accumulating for the
+ # current array
+ dct[name] = N.array(data,float)
+ else:
+ # Read data, assume line contains comma-separated strings
+ # of numbers
+ data.extend([float(n) for n in line.split(',')])
+
+    # If the file doesn't end with a blank line, flush the last block
+    if mode == 'read':
+        dct[name] = N.array(data, float)
+
+    # Cleanup before exiting
+    fobj.close()
+
+ return dct
+
+
+# If run as a script
+if __name__ == '__main__':
+ # This allows calling it from the command line with the name of the file to
+ # read as an argument
+ try:
+ fname = sys.argv[1]
+ except IndexError:
+ print 'First argument must be filename to read'
+ sys.exit(1)
+
+ data = mwread(fname)
+ print 'Data dict:'
+ for k,val in data.iteritems():
+ print '%s:' % k
+ print val
+ print
+
+ # Now, load the names from the data dict as top-level variables (use
+ # specially named counters just in case the file declares something common
+ # like 'k'):
+ for _k,_val in data.iteritems():
+ exec '%s = _val' % _k
+
+ print "Now, you can use either the top-level dict 'data', or the variables:"
+ print data.keys()
Property changes on: trunk/py4science/examples/txt_data_load.py
___________________________________________________________________
Added: svn:executable
+ *
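
Together with the sample file added below, typical interactive use would look
roughly like this (a sketch, not part of this changeset, assuming the module
is importable as txt_data_load):

    from txt_data_load import mwread

    data = mwread('txt_data_sample.txt')
    print data['r']            # the four values from the 'r' block
    print data['A'].shape      # all 16 values of A, flattened into a 1-d array
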
Added: trunk/py4science/examples/txt_data_sample.txt
===================================================================
--- trunk/py4science/examples/txt_data_sample.txt (rev 0)
+++ trunk/py4science/examples/txt_data_sample.txt 2008-10-19 08:30:58 UTC (rev 6271)
@@ -0,0 +1,21 @@
+r:
+ 0.250029, 0.249549, 0.25019, 0.250232
+
+A:
+ 0.399973, 0.199979, 0.200005, 0.200014
+ 0.199992, 0.400235, 0.200033, 0.200102
+ 0.2, 0.199919, 0.399979, 0.199954
+ 0.200035, 0.199867, 0.199984, 0.39993
+
+B:
+ 0.428502, 0.142868, 0.142897, 0.142838
+ 0.142884, 0.57165, 0.143053, 0.285911
+ 0.285747, 0.142757, 0.71391, 0.142854
+ 0.142867, 0.142725, 0.000140224, 0.428397
+
+C:
+ 0.666707, 0.166664, 0.166626, 0.16662
+ 0.166638, 0.500328, 0.166752, 0.166883
+ 0.166654, 0.166609, 0.333309, 0.166638
+1.49295e-07, 0.166399, 0.333314, 0.499859
+
Deleted: trunk/py4science/examples/weave_blitz_comp.png
===================================================================
(Binary files differ)