From: <fer...@us...> - 2008-10-19 07:49:08
Revision: 6269 http://matplotlib.svn.sourceforge.net/matplotlib/?rev=6269&view=rev Author: fer_perez Date: 2008-10-19 07:48:51 +0000 (Sun, 19 Oct 2008) Log Message: ----------- Updated skeletons. Modified Paths: -------------- trunk/py4science/examples/skel/basemap1_skel.py trunk/py4science/examples/skel/basemap2_skel.py trunk/py4science/examples/skel/basemap3_skel.py trunk/py4science/examples/skel/basemap4_skel.py trunk/py4science/examples/skel/basemap5_skel.py trunk/py4science/examples/skel/convolution_demo_skel.py trunk/py4science/examples/skel/fft_imdenoise_skel.py trunk/py4science/examples/skel/fitting_skel.py trunk/py4science/examples/skel/glass_dots1_skel.py trunk/py4science/examples/skel/lotka_volterra_skel.py trunk/py4science/examples/skel/noisy_sine_skel.py trunk/py4science/examples/skel/qsort_skel.py trunk/py4science/examples/skel/quad_newton_skel.py trunk/py4science/examples/skel/stats_descriptives_skel.py trunk/py4science/examples/skel/stats_distributions_skel.py trunk/py4science/examples/skel/stock_records_skel.py trunk/py4science/examples/skel/trapezoid_skel.py trunk/py4science/examples/skel/wallis_pi_skel.py trunk/py4science/examples/skel/wordfreqs_skel.py Modified: trunk/py4science/examples/skel/basemap1_skel.py =================================================================== --- trunk/py4science/examples/skel/basemap1_skel.py 2008-10-19 07:38:45 UTC (rev 6268) +++ trunk/py4science/examples/skel/basemap1_skel.py 2008-10-19 07:48:51 UTC (rev 6269) @@ -1,13 +1,11 @@ -import pylab, numpy -from matplotlib.toolkits.basemap import Basemap +from mpl_toolkits.basemap import Basemap +import matplotlib.pyplot as plt # create map by specifying lat/lon values at corners. -projection = 'lcc' # map projection -resolution = XX # resolution of boundaries ('c','l','i',or 'h') -lon_0=XX # longitude of origin of map projection domain (degrees). -lat_0=XX # standard parallel/latitude of origin of map projection domain. -llcrnrlat, llcrnrlon = XX, XX # lat/lon of lower left corner of map (degrees) -urcrnrlat, urcrnrlon = XX, XX # lat/lon of upper right corner of map -m = Basemap(lon_0=lon_0,lat_0=lat_0,\ +resolution = 'l'; projection = 'lcc' +lat_0 = 60; lon_0 = -50 +llcrnrlat, llcrnrlon = 8, -92 +urcrnrlat, urcrnrlon = 39, 63 +m = Basemap(lat_0=lat_0,lon_0=lon_0,\ llcrnrlat=llcrnrlat,llcrnrlon=llcrnrlon,\ urcrnrlat=urcrnrlat,urcrnrlon=urcrnrlon,\ resolution=resolution,projection=projection) @@ -20,5 +18,5 @@ # draw states and countries. m.drawcountries() m.drawstates() -pylab.title('map region specified using corner lat/lon values') -pylab.show() +plt.title('map region specified using corner lat/lon values') +plt.show() Modified: trunk/py4science/examples/skel/basemap2_skel.py =================================================================== --- trunk/py4science/examples/skel/basemap2_skel.py 2008-10-19 07:38:45 UTC (rev 6268) +++ trunk/py4science/examples/skel/basemap2_skel.py 2008-10-19 07:48:51 UTC (rev 6269) @@ -1,13 +1,9 @@ -import pylab, numpy -from matplotlib.toolkits.basemap import Basemap, supported_projections +from mpl_toolkits.basemap import Basemap +import matplotlib.pyplot as plt # create map by specifying width and height in km. -projection = XX # map projection ('lcc','stere','laea','aea' etc) - # 'print supported_projections' gives a list -resolution = XX # resolution of boundaries ('c','l','i',or 'h') -lon_0= XX # longitude of origin of map projection domain (degrees). -lat_0= XX # standard parallel/latitude of origin of map projection domain. 
-width = XX # width of map projecton domain in km. -height = XX # height of map projection domain in km. +resolution = 'l'; projection = 'lcc' +lon_0 = -50; lat_0 = 60 +width = 12000000; height = 0.75*width m = Basemap(lon_0=lon_0,lat_0=lat_0,\ width=width,height=height,\ resolution=resolution,projection=projection) @@ -16,5 +12,5 @@ m.fillcontinents(color='coral',lake_color='aqua') m.drawcountries() m.drawstates() -pylab.title('map region specified using width and height') -pylab.show() +plt.title('map region specified using width and height') +plt.show() Modified: trunk/py4science/examples/skel/basemap3_skel.py =================================================================== --- trunk/py4science/examples/skel/basemap3_skel.py 2008-10-19 07:38:45 UTC (rev 6268) +++ trunk/py4science/examples/skel/basemap3_skel.py 2008-10-19 07:48:51 UTC (rev 6269) @@ -1,34 +1,36 @@ -import pylab, numpy -from matplotlib.toolkits.basemap import Basemap +from mpl_toolkits.basemap import Basemap +import matplotlib.pyplot as plt # create map by specifying width and height in km. -resolution = 'l'; projection='lcc' -lon_0 = -50; lat_0 = 60. +resolution = 'l'; projection = 'lcc' +lon_0 = -50; lat_0 = 60 width = 12000000; height = 0.75*width -m = Basemap(lon_0=lon_0,lat_0=lat_0,width=width,height=height,\ +m = Basemap(lon_0=lon_0,lat_0=lat_0,width=width,height=height, resolution=resolution,projection=projection) -# lat/lon and name of location 1. -lat1 = XX; lon1 = XX; name = XX -# ditto for location 2. -lat2 = XX; lon2 = XX; name2 = XX +# nylat, nylon are lat/lon of New York +nylat = 40.78 +nylon = -73.98 +# lonlat, lonlon are lat/lon of London. +lonlat = 51.53 +lonlon = 0.08 # convert these points to map projection coordinates # (using __call__ method of Basemap instance) -x1, y1 = m(lon1, lat1) -x2, y2 = m(lon2, lat2) +ny_x, ny_y = m(nylon, nylat) +lon_x, lon_y = m(lonlon, lonlat) # plot black dots at the two points. # make sure dots are drawn on top of other plot elements (zorder=10) -m.scatter([x1,x2],[y1,y2],25,color='k',marker='o',zorder=10) +m.scatter([ny_x,lon_x],[ny_y,lon_y],25,color='k',marker='o',zorder=10) # connect the dots along a great circle. -m.drawgreatcircle(lon1,lat1,lon2,lat2,linewidth=2,color='k') +m.drawgreatcircle(nylon,nylat,lonlon,lonlat,linewidth=2,color='k') # put the names of the cities to the left of each dot, offset # by a little. Use a bold font. -pylab.text(x1-100000,y1+100000,name1,fontsize=12,\ +plt.text(ny_x-100000,ny_y+100000,'New York',fontsize=12,\ color='k',horizontalalignment='right',fontweight='bold') -pylab.text(x2-100000,y2+100000,name2,fontsize=12,\ +plt.text(lon_x-100000,lon_y+100000,'London',fontsize=12,\ color='k',horizontalalignment='right',fontweight='bold') m.drawcoastlines(linewidth=0.5) m.drawmapboundary(fill_color='aqua') m.fillcontinents(color='coral',lake_color='aqua') m.drawcountries() m.drawstates() -pylab.title(name1+' to '+name2+' Great Circle') -pylab.show() +plt.title('NY to London Great Circle') +plt.show() Modified: trunk/py4science/examples/skel/basemap4_skel.py =================================================================== --- trunk/py4science/examples/skel/basemap4_skel.py 2008-10-19 07:38:45 UTC (rev 6268) +++ trunk/py4science/examples/skel/basemap4_skel.py 2008-10-19 07:48:51 UTC (rev 6269) @@ -1,28 +1,25 @@ -import pylab, numpy -from matplotlib.toolkits.basemap import Basemap +from mpl_toolkits.basemap import Basemap +import matplotlib.pyplot as plt +import numpy as np # create map by specifying width and height in km. 
-resolution = 'l'; projection='lcc' -lon_0 = -50; lat_0 = 60. -width = 12000000; height = 0.75*width -m = Basemap(lon_0=lon_0,lat_0=lat_0,width=width,height=height,\ +resolution = 'l' +lon_0 = -50 +lat_0 = 60 +projection = 'lcc' +width = 12000000 +height = 0.75*width +m = Basemap(lon_0=lon_0,lat_0=lat_0,width=width,height=height, resolution=resolution,projection=projection) m.drawcoastlines(linewidth=0.5) m.drawmapboundary(fill_color='aqua') m.fillcontinents(color='coral',lake_color='aqua') m.drawcountries() m.drawstates() -# draw and label parallels. -# labels is list of 4 values (default [0,0,0,0]) that control whether -# parallels are labelled where they intersect the left, right, top or -# bottom of the plot. For example labels=[1,0,0,1] will cause parallels -# to be labelled where they intersect the left and bottom of the plot, -# but not the right and top. -labels = XX -parallels = XX # a sequence of latitudes values -m.drawparallels(parallels,labels=labels) -# draw and label meridians. -labels = XX -meridians = XX # a sequence of longitude values -m.drawmeridians(meridians,labels=labels) -pylab.title('labelled meridians and parallels',y=1.075) -pylab.show() +# label meridians where they intersect the left, right and bottom +# of the plot frame. +m.drawmeridians(np.arange(-180,181,20),labels=[1,1,0,1]) +# label parallels where they intersect the left, right and top +# of the plot frame. +m.drawparallels(np.arange(-80,81,20),labels=[1,1,1,0]) +plt.title('labelled meridians and parallels',y=1.075) +plt.show() Modified: trunk/py4science/examples/skel/basemap5_skel.py =================================================================== --- trunk/py4science/examples/skel/basemap5_skel.py 2008-10-19 07:38:45 UTC (rev 6268) +++ trunk/py4science/examples/skel/basemap5_skel.py 2008-10-19 07:48:51 UTC (rev 6269) @@ -1,37 +1,27 @@ -from matplotlib.toolkits.basemap import Basemap, NetCDFFile, cm -import pylab, numpy +from mpl_toolkits.basemap import Basemap, NetCDFFile +import matplotlib.pyplot as plt +import numpy as np # read in netCDF sea-surface temperature data # can be a local file, a URL for a remote opendap dataset, # or (if PyNIO is installed) a GRIB or HDF file. -# See http://nomads.ncdc.noaa.gov/ for some NOAA OPenDAP datasets. ncfile = NetCDFFile('data/sst.nc') -# uncommenting the next line will produce a very similar plot, -# but will read the data over the web instead of from a local file. -#ncfile = NetCDFFile('http://nomads.ncdc.noaa.gov:8085/thredds/dodsC/oisst/2007/AVHRR/sst4-navy-eot.20071201.nc') sst = ncfile.variables['sst'][:] lats = ncfile.variables['lat'][:] lons = ncfile.variables['lon'][:] -# Basemap comes with extra colormaps from Generic Mapping Tools -# (imported as cm, pylab colormaps in pylab.cm) -cmap = XX # create Basemap instance for mollweide projection. -projection = XX # try moll, robin, sinu or ortho. # coastlines not used, so resolution set to None to skip # continent processing (this speeds things up a bit) -m = Basemap(projection=projection,lon_0=lons.mean(),lat_0=0,resolution=None) +m = Basemap(projection='moll',lon_0=0,lat_0=0,resolution=None) # compute map projection coordinates of grid. -x, y = m(*numpy.meshgrid(lons, lats)) +x, y = m(*np.meshgrid(lons, lats)) # plot with pcolor -im = m.pcolormesh(x,y,sst,shading='flat',cmap=cmap) -# or try 100 filled contours. -#CS = m.contourf(x,y,sst,100,cmap=cmap) +im = m.pcolormesh(x,y,sst,shading='flat',cmap=plt.cm.gist_ncar) # draw parallels and meridians, but don't bother labelling them. 
-m.drawparallels(numpy.arange(-90.,120.,30.)) -m.drawmeridians(numpy.arange(0.,420.,60.)) +m.drawparallels(np.arange(-90.,120.,30.)) +m.drawmeridians(np.arange(0.,420.,60.)) # draw line around map projection limb. -# color map region background (missing values will be this color) -color = XX -m.drawmapboundary(fill_color=color) +# color map region background black (missing values will be this color) +m.drawmapboundary(fill_color='k') # draw horizontal colorbar. -pylab.colorbar(orientation='horizontal') -pylab.show() +plt.colorbar(orientation='horizontal') +plt.show() Modified: trunk/py4science/examples/skel/convolution_demo_skel.py =================================================================== --- trunk/py4science/examples/skel/convolution_demo_skel.py 2008-10-19 07:38:45 UTC (rev 6268) +++ trunk/py4science/examples/skel/convolution_demo_skel.py 2008-10-19 07:48:51 UTC (rev 6269) @@ -27,33 +27,50 @@ # build the time, input, output and response arrays dt = 0.01 -t = XXX # the time vector from 0..20 +t = npy.arange(0.0, 20.0, dt) # the time vector from 0..20 Nt = len(t) def impulse_response(t): 'double exponential response function' - return XXX + return (npy.exp(-t) - npy.exp(-5*t))*dt -x = XXX # gaussian white noise +x = npy.random.randn(Nt) # gaussian white noise # evaluate the impulse response function, and numerically convolve it # with the input x -r = XXX # evaluate the impulse function -y = XXX # convolution of x with r -y = XXX # extract just the length Nt part +r = impulse_response(t) # evaluate the impulse function +y = npy.convolve(x, r, mode='full') # convultion of x with r +y = y[:Nt] # compute y by applying F^-1[F(x) * F(r)]. The fft assumes the signal # is periodic, so to avoid edge artificats, pad the fft with zeros up # to the length of r + x do avoid circular convolution artifacts -R = XXX # the zero padded FFT of r -X = XXX # the zero padded FFT of x -Y = XXX # the product of R and X +R = npy.fft.fft(r, len(r)+len(x)-1) +X = npy.fft.fft(x, len(r)+len(x)-1) +Y = R*X -# now inverse fft and extract the real part, just the part up to -# len(x) -yi = XXX +# now inverse fft and extract just the part up to len(x) +yi = npy.fft.ifft(Y)[:len(x)].real -# plot x vs t, y and yi vs t, and r vs t in three subplots -XXX +# plot t vs x, t vs y and yi, and t vs r in three subplots +fig = figure() +ax1 = fig.add_subplot(311) +ax1.plot(t, x) +ax1.set_ylabel('input x') + +ax2 = fig.add_subplot(312) +ax2.plot(t, y, label='convolve') +ax2.set_ylabel('output y') + +ax3 = fig.add_subplot(313) +ax3.plot(t, r) +ax3.set_ylabel('input response') +ax3.set_xlabel('time (s)') + +ax2.plot(t, yi, label='fft') +ax2.legend(loc='best') + +fig.savefig('convolution_demo.png', dpi=150) +fig.savefig('convolution_demo.eps') show() Modified: trunk/py4science/examples/skel/fft_imdenoise_skel.py =================================================================== --- trunk/py4science/examples/skel/fft_imdenoise_skel.py 2008-10-19 07:38:45 UTC (rev 6268) +++ trunk/py4science/examples/skel/fft_imdenoise_skel.py 2008-10-19 07:48:51 UTC (rev 6269) @@ -1,64 +1,84 @@ #!/usr/bin/env python -"""Image denoising example using 2-dimensional FFT.""" +"""Simple image denoising example using 2-dimensional FFT.""" -XXX = None # a sentinel for missing pieces +import sys import numpy as np from matplotlib import pyplot as plt -def mag_phase(F): - """Return magnitude and phase components of spectrum F.""" - - # XXX Look at the absolute and angle functions in numpy... 
- def plot_spectrum(F, amplify=1000): """Normalise, amplify and plot an amplitude spectrum.""" - M = XXX # use mag_phase to get the magnitude... + # Note: the problem here is that we have a spectrum whose histogram is + # *very* sharply peaked at small values. To get a meaningful display, a + # simple strategy to improve the display quality consists of simply + # amplifying the values in the array and then clipping. - # XXX Now, rescale M by amplify/maximum_of_M. Numpy arrays can be scaled - # in-place with ARR *= number. For the max of an array, look for its max - # method. + # Compute the magnitude of the input F (call it mag). Then, rescale mag by + # amplify/maximum_of_mag. Numpy arrays can be scaled in-place with ARR *= + # number. For the max of an array, look for its max method. + raise NotImplementedError('insert missing code here') + + # Next, clip all values larger than one to one. You can set all elements + # of an array which satisfy a given condition with array indexing syntax: + # ARR[ARR<VALUE] = NEWVALUE, for example. + raise NotImplementedError('insert missing code here') - # XXX Next, clip all values larger than one to one. You can set all - # elements of an array which satisfy a given condition with array indexing - # syntax: ARR[ARR<VALUE] = NEWVALUE, for example. + # Display: this one already works, if you did everything right with mag + plt.imshow(mag, plt.cm.Blues) +if __name__ == '__main__': - # Display: this one already works, if you did everything right with M - plt.imshow(M, plt.cm.Blues) + try: + # Read in original image, convert to floating point for further + # manipulation; imread returns a MxNx4 RGBA image. Since the image is + # grayscale, just extract the 1st channel + # Hints: + # - use plt.imread() to load the file + # - convert to a float array with the .astype() method + # - extract all rows, all columns, 0-th plane to get the first + # channel + # - the resulting array should have 2 dimensions only + raise NotImplementedError('insert missing code here') + print "Image shape:",im.shape + except: + print "Could not open image." + sys.exit(-1) + # Compute the 2d FFT of the input image + # Hint: Look for a 2-d FFT in np.fft. + # Note: call this variable 'F', which is the name we'll be using below. + raise NotImplementedError('insert missing code here') -if __name__ == '__main__': + # In the lines following, we'll make a copy of the original spectrum and + # truncate coefficients. NO immediate code is to be written right here. - im = XXX # make an image array from the file 'moonlanding.png', using the - # pylab imread() function. You will need to just extract the red - # channel from the MxNx4 RGBA matrix to represent the grayscale - # intensities - - F = XXX # Compute the 2d FFT of the input image. Look for a 2-d FFT in - # np.fft. - # Define the fraction of coefficients (in each direction) we keep keep_fraction = 0.1 - # XXX Call ff a copy of the original transform. Numpy arrays have a copy + # Call ff a copy of the original transform. Numpy arrays have a copy # method for this purpose. + raise NotImplementedError('insert missing code here') - # XXX Set r and c to be the number of rows and columns of the array. Look - # for the shape attribute... + # Set r and c to be the number of rows and columns of the array. + # Hint: use the array's shape attribute. 
+ raise NotImplementedError('insert missing code here') # Set to zero all rows with indices between r*keep_fraction and # r*(1-keep_fraction): + raise NotImplementedError('insert missing code here') # Similarly with the columns: + raise NotImplementedError('insert missing code here') - - # Reconstruct the denoised image from the filtered spectrum. There's an - # inverse 2d fft in the dft module as well. Call the result im_new - - # Show the results. - + # Reconstruct the denoised image from the filtered spectrum, keep only the + # real part for display. + # Hint: There's an inverse 2d fft in the np.fft module as well (don't + # forget that you only want the real part). + # Call the result im_new, + raise NotImplementedError('insert missing code here') + + # Show the results # The code below already works, if you did everything above right. plt.figure() @@ -78,6 +98,6 @@ plt.title('Reconstructed Image') plt.imshow(im_new, plt.cm.gray) - # Adjust the spacing between subplots for readability + # Adjust the spacing between subplots for readability plt.subplots_adjust(hspace=0.32) plt.show() Modified: trunk/py4science/examples/skel/fitting_skel.py =================================================================== --- trunk/py4science/examples/skel/fitting_skel.py 2008-10-19 07:38:45 UTC (rev 6268) +++ trunk/py4science/examples/skel/fitting_skel.py 2008-10-19 07:48:51 UTC (rev 6269) @@ -1,16 +1,19 @@ #!/usr/bin/env python """Simple data fitting and smoothing example""" +XXX = None # placeholder for missing pieces + from numpy import exp,arange,array,linspace from numpy.random import normal from scipy.optimize import leastsq from scipy.interpolate import splrep,splev -import numpy as N -import scipy as S -import pylab as P +import numpy as np +import matplotlib as mpl +import matplotlib.pyplot as plt + def func(pars): a, alpha, k = pars return a*exp(alpha*x_vals) + k @@ -32,17 +35,18 @@ # now solve for the best fit paramters #XXX - use leastsq() here, call the output 'best' for code below to use +best = XXX print 'Least-squares fit to the data' print 'true', pars_true print 'best', best -print '|err|_l2 =',P.l2norm(pars_true-best) +print '|err|_l2 =',np.linalg.norm(pars_true-best) # scipy's splrep uses FITPACK's curfit (B-spline interpolation) print print 'Spline smoothing of the data' -sp = # XXX - use splrep() -smooth = # XXX use splev() +sp = XXX # use splrep() +smooth = XXX # use splev() print 'Spline information (see splrep and splev for details):',sp @@ -50,23 +54,23 @@ def plot_polyfit(x,y,n,fignum=None): """Do a polynomial fit of order n and plot it.""" if fignum is None: - fignum = P.figure().number - P.plot(x,y,label='Data') + fignum = plt.figure().number + plt.plot(x,y,label='Data') - fit_coefs = # XXX- use N.polyfit here - fit_val = # XXX - use N.polyval - P.plot(x,fit_val,label='Polynomial fit, $n=%d$' % n) - P.legend() + fit_coefs = XXX # use np.polyfit here + fit_val = XXX # use np.polyval + plt.plot(x,fit_val,label='Polynomial fit, $n=%d$' % n) + plt.legend() return fignum # Now use pylab to plot -P.figure() +plt.figure() # Plot the least-squares fit here... -P.plot(x_vals,y_noisy,label='Noisy data') -P.plot(x_vals,func(best),lw=2,label='Least-squares fit') -P.legend() +plt.plot(x_vals,y_noisy,label='Noisy data') +plt.plot(x_vals,func(best),lw=2,label='Least-squares fit') +plt.legend() -P.figure() +plt.figure() # Plot the splines fit here... 
# Plot the polynomials fits with this: @@ -74,4 +78,4 @@ plot_polyfit(x_vals,y_noisy,2,fignum) plot_polyfit(x_vals,y_noisy,3,fignum) -P.show() +plt.show() Modified: trunk/py4science/examples/skel/glass_dots1_skel.py =================================================================== --- trunk/py4science/examples/skel/glass_dots1_skel.py 2008-10-19 07:38:45 UTC (rev 6268) +++ trunk/py4science/examples/skel/glass_dots1_skel.py 2008-10-19 07:48:51 UTC (rev 6269) @@ -5,60 +5,84 @@ See L. Glass. 'Moire effect from random dots' Nature 223, 578580 (1969). """ -import cmath # provides complex math functions +import cmath from numpy import cos, sin, pi, matrix import numpy as npy import numpy.linalg as linalg from pylab import figure, show + def myeig(M): """ compute eigen values and eigenvectors analytically + Solve quadratic: + lamba^2 - tau*lambda + Delta = 0 + where tau = trace(M) and Delta = Determinant(M) + + """ - Return value is lambda1, lambda2 - """ - XXX + a,b = M[0,0], M[0,1] + c,d = M[1,0], M[1,1] + tau = a+d # the trace + delta = a*d-b*c # the determinant + + lambda1 = (tau + cmath.sqrt(tau**2 - 4*delta))/2. + lambda2 = (tau - cmath.sqrt(tau**2 - 4*delta))/2. + return lambda1, lambda2 # 2000 random x,y points in the interval[-0.5 ... 0.5] -X1 = XXX +X1 = matrix(npy.random.rand(2,2000) + )-0.5 name = 'saddle' -#sx, sy, angle = XXX +sx, sy, angle = 1.05, 0.95, 0. #name = 'center' -#sx, sy, angle = XXX +#sx, sy, angle = 1., 1., 2.5 -name = 'spiral' #stable focus -sx, sy, angle = XXX +#name= 'stable focus' # spiral +#sx, sy, angle = 0.95, 0.95, 2.5 -theta = angle * pi/180. # the rotation in radians +theta = angle * pi/180. -# the scaling matrix -# | sx 0 | -# | 0 sy | -S = XXX +S = matrix([[sx, 0], + [0, sy]]) -# the rotation matrix -# | cos(theta) -sin(theta) | -# | sin(theta) cos(theta) | -R = XXX +R = matrix([[cos(theta), -sin(theta)], + [sin(theta), cos(theta)],]) -# the transformation is the matrix product of the scaling and rotation -M = XXX +M = S*R # rotate then stretch # compute the eigenvalues using numpy linear algebra -print 'numpy eigenvalues', XXX +vals, vecs = linalg.eig(M) +print 'numpy eigenvalues', vals # compare with the analytic values from myeig -print 'analytic eigenvalues', myeig(M) +avals = myeig(M) +print 'analytic eigenvalues', avals -# transform X1 by the matrix M -X2 = XXX +# transform X1 by the matrix +X2 = M*X1 -# plot the original X1 as green dots and the transformed X2 as red +# plot the original x,y as green dots and the transformed x, y as red # dots -XXX +fig = figure() +ax = fig.add_subplot(111) + +x1 = X1[0].flat +y1 = X1[1].flat +x2 = X2[0].flat +y2 = X2[1].flat + +ax = fig.add_subplot(111) +line1, line2 = ax.plot(x1, y1, 'go', x2, y2, 'ro', markersize=2) +ax.set_title(name) + + +fig.savefig('glass_dots1.png', dpi=100) +fig.savefig('glass_dots1.eps', dpi=100) +show() Modified: trunk/py4science/examples/skel/lotka_volterra_skel.py =================================================================== --- trunk/py4science/examples/skel/lotka_volterra_skel.py 2008-10-19 07:38:45 UTC (rev 6268) +++ trunk/py4science/examples/skel/lotka_volterra_skel.py 2008-10-19 07:48:51 UTC (rev 6269) @@ -3,19 +3,22 @@ import scipy.integrate as integrate def dr(r, f): - 'return delta r' - XXX + return alpha*r - beta*r*f def df(r, f): - 'return delta f' - XXX + return gamma*r*f - delta*f + def derivs(state, t): """ Map the state variable [rabbits, foxes] to the derivitives [deltar, deltaf] at time t """ - XXX - + #print t, state + r, f = state # rabbits and foxes + deltar = 
dr(r, f) # change in rabbits + deltaf = df(r, f) # change in foxes + return deltar, deltaf + alpha, delta = 1, .25 beta, gamma = .2, .05 @@ -23,34 +26,53 @@ r0 = 20 f0 = 10 -t = XXX # pick a time vector (think about the time scales!) -y0 = [r0, f0] # the initial [rabbits, foxes] state vector -y = XXX # integrate derives over t starting at y0 -r = XXX # extract the rabbits vector -f = XXX # extract the foxes vector +t = n.arange(0.0, 100, 0.1) +y0 = [r0, f0] # the initial [rabbits, foxes] state vector +y = integrate.odeint(derivs, y0, t) +r = y[:,0] # extract the rabbits vector +f = y[:,1] # extract the foxes vector -# FIGURE 1: rabbits vs time and foxes vs time on the same plot with -# legend and xlabel, ylabel and title +p.figure() +p.plot(t, r, label='rabbits') +p.plot(t, f, label='foxes') +p.xlabel('time (years)') +p.ylabel('population') +p.title('population trajectories') +p.grid() +p.legend() +p.savefig('lotka_volterra.png', dpi=150) +p.savefig('lotka_volterra.eps') -# FIGURE 2: the phase plane -# plot r vs f and label the x and y axes -XXX +p.figure() +p.plot(r, f, color='red') +p.xlabel('rabbits') +p.ylabel('foxes') +p.title('phase plane') -# FIGURE 2 continued.... -# use meshgrid to make a grid over R and F -# with a coarse 1 year sampling. evaluate dR and dF over the 2 s -# grids and make a quiver plot. See pylab.quiver and matplotlib -# examples/quiver_demo.py -XXX +# make a direction field plot with quiver +rmax = 1.1 * r.max() +fmax = 1.1 * f.max() +R, F = n.meshgrid(n.arange(-1, rmax), n.arange(-1, fmax)) +dR = dr(R, F) +dF = df(R, F) +p.quiver(R, F, dR, dF) -# FIGURE 2 continued... use contour to compute the null clines over -# dR (the rabbits null cline) and dF (the foxes null cline). You will -# need to do a finer meshgrid for accurate null clins and pass -# levels=[0] to contour -XXX +R, F = n.meshgrid(n.arange(-1, rmax, .1), n.arange(-1, fmax, .1)) +dR = dr(R, F) +dF = df(R, F) + +p.contour(R, F, dR, levels=[0], linewidths=3, colors='blue') +p.contour(R, F, dF, levels=[0], linewidths=3, colors='green') +p.ylabel('foxes') +p.title('trajectory, direction field and null clines') + +p.savefig('lotka_volterra_pplane.png', dpi=150) +p.savefig('lotka_volterra_pplane.eps') + + p.show() Modified: trunk/py4science/examples/skel/noisy_sine_skel.py =================================================================== --- trunk/py4science/examples/skel/noisy_sine_skel.py 2008-10-19 07:38:45 UTC (rev 6268) +++ trunk/py4science/examples/skel/noisy_sine_skel.py 2008-10-19 07:48:51 UTC (rev 6269) @@ -5,18 +5,21 @@ f = 10 # 10 Hz frequency sigma = 0.5 # 0.5 volt standard deviation noise -# create the t and v arrays; see the scipy commands arange, sin, and randn -t = XXX # an evenly sampled time array -v = XXX # a noisy sine wave +# create the t and v and store them a 2D array X +t = arange(0.0, 2.0, 0.02) # an evenly sampled time array +v = a*sin(2*f*pi*t) + sigma*randn(len(t)) # a noisy sine wave +X = zeros((len(t),2)) # an empty output array +X[:,0] = t # add t to the first column +X[:,1] = v # add s to the 2nd column +p.save('data/noisy_sine.dat', X) # save the output file as ASCII -# create a 2D array X and put t in the 1st column and v in the 2nd; -# see the numpy command zeros -X = XXX - -# save the output file as ASCII; see the pylab command save -XXX - -# plot the arrays t vs v and label the x-axis, y-axis and title save -# the output figure as noisy_sine.png. 
See the pylab commands plot, -# xlabel, ylabel, grid, show -XXX +# plot the arrays t vs v and label the x-axis, y-axis and title +# save the output figure as noisy_sine.png +p.plot(t, v, 'b-') +p.xlabel('time (s)') +p.ylabel('volts (V)') +p.title('A noisy sine wave') +p.grid() +p.savefig('noisy_sine.png', dpi=150) +p.savefig('noisy_sine.eps') +p.show() Modified: trunk/py4science/examples/skel/qsort_skel.py =================================================================== --- trunk/py4science/examples/skel/qsort_skel.py 2008-10-19 07:38:45 UTC (rev 6268) +++ trunk/py4science/examples/skel/qsort_skel.py 2008-10-19 07:48:51 UTC (rev 6269) @@ -1,29 +1,58 @@ """Simple quicksort implementation. -From http://en.wikipedia.org/wiki/Quicksort: +From http://en.wikipedia.org/wiki/Quicksort we have this pseudocode (see also +the C implementation for comparison). +Note: what follows is NOT python code, it's meant to only illustrate the +algorithm for you. Below you'll need to actually implement it in real Python. +You may be surprised at how close a working Python implementation can be to +this pseudocode. + + function quicksort(array) var list less, greater - if length(array) ≤ 1 + if length(array) <= 1 return array select and remove a pivot value pivot from array for each x in array - if x ≤ pivot then append x to less + if x <= pivot then append x to less else append x to greater return concatenate(quicksort(less), pivot, quicksort(greater)) """ def qsort(lst): - """Return a sorted copy of the input list.""" + """Return a sorted copy of the input list. - raise NotImplementedError + Input: + lst : a list of elements which can be compared. + + Examples: + + >>> qsort([]) + [] + + >>> qsort([3,2,5]) + [2, 3, 5] + """ + + # Hint: remember that all recursive functions need an exit condition + raise NotImplementedError('insert missing code here') + + # Select pivot and apply recursively + raise NotImplementedError('insert missing code here') + + # Upon return, make sure to properly concatenate the output lists + raise NotImplementedError('insert missing code here') + + #----------------------------------------------------------------------------- # Tests #----------------------------------------------------------------------------- import random -import nose.tools as nt +import nose +import nose, nose.tools as nt def test_sorted(): seq = range(10) @@ -37,9 +66,8 @@ sseq = qsort(rseq) nt.assert_equal(tseq,sseq) +# If called from the command line, run all the tests if __name__ == '__main__': - # From the command line, run the test suite - import nose # This call form is ipython-friendly nose.runmodule(argv=['-s','--with-doctest'], exit=False) Modified: trunk/py4science/examples/skel/quad_newton_skel.py =================================================================== --- trunk/py4science/examples/skel/quad_newton_skel.py 2008-10-19 07:38:45 UTC (rev 6268) +++ trunk/py4science/examples/skel/quad_newton_skel.py 2008-10-19 07:48:51 UTC (rev 6269) @@ -4,23 +4,36 @@ from math import sin -import scipy, scipy.integrate, scipy.optimize +from scipy.integrate import quad +from scipy.optimize import newton -quad = scipy.integrate.quad -newton = scipy.optimize.newton +# test input function +def f(t): + # f(t): t * sin^2(t) + raise NotImplementedError('insert missing code here') -# test input function f(t): t * sin^2(t) -def f(t): XXX +def g(t): + "Exact form for g by integrating f(t)" + u = 0.25 + return .25*(t**2-t*sin(2*t)+(sin(t))**2)-u -# Use u=0.25 -def g(t): XXX +def gn(t): + "g(t) obtained by numerical 
integration" + u = 0.25 + # Hint: use quad, see its return value carefully. + raise NotImplementedError('insert missing code here') # main tguess = 10.0 -print "Solution using the numerical integration technique" -t0 = newton(g,tguess,f) +print '"Exact" solution (knowing the analytical form of the integral)' +raise NotImplementedError('insert missing code here') print "t0, g(t0) =",t0,g(t0) print +print "Solution using the numerical integration technique" +raise NotImplementedError('insert missing code here') +print "t1, g(t1) =",t1,g(t1) + +print print "To six digits, the answer in this case is t==1.06601." Modified: trunk/py4science/examples/skel/stats_descriptives_skel.py =================================================================== --- trunk/py4science/examples/skel/stats_descriptives_skel.py 2008-10-19 07:38:45 UTC (rev 6268) +++ trunk/py4science/examples/skel/stats_descriptives_skel.py 2008-10-19 07:48:51 UTC (rev 6269) @@ -4,7 +4,6 @@ import numpy import pylab -XXX = None class Descriptives: """ @@ -12,16 +11,16 @@ """ def __init__(self, samples): self.samples = numpy.asarray(samples) - self.N = XXX # the number of samples - self.median = XXX # sample median - self.min = XXX # sample min - self.max = XXX # sample max - self.mean = XXX # sample mean - self.std = XXX # sample standard deviation - self.var = XXX # sample variance - self.skew = XXX # the sample skewness - self.kurtosis = XXX # the sample kurtosis - self.range = XXX # the sample range max-min + self.N = len(samples) + self.median = stats.median(samples) + self.min = numpy.amin(samples) + self.max = numpy.amax(samples) + self.mean = stats.mean(samples) + self.std = stats.std(samples) + self.var = self.std**2. + self.skew = stats.skew(samples) + self.kurtosis = stats.kurtosis(samples) + self.range = self.max - self.min def __repr__(self): """ @@ -33,11 +32,20 @@ descriptives = ( 'N = %d' % self.N, - XXX # the rest here - ) + 'Mean = %1.4f' % self.mean, + 'Median = %1.4f' % self.median, + 'Min = %1.4f' % self.min, + 'Max = %1.4f' % self.max, + 'Range = %1.4f' % self.range, + 'Std = %1.4f' % self.std, + 'Skew = %1.4f' % self.skew, + 'Kurtosis = %1.4f' % self.kurtosis, + ) return '\n'.join(descriptives) - def plots(self, figfunc, maxlags=20, Fs=1, detrend=detrend_linear, fmt='bo'): + def plots(self, figfunc, maxlags=20, Fs=1, detrend=detrend_linear, + fmt='bo', bins=100, + ): """ plots the time series, histogram, autocorrelation and spectrogram @@ -56,10 +64,11 @@ maxlags : max number of lags for the autocorr - detrend : a function used to detrend the data for the - correlation and spectral functions + detrend : a function used to detrend the data for the correlation and spectral functions fmt : the plot format string + + bins : the bins argument to hist """ data = self.samples @@ -79,12 +88,27 @@ c = C() N = 5 fig = c.fig = figfunc() + fig.subplots_adjust(hspace=0.3) ax = c.ax1 = fig.add_subplot(N,1,1) c.plot = ax.plot(data, fmt) + ax.set_ylabel('data') - # XXX the rest of the plot funtions here + ax = c.ax2 = fig.add_subplot(N,1,2) + c.hist = ax.hist(data, bins) + ax.set_ylabel('hist') - + ax = c.ax3 = fig.add_subplot(N,1,3) + c.acorr = ax.acorr(data, detrend=detrend, usevlines=True, + maxlags=maxlags, normed=True) + ax.set_ylabel('acorr') + + ax = c.ax4 = fig.add_subplot(N,1,4) + c.psd = ax.psd(data, Fs=Fs, detrend=detrend) + ax.set_ylabel('psd') + + ax = c.ax5 = fig.add_subplot(N,1,5) + c.specgtram = ax.specgram(data, Fs=Fs, detrend=detrend) + ax.set_ylabel('specgram') return c @@ -94,15 +118,20 @@ # list of 
floating point values, one value per line. Note you # will have to do some extra parsing data = [] - #fname = 'data/nm560.dat' # tree rings in New Mexico 837-1987 + fname = 'data/nm560.dat' # tree rings in New Mexico 837-1987 fname = 'data/hsales.dat' # home sales for line in file(fname): line = line.strip() if not line: continue - # XXX convert to float and add to data here - + vals = line.split() + val = vals[0] + data.append(float(val)) + desc = Descriptives(data) print desc - c = desc.plots(pylab.figure, Fs=12, fmt='-o') + c = desc.plots(pylab.figure, Fs=12, fmt='-') c.ax1.set_title(fname) + + c.fig.savefig('stats_descriptives.png', dpi=150) + c.fig.savefig('stats_descriptives.eps') pylab.show() Modified: trunk/py4science/examples/skel/stats_distributions_skel.py =================================================================== --- trunk/py4science/examples/skel/stats_distributions_skel.py 2008-10-19 07:38:45 UTC (rev 6268) +++ trunk/py4science/examples/skel/stats_distributions_skel.py 2008-10-19 07:48:51 UTC (rev 6269) @@ -13,8 +13,8 @@ # a uniform distribution from scipy.stats.uniform and use the "rvs" # method to generate N uniform random variates N = 100000 -uniform = XXX # the frozen uniform distribution -uninse = XXX # the random variates +uniform = scipy.stats.uniform() # the frozen uniform distribution +uninse = uniform.rvs(N) # the random variates # in each time interval, the probability of an emission rate = 20. # the emission rate in Hz @@ -22,10 +22,10 @@ t = numpy.arange(N)*dx # the time vector # the probability of an emission is proportionate to the rate and the interval -emit_times = XXX +emit_times = t[uninse < rate*dx] # the difference in the emission times is the wait time -wait_times = XXX +wait_times = numpy.diff(emit_times) # plot the distribution of waiting times and the expected exponential # density function lambda exp( lambda wt) where lambda is the rate @@ -35,26 +35,34 @@ # 1/lambda. Plot all three on the same graph and make a legend. # Decorate your graphs with an xlabel, ylabel and title fig = figure() -ax = fig.add_subplot(111) -p, bins, patches = XXX # use ax.hist -l1, = ax.plot(bins, XXX, lw=2, color='red') # the analytic result -l2, = ax.plot(bins, XXX, # use scipy.stats.expon.pdf +ax = fig.add_subplot(311) +p, bins, patches = ax.hist(wait_times, 100, normed=True) +l1, = ax.plot(bins, rate*numpy.exp(-rate * bins), lw=2, color='red') +l2, = ax.plot(bins, scipy.stats.expon.pdf(bins, 0, 1./rate), lw=2, ls='--', color='green') -ax.set_xlabel('waiting time') + ax.set_ylabel('PDF') -ax.set_title('waiting time density of a %dHz Poisson emitter'%rate) -ax.legend(XXX, XXX) # create the proper legend +ax.set_title('waiting time densities of a %dHz Poisson emitter'%rate) +ax.text(0.05, 0.9, 'one interval', transform=ax.transAxes) +ax.legend((patches[0], l1, l2), ('simulated', 'analytic', 'scipy.stats.expon')) - # plot the distribution of waiting times for two events; the # distribution of waiting times for N events should equal a N-th order # gamma distribution (the exponential distribution is a 1st order # gamma distribution. Use scipy.stats.gamma to compare the fits. 
# Hint: you can stride your emission times array to get every 2nd # emission -XXX +wait_times2 = numpy.diff(emit_times[::2]) +ax = fig.add_subplot(312) +p, bins, patches = ax.hist(wait_times2, 100, normed=True) +l1, = ax.plot(bins, scipy.stats.gamma.pdf(bins, 2, 0, 1./rate), + lw=2, ls='-', color='red') +ax.set_ylabel('PDF') +ax.text(0.05, 0.9, 'two intervals', transform=ax.transAxes) +ax.legend((patches[0], l1), ('simulated', 'scipy.stats.gamma')) + # plot the distribution of waiting times for 10 events; again the # distribution will be a 10th order gamma distribution so plot that # along with the empirical density. The central limit thm says that @@ -67,7 +75,24 @@ # get the normal distribution. Note that the scale parameter of the # normal is the standard deviation which is the square root of the # variance -XXX +expon_mean, expon_var = scipy.stats.expon(0, 1./rate).stats() +mu, var = 10*expon_mean, 10*expon_var +sigma = numpy.sqrt(var) +wait_times10 = numpy.diff(emit_times[::10]) +ax = fig.add_subplot(313) +p, bins, patches = ax.hist(wait_times10, 100, normed=True) +l1, = ax.plot(bins, scipy.stats.gamma.pdf(bins, 10, 0, 1./rate), + lw=2, ls='-', color='red') +l2, = ax.plot(bins, scipy.stats.norm.pdf(bins, mu, sigma), + lw=2, ls='--', color='green') +ax.set_xlabel('waiting times') +ax.set_ylabel('PDF') +ax.text(0.1, 0.9, 'ten intervals', transform=ax.transAxes) +ax.legend((patches[0], l1, l2), ('simulated', 'scipy.stats.gamma', 'normal approx')) +fig.savefig('stats_distributions.png', dpi=150) +fig.savefig('stats_distributions.eps') + + show() Modified: trunk/py4science/examples/skel/stock_records_skel.py =================================================================== --- trunk/py4science/examples/skel/stock_records_skel.py 2008-10-19 07:38:45 UTC (rev 6268) +++ trunk/py4science/examples/skel/stock_records_skel.py 2008-10-19 07:48:51 UTC (rev 6269) @@ -22,26 +22,25 @@ system before re-downloading it """ fname = '%s.csv'%ticker - url = XXX # create the url for this ticker + url = 'http://ichart.finance.yahoo.com/table.csv?' 
+\ + 's=%s&d=9&e=20&f=2007&g=d&a=0&b=29&c=1993&ignore=.csv'%ticker # the os.path module contains function for checking whether a file - # exists, and fetch it if not - XXX + # exists + if not os.path.exists(fname): + urllib.urlretrieve(url, fname) + r = mlab.csv2rec(fname) - # load the CSV file intoo a numpy record array - r = XXX - # note that the CSV file is sorted most recent date first, so you # will probably want to sort the record array so most recent date # is last - XXX + r.sort() return r -tickers = 'INTC', 'MSFT', 'YHOO', 'GOOG', 'GE', 'WMT', 'AAPL' +tickers = 'SPY', 'QQQQ', 'INTC', 'MSFT', 'YHOO', 'GOOG', 'GE', 'WMT', 'AAPL' -# we want to compute returns since 2003, so define the start date as a -# datetime.datetime instance -startdate = XXX +# we want to compute returns since 2003, so define the start date +startdate = datetime.date(2003,1,1) # we'll store a list of each return and ticker for analysis later data = [] # a list of (return, ticker) for each stock @@ -50,22 +49,24 @@ print 'fetching', ticker r = fetch_stock(ticker) - # select the numpy records where r.date>=startdatre use numpy mask - # indexing to restrict r to just the dates > startdate - r = XXX - price = XXX # set price equal to the adjusted close - returns = XXX # return is the (price-p0)/p0 - XXX # store the data + # select the numpy records where r.date>=startdatre - # plot the returns by date for each stock using pylab.plot, adding - # a label for the legend - XXX + r = r[r.date>=startdate] + price = r.adj_close # set price equal to the adjusted close + returns = (price-price[0])/price[0] # return is the (price-p0)/p0 + data.append((returns[-1], ticker)) # store the data -# use pylab legend command to build a legend -XXX + # plot the returns by date for each stock + p.plot(r.date, returns, label=ticker) +p.legend(loc='upper left') + # now sort the data by returns and print the results for each stock -XXX +data.sort() +for g, ticker in data: + print '%s: %1.1f%%'%(ticker, 100*g) -# show the figures + +p.savefig('stock_records.png', dpi=100) +p.savefig('stock_records.eps') p.show() Modified: trunk/py4science/examples/skel/trapezoid_skel.py =================================================================== --- trunk/py4science/examples/skel/trapezoid_skel.py 2008-10-19 07:38:45 UTC (rev 6268) +++ trunk/py4science/examples/skel/trapezoid_skel.py 2008-10-19 07:48:51 UTC (rev 6269) @@ -1,13 +1,14 @@ #!/usr/bin/env python """Simple trapezoid-rule integrator.""" -import numpy as N +import numpy as np def trapz(x, y): """Simple trapezoid integrator for sequence-based innput. Inputs: - - x,y: arrays of the same length. + - x,y: arrays of the same length (and more than one element). If the two + inputs have different lengths, a ValueError exception is raised. Output: - The result of applying the trapezoid rule to the input, assuming that @@ -15,8 +16,17 @@ Minimally modified from matplotlib.mlab.""" - raise NotImplementedError + # Sanity checks. + # + # Hint: if the two inputs have mismatched lengths or less than 2 + # elements, we raise ValueError with an explanatory message. + raise NotImplementedError('insert missing code here') + # Efficient application of trapezoid rule via numpy + # + # Hint: think of using numpy slicing to compute the moving difference in + # the basic trapezoid formula. + raise NotImplementedError('insert missing code here') def trapzf(f,a,b,npts=100): """Simple trapezoid-based integrator. 
@@ -33,33 +43,52 @@ Output: - The value of the trapezoid-rule approximation to the integral.""" - # you will need to apply the function f to easch element of the - # vector x. What are several ways to do this? Can you profile - # them to see what differences in timings result for long vectors - # x? - raise NotImplementedError + # Hint: you will need to apply the function f to easch element of the + # vector x. What are several ways to do this? Can you profile them to see + # what differences in timings result for long vectors x? + # Generate an equally spaced grid to sample the function. + raise NotImplementedError('insert missing code here') + # For an equispaced grid, the x spacing can just be read off from the first + # two points and factored out of the summation. + raise NotImplementedError('insert missing code here') + + # Sample the input function at all values of x + # + # Hint: you need to make an array out of the evaluations, and the python + # builtin 'map' function can come in handy. + raise NotImplementedError('insert missing code here') + + # Compute the trapezoid rule sum for the final result + raise NotImplementedError('insert missing code here') + + #----------------------------------------------------------------------------- # Tests #----------------------------------------------------------------------------- import nose, nose.tools as nt import numpy.testing as nptest +# A simple function for testing def square(x): return x**2 def test_err(): + """Test that mismatched inputs raise a ValueError exception.""" nt.assert_raises(ValueError,trapz,range(2),range(3)) def test_call(): + "Test a direct call with equally spaced samples. " x = np.linspace(0,1,100) y = np.array(map(square,x)) nptest.assert_almost_equal(trapz(x,y),1./3,4) def test_square(): + "Test integrating the square() function." nptest.assert_almost_equal(trapzf(square,0,1),1./3,4) def test_square2(): + "Another test integrating the square() function." nptest.assert_almost_equal(trapzf(square,0,3,350),9.0,4) Modified: trunk/py4science/examples/skel/wallis_pi_skel.py =================================================================== --- trunk/py4science/examples/skel/wallis_pi_skel.py 2008-10-19 07:38:45 UTC (rev 6268) +++ trunk/py4science/examples/skel/wallis_pi_skel.py 2008-10-19 07:48:51 UTC (rev 6269) @@ -6,19 +6,72 @@ # in floating point. from __future__ import division -from decimal import Decimal +def pi(n): + """Compute pi using n terms of Wallis' product. -XXX = None # a sentinel for missing pieces + Wallis' formula approximates pi as -def pi(n): + pi(n) = 2 \prod_{i=1}^{n}\frac{4i^2}{4i^2-1}.""" + + num = 1 + den = 1 + for i in xrange(1,n+1): + tmp = 4*i*i + num *= tmp + den *= tmp-1 + return 2.0*(num/den) + +def part_range(n1,n2,nchunks): + """Partition a range specification in nchunks""" + size,rem = divmod(n2-n1,nchunks) + sizes = [size]*nchunks + while rem > 0: + for i in range(nchunks): + sizes[i] += 1 + rem -= 1 + if rem == 0: + break + + # The sizes list has the offsets, now we need the actual start,stop pairs + ranges = [] + start=n1 + for size in sizes: + ranges.append((start,start+size)) + start += size + return ranges + +def wpi_nd(range_spec): """Compute pi using n terms of Wallis' product. 
Wallis' formula approximates pi as pi(n) = 2 \prod_{i=1}^{n}\frac{4i^2}{4i^2-1}.""" - raise NotImplementedError + n1,n2 = range_spec + num = 1 + den = 1 + for i in xrange(n1,n2): + tmp = 4*i*i + num *= tmp + den *= tmp-1 + + return num,den + +def par_pi(n,num_engines=1): + """Compute pi using n terms of Wallis' product. + + Wallis' formula approximates pi as + + pi(n) = 2 \prod_{i=1}^{n}\frac{4i^2}{4i^2-1}. + + Parallel version.""" + + num,den = reduce(lambda x,y:(x[0]*y[0],x[1]*y[1]), + map(wpi_nd,part_range(1,n+1,num_engines))) + return 2.0*(num/den) + + # This part only executes when the code is run as a script, not when it is # imported as a library if __name__ == '__main__': @@ -29,13 +82,12 @@ import numpy as N # Create a list of points 'nrange' where we'll compute Wallis' formula - nrange = XXX - + nrange = N.linspace(10,2000,20).astype(int) # Make an array of such values - wpi = XXX + wpi = N.array(map(pi,nrange)) # Compute the difference against the value of pi in numpy (standard # 16-digit value) - diff = XXX + diff = abs(wpi-N.pi) # Make a new figure and build a semilog plot of the difference so we can # see the quality of the convergence @@ -45,9 +97,9 @@ P.semilogy(nrange,diff,'-o',mfc='red') # A bit of labeling and a grid - P.title(r"Convergence of Wallis' product formula for pi") + P.title(r"Convergence of Wallis' product formula for $\pi$") P.xlabel('Number of terms') - P.ylabel(r'|Error}|') + P.ylabel(r'Absolute Error') P.grid() # Display the actual plot Modified: trunk/py4science/examples/skel/wordfreqs_skel.py =================================================================== --- trunk/py4science/examples/skel/wordfreqs_skel.py 2008-10-19 07:38:45 UTC (rev 6268) +++ trunk/py4science/examples/skel/wordfreqs_skel.py 2008-10-19 07:48:51 UTC (rev 6269) @@ -1,27 +1,27 @@ #!/usr/bin/env python """Word frequencies - count word frequencies in a string.""" -XXX = None # a sentinel for missing pieces - def word_freq(text): """Return a dictionary of word frequencies for the given text.""" - # you need to write this - return XXX + freqs = {} + for word in text.split(): + freqs[word] = freqs.get(word, 0) + 1 + return freqs def print_vk(lst): """Print a list of value/key pairs nicely formatted in key/value order.""" # Find the longest key: remember, the list has value/key paris, so the key # is element [1], not [0] - longest_key = max(map(lambda x: len(x[1]),lst)) + #longest_key = max(map(lambda x: len(x[1]),lst)) + longest_key = max([len(word) for count, word in lst]) # Make a format string out of it fmt = '%'+str(longest_key)+'s -> %s' # Do actual printing for v,k in lst: print fmt % (k,v) - def freq_summ(freqs,n=10): """Print a simple summary of a word frequencies dictionary. @@ -29,12 +29,12 @@ - freqs: a dictionary of word frequencies. Optional inputs: - - n: the number of items to print""" + - n: the number of """ - words,counts = XXX # look at the keys and values methods of dicts + words,counts = freqs.keys(),freqs.values() # Sort by count - - items = XXX # think of a list, look at zip() and think of sort() + items = zip(counts,words) + items.sort() print 'Number of words:',len(freqs) print @@ -44,12 +44,8 @@ print '%d most frequent words:' % n print_vk(items[-n:]) - if __name__ == '__main__': - # You need to read the contents of the file HISTORY.gz and store it in the - # variable named 'text'. Do NOT unzip it manually, look at the gzip module - # from the standard library and the read() method of file objects. 
- text = XXX - + import gzip + text = gzip.open('data/HISTORY.gz').read() freqs = word_freq(text) freq_summ(freqs,20)
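
Several of the skeletons above deliberately keep raise NotImplementedError(...) placeholders for students to fill in. For readers working from this message alone, here is one possible way to complete the quicksort exercise in qsort_skel.py, following the pseudocode quoted in its docstring. This is only an illustrative sketch, not the reference solution shipped with the course materials.

    def qsort(lst):
        """Return a sorted copy of the input list."""
        # Exit condition: a list of zero or one elements is already sorted.
        if len(lst) <= 1:
            return list(lst)
        # Select the first element as the pivot and split the remainder.
        pivot, rest = lst[0], lst[1:]
        less = [x for x in rest if x <= pivot]
        greater = [x for x in rest if x > pivot]
        # Concatenate the recursively sorted pieces around the pivot.
        return qsort(less) + [pivot] + qsort(greater)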
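Likewise, trapezoid_skel.py only hints at the vectorized trapezoid rule. A minimal sketch of trapz/trapzf consistent with the tests in that skeleton might look like the following (assuming numpy imported as np, as in the skeleton; this is one possible solution, not the official one):

    import numpy as np

    def trapz(x, y):
        """Trapezoid rule for sampled data; x and y must have equal length >= 2."""
        if len(x) != len(y) or len(x) < 2:
            raise ValueError('x and y must have the same length, at least 2')
        dx = x[1:] - x[:-1]                  # spacing between consecutive samples
        return np.sum(dx*(y[1:] + y[:-1])/2.0)

    def trapzf(f, a, b, npts=100):
        """Integrate f from a to b by sampling it at npts equispaced points."""
        x = np.linspace(a, b, npts)
        y = np.array([f(xi) for xi in x])    # sample f at every grid point
        return trapz(x, y)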
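Finally, the core filtering step requested in fft_imdenoise_skel.py (keep only a low-frequency band of the 2-D spectrum) can be written in a few lines. The helper below is a sketch under the same assumptions as the skeleton, namely a 2-D grayscale float array and numpy's fft module; the function name lowpass_denoise is chosen here for illustration and does not appear in the repository.

    import numpy as np

    def lowpass_denoise(im, keep_fraction=0.1):
        """Zero the high-frequency rows/columns of the 2-D FFT and invert."""
        F = np.fft.fft2(im)            # forward 2-D transform
        ff = F.copy()                  # work on a copy of the spectrum
        r, c = ff.shape
        # Zero out rows with indices between r*keep_fraction and r*(1-keep_fraction).
        ff[int(r*keep_fraction):int(r*(1-keep_fraction)), :] = 0
        # Do the same for the columns.
        ff[:, int(c*keep_fraction):int(c*(1-keep_fraction))] = 0
        # Reconstruct and keep only the real part for display.
        return np.fft.ifft2(ff).real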