repo_name: string (7-92 chars)
path: string (5-129 chars)
copies: string (201 distinct values)
size: string (4-6 chars)
content: string (1.03k-375k chars)
license: string (15 distinct values)
nguyentu1602/statsmodels
statsmodels/graphics/factorplots.py
28
7596
# -*- coding: utf-8 -*-
"""
Authors: Josef Perktold, Skipper Seabold, Denis A. Engemann
"""
from statsmodels.compat.python import get_function_name, iterkeys, lrange, zip, iteritems

import numpy as np

from statsmodels.graphics.plottools import rainbow
import statsmodels.graphics.utils as utils


def interaction_plot(x, trace, response, func=np.mean, ax=None, plottype='b',
                     xlabel=None, ylabel=None, colors=[], markers=[],
                     linestyles=[], legendloc='best', legendtitle=None,
                     **kwargs):
    """
    Interaction plot for factor level statistics.

    Note. If categorical factors are supplied, levels will be internally
    recoded to integers. This ensures matplotlib compatibility. Uses a
    pandas.DataFrame to calculate an `aggregate` statistic for each level
    of the factor or group given by `trace`.

    Parameters
    ----------
    x : array-like
        The `x` factor levels constitute the x-axis. If a `pandas.Series`
        is given its name will be used in `xlabel` if `xlabel` is None.
    trace : array-like
        The `trace` factor levels will be drawn as lines in the plot.
        If `trace` is a `pandas.Series` its name will be used as the
        `legendtitle` if `legendtitle` is None.
    response : array-like
        The response or dependent variable. If a `pandas.Series` is given
        its name will be used in `ylabel` if `ylabel` is None.
    func : function
        Anything accepted by `pandas.DataFrame.aggregate`. This is applied to
        the response variable grouped by the trace levels.
    plottype : str {'line', 'scatter', 'both'}, optional
        The type of plot to return. Can be 'l', 's', or 'b'
    ax : axes, optional
        Matplotlib axes instance
    xlabel : str, optional
        Label to use for `x`. Default is 'X'. If `x` is a `pandas.Series` it
        will use the series name.
    ylabel : str, optional
        Label to use for `response`. Default is 'func of response'. If
        `response` is a `pandas.Series` it will use the series name.
    colors : list, optional
        If given, must have length == number of levels in trace.
    linestyles : list, optional
        If given, must have length == number of levels in trace.
    markers : list, optional
        If given, must have length == number of levels in trace.
    kwargs
        These will be passed to the plot command used, either plot or
        scatter. If you want to control the overall plotting options, use
        kwargs.

    Returns
    -------
    fig : Figure
        The figure given by `ax.figure` or a new instance.

    Examples
    --------
    >>> import numpy as np
    >>> np.random.seed(12345)
    >>> weight = np.random.randint(1,4,size=60)
    >>> duration = np.random.randint(1,3,size=60)
    >>> days = np.log(np.random.randint(1,30, size=60))
    >>> fig = interaction_plot(weight, duration, days,
    ...             colors=['red','blue'], markers=['D','^'], ms=10)
    >>> import matplotlib.pyplot as plt
    >>> plt.show()

    .. plot::

       import numpy as np
       from statsmodels.graphics.factorplots import interaction_plot
       np.random.seed(12345)
       weight = np.random.randint(1,4,size=60)
       duration = np.random.randint(1,3,size=60)
       days = np.log(np.random.randint(1,30, size=60))
       fig = interaction_plot(weight, duration, days,
                   colors=['red','blue'], markers=['D','^'], ms=10)
       import matplotlib.pyplot as plt
       #plt.show()
    """
    from pandas import DataFrame
    fig, ax = utils.create_mpl_ax(ax)

    response_name = ylabel or getattr(response, 'name', 'response')
    ylabel = '%s of %s' % (get_function_name(func), response_name)
    xlabel = xlabel or getattr(x, 'name', 'X')
    legendtitle = legendtitle or getattr(trace, 'name', 'Trace')

    ax.set_ylabel(ylabel)
    ax.set_xlabel(xlabel)

    x_values = x_levels = None
    if isinstance(x[0], str):
        x_levels = [l for l in np.unique(x)]
        x_values = lrange(len(x_levels))
        x = _recode(x, dict(zip(x_levels, x_values)))

    data = DataFrame(dict(x=x, trace=trace, response=response))
    plot_data = data.groupby(['trace', 'x']).aggregate(func).reset_index()

    # check plot args
    n_trace = len(plot_data['trace'].unique())

    if linestyles:
        if len(linestyles) != n_trace:
            raise ValueError("Must be a linestyle for each trace level")
    else:  # set a default
        linestyles = ['-'] * n_trace
    if markers:
        if len(markers) != n_trace:
            raise ValueError("Must be a marker for each trace level")
    else:  # set a default
        markers = ['.'] * n_trace
    if colors:
        if len(colors) != n_trace:
            raise ValueError("Must be a color for each trace level")
    else:  # set a default
        #TODO: how to get n_trace different colors?
        colors = rainbow(n_trace)

    if plottype == 'both' or plottype == 'b':
        for i, (values, group) in enumerate(plot_data.groupby(['trace'])):
            # trace label
            label = str(group['trace'].values[0])
            ax.plot(group['x'], group['response'], color=colors[i],
                    marker=markers[i], label=label,
                    linestyle=linestyles[i], **kwargs)
    elif plottype == 'line' or plottype == 'l':
        for i, (values, group) in enumerate(plot_data.groupby(['trace'])):
            # trace label
            label = str(group['trace'].values[0])
            ax.plot(group['x'], group['response'], color=colors[i],
                    label=label, linestyle=linestyles[i], **kwargs)
    elif plottype == 'scatter' or plottype == 's':
        for i, (values, group) in enumerate(plot_data.groupby(['trace'])):
            # trace label
            label = str(group['trace'].values[0])
            ax.scatter(group['x'], group['response'], color=colors[i],
                       label=label, marker=markers[i], **kwargs)
    else:
        raise ValueError("Plot type %s not understood" % plottype)

    ax.legend(loc=legendloc, title=legendtitle)
    ax.margins(.1)

    if all([x_levels, x_values]):
        ax.set_xticks(x_values)
        ax.set_xticklabels(x_levels)
    return fig


def _recode(x, levels):
    """
    Recode categorical data to int factor.

    Parameters
    ----------
    x : array-like
        array-like object supporting numpy array methods, of categorically
        coded data.
    levels : dict
        mapping of labels to integer codings

    Returns
    -------
    out : instance numpy.ndarray
    """
    from pandas import Series
    name = None

    if isinstance(x, Series):
        name = x.name
        x = x.values

    if x.dtype.type not in [np.str_, np.object_]:
        raise ValueError('This is not a categorical factor.'
                         ' Array of str type required.')
    elif not isinstance(levels, dict):
        raise ValueError('This is not a valid value for levels.'
                         ' Dict required.')
    elif not (np.unique(x) == np.unique(list(iterkeys(levels)))).all():
        raise ValueError('The levels do not match the array values.')
    else:
        out = np.empty(x.shape[0], dtype=int)
        for level, coding in iteritems(levels):
            out[x == level] = coding

    if name:
        out = Series(out)
        out.name = name

    return out
bsd-3-clause
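As a quick check of the categorical-recoding behavior described in the docstring above, here is a minimal, illustrative sketch with string-valued factors. It assumes statsmodels and matplotlib are installed; the variable names (dose, drug, score) are made up for the example:

import numpy as np
from statsmodels.graphics.factorplots import interaction_plot

np.random.seed(0)
dose = np.random.choice(['low', 'high'], size=40)   # categorical x-axis factor
drug = np.random.choice(['A', 'B'], size=40)        # trace factor, one line per level
score = np.random.randn(40)                         # response variable
# string levels in `dose` are recoded to 0, 1, ... internally via _recode,
# and the original labels are restored as x-tick labels
fig = interaction_plot(dose, drug, score, func=np.median)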
jm-begon/scikit-learn
sklearn/__init__.py
154
3014
""" Machine learning module for Python ================================== sklearn is a Python module integrating classical machine learning algorithms in the tightly-knit world of scientific Python packages (numpy, scipy, matplotlib). It aims to provide simple and efficient solutions to learning problems that are accessible to everybody and reusable in various contexts: machine-learning as a versatile tool for science and engineering. See http://scikit-learn.org for complete documentation. """ import sys import re import warnings # Make sure that DeprecationWarning within this package always gets printed warnings.filterwarnings('always', category=DeprecationWarning, module='^{0}\.'.format(re.escape(__name__))) # PEP0440 compatible formatted version, see: # https://www.python.org/dev/peps/pep-0440/ # # Generic release markers: # X.Y # X.Y.Z # For bugfix releases # # Admissible pre-release markers: # X.YaN # Alpha release # X.YbN # Beta release # X.YrcN # Release Candidate # X.Y # Final release # # Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer. # 'X.Y.dev0' is the canonical version of 'X.Y.dev' # __version__ = '0.17.dev0' try: # This variable is injected in the __builtins__ by the build # process. It used to enable importing subpackages of sklearn when # the binaries are not built __SKLEARN_SETUP__ except NameError: __SKLEARN_SETUP__ = False if __SKLEARN_SETUP__: sys.stderr.write('Partial import of sklearn during the build process.\n') # We are not importing the rest of the scikit during the build # process, as it may not be compiled yet else: from . import __check_build from .base import clone __check_build # avoid flakes unused variable error __all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition', 'cross_validation', 'datasets', 'decomposition', 'dummy', 'ensemble', 'externals', 'feature_extraction', 'feature_selection', 'gaussian_process', 'grid_search', 'isotonic', 'kernel_approximation', 'kernel_ridge', 'lda', 'learning_curve', 'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass', 'naive_bayes', 'neighbors', 'neural_network', 'pipeline', 'preprocessing', 'qda', 'random_projection', 'semi_supervised', 'svm', 'tree', # Non-modules: 'clone'] def setup_module(module): """Fixture for the tests to assure globally controllable seeding of RNGs""" import os import numpy as np import random # It could have been provided in the environment _random_seed = os.environ.get('SKLEARN_SEED', None) if _random_seed is None: _random_seed = np.random.uniform() * (2 ** 31 - 1) _random_seed = int(_random_seed) print("I: Seeding RNGs with %r" % _random_seed) np.random.seed(_random_seed) random.seed(_random_seed)
bsd-3-clause
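The setup_module fixture above seeds numpy's and Python's RNGs from the SKLEARN_SEED environment variable when it is set. A minimal sketch of that mechanism; the direct call below is only for illustration (in practice the test runner invokes the fixture, and its `module` argument is unused):

import os
os.environ['SKLEARN_SEED'] = '42'   # must be set before the fixture runs

import sklearn
sklearn.setup_module(None)          # prints: I: Seeding RNGs with 42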
reuk/wayverb
scripts/python/dispersion.py
2
6340
from math import e, pi

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors, ticker, cm
from mpl_toolkits.mplot3d import Axes3D
import operator


def get_base_vectors(flip):
    ret = [
        np.array([0.0, 2.0 * np.sqrt(2.0) / 3.0, 1.0 / 3.0]),
        np.array([np.sqrt(2.0 / 3.0), -np.sqrt(2.0) / 3.0, 1.0 / 3.0]),
        np.array([0.0, 0.0, -1.0]),
        np.array([-np.sqrt(2.0 / 3.0), -np.sqrt(2.0) / 3.0, 1.0 / 3.0]),
    ]
    if flip:
        ret = [np.array([1, -1, -1]) * i for i in ret]
    return ret


def get_vectors():
    ret = [i + j
           for i in get_base_vectors(False)
           for j in get_base_vectors(True)]
    ret = filter(lambda x: np.any(x != np.array([0, 0, 0])), ret)
    return ret


# DUYNE METHOD
def get_speed(arr):
    """
    The diagrams in the paper appear to be continuous outside of the
    range -1.5, 1.5.
    However, this function has a strange discontinuity at a radius of 1.4
    """
    def get_b(arr):
        summed = sum([pow(e, 1j * np.dot(arr, i)) for i in get_vectors()])
        return 1.0 - 0.25 * summed.real

    def get_ang_g(arr):
        b = get_b(arr)
        return 0.5 * np.arctan(np.sqrt(4 - b * b) / abs(b))

    c = np.sqrt(1.0 / 3.0)
    norm = np.linalg.norm(arr)
    # this analysis is only valid for frequencies below pi / 2
    # (spectrum is mirrored above this limit)
    # simulated frequency is equal to magnitude of wave vector (arr)
    if norm < pi / 2:
        return get_ang_g(arr) / (norm * c)
    else:
        return None


# CAMPOS METHOD
def get_speed_campos(arr):
    def get_b(arr):
        x, y, z = arr
        a = np.cos(2.0 * x / np.sqrt(3.0)) * np.cos(2.0 * y / np.sqrt(3.0))
        b = np.cos(2.0 * x / np.sqrt(3.0)) * np.cos(2.0 * z / np.sqrt(3.0))
        c = np.cos(2.0 * y / np.sqrt(3.0)) * np.cos(2.0 * z / np.sqrt(3.0))
        return a + b + c - 1

    def get_kd(arr):
        return np.sqrt(3.0) * np.arccos(get_b(arr) / 2.0) / (2.0 * np.linalg.norm(arr))

    return get_kd(arr)


# direction error analysis from @hacihabiboglu
# p(x) = pressure field in spatial(?) domain
# P(w) = pressure field in frequency domain
def get_U():
    v = get_base_vectors(True)
    U = np.vstack(v)
    return U


def eq_21(u, w):
    return pow(e, -1j * np.dot(u, w)) - 1


def eq_22(w):
    return np.array([eq_21(i, w) for i in get_base_vectors(True)])


def eq_23(w):
    return np.dot(np.linalg.pinv(get_U()), eq_22(w))


def hermitian_angle(a, b):
    prod = np.dot(a, np.conj(b)).real
    mag_a = np.sqrt(np.dot(a, np.conj(a)))
    mag_b = np.sqrt(np.dot(b, np.conj(b)))
    return (prod / (mag_a * mag_b)).real


def direction_difference(arr):
    def get_term_1():
        return eq_23(arr)

    def get_term_2():
        return 1j * arr

    return hermitian_angle(get_term_1(), get_term_2())


# monte carlo bandwidth estimation
def random_three_vector():
    phi = np.random.uniform(0, pi * 2)
    costheta = np.random.uniform(-1, 1)
    theta = np.arccos(costheta)
    x = np.sin(theta) * np.cos(phi)
    y = np.sin(theta) * np.sin(phi)
    z = np.cos(theta)
    return np.array([x, y, z])


def get_max_valid_frequency(func, accuracy, starting_freq, increments, samples):
    last = starting_freq + increments
    ret = starting_freq
    while True:
        sample_points = [random_three_vector() * last for i in range(samples)]
        sampled = [func(i) for i in sample_points]
        if not all(map(lambda x: x > accuracy, sampled)):
            return ret
        else:
            ret = last
            last += increments


def main():
    """
    This program duplicates the tetrahedral dispersion diagrams from the
    paper 'The Tetrahedral Digital Waveguide Mesh' by Duyne and Smith.
    I wrote it to try to understand how to do dispersion analysis - the
    analysis here is of the difference of the actual wavefront speed to
    the ideal speed.
    """
    w = np.array([0, 1, 0])
    w /= np.linalg.norm(w)

    print "w", w
    for i in get_base_vectors(True):
        print "u", i
        print "21", eq_21(i, w)
    print "22", eq_22(w)
    print "23", eq_23(w)
    print
    print direction_difference(w)

    func = direction_difference
    vfunc = np.vectorize(lambda x, y, z: func(np.array([x, y, z])))

    max_val = np.pi / 4
    phi, theta = np.mgrid[0:pi:50j, 0:2 * pi:50j]
    XX = max_val * np.sin(phi) * np.cos(theta)
    YY = max_val * np.sin(phi) * np.sin(theta)
    ZZ = max_val * np.cos(phi)
    zz = vfunc(XX, YY, ZZ)
    zzmin, zzmax = zz.min(), zz.max()
    print "dispersion error range:", zzmin, "to", zzmax
    zz = (zz - zzmin) / (zzmax - zzmin)

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.plot_surface(XX, YY, ZZ, rstride=1, cstride=1, facecolors=cm.jet(zz))
    plt.show()

#    func = get_speed_campos
#    vfunc = np.vectorize(lambda x, y, z: func(np.array([x, y, z])))
#
#    min_accuracy = 0.99
#    max_val = get_max_valid_frequency(func, min_accuracy, 0.1, 0.001, 20)
#    print "maximum radius (frequency): ", max_val / (pi / 2)
#    phi, theta = np.mgrid[0:pi:50j, 0:2*pi:50j]
#    XX = max_val * np.sin(phi) * np.cos(theta)
#    YY = max_val * np.sin(phi) * np.sin(theta)
#    ZZ = max_val * np.cos(phi)
#    zz = vfunc(XX, YY, ZZ)
#    zzmin, zzmax = zz.min(), zz.max()
#    print "dispersion error range:", zzmin, "to", zzmax
#    zz = (zz - zzmin) / (zzmax - zzmin)
#
#    fig = plt.figure()
#
#    bounds = pi / 2
#    N = 100
#    x = np.linspace(-bounds, bounds, N)
#    y = np.linspace(-bounds, bounds, N)
#    X, Y = np.meshgrid(x, y)
#    Z = np.zeros(X.shape)
#    depth = np.linspace(0.9, 1, 11)
#
#    ### plot 1
#    ax = fig.add_subplot(221 + 0)
#    z = vfunc(Z, X, Y)
#    plt.contourf(X, Y, z, depth)
#    cbar = plt.colorbar()
#
#    ### plot 2
#    ax = fig.add_subplot(221 + 1)
#    z = vfunc(X, Z, Y)
#    plt.contourf(X, Y, z, depth)
#    cbar = plt.colorbar()
#
#    ### plot 3
#    ax = fig.add_subplot(221 + 2)
#    z = vfunc(X, Y, Z)
#    plt.contourf(X, Y, z, depth)
#    cbar = plt.colorbar()
#
#    ax = fig.add_subplot(224, projection='3d')
#    ax.plot_surface(XX, YY, ZZ, rstride=1, cstride=1, facecolors=cm.jet(zz))
#
#    plt.show()


if __name__ == "__main__":
    main()
gpl-2.0
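The commented-out block at the end of main() above estimates the usable mesh bandwidth by Monte-Carlo sampling wave vectors of growing magnitude until the wavefront-speed ratio drops below a tolerance. A minimal sketch of that workflow, assuming the functions above live in an importable module named dispersion (a hypothetical name), written in Python 2 to match the file:

from math import pi
from dispersion import get_speed_campos, get_max_valid_frequency

min_accuracy = 0.99   # tolerated relative wavefront-speed ratio
# grow the sampling radius in steps of 0.001, testing 20 random
# directions per step, until some direction falls below the tolerance
max_val = get_max_valid_frequency(get_speed_campos, min_accuracy,
                                  0.1, 0.001, 20)
print "maximum radius (frequency):", max_val / (pi / 2)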
mompiou/misorientation
misorientation.py
1
48149
#!/usr/bin/python from __future__ import division import numpy as np from Tkinter import * from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg from matplotlib.figure import Figure from matplotlib import pyplot as plt from PIL import Image from PIL import PngImagePlugin import ttk import sys from fractions import Fraction from tkFileDialog import * import os import matplotlib as mpl mpl.rcParams['font.size'] = 12 ###################################################################" ##### Fonction projection sur l'abaque #################################################################### def proj(x,y,z): if z==1: X=0 Y=0 elif z<-0.000001: X=250 Y=250 else: X=x/(1+z) Y=y/(1+z) return np.array([X,Y],float) ###################################################################" ##### Fonction rotation #################################################################### def rotation(phi1,phi,phi2): phi1=phi1*np.pi/180; phi=phi*np.pi/180; phi2=phi2*np.pi/180; R=np.array([[np.cos(phi1)*np.cos(phi2)-np.cos(phi)*np.sin(phi1)*np.sin(phi2), -np.cos(phi)*np.cos(phi2)*np.sin(phi1)-np.cos(phi1)* np.sin(phi2),np.sin(phi)*np.sin(phi1)],[np.cos(phi2)*np.sin(phi1) +np.cos(phi)*np.cos(phi1)*np.sin(phi2),np.cos(phi)*np.cos(phi1) *np.cos(phi2)-np.sin(phi1)*np.sin(phi2), -np.cos(phi1)*np.sin(phi)], [np.sin(phi)*np.sin(phi2), np.cos(phi2)*np.sin(phi), np.cos(phi)]],float) return R #################################################################### ##### Fonction rotation autour d'un axe #################################################################### def Rot(th,a,b,c): th=th*np.pi/180; aa=a/np.linalg.norm([a,b,c]); bb=b/np.linalg.norm([a,b,c]); cc=c/np.linalg.norm([a,b,c]); c1=np.array([[1,0,0],[0,1,0],[0,0,1]],float) c2=np.array([[aa**2,aa*bb,aa*cc],[bb*aa,bb**2,bb*cc],[cc*aa, cc*bb,cc**2]],float) c3=np.array([[0,-cc,bb],[cc,0,-aa],[-bb,aa,0]],float) R=np.cos(th)*c1+(1-np.cos(th))*c2+np.sin(th)*c3 return R #################################################################### ##### Fonction cristal #################################################################### def crist(): global axesA,axeshA,axesB,axeshB,D,Dstar,V a=eval(a_entry.get()) b=eval(b_entry.get()) c=eval(c_entry.get()) alp=eval(alp_entry.get()) bet=eval(bet_entry.get()) gam=eval(gam_entry.get()) e=eval(e_entry.get()) d2=eval(d_label_var.get()) alp=alp*np.pi/180; bet=bet*np.pi/180; gam=gam*np.pi/180; V=a*b*c*np.sqrt(1-(np.cos(alp)**2)-(np.cos(bet))**2-(np.cos(gam))**2+2*b*c*np.cos(alp)*np.cos(bet)*np.cos(gam)) D=np.array([[a,b*np.cos(gam),c*np.cos(bet)],[0,b*np.sin(gam), c*(np.cos(alp)-np.cos(bet)*np.cos(gam))/np.sin(gam)],[0,0,V/(a*b*np.sin(gam))]]) Dstar=np.transpose(np.linalg.inv(D)) G=np.array([[a**2,a*b*np.cos(gam),a*c*np.cos(bet)],[a*b*np.cos(gam),b**2,b*c*np.cos(alp)],[a*c*np.cos(bet),b*c*np.cos(alp),c**2]]) axes=np.zeros(((2*e+1)**3-1,3)) axesh=np.zeros(((2*e+1)**3-1,3)) id=0 for i in range(-e,e+1): for j in range(-e,e+1): for k in range(-e,e+1): if (i,j,k)!=(0,0,0): d=1/(np.sqrt(np.dot(np.array([i,j,k]),np.dot(np.linalg.inv(G),np.array([i,j,k]))))) if d>d2*0.1*np.amax([a,b,c]): if var_uvw.get()==0: axesh[id,:]=np.dot(Dstar,np.array([i,j,k],float)) axes[id,:]=np.array([i,j,k],float) else: axesh[id,:]=np.dot(D,np.array([i,j,k],float)) axes[id,:]=np.array([i,j,k],float) id=id+1 axesA=axes axesB=axes axeshA=axesh axeshB=axesh return axesA,axeshA,axesB,axeshB,D,Dstar,V def dm(): global dmip a=f.add_subplot(111) a.figure.clear() a=f.add_subplot(111) dmip=dmip-eval(d_entry.get()) d_label_var.set(dmip) 
crist() trace() return dmip def dp(): global dmip a=f.add_subplot(111) a.figure.clear() a=f.add_subplot(111) dmip=dmip+eval(d_entry.get()) d_label_var.set(dmip) crist() trace() return dmip #################################################################### ##### Fonction ajouter un pole #################################################################### def poleA(pole1,pole2,pole3): global MA,axesA,axeshA,Ta,V,D,Dstar fp=f.add_subplot(111) Gs=np.array([pole1,pole2,pole3],float) Pp=np.zeros((1,2),float) if var_uvw.get()==0: Gsh=np.dot(Dstar,Gs)/np.linalg.norm(np.dot(Dstar,Gs)) else: Gsh=np.dot(D,Gs)/np.linalg.norm(np.dot(D,Gs)) S=np.dot(MA,Gsh) if S[2]<0: S=-S Gsh=-Gsh pole1=-pole1 pole2=-pole2 pole3=-pole3 Pp=proj(S[0],S[1],S[2])*600/2 l=str(int(pole1))+str(int(pole2))+str(int(pole3)) fp.plot(Pp[0]+600/2,Pp[1]+600/2,'ro') fp.annotate(l,(Pp[0]+600/2,Pp[1]+600/2)) fp.axis([0,600,0,600]) fp.axis('off') fp.figure.canvas.draw() axesA=np.vstack((axesA,np.array([pole1,pole2,pole3]))) axesA=np.vstack((axesA,np.array([-pole1,-pole2,-pole3]))) Ta=np.vstack((Ta,np.array([S[0],S[1],S[2]]))) Ta=np.vstack((Ta,np.array([-S[0],-S[1],-S[2]]))) axeshA=np.vstack((axeshA,np.array([Gsh[0],Gsh[1],Gsh[2]]))) axeshA=np.vstack((axeshA,np.array([-Gsh[0],-Gsh[1],-Gsh[2]]))) return axesA,axeshA,Ta def poleB(pole1,pole2,pole3): global MB,axesB,axeshB,Tb,V,D,Dstar fp=f.add_subplot(111) Gs=np.array([pole1,pole2,pole3],float) Pp=np.zeros((1,2),float) if var_uvw.get()==0: Gsh=np.dot(Dstar,Gs)/np.linalg.norm(np.dot(Dstar,Gs)) else: Gsh=np.dot(D,Gs)/np.linalg.norm(np.dot(D,Gs)) S=np.dot(MB,Gsh) if S[2]<0: S=-S Gsh=-Gsh pole1=-pole1 pole2=-pole2 pole3=-pole3 Pp=proj(S[0],S[1],S[2])*600/2 l=str(int(pole1))+str(int(pole2))+str(int(pole3)) fp.plot(Pp[0]+600/2,Pp[1]+600/2,'ro') fp.annotate(l,(Pp[0]+600/2,Pp[1]+600/2)) fp.axis([0,600,0,600]) fp.axis('off') fp.figure.canvas.draw() axesB=np.vstack((axesB,np.array([pole1,pole2,pole3]))) axesB=np.vstack((axesB,np.array([-pole1,-pole2,-pole3]))) Tb=np.vstack((Tb,np.array([S[0],S[1],S[2]]))) Tb=np.vstack((Tb,np.array([-S[0],-S[1],-S[2]]))) axeshB=np.vstack((axeshB,np.array([Gsh[0],Gsh[1],Gsh[2]]))) axeshB=np.vstack((axeshB,np.array([-Gsh[0],-Gsh[1],-Gsh[2]]))) return axesB,axeshB,Tb def addpoleA_sym(): pole1A=eval(pole1A_entry.get()) pole2A=eval(pole2A_entry.get()) pole3A=eval(pole3A_entry.get()) poleA(pole1A,pole2A,pole3A) poleA(pole1A,pole2A,-pole3A) poleA(pole1A,-pole2A,pole3A) poleA(-pole1A,pole2A,pole3A) poleA(pole2A,pole1A,pole3A) poleA(pole2A,pole1A,-pole3A) poleA(pole2A,-pole1A,pole3A) poleA(-pole2A,pole1A,pole3A) poleA(pole2A,pole3A,pole1A) poleA(pole2A,pole3A,-pole1A) poleA(pole2A,-pole3A,pole1A) poleA(-pole2A,pole3A,pole1A) poleA(pole1A,pole3A,pole2A) poleA(pole1A,pole3A,-pole2A) poleA(pole1A,-pole3A,pole2A) poleA(-pole1A,pole3A,pole2A) poleA(pole3A,pole1A,pole2A) poleA(pole3A,pole1A,-pole2A) poleA(pole3A,-pole1A,pole2A) poleA(-pole3A,pole1A,pole2A) poleA(pole3A,pole2A,pole1A) poleA(pole3A,pole2A,-pole1A) poleA(pole3A,-pole2A,pole1A) poleA(-pole3A,pole2A,pole1A) trace() def addpoleB_sym(): pole1B=eval(pole1B_entry.get()) pole2B=eval(pole2B_entry.get()) pole3B=eval(pole3B_entry.get()) poleB(pole1B,pole2B,pole3B) poleB(pole1B,pole2B,-pole3B) poleB(pole1B,-pole2B,pole3B) poleB(-pole1B,pole2B,pole3B) poleB(pole2B,pole1B,pole3B) poleB(pole2B,pole1B,-pole3B) poleB(pole2B,-pole1B,pole3B) poleB(-pole2B,pole1B,pole3B) poleB(pole2B,pole3B,pole1B) poleB(pole2B,pole3B,-pole1B) poleB(pole2B,-pole3B,pole1B) poleB(-pole2B,pole3B,pole1B) poleB(pole1B,pole3B,pole2B) 
poleB(pole1B,pole3B,-pole2B) poleB(pole1B,-pole3B,pole2B) poleB(-pole1B,pole3B,pole2B) poleB(pole3B,pole1B,pole2B) poleB(pole3B,pole1B,-pole2B) poleB(pole3B,-pole1B,pole2B) poleB(-pole3B,pole1B,pole2B) poleB(pole3B,pole2B,pole1B) poleB(pole3B,pole2B,-pole1B) poleB(pole3B,-pole2B,pole1B) poleB(-pole3B,pole2B,pole1B) trace() def addpoleA(): pole1A=eval(pole1A_entry.get()) pole2A=eval(pole2A_entry.get()) pole3A=eval(pole3A_entry.get()) poleA(pole1A,pole2A,pole3A) trace() def addpoleB(): pole1B=eval(pole1B_entry.get()) pole2B=eval(pole2B_entry.get()) pole3B=eval(pole3B_entry.get()) poleB(pole1B,pole2B,pole3B) trace() #################################################################### ##### Fonction tracer plan #################################################################### def trace_planA(): global MA,axes,axesh,Ta,V,D,Dstar f2=f.add_subplot(111) pole1A=eval(pole1A_entry.get()) pole2A=eval(pole2A_entry.get()) pole3A=eval(pole3A_entry.get()) Gs=np.array([pole1A,pole2A,pole3A],float) if var_uvw.get()==0: Gsh=np.dot(Dstar,Gs)/np.linalg.norm(np.dot(Dstar,Gs)) else: Gsh=np.dot(D,Gs)/np.linalg.norm(np.dot(D,Gs)) S=np.dot(MA,Gsh) if S[2]<0: S=-S Gsh=-Gsh pole1A=-pole1A pole2A=-pole2A pole3A=-pole3A r=np.sqrt(S[0]**2+S[1]**2+S[2]**2) A=np.zeros((2,100)) Q=np.zeros((1,2)) if S[2]==0: t=90 w=0 else: t=np.arctan2(S[1],S[0])*180/np.pi w=0 ph=np.arccos(S[2]/r)*180/np.pi for g in np.linspace(-np.pi,np.pi-0.00001,100): Aa=np.dot(Rot(t,0,0,1),np.dot(Rot(ph,0,1,0),np.array([np.sin(g),np.cos(g),0]))) A[:,w]=proj(Aa[0],Aa[1],Aa[2])*600/2 if A[0,w]<>75000: Q=np.vstack((Q,A[:,w])) w=w+1 Q=np.delete(Q,0,0) f2.plot(Q[:,0]+600/2,Q[:,1]+600/2,'r') f2.axis([0,600,0,600]) f2.axis('off') f2.figure.canvas.draw() trace() def trace_planB(): global MB,axes,axesh,Tb,V,D,Dstar f2=f.add_subplot(111) pole1B=eval(pole1B_entry.get()) pole2B=eval(pole2B_entry.get()) pole3B=eval(pole3B_entry.get()) Gs=np.array([pole1B,pole2B,pole3B],float) if var_uvw.get()==0: Gsh=np.dot(Dstar,Gs)/np.linalg.norm(np.dot(Dstar,Gs)) else: Gsh=np.dot(D,Gs)/np.linalg.norm(np.dot(D,Gs)) S=np.dot(MB,Gsh) if S[2]<0: S=-S Gsh=-Gsh pole1B=-pole1B pole2B=-pole2B pole3B=-pole3B r=np.sqrt(S[0]**2+S[1]**2+S[2]**2) A=np.zeros((2,100)) Q=np.zeros((1,2)) if S[2]==0: t=90 w=0 else: t=np.arctan2(S[1],S[0])*180/np.pi w=0 ph=np.arccos(S[2]/r)*180/np.pi for g in np.linspace(-np.pi,np.pi-0.00001,100): Aa=np.dot(Rot(t,0,0,1),np.dot(Rot(ph,0,1,0),np.array([np.sin(g),np.cos(g),0]))) A[:,w]=proj(Aa[0],Aa[1],Aa[2])*600/2 if A[0,w]<>75000: Q=np.vstack((Q,A[:,w])) w=w+1 Q=np.delete(Q,0,0) f2.plot(Q[:,0]+600/2,Q[:,1]+600/2,'r') f2.axis([0,600,0,600]) f2.axis('off') f2.figure.canvas.draw() trace() #################################################################### ##### Click a pole #################################################################### def click_a_pole(event): global MB,Dstar,D x=event.x y=event.y x=(x-411)*2/620 y=-(y-400)*2/620 X=2*x/(1+x**2+y**2) Y=2*y/(1+x**2+y**2) Z=(-1+x**2+y**2)/(1+x**2+y**2) if Z<0: X=-X Y=-Y A=np.dot(np.linalg.inv(MB),np.array([X,Y,Z])) n=0 L=np.zeros((3,16**3)) for i in range(-8,9,1): for j in range(-8,9,1): for k in range(-8,9,1): if np.linalg.norm([i,j,k])<>0: if var_uvw.get()==0: Q=np.dot(Dstar,np.array([i,j,k],float))/np.linalg.norm(np.dot(Dstar,np.array([i,j,k],float))) if np.abs(Q[0]-A[0])<0.05 and np.abs(Q[1]-A[1])<0.05 and np.abs(Q[2]-A[2])<0.05: L[:,n]=np.array([i,j,k],float) n=n+1 else: Q=np.dot(D,np.array([i,j,k],float))/np.linalg.norm(np.dot(D,np.array([i,j,k],float))) if np.abs(Q[0]-A[0])<0.05 and np.abs(Q[1]-A[1])<0.05 
and np.abs(Q[2]-A[2])<0.05: L[:,n]=np.array([i,j,k],float) n=n+1 if np.linalg.norm(L[:,0])<>0: poleB(L[0,0],L[1,0],L[2,0]) trace() #################################################################### ##### Inclinaison-beta #################################################################### #################################################################### ##### Fonction desorientation #################################################################### def Rota(t,u,v,w,g): Ae=np.dot(g,np.array([u,v,w])) Re=Rot(t,Ae[0],Ae[1],Ae[2]) return Re def cryststruct(): global cs a=eval(a_entry.get()) b=eval(b_entry.get()) c=eval(c_entry.get()) alp=eval(alp_entry.get()) bet=eval(bet_entry.get()) gam=eval(gam_entry.get()) if gam==90 and alp==90 and bet==90 and a==b and b==c: cs=1 if gam==120 and alp==90 and bet==90: cs=2 if gam==90 and alp==90 and bet==90 and a==b and b<>c: cs=3 if alp<>90 and a==b and b==c: cs=4 if gam==90 and alp==90 and bet==90 and a<>b and b<>c: cs=5 if gam<>90 and alp==90 and bet==90 and a<>b and b<>c: cs=6 if gam<>90 and alp<>90 and bet<>90 and a<>b and b<>c: cs=7 return cs def Sy(g): global cs if cs==1: S1=Rota(90,1,0,0,g); S2=Rota(180,1,0,0,g); S3=Rota(270,1,0,0,g); S4=Rota(90,0,1,0,g); S5=Rota(180,0,1,0,g); S6=Rota(270,0,1,0,g); S7=Rota(90,0,0,1,g); S8=Rota(180,0,0,1,g); S9=Rota(270,0,0,1,g); S10=Rota(180,1,1,0,g); S11=Rota(180,1,0,1,g); S12=Rota(180,0,1,1,g); S13=Rota(180,-1,1,0,g); S14=Rota(180,-1,0,1,g); S15=Rota(180,0,-1,1,g); S16=Rota(120,1,1,1,g); S17=Rota(240,1,1,1,g); S18=Rota(120,-1,1,1,g); S19=Rota(240,-1,1,1,g); S20=Rota(120,1,-1,1,g); S21=Rota(240,1,-1,1,g); S22=Rota(120,1,1,-1,g); S23=Rota(240,1,1,-1,g); S24=np.eye(3,3); S=np.vstack((S1,S2,S3,S4,S5,S6,S7,S8,S9,S10,S11,S12,S13,S14,S15,S16,S17,S18,S19,S20,S21,S22,S23,S24)) if cs==2: S1=Rota(60,0,0,1,g); S2=Rota(120,0,0,1,g); S3=Rota(180,0,0,1,g); S4=Rota(240,0,0,1,g); S5=Rota(300,0,0,1,g); S6=np.eye(3,3); S7=Rota(180,0,0,1,g); S8=Rota(180,0,1,0,g); S9=Rota(180,1/2,np.sqrt(3)/2,0,g); S10=Rota(180,-1/2,np.sqrt(3)/2,0,g); S11=Rota(180,np.sqrt(3)/2,1/2,0,g); S12=Rota(180,-np.sqrt(3)/2,1/2,0,g); S=np.vstack((S1,S2,S3,S4,S5,S6,S7,S8,S9,S10,S11,S12)) if cs==3: S1=Rota(90,0,0,1,g); S2=Rota(180,0,0,1,g); S3=Rota(270,0,0,1,g); S4=Rota(180,0,1,0,g); S5=Rota(180,1,0,0,g); S6=Rota(180,1,1,0,g); S7=Rota(180,1,-1,0,g); S8=np.eye(3,3) S=np.vstack((S1,S2,S3,S4,S5,S6,S7,S8)) if cs==4: S1=Rota(60,0,0,1,g); S2=Rota(120,0,0,1,g); S3=Rota(180,0,0,1,g); S4=Rota(240,0,0,1,g); S5=Rota(300,0,0,1,g); S6=np.eye(3,3); S7=Rota(180,0,0,1,g); S8=Rota(180,0,1,0,g); S9=Rota(180,1/2,np.sqrt(3)/2,0,g); S10=Rota(180,-1/2,np.sqrt(3)/2,0,g); S11=Rota(180,np.sqrt(3)/2,1/2,0,g); S12=Rota(180,-np.sqrt(3)/2,1/2,0,g); S=np.vstack((S1,S2,S3,S4,S5,S6,S7,S8,S9,S10,S11,S12)) if cs==5: S1=Rota(180,0,0,1,g); S2=Rota(180,1,0,0,g); S3=Rota(180,0,1,0,g); S4=np.eye(3,3); S=np.vstack((S1,S2,S3,S4)) if cs==6: S1=Rota(180,0,1,0,g); S2=np.eye(3,3); S=np.vstack((S1,S2)) if cs==7: S=np.eye(3,3); return S def null(A, rcond=None): u, s, vh = np.linalg.svd(A, full_matrices=True) M, N = u.shape[0], vh.shape[1] if rcond is None: rcond = np.finfo(s.dtype).eps * max(M, N) tol = np.amax(s) * rcond num = np.sum(s > tol, dtype=int) Q = vh[num:,:].T.conj() return Q def desorientation(): global D0,S,D1,cs,V,Qp a = f.add_subplot(111) a.figure.clear() a = f.add_subplot(111) fn = os.path.join(os.path.dirname(__file__), 'stereo.png') img=np.array(Image.open(fn)) cryststruct() phi1a=eval(phi1A_entry.get()) phia=eval(phiA_entry.get()) phi2a=eval(phi2A_entry.get()) 
phi1b=eval(phi1B_entry.get()) phib=eval(phiB_entry.get()) phi2b=eval(phi2B_entry.get()) gA=rotation(phi1a,phia,phi2a) gB=rotation(phi1b,phib,phi2b) k=0 S=Sy(gA) D0=np.zeros((int(np.shape(S)[0]/3),5)) D1=np.zeros((int(np.shape(S)[0]/3),3)) Qp=np.zeros((int(np.shape(S)[0]/3),2)) for i in range(0,np.shape(S)[0],3): In=np.dot(np.array([[S[i,0],S[i+1,0],S[i+2,0]],[S[i,1],S[i+1,1],S[i+2,1]],[S[i,2],S[i+1,2],S[i+2,2]]]),gA) Ing=np.dot(In,np.array([0,0,1])) In2=np.dot(Rot(-phi2b,Ing[0],Ing[1],Ing[2]),In) Ing2=np.dot(In2,np.array([1,0,0])) In3=np.dot(Rot(-phib,Ing2[0],Ing2[1],Ing2[2]),In2) Ing3=np.dot(In3,np.array([0,0,1])) A=np.dot(Rot(-phi1b,Ing3[0],Ing3[1],Ing3[2]),In3)-np.eye(3) V=null(A,0.001).T if 0.5*(np.trace(A+np.eye(3))-1)>1: D0[k,3]=0 elif 0.5*(np.trace(A+np.eye(3))-1)<-1: D0[k,3]=180 else: D0[k,3]=np.arccos(0.5*(np.trace(A+np.eye(3))-1))*180/np.pi if np.abs(D0[k,3])<1e-5: D0[k,0]=0 D0[k,1]=0 D0[k,2]=0 else: D0[k,0]=V[0,0]/np.linalg.norm(V) D0[k,1]=V[0,1]/np.linalg.norm(V) D0[k,2]=V[0,2]/np.linalg.norm(V) Ds1=np.dot(np.linalg.inv(gB),np.array([D0[k,0],D0[k,1],D0[k,2]])) F0=Fraction(Ds1[0]).limit_denominator(10) F1=Fraction(Ds1[1]).limit_denominator(10) F2=Fraction(Ds1[2]).limit_denominator(10) D1[k,0]=F0.numerator*F1.denominator*F2.denominator D1[k,1]=F1.numerator*F0.denominator*F2.denominator D1[k,2]=F2.numerator*F0.denominator*F1.denominator if D0[k,2]<0: D0[k,0]=-D0[k,0] D0[k,1]=-D0[k,1] D0[k,2]=-D0[k,2] D1[k,0]=-D1[k,0] D1[k,1]=-D1[k,1] D1[k,2]=-D1[k,2] D0[k,4]=k Qp[k,:]=proj(D0[k,0],D0[k,1],D0[k,2])*600/2 k=k+1 a.plot(Qp[:,0]+600/2,Qp[:,1]+600/2,'ro') a.axis([0,600,0,600]) a.imshow(img,interpolation="bicubic") a.axis('off') a.figure.canvas.draw() trace() return Qp,S,D1 #################################################################### ##### Fonction principale #################################################################### def trace(): global Ta,Tb,axesA,axeshA,MA,axesB,axeshB,MB,Qp,S,D1,show_ind,D0 a = f.add_subplot(111) fn = os.path.join(os.path.dirname(__file__), 'stereo.png') img=np.array(Image.open(fn)) Pa=np.zeros((np.shape(axesA)[0],2)) Pb=np.zeros((np.shape(axesB)[0],2)) for i in range(0,np.shape(axesA)[0]): axeshA[i,:]=axeshA[i,:]/np.linalg.norm(axeshA[i,:]) Ta[i,:]=np.dot(MA,axeshA[i,:]) Pa[i,:]=proj(Ta[i,0],Ta[i,1],Ta[i,2])*600/2 if show_ind.get()==1: m=np.amax([np.abs(axesA[i,0]),np.abs(axesA[i,1]),np.abs(axesA[i,2])]) if (np.around(axesA[i,0]/m)==axesA[i,0]/m) & (np.around(axesA[i,1]/m)==axesA[i,1]/m) & (np.around(axesA[i,2]/m)==axesA[i,2]/m): sA=str(int(axesA[i,0]/m))+str(int(axesA[i,1]/m))+str(int(axesA[i,2]/m)) else: sA=str(int(axesA[i,0]))+str(int(axesA[i,1]))+str(int(axesA[i,2])) a.annotate(sA,(Pa[i,0]+600/2,Pa[i,1]+600/2)) for i in range(0,np.shape(axesB)[0]): axeshB[i,:]=axeshB[i,:]/np.linalg.norm(axeshB[i,:]) Tb[i,:]=np.dot(MB,axeshB[i,:]) Pb[i,:]=proj(Tb[i,0],Tb[i,1],Tb[i,2])*600/2 if show_ind.get()==1: m=np.amax([np.abs(axesB[i,0]),np.abs(axesB[i,1]),np.abs(axesB[i,2])]) if (np.around(axesB[i,0]/m)==axesB[i,0]/m) & (np.around(axesB[i,1]/m)==axesB[i,1]/m) & (np.around(axesB[i,2]/m)==axesB[i,2]/m): sB=str(int(axesB[i,0]/m))+str(int(axesB[i,1]/m))+str(int(axesB[i,2]/m)) else: sB=str(int(axesB[i,0]))+str(int(axesB[i,1]))+str(int(axesB[i,2])) a.annotate(sB,(Pb[i,0]+600/2,Pb[i,1]+600/2)) for l in range(0,int(np.shape(S)[0]/3)): if show_angle.get()==1: sangle=str(np.round(D0[l,3],decimals=1)) a.annotate(sangle,(Qp[l,0]+600/2,Qp[l,1]+600/2),size=8) if show_axe.get()==1: saxe=str(int(D1[l,0]))+','+str(int(D1[l,1]))+','+str(int(D1[l,2])) 
a.annotate(saxe,(Qp[l,0]+600/2,Qp[l,1]+600/2),size=8) if show_num.get()==1: snum=str(int(D0[l,4])) a.annotate(snum,(Qp[l,0]+600/2,Qp[l,1]+600/2),size=10) a.plot(Pa[:,0]+600/2,Pa[:,1]+600/2,'bo') a.plot(Pb[:,0]+600/2,Pb[:,1]+600/2,'go') a.plot(Qp[:,0]+600/2,Qp[:,1]+600/2,'ro') a.axis([0,600,0,600]) a.imshow(img,interpolation="bicubic") a.axis('off') a.figure.canvas.draw() def princ(): global Ta,Tb,MA,MB a = f.add_subplot(111) a.figure.clear() a = f.add_subplot(111) phi1a=eval(phi1A_entry.get()) phia=eval(phiA_entry.get()) phi2a=eval(phi2A_entry.get()) phi1b=eval(phi1B_entry.get()) phib=eval(phiB_entry.get()) phi2b=eval(phi2B_entry.get()) fn = os.path.join(os.path.dirname(__file__), 'stereo.png') img=np.array(Image.open(fn)) crist() Pa=np.zeros((np.shape(axesA)[0],2)) Ta=np.zeros((np.shape(axesA))) Pb=np.zeros((np.shape(axesB)[0],2)) Tb=np.zeros((np.shape(axesB))) for i in range(0,np.shape(axesA)[0]): axeshA[i,:]=axeshA[i,:]/np.linalg.norm(axeshA[i,:]) Ta[i,:]=np.dot(rotation(phi1a,phia,phi2a),axeshA[i,:]) Pa[i,:]=proj(Ta[i,0],Ta[i,1],Ta[i,2])*600/2 m=np.amax([np.abs(axesA[i,0]),np.abs(axesA[i,1]),np.abs(axesA[i,2])]) if (np.around(axesA[i,0]/m)==axesA[i,0]/m) & (np.around(axesA[i,1]/m)==axesA[i,1]/m) & (np.around(axesA[i,2]/m)==axesA[i,2]/m): sA=str(int(axesA[i,0]/m))+str(int(axesA[i,1]/m))+str(int(axesA[i,2]/m)) else: sA=str(int(axesA[i,0]))+str(int(axesA[i,1]))+str(int(axesA[i,2])) a.annotate(sA,(Pa[i,0]+600/2,Pa[i,1]+600/2)) for i in range(0,np.shape(axesB)[0]): axeshB[i,:]=axeshB[i,:]/np.linalg.norm(axeshB[i,:]) Tb[i,:]=np.dot(rotation(phi1b,phib,phi2b),axeshB[i,:]) Pb[i,:]=proj(Tb[i,0],Tb[i,1],Tb[i,2])*600/2 m=np.amax([np.abs(axesB[i,0]),np.abs(axesB[i,1]),np.abs(axesB[i,2])]) if (np.around(axesB[i,0]/m)==axesB[i,0]/m) & (np.around(axesB[i,1]/m)==axesB[i,1]/m) & (np.around(axesB[i,2]/m)==axesB[i,2]/m): sB=str(int(axesA[i,0]/m))+str(int(axesA[i,1]/m))+str(int(axesA[i,2]/m)) else: sB=str(int(axesB[i,0]))+str(int(axesB[i,1]))+str(int(axesB[i,2])) a.annotate(sB,(Pb[i,0]+600/2,Pb[i,1]+600/2)) a.plot(Pa[:,0]+600/2,Pa[:,1]+600/2,'bo') a.plot(Pb[:,0]+600/2,Pb[:,1]+600/2,'go') a.axis([0,600,0,600]) a.imshow(img,interpolation="bicubic") a.axis('off') a.figure.canvas.draw() MA=rotation(phi1a,phia,phi2a) MB=rotation(phi1b,phib,phi2b) return Ta,MA,MB,Tb ###################################################################### # GUI ###################################################################### def file_save(): global D1,D0,D fout = asksaveasfile(mode='w', defaultextension=".txt") for i in range(np.shape(D1)[0]): text2save = str(int(D0[i,4]))+'\t'+'['+str(int(D1[i,0]))+','+str(int(D1[i,1]))+','+str(int(D1[i,2]))+']'+'\t '+str(np.around(D0[i,3],decimals=2)) fout.write("%s\n" % text2save) fout.close() def image_save(): s = asksaveasfile(mode='w', defaultextension=".jpg") if s: f.savefig(s.name) #s.close() #################################################### #fonction d'initialisation ################################################## def init(): global var_uvw,D1,S,Qp,show_ind,show_angle,show_axe,show_num,dmip,d_label_var fn = os.path.join(os.path.dirname(__file__), 'stereo.png') img=np.array(Image.open(fn)) a = f.add_subplot(111) a.axis('off') a.imshow(img,interpolation="bicubic") a.figure.canvas.draw() S=np.zeros((1,5)) Qp=np.zeros((1,2)) D1=np.zeros((1,5)) var_uvw=IntVar() show_ind=IntVar() show_angle=IntVar() show_axe=IntVar() show_num=IntVar() d_label_var=StringVar() d_label_var.set(0) dmip=0 return var_uvw,show_ind,show_angle,show_axe,show_num 
############################################################## # fonction pour quitter ####################################################### def _quit(): root.quit() # stops mainloop root.destroy() # this is necessary on Windows to prevent # Fatal Python Error: PyEval_RestoreThread: NULL tstate ############################################################# root = Tk() root.wm_title("Misorientation") root.geometry('1220x798+10+40') root.configure(bg = '#BDBDBD') #root.resizable(0,0) #s=ttk.Style() #s.theme_use('clam') style = ttk.Style() theme = style.theme_use() default = style.lookup(theme, 'background') ################################################ # Creation d'une zone pour tracer des graphiques ################################################ f = Figure(facecolor='white',figsize=[2,2],dpi=100) canvas = FigureCanvasTkAgg(f, master=root) canvas.get_tk_widget().place(x=0,y=0,height=800,width=800) canvas._tkcanvas.bind('<Button-3>', click_a_pole) canvas.show() toolbar = NavigationToolbar2TkAgg( canvas, root ) toolbar.zoom('off') toolbar.update() ################################################### init() #import _imaging #print _imaging.__file__ ############################################## # Boutons ############################################## phi1A_entry = Entry (master=root) phi1A_entry.place(relx=0.72,rely=0.5,relheight=0.03,relwidth=0.07) phi1A_entry.configure(background="white") phi1A_entry.configure(foreground="black") phi1A_entry.configure(highlightbackground="#e0e0dfdfe3e3") phi1A_entry.configure(highlightcolor="#000000") phi1A_entry.configure(insertbackground="#000000") phi1A_entry.configure(selectbackground="#c4c4c4") phi1A_entry.configure(selectforeground="black") phiA_entry = Entry (master=root) phiA_entry.place(relx=0.72,rely=0.55,relheight=0.03,relwidth=0.07) phiA_entry.configure(background="white") phiA_entry.configure(foreground="black") phiA_entry.configure(highlightcolor="black") phiA_entry.configure(insertbackground="black") phiA_entry.configure(selectbackground="#c4c4c4") phiA_entry.configure(selectforeground="black") label_euler = Label (master=root) label_euler.place(relx=0.77,rely=0.42,height=46,width=163) label_euler.configure(activebackground="#cccccc") label_euler.configure(activeforeground="black") label_euler.configure(cursor="fleur") label_euler.configure(foreground="black") label_euler.configure(highlightcolor="black") label_euler.configure(text='''Euler angles \n A blue , B green''') phi2A_entry = Entry (master=root) phi2A_entry.place(relx=0.72,rely=0.6,relheight=0.03,relwidth=0.07) phi2A_entry.configure(background="white") phi2A_entry.configure(foreground="black") phi2A_entry.configure(highlightcolor="black") phi2A_entry.configure(insertbackground="black") phi2A_entry.configure(selectbackground="#c4c4c4") phi2A_entry.configure(selectforeground="black") button_trace = Button (master=root) button_trace.place(relx=0.7,rely=0.66,height=21,width=49) button_trace.configure(activebackground="#f9f9f9") button_trace.configure(activeforeground="black") button_trace.configure(background="#ff0000") button_trace.configure(command=princ) button_trace.configure(foreground="black") button_trace.configure(highlightcolor="black") button_trace.configure(pady="0") button_trace.configure(text='''PLOT''') Phi1A_label = Label (master=root) Phi1A_label.place(relx=0.67,rely=0.5,height=19,width=50) Phi1A_label.configure(activebackground="#cccccc") Phi1A_label.configure(activeforeground="black") Phi1A_label.configure(foreground="black") 
Phi1A_label.configure(highlightcolor="black") Phi1A_label.configure(text='''Phi1A''') PhiA_label = Label (master=root) PhiA_label.place(relx=0.67,rely=0.55,height=19,width=50) PhiA_label.configure(activebackground="#cccccc") PhiA_label.configure(activeforeground="black") PhiA_label.configure(foreground="black") PhiA_label.configure(highlightcolor="black") PhiA_label.configure(text='''PhiA''') Phi2A_label = Label (master=root) Phi2A_label.place(relx=0.67,rely=0.6,height=19,width=50) Phi2A_label.configure(activebackground="#cccccc") Phi2A_label.configure(activeforeground="black") Phi2A_label.configure(foreground="black") Phi2A_label.configure(highlightcolor="black") Phi2A_label.configure(text='''Phi2A''') phi1B_entry = Entry (master=root) phi1B_entry.place(relx=0.86,rely=0.5,relheight=0.03,relwidth=0.07) phi1B_entry.configure(background="white") phi1B_entry.configure(foreground="black") phi1B_entry.configure(highlightbackground="#e0e0dfdfe3e3") phi1B_entry.configure(highlightcolor="#000000") phi1B_entry.configure(insertbackground="#000000") phi1B_entry.configure(selectbackground="#c4c4c4") phi1B_entry.configure(selectforeground="black") Phi1B = Label (master=root) Phi1B.place(relx=0.81,rely=0.5,height=19,width=50) Phi1B.configure(activebackground="#cccccc") Phi1B.configure(activeforeground="black") Phi1B.configure(foreground="black") Phi1B.configure(highlightcolor="black") Phi1B.configure(text='''Phi1B''') PhiB_label1 = Label (master=root) PhiB_label1.place(relx=0.81,rely=0.55,height=19,width=50) PhiB_label1.configure(activebackground="#cccccc") PhiB_label1.configure(activeforeground="black") PhiB_label1.configure(foreground="black") PhiB_label1.configure(highlightcolor="black") PhiB_label1.configure(text='''PhiB''') Phi2B_label2 = Label (master=root) Phi2B_label2.place(relx=0.81,rely=0.6,height=19,width=50) Phi2B_label2.configure(activebackground="#cccccc") Phi2B_label2.configure(activeforeground="black") Phi2B_label2.configure(foreground="black") Phi2B_label2.configure(highlightcolor="black") Phi2B_label2.configure(text='''Phi2B''') phiB_entry = Entry (master=root) phiB_entry.place(relx=0.86,rely=0.55,relheight=0.03,relwidth=0.07) phiB_entry.configure(background="white") phiB_entry.configure(foreground="black") phiB_entry.configure(highlightbackground="#e0e0dfdfe3e3") phiB_entry.configure(highlightcolor="#000000") phiB_entry.configure(insertbackground="#000000") phiB_entry.configure(selectbackground="#c4c4c4") phiB_entry.configure(selectforeground="black") phi2B_entry = Entry (master=root) phi2B_entry.place(relx=0.86,rely=0.6,relheight=0.03,relwidth=0.07) phi2B_entry.configure(background="white") phi2B_entry.configure(foreground="black") phi2B_entry.configure(highlightbackground="#e0e0dfdfe3e3") phi2B_entry.configure(highlightcolor="#000000") phi2B_entry.configure(insertbackground="#000000") phi2B_entry.configure(selectbackground="#c4c4c4") phi2B_entry.configure(selectforeground="black") button_desorientation = Button (master=root) button_desorientation.place(relx=0.81,rely=0.66,height=21,width=124) button_desorientation.configure(activebackground="#f9f9f9") button_desorientation.configure(activeforeground="black") button_desorientation.configure(background="#00ff00") button_desorientation.configure(command=desorientation) button_desorientation.configure(foreground="black") button_desorientation.configure(highlightcolor="black") button_desorientation.configure(pady="0") button_desorientation.configure(text='''MISORIENTATION''') Cristal_label = Label (master=root) 
Cristal_label.place(relx=0.66,rely=0.03,height=19,width=142) Cristal_label.configure(text='''Crystal Parameters''') a_cristal_label = Label (master=root) a_cristal_label.place(relx=0.68,rely=0.06,height=19,width=12) a_cristal_label.configure(text='''a''') b_cristal_label = Label (master=root) b_cristal_label.place(relx=0.68,rely=0.1,height=19,width=12) b_cristal_label.configure(activebackground="#f9f9f9") b_cristal_label.configure(activeforeground="black") b_cristal_label.configure(foreground="black") b_cristal_label.configure(highlightcolor="black") b_cristal_label.configure(text='''b''') c_cristal_label = Label (master=root) c_cristal_label.place(relx=0.68,rely=0.14,height=19,width=11) c_cristal_label.configure(activebackground="#f9f9f9") c_cristal_label.configure(activeforeground="black") c_cristal_label.configure(foreground="black") c_cristal_label.configure(highlightcolor="black") c_cristal_label.configure(text='''c''') alp_cristal_label = Label (master=root) alp_cristal_label.place(relx=0.67,rely=0.18,height=19,width=42) alp_cristal_label.configure(activebackground="#f9f9f9") alp_cristal_label.configure(activeforeground="black") alp_cristal_label.configure(foreground="black") alp_cristal_label.configure(highlightcolor="black") alp_cristal_label.configure(text='''alpha''') bet_cristal_label = Label (master=root) bet_cristal_label.place(relx=0.67,rely=0.22,height=19,width=42) bet_cristal_label.configure(activebackground="#f9f9f9") bet_cristal_label.configure(activeforeground="black") bet_cristal_label.configure(foreground="black") bet_cristal_label.configure(highlightcolor="black") bet_cristal_label.configure(text='''beta''') gam_cristal_label = Label (master=root) gam_cristal_label.place(relx=0.66,rely=0.26,height=19,width=52) gam_cristal_label.configure(activebackground="#f9f9f9") gam_cristal_label.configure(activeforeground="black") gam_cristal_label.configure(foreground="black") gam_cristal_label.configure(highlightcolor="black") gam_cristal_label.configure(text='''gamma''') a_entry = Entry (master=root) a_entry.place(relx=0.7,rely=0.06,relheight=0.03,relwidth=0.06) a_entry.configure(background="white") a_entry.configure(insertbackground="black") b_entry = Entry (master=root) b_entry.place(relx=0.7,rely=0.1,relheight=0.03,relwidth=0.06) b_entry.configure(background="white") b_entry.configure(foreground="black") b_entry.configure(highlightcolor="black") b_entry.configure(insertbackground="black") b_entry.configure(selectbackground="#c4c4c4") b_entry.configure(selectforeground="black") c_entry = Entry (master=root) c_entry.place(relx=0.7,rely=0.14,relheight=0.03,relwidth=0.06) c_entry.configure(background="white") c_entry.configure(foreground="black") c_entry.configure(highlightcolor="black") c_entry.configure(insertbackground="black") c_entry.configure(selectbackground="#c4c4c4") c_entry.configure(selectforeground="black") alp_entry = Entry (master=root) alp_entry.place(relx=0.71,rely=0.18,relheight=0.03,relwidth=0.06) alp_entry.configure(background="white") alp_entry.configure(foreground="black") alp_entry.configure(highlightcolor="black") alp_entry.configure(insertbackground="black") alp_entry.configure(selectbackground="#c4c4c4") alp_entry.configure(selectforeground="black") bet_entry = Entry (master=root) bet_entry.place(relx=0.71,rely=0.22,relheight=0.03,relwidth=0.06) bet_entry.configure(background="white") bet_entry.configure(foreground="black") bet_entry.configure(highlightcolor="black") bet_entry.configure(insertbackground="black") 
bet_entry.configure(selectbackground="#c4c4c4") bet_entry.configure(selectforeground="black") gam_entry = Entry (master=root) gam_entry.place(relx=0.71,rely=0.26,relheight=0.03,relwidth=0.06) gam_entry.configure(background="white") gam_entry.configure(foreground="black") gam_entry.configure(highlightcolor="black") gam_entry.configure(insertbackground="black") gam_entry.configure(selectbackground="#c4c4c4") gam_entry.configure(selectforeground="black") uvw_button = Checkbutton (master=root) uvw_button.place(relx=0.75,rely=0.66,relheight=0.03,relwidth=0.04) uvw_button.configure(text='''uvw''') uvw_button.configure(variable=var_uvw) e_label = Label (master=root) e_label.place(relx=0.66,rely=0.31,height=19,width=86) e_label.configure(text='''Max indices''') e_entry = Entry (master=root) e_entry.place(relx=0.74,rely=0.31,relheight=0.03,relwidth=0.05) e_entry.configure(background="white") e_entry.configure(insertbackground="black") e2_label = Label (master=root) e2_label.place(relx=0.68,rely=0.36,height=19,width=12) e2_label.configure(text='''d''') dm_button = Button (master=root) dm_button.place(relx=0.7,rely=0.36,height=21,width=13) dm_button.configure(activebackground="#f9f9f9") dm_button.configure(activeforeground="black") dm_button.configure(command=dm) dm_button.configure(foreground="black") dm_button.configure(highlightcolor="black") dm_button.configure(pady="0") dm_button.configure(text='''-''') d_entry = Entry (master=root) d_entry.place(relx=0.72,rely=0.36,relheight=0.02,relwidth=0.04) d_entry.configure(background="white") d_entry.configure(foreground="black") d_entry.configure(highlightcolor="black") d_entry.configure(insertbackground="black") d_entry.configure(selectbackground="#c4c4c4") d_entry.configure(selectforeground="black") dp_button = Button (master=root) dp_button.place(relx=0.76,rely=0.36,height=21,width=17) dp_button.configure(activebackground="#f9f9f9") dp_button.configure(activeforeground="black") dp_button.configure(command=dp) dp_button.configure(foreground="black") dp_button.configure(highlightcolor="black") dp_button.configure(pady="0") dp_button.configure(text='''+''') d_label = Label (master=root) d_label.place(relx=0.73,rely=0.39,height=19,width=16) d_label.configure(textvariable=d_label_var) label_addpoleA = Label (master=root) label_addpoleA.place(relx=0.81,rely=0.03,height=19,width=90) label_addpoleA.configure(activebackground="#cccccc") label_addpoleA.configure(activeforeground="black") label_addpoleA.configure(foreground="black") label_addpoleA.configure(highlightcolor="black") label_addpoleA.configure(text='''Add pole A''') pole1A_entry = Entry (master=root) pole1A_entry.place(relx=0.81,rely=0.06,relheight=0.02 ,relwidth=0.04) pole1A_entry.configure(background="white") pole1A_entry.configure(foreground="black") pole1A_entry.configure(highlightcolor="black") pole1A_entry.configure(insertbackground="black") pole1A_entry.configure(selectbackground="#c4c4c4") pole1A_entry.configure(selectforeground="black") pole2A_entry = Entry (master=root) pole2A_entry.place(relx=0.87,rely=0.06,relheight=0.02 ,relwidth=0.04) pole2A_entry.configure(background="white") pole2A_entry.configure(foreground="black") pole2A_entry.configure(highlightcolor="black") pole2A_entry.configure(insertbackground="black") pole2A_entry.configure(selectbackground="#c4c4c4") pole2A_entry.configure(selectforeground="black") pole3A_entry = Entry (master=root) pole3A_entry.place(relx=0.93,rely=0.06,relheight=0.02 ,relwidth=0.04) pole3A_entry.configure(background="white") 
pole3A_entry.configure(foreground="black") pole3A_entry.configure(highlightcolor="black") pole3A_entry.configure(insertbackground="black") pole3A_entry.configure(selectbackground="#c4c4c4") pole3A_entry.configure(selectforeground="black") addpoleA_button = Button (master=root) addpoleA_button.place(relx=0.81,rely=0.11,height=31,width=57) addpoleA_button.configure(activebackground="#f9f9f9") addpoleA_button.configure(activeforeground="black") addpoleA_button.configure(command=addpoleA) addpoleA_button.configure(foreground="black") addpoleA_button.configure(highlightcolor="black") addpoleA_button.configure(pady="0") addpoleA_button.configure(text='''Add''') symA_button = Button (master=root) symA_button.place(relx=0.87,rely=0.11,height=31,width=71) symA_button.configure(command=addpoleA_sym) symA_button.configure(pady="0") symA_button.configure(text='''Symetry''') trace_planA_button = Button (master=root) trace_planA_button.place(relx=0.93,rely=0.11,height=31,width=81) trace_planA_button.configure(command=trace_planA) trace_planA_button.configure(pady="0") trace_planA_button.configure(text='''Draw plane''') label_addpoleB = Label (master=root) label_addpoleB.place(relx=0.81,rely=0.2,height=19,width=90) label_addpoleB.configure(activebackground="#cccccc") label_addpoleB.configure(activeforeground="black") label_addpoleB.configure(foreground="black") label_addpoleB.configure(highlightcolor="black") label_addpoleB.configure(text='''Add pole B''') pole1B_entry = Entry (master=root) pole1B_entry.place(relx=0.81,rely=0.24,relheight=0.02 ,relwidth=0.04) pole1B_entry.configure(background="white") pole1B_entry.configure(foreground="black") pole1B_entry.configure(highlightcolor="black") pole1B_entry.configure(insertbackground="black") pole1B_entry.configure(selectbackground="#c4c4c4") pole1B_entry.configure(selectforeground="black") pole2B_entry = Entry (master=root) pole2B_entry.place(relx=0.87,rely=0.24,relheight=0.02 ,relwidth=0.04) pole2B_entry.configure(background="white") pole2B_entry.configure(foreground="black") pole2B_entry.configure(highlightcolor="black") pole2B_entry.configure(insertbackground="black") pole2B_entry.configure(selectbackground="#c4c4c4") pole2B_entry.configure(selectforeground="black") pole3B_entry = Entry (master=root) pole3B_entry.place(relx=0.93,rely=0.24,relheight=0.02 ,relwidth=0.04) pole3B_entry.configure(background="white") pole3B_entry.configure(foreground="black") pole3B_entry.configure(highlightcolor="black") pole3B_entry.configure(insertbackground="black") pole3B_entry.configure(selectbackground="#c4c4c4") pole3B_entry.configure(selectforeground="black") addpoleB_button = Button (master=root) addpoleB_button.place(relx=0.81,rely=0.28,height=31,width=55) addpoleB_button.configure(activebackground="#f9f9f9") addpoleB_button.configure(activeforeground="black") addpoleB_button.configure(command=addpoleB) addpoleB_button.configure(foreground="black") addpoleB_button.configure(highlightcolor="black") addpoleB_button.configure(pady="0") addpoleB_button.configure(text='''Add''') symB_button = Button (master=root) symB_button.place(relx=0.87,rely=0.28,height=31,width=71) symB_button.configure(command=addpoleB_sym) symB_button.configure(pady="0") symB_button.configure(text='''Symetry''') trace_planB_button = Button (master=root) trace_planB_button.place(relx=0.93,rely=0.28,height=31,width=81) trace_planB_button.configure(command=trace_planB) trace_planB_button.configure(pady="0") trace_planB_button.configure(text='''Draw plane''') show_ind_button = Checkbutton (master=root) 
show_ind_button.place(relx=0.81,rely=0.7,relheight=0.03 ,relwidth=0.11) show_ind_button.configure(text='''Show indices''') show_ind_button.configure(variable=show_ind) show_angle_button = Checkbutton (master=root) show_angle_button.place(relx=0.81,rely=0.74,relheight=0.03 ,relwidth=0.11) show_angle_button.configure(text='''Show angle''') show_angle_button.configure(variable=show_angle) show_axe_button = Checkbutton (master=root) show_axe_button.place(relx=0.81,rely=0.78,relheight=0.03 ,relwidth=0.11) show_axe_button.configure(text='''Show axes''') show_axe_button.configure(variable=show_axe) show_num_button = Checkbutton (master=root) show_num_button.place(relx=0.81,rely=0.82,relheight=0.03 ,relwidth=0.11) show_num_button.configure(text='''Show numbers''') show_num_button.configure(variable=show_num) menu = Menu(master=root) filemenu = Menu(menu, tearoff=0) menu.add_cascade(label="Save", menu=filemenu) root.config(menu=menu) filemenu.add_command(label="Save data", command=file_save) filemenu.add_command(label="Save figure", command=image_save) ###################################################################################################### ######## importer des structures cristallines depuis un fichier Nom,a,b,c,alpha,beta,gamma,space group ###################################################################################################### def structure(i0): global x0 a_entry.delete(0,END) a_entry.insert(1,eval(x0[i0][1])) b_entry.delete(0,END) b_entry.insert(1,eval(x0[i0][2])) c_entry.delete(0,END) c_entry.insert(1,eval(x0[i0][3])) alp_entry.delete(0,END) alp_entry.insert(1,eval(x0[i0][4])) bet_entry.delete(0,END) bet_entry.insert(1,eval(x0[i0][5])) gam_entry.delete(0,END) gam_entry.insert(1,eval(x0[i0][6])) def createstructure(i): return lambda:structure(i) cristalmenu=Menu(menu,tearoff=0) menu.add_cascade(label="Structures", menu=cristalmenu) file_struct=open(os.path.join(os.path.dirname(__file__), 'structure.txt') ,"r") x0=[] i=0 for line in file_struct: x0.append(map(str, line.split())) cristalmenu.add_command(label=x0[i][0], command=createstructure(i)) i=i+1 file_struct.close() ####################################################################################################### phi1A_entry.insert(0,0) phiA_entry.insert(0,0) phi2A_entry.insert(0,0) phi1B_entry.insert(0,0) phiB_entry.insert(0,0) phi2B_entry.insert(0,0) e_entry.insert(1,1) d_entry.insert(1,1) mainloop()
gpl-2.0
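The rotation(phi1, phi, phi2) helper in misorientation.py above builds a Bunge-style (phi1, Phi, phi2) Euler-angle rotation matrix. A small sanity-check sketch, assuming the function were factored out into an importable module (hypothetical name misorientation_core) so that importing it does not start the Tkinter GUI; Python 2 to match the file:

import numpy as np
# hypothetical module: rotation() split out of misorientation.py
from misorientation_core import rotation

R = rotation(30, 45, 60)   # (phi1, Phi, phi2) in degrees
print np.allclose(np.dot(R, R.T), np.eye(3))   # orthogonal: R R^T = I
print abs(np.linalg.det(R) - 1.0) < 1e-12      # proper rotation: det = +1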
swharden/SWHLab
doc/uses/EPSCs-and-IPSCs/smooth histogram method/05.py
1
1812
""" MOST OF THIS CODE IS NOT USED ITS COPY/PASTED AND LEFT HERE FOR CONVENIENCE """ import os import sys # in case our module isn't installed (running from this folder) if not os.path.abspath('../../../') in sys.path: sys.path.append('../../../') # helps spyder get docs import swhlab import swhlab.common as cm import matplotlib.pyplot as plt import numpy as np import warnings # suppress VisibleDeprecationWarning warning warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning) def analyzeSweep(abf,plotToo=True,color=None,label=None): Y=abf.sweepYsmartbase()[abf.pointsPerSec*.5:] AV,SD=np.average(Y),np.std(Y) dev=5 # number of stdevs from the avg to set the range R1,R2=[(AV-SD)*dev,(AV+SD)*dev] nBins=1000 hist,bins=np.histogram(Y,bins=nBins,range=[R1,R2],density=True) histSmooth=abf.convolve(hist,cm.kernel_gaussian(nBins/5)) if plotToo: plt.plot(bins[1:],hist,'.',color=color,alpha=.2,ms=10) plt.plot(bins[1:],histSmooth,'-',color=color,lw=5,alpha=.5,label=label) return if __name__=="__main__": #abfFile=R"C:\Users\scott\Documents\important\demodata\abfs\16d07022.abf" abfFile=R"X:\Data\2P01\2016\2016-09-01 PIR TGOT\16d07022.abf" abf=swhlab.ABF(abfFile) # prepare figure plt.figure(figsize=(10,10)) plt.grid() plt.title("smart baseline value distribution") plt.xlabel(abf.units2) plt.ylabel("normalized density") # do the analysis abf.kernel=abf.kernel_gaussian(sizeMS=500) abf.setsweep(175) analyzeSweep(abf,color='b',label="baseline") abf.setsweep(200) analyzeSweep(abf,color='g',label="TGOT") abf.setsweep(375) analyzeSweep(abf,color='y',label="washout") # show figure plt.legend() plt.margins(0,.1) plt.show() print("DONE")
mit
chrisburr/scikit-learn
examples/covariance/plot_sparse_cov.py
300
5078
""" ====================================== Sparse inverse covariance estimation ====================================== Using the GraphLasso estimator to learn a covariance and sparse precision from a small number of samples. To estimate a probabilistic model (e.g. a Gaussian model), estimating the precision matrix, that is the inverse covariance matrix, is as important as estimating the covariance matrix. Indeed a Gaussian model is parametrized by the precision matrix. To be in favorable recovery conditions, we sample the data from a model with a sparse inverse covariance matrix. In addition, we ensure that the data is not too much correlated (limiting the largest coefficient of the precision matrix) and that there a no small coefficients in the precision matrix that cannot be recovered. In addition, with a small number of observations, it is easier to recover a correlation matrix rather than a covariance, thus we scale the time series. Here, the number of samples is slightly larger than the number of dimensions, thus the empirical covariance is still invertible. However, as the observations are strongly correlated, the empirical covariance matrix is ill-conditioned and as a result its inverse --the empirical precision matrix-- is very far from the ground truth. If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number of samples is small, we need to shrink a lot. As a result, the Ledoit-Wolf precision is fairly close to the ground truth precision, that is not far from being diagonal, but the off-diagonal structure is lost. The l1-penalized estimator can recover part of this off-diagonal structure. It learns a sparse precision. It is not able to recover the exact sparsity pattern: it detects too many non-zero coefficients. However, the highest non-zero coefficients of the l1 estimated correspond to the non-zero coefficients in the ground truth. Finally, the coefficients of the l1 precision estimate are biased toward zero: because of the penalty, they are all smaller than the corresponding ground truth value, as can be seen on the figure. Note that, the color range of the precision matrices is tweaked to improve readability of the figure. The full range of values of the empirical precision is not displayed. The alpha parameter of the GraphLasso setting the sparsity of the model is set by internal cross-validation in the GraphLassoCV. As can be seen on figure 2, the grid to compute the cross-validation score is iteratively refined in the neighborhood of the maximum. 
""" print(__doc__) # author: Gael Varoquaux <gael.varoquaux@inria.fr> # License: BSD 3 clause # Copyright: INRIA import numpy as np from scipy import linalg from sklearn.datasets import make_sparse_spd_matrix from sklearn.covariance import GraphLassoCV, ledoit_wolf import matplotlib.pyplot as plt ############################################################################## # Generate the data n_samples = 60 n_features = 20 prng = np.random.RandomState(1) prec = make_sparse_spd_matrix(n_features, alpha=.98, smallest_coef=.4, largest_coef=.7, random_state=prng) cov = linalg.inv(prec) d = np.sqrt(np.diag(cov)) cov /= d cov /= d[:, np.newaxis] prec *= d prec *= d[:, np.newaxis] X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples) X -= X.mean(axis=0) X /= X.std(axis=0) ############################################################################## # Estimate the covariance emp_cov = np.dot(X.T, X) / n_samples model = GraphLassoCV() model.fit(X) cov_ = model.covariance_ prec_ = model.precision_ lw_cov_, _ = ledoit_wolf(X) lw_prec_ = linalg.inv(lw_cov_) ############################################################################## # Plot the results plt.figure(figsize=(10, 6)) plt.subplots_adjust(left=0.02, right=0.98) # plot the covariances covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_), ('GraphLasso', cov_), ('True', cov)] vmax = cov_.max() for i, (name, this_cov) in enumerate(covs): plt.subplot(2, 4, i + 1) plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax, cmap=plt.cm.RdBu_r) plt.xticks(()) plt.yticks(()) plt.title('%s covariance' % name) # plot the precisions precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_), ('GraphLasso', prec_), ('True', prec)] vmax = .9 * prec_.max() for i, (name, this_prec) in enumerate(precs): ax = plt.subplot(2, 4, i + 5) plt.imshow(np.ma.masked_equal(this_prec, 0), interpolation='nearest', vmin=-vmax, vmax=vmax, cmap=plt.cm.RdBu_r) plt.xticks(()) plt.yticks(()) plt.title('%s precision' % name) ax.set_axis_bgcolor('.7') # plot the model selection metric plt.figure(figsize=(4, 3)) plt.axes([.2, .15, .75, .7]) plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-') plt.axvline(model.alpha_, color='.5') plt.title('Model selection') plt.ylabel('Cross-validation score') plt.xlabel('alpha') plt.show()
bsd-3-clause
NelisVerhoef/scikit-learn
examples/manifold/plot_mds.py
261
2616
""" ========================= Multi-dimensional scaling ========================= An illustration of the metric and non-metric MDS on generated noisy data. The reconstructed points using the metric MDS and non metric MDS are slightly shifted to avoid overlapping. """ # Author: Nelle Varoquaux <nelle.varoquaux@gmail.com> # Licence: BSD print(__doc__) import numpy as np from matplotlib import pyplot as plt from matplotlib.collections import LineCollection from sklearn import manifold from sklearn.metrics import euclidean_distances from sklearn.decomposition import PCA n_samples = 20 seed = np.random.RandomState(seed=3) X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float) X_true = X_true.reshape((n_samples, 2)) # Center the data X_true -= X_true.mean() similarities = euclidean_distances(X_true) # Add noise to the similarities noise = np.random.rand(n_samples, n_samples) noise = noise + noise.T noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0 similarities += noise mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed, dissimilarity="precomputed", n_jobs=1) pos = mds.fit(similarities).embedding_ nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12, dissimilarity="precomputed", random_state=seed, n_jobs=1, n_init=1) npos = nmds.fit_transform(similarities, init=pos) # Rescale the data pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum()) npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum()) # Rotate the data clf = PCA(n_components=2) X_true = clf.fit_transform(X_true) pos = clf.fit_transform(pos) npos = clf.fit_transform(npos) fig = plt.figure(1) ax = plt.axes([0., 0., 1., 1.]) plt.scatter(X_true[:, 0], X_true[:, 1], c='r', s=20) plt.scatter(pos[:, 0], pos[:, 1], s=20, c='g') plt.scatter(npos[:, 0], npos[:, 1], s=20, c='b') plt.legend(('True position', 'MDS', 'NMDS'), loc='best') similarities = similarities.max() / similarities * 100 similarities[np.isinf(similarities)] = 0 # Plot the edges start_idx, end_idx = np.where(pos) #a sequence of (*line0*, *line1*, *line2*), where:: # linen = (x0, y0), (x1, y1), ... (xm, ym) segments = [[X_true[i, :], X_true[j, :]] for i in range(len(pos)) for j in range(len(pos))] values = np.abs(similarities) lc = LineCollection(segments, zorder=0, cmap=plt.cm.hot_r, norm=plt.Normalize(0, values.max())) lc.set_array(similarities.flatten()) lc.set_linewidths(0.5 * np.ones(len(segments))) ax.add_collection(lc) plt.show()
bsd-3-clause
pauliacomi/pyGAPS
tests/characterisation/test_t_plot.py
1
2958
""" This test module has tests relating to t-plots All functions in /calculations/tplot.py are tested here. The purposes are: - testing the user-facing API function (tplot) - testing individual low level functions against known results. Functions are tested against pre-calculated values on real isotherms. All pre-calculated data for characterisation can be found in the /.conftest file together with the other isotherm parameters. """ import pytest from matplotlib.testing.decorators import cleanup from numpy import isclose import pygaps import pygaps.utilities.exceptions as pgEx from .conftest import DATA from .conftest import DATA_N77_PATH @pytest.mark.characterisation class TestTPlot(): """Tests t-plot calculations.""" def test_alphas_checks(self, basic_pointisotherm): """Checks for built-in safeguards.""" # Will raise a "no suitable model exception" with pytest.raises(pgEx.ParameterError): pygaps.t_plot(basic_pointisotherm, thickness_model='random') @pytest.mark.parametrize('sample', [sample for sample in DATA]) def test_tplot(self, sample): """Test calculation with several model isotherms.""" sample = DATA[sample] # exclude datasets where it is not applicable if sample.get('t_area', None): filepath = DATA_N77_PATH / sample['file'] isotherm = pygaps.isotherm_from_json(filepath) res = pygaps.t_plot(isotherm) results = res.get('results') err_relative = 0.1 # 10 percent err_absolute_area = 0.1 # units err_absolute_volume = 0.01 # units assert isclose( results[-1].get('adsorbed_volume'), sample['t_pore_volume'], err_relative, err_absolute_area ) assert isclose( results[0].get('area'), sample['t_area'], err_relative, err_absolute_volume ) def test_tplot_choice(self): """Test choice of points.""" sample = DATA['MCM-41'] filepath = DATA_N77_PATH / sample['file'] isotherm = pygaps.isotherm_from_json(filepath) res = pygaps.t_plot(isotherm, limits=[0.7, 1.0]) results = res.get('results') err_relative = 0.1 # 10 percent err_absolute_area = 0.1 # units err_absolute_volume = 0.01 # units assert isclose( results[-1].get('adsorbed_volume'), sample['t_pore_volume'], err_relative, err_absolute_area ) assert isclose( results[-1].get('area'), sample['s_t_area'], err_relative, err_absolute_volume ) @cleanup def test_tplot_output(self): """Test verbosity.""" sample = DATA['MCM-41'] filepath = DATA_N77_PATH / sample['file'] isotherm = pygaps.isotherm_from_json(filepath) pygaps.t_plot(isotherm, 'Halsey', verbose=True)
mit
christianurich/VIBe2UrbanSim
3rdparty/opus/src/vibe_min/indicators/make_indicators.py
4
4825
# Opus/UrbanSim urban simulation software. # Copyright (C) 2005-2009 University of Washington # See opus_core/LICENSE # script to produce a number of PSRC indicators -- # this illustrates using traits-based configurations programmatically from opus_core.configurations.dataset_pool_configuration import DatasetPoolConfiguration from opus_core.indicator_framework.core.source_data import SourceData from opus_core.indicator_framework.image_types.matplotlib_map import Map from opus_core.indicator_framework.image_types.matplotlib_chart import Chart from opus_core.indicator_framework.image_types.table import Table from opus_core.indicator_framework.image_types.geotiff_map import GeotiffMap from opus_core.indicator_framework.image_types.dataset_table import DatasetTable from opus_core.indicator_framework.image_types.matplotlib_lorenzcurve import LorenzCurve #some cache_directories and run descriptions #cache_directory = r'Y:/urbansim_cache/run_1090.2006_11_14_12_12' #run_description = '(run 1090 - double highway capacity 11/28/2006)' #cache_directory = r'Y:/urbansim_cache/run_1091.2006_11_14_12_12' #run_description = '(run 1091 - baseline 11/28/2006)' #cache_directory = r'D:\urbansim_cache\run_1454.2006_12_12_16_28' #run_description = '(run 1454 - travel data from quick travel model)' cache_directory = r'D:\urbansim_cache\run_1090.2006_11_14_12_12' run_description = '(run 1453 - travel data from full travel model)' #cache_directory = r'Y:\urbansim_cache\run_1431.2006_12_08_09_45' #run_description = '(run 1431 - baseyear travel data from travel model run)' #cache_directory = r'D:\urbansim_cache\run_1154.2006_11_17_20_06' #run_description = '(run 1154 - no ugb + double highway capacity 11/28/2006)' #cache_directory = r'D:\urbansim_cache\run_1155.2006_11_17_20_07' #run_description = '(run 1155 - no ugb 11/28/2006)' source_data = SourceData( cache_directory = cache_directory, run_description = run_description, years = [1980, 1981, 1982], dataset_pool_configuration = DatasetPoolConfiguration( package_order=['eugene','urbansim','opus_core'], ), ) single_year_requests = [ Table( attribute = 'urbansim.zone.population', dataset_name = 'zone', source_data = source_data, ), Table( attribute = 'urbansim.zone.number_of_jobs', dataset_name = 'zone', source_data = source_data, ), Map( attribute = 'urbansim.zone.population', scale = [1, 60000], dataset_name = 'zone', source_data = source_data, ), Map( attribute = 'urbansim.zone.number_of_jobs', scale = [1, 60000], dataset_name = 'zone', source_data = source_data, ), Map( scale = [-8000, 40000], attribute = 'urbansim_population_change', source_data = source_data, expression = {'operation': 'change', 'operands': ['urbansim.zone.population']}, dataset_name = 'zone', ), Map( scale = [-2000, 40000], attribute = 'urbansim_employment_change', source_data = source_data, expression = {'operation': 'change', 'operands': ['urbansim.zone.number_of_jobs']}, dataset_name = 'zone', ), ] source_data = SourceData( cache_directory = cache_directory, run_description = run_description, years = [1980, 1981, 1982], dataset_pool_configuration = DatasetPoolConfiguration( package_order=['eugene','urbansim','opus_core'], ), ) multi_year_requests = [ Table( attribute = 'alldata.aggregate_all(urbansim.gridcell.residential_units, function=sum)', dataset_name = 'alldata', source_data = source_data, name = 'residential_units' ), Chart( attribute = 'alldata.aggregate_all(urbansim.gridcell.residential_units, function=sum)', dataset_name = 'alldata', source_data = source_data, name = 
'residential_units' ), Table( attribute = 'alldata.aggregate_all(urbansim.gridcell.number_of_jobs, function=sum)', dataset_name = 'alldata', source_data = source_data, name = 'number_of_jobs' ), ] if __name__ == '__main__': from opus_core.indicator_framework.core.indicator_factory import IndicatorFactory IndicatorFactory().create_indicators( indicators = single_year_requests, display_error_box = False, show_results = True) IndicatorFactory().create_indicators( indicators = multi_year_requests, display_error_box = False, show_results = True)
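The {'operation': 'change', 'operands': [...]} expressions above request the difference of a variable across simulation years. Conceptually (a hedged numpy sketch of the idea, not the opus_core implementation):

import numpy as np

# population per zone for the requested years (fake data)
years = [1980, 1981, 1982]
population = {1980: np.array([100, 250, 40]),
              1981: np.array([110, 240, 55]),
              1982: np.array([130, 260, 70])}

# {'operation': 'change'} ~ value in year t minus value in the first year
change = {t: population[t] - population[years[0]] for t in years}
print(change[1982])   # [30 10 30]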
gpl-2.0
boland1992/seissuite_iran
build/lib.linux-x86_64-2.7/seissuite/ant/psdepthmodel.py
6
9233
""" Module taking care of the forward modelling: theoretical dispersion curve given a 1D crustal model of velocities and densities. Uses the binaries of the Computer Programs in Seismology, with must be installed in *COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR* """ import numpy as np import matplotlib.pyplot as plt import os import shutil import itertools as it from easyprocess import EasyProcess import tempfile import pickle # getting the dir of the binaries of the Computer Programs in Seismology # import CONFIG class initalised in ./configs/tmp_config.pickle config_pickle = 'configs/tmp_config.pickle' f = open(name=config_pickle, mode='rb') CONFIG = pickle.load(f) f.close() # import variables from initialised CONFIG class. COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR=CONFIG.COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR # default header of the model file: # isotropic, 1D, flat Earth with layers of constant velocity MODEL_HEADER = """MODEL.01 TEST ISOTROPIC KGS FLAT EARTH 1-D CONSTANT VELOCITY LINE08 LINE09 LINE10 LINE11 H VP VS RHO QP QS ETAP ETAS FREFP FREFS""" class VsModel: """ Class holding a layered model of Vs function of depth, with Vp/Vs and rho/Vs ratio fixed. """ def __init__(self, vs, dz, ratio_vp_vs, ratio_rho_vs, name='', store_vg_at_periods=None): """ Initializes model with layers' Vs (vs), layers' thickness (dz), and layers' ratio Vp/Vs and rho/Vs (ratio_vp_vs, ratio_rho_vs). """ # checking shapes nlayers = np.size(vs) if np.size(dz) != nlayers - 1: raise Exception("Size of dz should be nb of layers minus 1") if not np.size(ratio_vp_vs) in [1, nlayers]: raise Exception("Size of ratio_vp_vs should be nb of layers or 1") if not np.size(ratio_rho_vs) in [1, nlayers]: raise Exception("Size of ratio_rho_vs should be nb of layers or 1") self.name = name self.vs = np.array(vs) self.dz = np.array(dz) self.ratio_vp_vs = np.array(ratio_vp_vs) self.ratio_rho_vs = np.array(ratio_rho_vs) # storing vg model at selected periods if required self.stored_vgperiods = store_vg_at_periods if not store_vg_at_periods is None: self.stored_vg = self.vg_model(store_vg_at_periods) else: self.stored_vg = None def misfit_to_vg(self, periods, vg, sigmavg, squared=True, use_storedvg=True, storevg=False): """ Misfit of modelled vg to observed vg [vg_model - vg]**2 = Sum ------------------ over periods 2 x sigmavg**2 """ # using stored vg model if required and available, else re-calculating it if use_storedvg and np.all(periods == self.stored_vgperiods): vg_model = self.stored_vg else: vg_model = self.vg_model(periods, store=storevg) misfit = np.sum(((vg_model - vg) / sigmavg)**2) / 2.0 if squared: misfit = np.sqrt(misfit) return misfit def vg_model(self, periods, store=False): """ Modelled group velocities, vg, function of period """ vs = self.vs vp = self.ratio_vp_vs * self.vs rho = self.ratio_rho_vs * self.vs dz = np.r_[self.dz, 0] # we append a fake thickness vg = Rayleigh_group_velocities(periods, dz=dz, vp=vp, vs=vs, rho=rho) if store: # storing group velocities if required self.stored_vgperiods = periods self.stored_vg = vg return vg def get_vs_at(self, z): """ Returns Vs ad depth(s) *z* """ indices = np.searchsorted(np.r_[0, self.dz.cumsum()], z, side='right') - 1 if np.any(indices) < 0: raise Exception("Depth out of range") return self.vs[indices] def plot(self, periods, obsvgarrays=None, fig=None, color='r'): """ Plots modelled and observed group velocity function of period (top) and the model itself, i.e. 
Vs vs depth (bottom) """ if not fig: fig = plt.figure(figsize=(6.5, 10), tight_layout=True) axlist = [fig.add_subplot(211), fig.add_subplot(212)] legend = True else: axlist = fig.get_axes() legend = False # no need to add legend to existing fig # 1st subplot: group velocity vs period ax = axlist[0] self.plot_vg(periods, obsvgarrays=obsvgarrays, ax=ax, legend=legend, color=color) ax.set_title(self.name) # 2nd subplot: Vs vs depth ax = axlist[1] self.plot_model(ax=ax, color=color) fig.canvas.draw() fig.show() return fig def plot_vg(self, periods, obsvgarrays=None, ax=None, legend=True, color='r'): """ Plots modelled and observed group velocity function of period """ # creating figure if not given as input fig = None if not ax: fig = plt.figure() ax = fig.add_subplot(111) vg_model = self.vg_model(periods) ax.plot(periods, vg_model, lw=1.5, color=color, label=self.name) if obsvgarrays: for i, vgarray in enumerate(obsvgarrays): label = 'Observed dispersion curves' if not i else None ax.plot(periods, vgarray, lw=0.5, color='k', label=label) ax.set_xlabel('Period (sec)') ax.set_ylabel('Group velocity (km/s)') if legend: ax.legend(loc='best', fontsize=11, framealpha=0.8) ax.grid(True) if fig: fig.show() def plot_model(self, ax=None, color='r', format_axes=True): """ Plots the model, i.e. Vs vs depth """ # creating figure if not given as input fig = None if not ax: fig = plt.figure() ax = fig.add_subplot(111) x = list(it.chain.from_iterable([[v, v] for v in self.vs])) y = [0.0] + list(it.chain.from_iterable([[z, z] for z in np.cumsum(self.dz)])) + \ [self.dz.sum() + 15] ax.plot(x, y, lw=1.5, color=color) if format_axes: ax.set_ylim(sorted(ax.get_ylim(), reverse=True)) ax.set_xlabel('Vs (km/s)') ax.set_ylabel('Depth (km)') ax.grid(True) if fig: fig.show() def Rayleigh_group_velocities(periods, dz, vp, vs, rho, verbose=False): """ Returns the array of Rayleigh wave group velocities at selected periods, from the 1-D layered Earth model contained in *dz* (thicknesses), *vp* (P wave velocities), *vs* (S wave velocities) and *rho* (densities). The Computer Programs in Seismology, located in dir *COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR*, are used for the computation. 
""" if not COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR: raise Exception("Please provide the dir of the Computer Programs in Seismology") # making and moving to temporary dir current_dir = os.getcwd() tmp_dir = tempfile.mkdtemp() os.chdir(tmp_dir) # preparing input files if verbose: print 'Preparing model and periods files' create_model_file('model', dz, vp, vs, rho) f = open('periods', 'w') f.write('\n'.join([str(p) for p in periods])) f.close() # preparing model if verbose: print "Calling sprep96" cmd = os.path.join(COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR, 'sprep96') # Rayleigh wave, fundamental mode p = EasyProcess('"{}" -M model -PARR periods -NMOD 1 -R'.format(cmd)).call() if verbose: print p.stdout # phase dispersion curve if verbose: print "Calling sdisp96" cmd = os.path.join(COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR, 'sdisp96') p = EasyProcess('"{}" -v'.format(cmd)).call() if verbose: print p.stdout # group dispersion curve if verbose: print "Calling sregn96" cmd = os.path.join(COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR, 'sregn96') p = EasyProcess('"{}"'.format(cmd)).call() if verbose: print p.stdout # exporting group velocities (-U) of Rayleigh waves (-R) in ascii file if verbose: print "Calling sdpegn96" cmd = os.path.join(COMPUTER_PROGRAMS_IN_SEISMOLOGY_DIR, 'sdpegn96') p = EasyProcess('"{}" -R -S -U -XLOG -PER -ASC'.format(cmd)).call() if verbose: print p.stdout # loading group velocities from 6th column of ascii file vg = np.loadtxt('SREGN.ASC', skiprows=1, usecols=(5,)) # removing temp dir os.chdir(current_dir) shutil.rmtree(tmp_dir) return vg def create_model_file(path, dz, vp, vs, rho): """ Writing the 1D model to ascci file, to be used as input by the Computer Programs in Seismology """ qp = np.zeros_like(dz) qs = np.zeros_like(dz) etap = np.zeros_like(dz) etas = np.zeros_like(dz) frefp = np.ones_like(dz) frefs = np.ones_like(dz) f = open(path, mode='w') f.write(MODEL_HEADER) a = np.vstack((dz, vp, vs, rho, qp, qs, etap, etas, frefp, frefs)) for col in a.T: f.write('\n') col.tofile(f, sep=' ') f.close()
gpl-3.0
aringh/odl
examples/tomo/backends/astra_performance_cuda_parallel_2d_cg.py
1
2880
"""Performance example of running native ASTRA vs using ODL for reconstruction. In this example, a 512x512 image is reconstructed using the Conjugate Gradient Least Squares method on the GPU. In general, ASTRA is faster than ODL since it does not need to perform any copies and all arithmetic is performed on the GPU. Despite this, ODL is not much slower. In this example, the overhead is about 60 %, depending on the hardware used. """ import astra import numpy as np import matplotlib.pyplot as plt import scipy import odl # Common geometry parameters domain_size = np.array([512, 512]) n_angles = 180 det_size = 362 niter = 50 phantom = np.rot90(scipy.misc.ascent().astype('float'), -1) # --- ASTRA --- # Define ASTRA geometry vol_geom = astra.create_vol_geom(domain_size[0], domain_size[1]) proj_geom = astra.create_proj_geom('parallel', np.linalg.norm(domain_size) / det_size, det_size, np.linspace(0, np.pi, n_angles)) # Create ASTRA projector proj_id = astra.create_projector('cuda', proj_geom, vol_geom) # Create sinogram sinogram_id, sinogram = astra.create_sino(phantom, proj_id) # Create a data object for the reconstruction rec_id = astra.data2d.create('-vol', vol_geom) # Set up the parameters for a reconstruction algorithm using the CUDA backend cfg = astra.astra_dict('CGLS_CUDA') cfg['ReconstructionDataId'] = rec_id cfg['ProjectionDataId'] = sinogram_id cfg['ProjectorId'] = proj_id # Create the algorithm object from the configuration structure alg_id = astra.algorithm.create(cfg) with odl.util.Timer('ASTRA run'): # Run the algorithm astra.algorithm.run(alg_id, niter) # Get the result rec = astra.data2d.get(rec_id) # Clean up. astra.algorithm.delete(alg_id) astra.data2d.delete(rec_id) astra.data2d.delete(sinogram_id) astra.projector.delete(proj_id) # --- ODL --- # Create reconstruction space reco_space = odl.uniform_discr(-domain_size / 2, domain_size / 2, domain_size) # Create geometry geometry = odl.tomo.parallel_beam_geometry(reco_space, n_angles, det_size) # Create ray transform ray_trafo = odl.tomo.RayTransform(reco_space, geometry, impl='astra_cuda') # Create sinogram data = ray_trafo(phantom) # Solve with CGLS (aka CGN) x = reco_space.zero() with odl.util.Timer('ODL run'): odl.solvers.conjugate_gradient_normal(ray_trafo, x, data, niter=niter) # Display results for comparison plt.figure('Phantom') plt.imshow(phantom.T, origin='lower', cmap='bone') plt.figure('ASTRA sinogram') plt.imshow(sinogram.T, origin='lower', cmap='bone') plt.figure('ASTRA reconstruction') plt.imshow(rec.T, origin='lower', cmap='bone') plt.figure('ODL sinogram') plt.imshow(data.asarray().T, origin='lower', cmap='bone') plt.figure('ODL reconstruction') plt.imshow(x.asarray().T, origin='lower', cmap='bone') plt.show()
mpl-2.0
mne-tools/mne-python
examples/connectivity/mne_inverse_connectivity_spectrum.py
6
3460
""" ============================================================== Compute full spectrum source space connectivity between labels ============================================================== The connectivity is computed between 4 labels across the spectrum between 7.5 Hz and 40 Hz. """ # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # # License: BSD (3-clause) import matplotlib.pyplot as plt import mne from mne.datasets import sample from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator from mne.connectivity import spectral_connectivity print(__doc__) data_path = sample.data_path() subjects_dir = data_path + '/subjects' fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif' fname_raw = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif' fname_event = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif' # Load data inverse_operator = read_inverse_operator(fname_inv) raw = mne.io.read_raw_fif(fname_raw) events = mne.read_events(fname_event) # Add a bad channel raw.info['bads'] += ['MEG 2443'] # Pick MEG channels picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=False, eog=True, exclude='bads') # Define epochs for left-auditory condition event_id, tmin, tmax = 1, -0.2, 0.5 epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), reject=dict(mag=4e-12, grad=4000e-13, eog=150e-6)) # Compute inverse solution and for each epoch. By using "return_generator=True" # stcs will be a generator object instead of a list. snr = 1.0 # use lower SNR for single epochs lambda2 = 1.0 / snr ** 2 method = "dSPM" # use dSPM method (could also be MNE or sLORETA) stcs = apply_inverse_epochs(epochs, inverse_operator, lambda2, method, pick_ori="normal", return_generator=True) # Read some labels names = ['Aud-lh', 'Aud-rh', 'Vis-lh', 'Vis-rh'] labels = [mne.read_label(data_path + '/MEG/sample/labels/%s.label' % name) for name in names] # Average the source estimates within each label using sign-flips to reduce # signal cancellations, also here we return a generator src = inverse_operator['src'] label_ts = mne.extract_label_time_course(stcs, labels, src, mode='mean_flip', return_generator=True) fmin, fmax = 7.5, 40. sfreq = raw.info['sfreq'] # the sampling frequency con, freqs, times, n_epochs, n_tapers = spectral_connectivity( label_ts, method='wpli2_debiased', mode='multitaper', sfreq=sfreq, fmin=fmin, fmax=fmax, mt_adaptive=True, n_jobs=1) n_rows, n_cols = con.shape[:2] fig, axes = plt.subplots(n_rows, n_cols, sharex=True, sharey=True) for i in range(n_rows): for j in range(i + 1): if i == j: axes[i, j].set_axis_off() continue axes[i, j].plot(freqs, con[i, j, :]) axes[j, i].plot(freqs, con[i, j, :]) if j == 0: axes[i, j].set_ylabel(names[i]) axes[0, i].set_title(names[i]) if i == (n_rows - 1): axes[i, j].set_xlabel(names[j]) axes[i, j].set(xlim=[fmin, fmax], ylim=[-0.2, 1]) axes[j, i].set(xlim=[fmin, fmax], ylim=[-0.2, 1]) # Show band limits for f in [8, 12, 18, 35]: axes[i, j].axvline(f, color='k') axes[j, i].axvline(f, color='k') plt.tight_layout() plt.show()
bsd-3-clause
buqing2009/MissionPlanner
Lib/site-packages/scipy/optimize/nonlin.py
53
46004
r""" Nonlinear solvers ================= .. currentmodule:: scipy.optimize This is a collection of general-purpose nonlinear multidimensional solvers. These solvers find *x* for which *F(x) = 0*. Both *x* and *F* can be multidimensional. Routines -------- Large-scale nonlinear solvers: .. autosummary:: newton_krylov anderson General nonlinear solvers: .. autosummary:: broyden1 broyden2 Simple iterations: .. autosummary:: excitingmixing linearmixing diagbroyden Examples ======== Small problem ------------- >>> def F(x): ... return np.cos(x) + x[::-1] - [1, 2, 3, 4] >>> import scipy.optimize >>> x = scipy.optimize.broyden1(F, [1,1,1,1], f_tol=1e-14) >>> x array([ 4.04674914, 3.91158389, 2.71791677, 1.61756251]) >>> np.cos(x) + x[::-1] array([ 1., 2., 3., 4.]) Large problem ------------- Suppose that we needed to solve the following integrodifferential equation on the square :math:`[0,1]\times[0,1]`: .. math:: \nabla^2 P = 10 \left(\int_0^1\int_0^1\cosh(P)\,dx\,dy\right)^2 with :math:`P(x,1) = 1` and :math:`P=0` elsewhere on the boundary of the square. The solution can be found using the `newton_krylov` solver: .. plot:: import numpy as np from scipy.optimize import newton_krylov from numpy import cosh, zeros_like, mgrid, zeros # parameters nx, ny = 75, 75 hx, hy = 1./(nx-1), 1./(ny-1) P_left, P_right = 0, 0 P_top, P_bottom = 1, 0 def residual(P): d2x = zeros_like(P) d2y = zeros_like(P) d2x[1:-1] = (P[2:] - 2*P[1:-1] + P[:-2]) / hx/hx d2x[0] = (P[1] - 2*P[0] + P_left)/hx/hx d2x[-1] = (P_right - 2*P[-1] + P[-2])/hx/hx d2y[:,1:-1] = (P[:,2:] - 2*P[:,1:-1] + P[:,:-2])/hy/hy d2y[:,0] = (P[:,1] - 2*P[:,0] + P_bottom)/hy/hy d2y[:,-1] = (P_top - 2*P[:,-1] + P[:,-2])/hy/hy return d2x + d2y - 10*cosh(P).mean()**2 # solve guess = zeros((nx, ny), float) sol = newton_krylov(residual, guess, method='lgmres', verbose=1) print 'Residual', abs(residual(sol)).max() # visualize import matplotlib.pyplot as plt x, y = mgrid[0:1:(nx*1j), 0:1:(ny*1j)] plt.pcolor(x, y, sol) plt.colorbar() plt.show() """ # Copyright (C) 2009, Pauli Virtanen <pav@iki.fi> # Distributed under the same license as Scipy. import sys import numpy as np from scipy.linalg import norm, solve, inv, qr, svd, lstsq, LinAlgError from numpy import asarray, dot, vdot if sys.platform != 'cli': import scipy.sparse.linalg import scipy.sparse import scipy.lib.blas as blas import inspect else: print "Warning: scipy.optimize.nonlin package is not supported under IronPython yet." 
from linesearch import scalar_search_wolfe1, scalar_search_armijo __all__ = [ 'broyden1', 'broyden2', 'anderson', 'linearmixing', 'diagbroyden', 'excitingmixing', 'newton_krylov', # Deprecated functions: 'broyden_generalized', 'anderson2', 'broyden3'] #------------------------------------------------------------------------------ # Utility functions #------------------------------------------------------------------------------ class NoConvergence(Exception): pass def maxnorm(x): return np.absolute(x).max() def _as_inexact(x): """Return `x` as an array, of either floats or complex floats""" x = asarray(x) if not np.issubdtype(x.dtype, np.inexact): return asarray(x, dtype=np.float_) return x def _array_like(x, x0): """Return ndarray `x` as same array subclass and shape as `x0`""" x = np.reshape(x, np.shape(x0)) wrap = getattr(x0, '__array_wrap__', x.__array_wrap__) return wrap(x) def _safe_norm(v): if not np.isfinite(v).all(): return np.array(np.inf) return norm(v) #------------------------------------------------------------------------------ # Generic nonlinear solver machinery #------------------------------------------------------------------------------ _doc_parts = dict( params_basic=""" F : function(x) -> f Function whose root to find; should take and return an array-like object. x0 : array-like Initial guess for the solution """.strip(), params_extra=""" iter : int, optional Number of iterations to make. If omitted (default), make as many as required to meet tolerances. verbose : bool, optional Print status to stdout on every iteration. maxiter : int, optional Maximum number of iterations to make. If more are needed to meet convergence, `NoConvergence` is raised. f_tol : float, optional Absolute tolerance (in max-norm) for the residual. If omitted, default is 6e-6. f_rtol : float, optional Relative tolerance for the residual. If omitted, not used. x_tol : float, optional Absolute minimum step size, as determined from the Jacobian approximation. If the step size is smaller than this, optimization is terminated as successful. If omitted, not used. x_rtol : float, optional Relative minimum step size. If omitted, not used. tol_norm : function(vector) -> scalar, optional Norm to use in convergence check. Default is the maximum norm. line_search : {None, 'armijo' (default), 'wolfe'}, optional Which type of a line search to use to determine the step size in the direction given by the Jacobian approximation. Defaults to 'armijo'. callback : function, optional Optional callback function. It is called on every iteration as ``callback(x, f)`` where `x` is the current solution and `f` the corresponding residual. Returns ------- sol : array-like An array (of similar array type as `x0`) containing the final solution. Raises ------ NoConvergence When a solution was not found. """.strip() ) def _set_doc(obj): if obj.__doc__: obj.__doc__ = obj.__doc__ % _doc_parts def nonlin_solve(F, x0, jacobian='krylov', iter=None, verbose=False, maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, tol_norm=None, line_search='armijo', callback=None): """ Find a root of a function, in a way suitable for large-scale problems. Parameters ---------- %(params_basic)s jacobian : Jacobian A Jacobian approximation: `Jacobian` object or something that `asjacobian` can transform to one. 
Alternatively, a string specifying which of the builtin Jacobian approximations to use: krylov, broyden1, broyden2, anderson diagbroyden, linearmixing, excitingmixing %(params_extra)s See Also -------- asjacobian, Jacobian Notes ----- This algorithm implements the inexact Newton method, with backtracking or full line searches. Several Jacobian approximations are available, including Krylov and Quasi-Newton methods. References ---------- .. [KIM] C. T. Kelley, \"Iterative Methods for Linear and Nonlinear Equations\". Society for Industrial and Applied Mathematics. (1995) http://www.siam.org/books/kelley/ """ condition = TerminationCondition(f_tol=f_tol, f_rtol=f_rtol, x_tol=x_tol, x_rtol=x_rtol, iter=iter, norm=tol_norm) x0 = _as_inexact(x0) func = lambda z: _as_inexact(F(_array_like(z, x0))).flatten() x = x0.flatten() dx = np.inf Fx = func(x) Fx_norm = norm(Fx) jacobian = asjacobian(jacobian) jacobian.setup(x.copy(), Fx, func) if maxiter is None: if iter is not None: maxiter = iter + 1 else: maxiter = 100*(x.size+1) if line_search is True: line_search = 'armijo' elif line_search is False: line_search = None if line_search not in (None, 'armijo', 'wolfe'): raise ValueError("Invalid line search") # Solver tolerance selection gamma = 0.9 eta_max = 0.9999 eta_treshold = 0.1 eta = 1e-3 for n in xrange(maxiter): if condition.check(Fx, x, dx): break # The tolerance, as computed for scipy.sparse.linalg.* routines tol = min(eta, eta*Fx_norm) dx = -jacobian.solve(Fx, tol=tol) if norm(dx) == 0: raise ValueError("Jacobian inversion yielded zero vector. " "This indicates a bug in the Jacobian " "approximation.") # Line search, or Newton step if line_search: s, x, Fx, Fx_norm_new = _nonlin_line_search(func, x, Fx, dx, line_search) else: s = 1.0 x += dx Fx = func(x) Fx_norm_new = norm(Fx) jacobian.update(x.copy(), Fx) if callback: callback(x, Fx) # Adjust forcing parameters for inexact methods eta_A = gamma * Fx_norm_new**2 / Fx_norm**2 if gamma * eta**2 < eta_treshold: eta = min(eta_max, eta_A) else: eta = min(eta_max, max(eta_A, gamma*eta**2)) Fx_norm = Fx_norm_new # Print status if verbose: sys.stdout.write("%d: |F(x)| = %g; step %g; tol %g\n" % ( n, norm(Fx), s, eta)) sys.stdout.flush() else: raise NoConvergence(_array_like(x, x0)) return _array_like(x, x0) _set_doc(nonlin_solve) def _nonlin_line_search(func, x, Fx, dx, search_type='armijo', rdiff=1e-8, smin=1e-2): tmp_s = [0] tmp_Fx = [Fx] tmp_phi = [norm(Fx)**2] s_norm = norm(x) / norm(dx) def phi(s, store=True): if s == tmp_s[0]: return tmp_phi[0] xt = x + s*dx v = func(xt) p = _safe_norm(v)**2 if store: tmp_s[0] = s tmp_phi[0] = p tmp_Fx[0] = v return p def derphi(s): ds = (abs(s) + s_norm + 1) * rdiff return (phi(s+ds, store=False) - phi(s)) / ds if search_type == 'wolfe': s, phi1, phi0 = scalar_search_wolfe1(phi, derphi, tmp_phi[0], xtol=1e-2, amin=smin) elif search_type == 'armijo': s, phi1 = scalar_search_armijo(phi, tmp_phi[0], -tmp_phi[0], amin=smin) if s is None: # XXX: No suitable step length found. Take the full Newton step, # and hope for the best. s = 1.0 x = x + s*dx if s == tmp_s[0]: Fx = tmp_Fx[0] else: Fx = func(x) Fx_norm = norm(Fx) return s, x, Fx, Fx_norm class TerminationCondition(object): """ Termination condition for an iteration. 
It is terminated if - |F| < f_rtol*|F_0|, AND - |F| < f_tol AND - |dx| < x_rtol*|x|, AND - |dx| < x_tol """ def __init__(self, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, iter=None, norm=maxnorm): if f_tol is None: f_tol = np.finfo(np.float_).eps ** (1./3) if f_rtol is None: f_rtol = np.inf if x_tol is None: x_tol = np.inf if x_rtol is None: x_rtol = np.inf self.x_tol = x_tol self.x_rtol = x_rtol self.f_tol = f_tol self.f_rtol = f_rtol self.norm = maxnorm self.iter = iter self.f0_norm = None self.iteration = 0 def check(self, f, x, dx): self.iteration += 1 f_norm = self.norm(f) x_norm = self.norm(x) dx_norm = self.norm(dx) if self.f0_norm is None: self.f0_norm = f_norm if f_norm == 0: return True if self.iter is not None: # backwards compatibility with Scipy 0.6.0 return self.iteration > self.iter # NB: condition must succeed for rtol=inf even if norm == 0 return ((f_norm <= self.f_tol and f_norm/self.f_rtol <= self.f0_norm) and (dx_norm <= self.x_tol and dx_norm/self.x_rtol <= x_norm)) #------------------------------------------------------------------------------ # Generic Jacobian approximation #------------------------------------------------------------------------------ class Jacobian(object): """ Common interface for Jacobians or Jacobian approximations. The optional methods come useful when implementing trust region etc. algorithms that often require evaluating transposes of the Jacobian. Methods ------- solve Returns J^-1 * v update Updates Jacobian to point `x` (where the function has residual `Fx`) matvec : optional Returns J * v rmatvec : optional Returns A^H * v rsolve : optional Returns A^-H * v matmat : optional Returns A * V, where V is a dense matrix with dimensions (N,K). todense : optional Form the dense Jacobian matrix. Necessary for dense trust region algorithms, and useful for testing. Attributes ---------- shape Matrix dimensions (M, N) dtype Data type of the matrix. func : callable, optional Function the Jacobian corresponds to """ def __init__(self, **kw): names = ["solve", "update", "matvec", "rmatvec", "rsolve", "matmat", "todense", "shape", "dtype"] for name, value in kw.items(): if name not in names: raise ValueError("Unknown keyword argument %s" % name) if value is not None: setattr(self, name, kw[name]) if hasattr(self, 'todense'): self.__array__ = lambda: self.todense() def aspreconditioner(self): return InverseJacobian(self) def solve(self, v, tol=0): raise NotImplementedError def update(self, x, F): pass def setup(self, x, F, func): self.func = func self.shape = (F.size, x.size) self.dtype = F.dtype if self.__class__.setup is Jacobian.setup: # Call on the first point unless overridden self.update(self, x, F) class InverseJacobian(object): def __init__(self, jacobian): self.jacobian = jacobian self.matvec = jacobian.solve self.update = jacobian.update if hasattr(jacobian, 'setup'): self.setup = jacobian.setup if hasattr(jacobian, 'rsolve'): self.rmatvec = jacobian.rsolve @property def shape(self): return self.jacobian.shape @property def dtype(self): return self.jacobian.dtype def asjacobian(J): """ Convert given object to one suitable for use as a Jacobian. 
""" spsolve = scipy.sparse.linalg.spsolve if isinstance(J, Jacobian): return J elif inspect.isclass(J) and issubclass(J, Jacobian): return J() elif isinstance(J, np.ndarray): if J.ndim > 2: raise ValueError('array must have rank <= 2') J = np.atleast_2d(np.asarray(J)) if J.shape[0] != J.shape[1]: raise ValueError('array must be square') return Jacobian(matvec=lambda v: dot(J, v), rmatvec=lambda v: dot(J.conj().T, v), solve=lambda v: solve(J, v), rsolve=lambda v: solve(J.conj().T, v), dtype=J.dtype, shape=J.shape) elif scipy.sparse.isspmatrix(J): if J.shape[0] != J.shape[1]: raise ValueError('matrix must be square') return Jacobian(matvec=lambda v: J*v, rmatvec=lambda v: J.conj().T * v, solve=lambda v: spsolve(J, v), rsolve=lambda v: spsolve(J.conj().T, v), dtype=J.dtype, shape=J.shape) elif hasattr(J, 'shape') and hasattr(J, 'dtype') and hasattr(J, 'solve'): return Jacobian(matvec=getattr(J, 'matvec'), rmatvec=getattr(J, 'rmatvec'), solve=J.solve, rsolve=getattr(J, 'rsolve'), update=getattr(J, 'update'), setup=getattr(J, 'setup'), dtype=J.dtype, shape=J.shape) elif callable(J): # Assume it's a function J(x) that returns the Jacobian class Jac(Jacobian): def update(self, x, F): self.x = x def solve(self, v, tol=0): m = J(self.x) if isinstance(m, np.ndarray): return solve(m, v) elif scipy.sparse.isspmatrix(m): return spsolve(m, v) else: raise ValueError("Unknown matrix type") def matvec(self, v): m = J(self.x) if isinstance(m, np.ndarray): return dot(m, v) elif scipy.sparse.isspmatrix(m): return m*v else: raise ValueError("Unknown matrix type") def rsolve(self, v, tol=0): m = J(self.x) if isinstance(m, np.ndarray): return solve(m.conj().T, v) elif scipy.sparse.isspmatrix(m): return spsolve(m.conj().T, v) else: raise ValueError("Unknown matrix type") def rmatvec(self, v): m = J(self.x) if isinstance(m, np.ndarray): return dot(m.conj().T, v) elif scipy.sparse.isspmatrix(m): return m.conj().T * v else: raise ValueError("Unknown matrix type") return Jac() elif isinstance(J, str): return dict(broyden1=BroydenFirst, broyden2=BroydenSecond, anderson=Anderson, diagbroyden=DiagBroyden, linearmixing=LinearMixing, excitingmixing=ExcitingMixing, krylov=KrylovJacobian)[J]() else: raise TypeError('Cannot convert object to a Jacobian') #------------------------------------------------------------------------------ # Broyden #------------------------------------------------------------------------------ class GenericBroyden(Jacobian): def setup(self, x0, f0, func): Jacobian.setup(self, x0, f0, func) self.last_f = f0 self.last_x = x0 if hasattr(self, 'alpha') and self.alpha is None: # autoscale the initial Jacobian parameter self.alpha = 0.5*max(norm(x0), 1) / norm(f0) def _update(self, x, f, dx, df, dx_norm, df_norm): raise NotImplementedError def update(self, x, f): df = f - self.last_f dx = x - self.last_x self._update(x, f, dx, df, norm(dx), norm(df)) self.last_f = f self.last_x = x class LowRankMatrix(object): r""" A matrix represented as .. math:: \alpha I + \sum_{n=0}^{n=M} c_n d_n^\dagger However, if the rank of the matrix reaches the dimension of the vectors, full matrix representation will be used thereon. 
""" def __init__(self, alpha, n, dtype): self.alpha = alpha self.cs = [] self.ds = [] self.n = n self.dtype = dtype self.collapsed = None @staticmethod def _matvec(v, alpha, cs, ds): axpy, scal, dotc = blas.get_blas_funcs(['axpy', 'scal', 'dotc'], cs[:1] + [v]) w = alpha * v for c, d in zip(cs, ds): a = dotc(d, v) w = axpy(c, w, w.size, a) return w @staticmethod def _solve(v, alpha, cs, ds): """Evaluate w = M^-1 v""" if len(cs) == 0: return v/alpha # (B + C D^H)^-1 = B^-1 - B^-1 C (I + D^H B^-1 C)^-1 D^H B^-1 axpy, dotc = blas.get_blas_funcs(['axpy', 'dotc'], cs[:1] + [v]) c0 = cs[0] A = alpha * np.identity(len(cs), dtype=c0.dtype) for i, d in enumerate(ds): for j, c in enumerate(cs): A[i,j] += dotc(d, c) q = np.zeros(len(cs), dtype=c0.dtype) for j, d in enumerate(ds): q[j] = dotc(d, v) q /= alpha q = solve(A, q) w = v/alpha for c, qc in zip(cs, q): w = axpy(c, w, w.size, -qc) return w def matvec(self, v): """Evaluate w = M v""" if self.collapsed is not None: return np.dot(self.collapsed, v) return LowRankMatrix._matvec(v, self.alpha, self.cs, self.ds) def rmatvec(self, v): """Evaluate w = M^H v""" if self.collapsed is not None: return np.dot(self.collapsed.T.conj(), v) return LowRankMatrix._matvec(v, np.conj(self.alpha), self.ds, self.cs) def solve(self, v, tol=0): """Evaluate w = M^-1 v""" if self.collapsed is not None: return solve(self.collapsed, v) return LowRankMatrix._solve(v, self.alpha, self.cs, self.ds) def rsolve(self, v, tol=0): """Evaluate w = M^-H v""" if self.collapsed is not None: return solve(self.collapsed.T.conj(), v) return LowRankMatrix._solve(v, np.conj(self.alpha), self.ds, self.cs) def append(self, c, d): if self.collapsed is not None: self.collapsed += c[:,None] * d[None,:].conj() return self.cs.append(c) self.ds.append(d) if len(self.cs) > c.size: self.collapse() def __array__(self): if self.collapsed is not None: return self.collapsed Gm = self.alpha*np.identity(self.n, dtype=self.dtype) for c, d in zip(self.cs, self.ds): Gm += c[:,None]*d[None,:].conj() return Gm def collapse(self): """Collapse the low-rank matrix to a full-rank one.""" self.collapsed = np.array(self) self.cs = None self.ds = None self.alpha = None def restart_reduce(self, rank): """ Reduce the rank of the matrix by dropping all vectors. """ if self.collapsed is not None: return assert rank > 0 if len(self.cs) > rank: del self.cs[:] del self.ds[:] def simple_reduce(self, rank): """ Reduce the rank of the matrix by dropping oldest vectors. """ if self.collapsed is not None: return assert rank > 0 while len(self.cs) > rank: del self.cs[0] del self.ds[0] def svd_reduce(self, max_rank, to_retain=None): """ Reduce the rank of the matrix by retaining some SVD components. This corresponds to the \"Broyden Rank Reduction Inverse\" algorithm described in [vR]_. Note that the SVD decomposition can be done by solving only a problem whose size is the effective rank of this matrix, which is viable even for large problems. Parameters ---------- max_rank : int Maximum rank of this matrix after reduction. to_retain : int, optional Number of SVD components to retain when reduction is done (ie. rank > max_rank). Default is ``max_rank - 2``. References ---------- .. [vR] B.A. van der Rotten, PhD thesis, \"A limited memory Broyden method to solve high-dimensional systems of nonlinear equations\". Mathematisch Instituut, Universiteit Leiden, The Netherlands (2003). 
http://www.math.leidenuniv.nl/scripties/Rotten.pdf """ if self.collapsed is not None: return p = max_rank if to_retain is not None: q = to_retain else: q = p - 2 if self.cs: p = min(p, len(self.cs[0])) q = max(0, min(q, p-1)) m = len(self.cs) if m < p: # nothing to do return C = np.array(self.cs).T D = np.array(self.ds).T D, R = qr(D, mode='qr', econ=True) C = dot(C, R.T.conj()) U, S, WH = svd(C, full_matrices=False, compute_uv=True) C = dot(C, inv(WH)) D = dot(D, WH.T.conj()) for k in xrange(q): self.cs[k] = C[:,k].copy() self.ds[k] = D[:,k].copy() del self.cs[q:] del self.ds[q:] _doc_parts['broyden_params'] = """ alpha : float, optional Initial guess for the Jacobian is (-1/alpha). reduction_method : str or tuple, optional Method used in ensuring that the rank of the Broyden matrix stays low. Can either be a string giving the name of the method, or a tuple of the form ``(method, param1, param2, ...)`` that gives the name of the method and values for additional parameters. Methods available: - ``restart``: drop all matrix columns. Has no extra parameters. - ``simple``: drop oldest matrix column. Has no extra parameters. - ``svd``: keep only the most significant SVD components. Extra parameters: - ``to_retain`: number of SVD components to retain when rank reduction is done. Default is ``max_rank - 2``. max_rank : int, optional Maximum rank for the Broyden matrix. Default is infinity (ie., no rank reduction). """.strip() class BroydenFirst(GenericBroyden): r""" Find a root of a function, using Broyden's first Jacobian approximation. This method is also known as \"Broyden's good method\". Parameters ---------- %(params_basic)s %(broyden_params)s %(params_extra)s Notes ----- This algorithm implements the inverse Jacobian Quasi-Newton update .. math:: H_+ = H + (dx - H df) dx^\dagger H / ( dx^\dagger H df) which corresponds to Broyden's first Jacobian update .. math:: J_+ = J + (df - J dx) dx^\dagger / dx^\dagger dx References ---------- .. [vR] B.A. van der Rotten, PhD thesis, \"A limited memory Broyden method to solve high-dimensional systems of nonlinear equations\". Mathematisch Instituut, Universiteit Leiden, The Netherlands (2003). 
http://www.math.leidenuniv.nl/scripties/Rotten.pdf """ def __init__(self, alpha=None, reduction_method='restart', max_rank=None): GenericBroyden.__init__(self) self.alpha = alpha self.Gm = None if max_rank is None: max_rank = np.inf self.max_rank = max_rank if isinstance(reduction_method, str): reduce_params = () else: reduce_params = reduction_method[1:] reduction_method = reduction_method[0] reduce_params = (max_rank - 1,) + reduce_params if reduction_method == 'svd': self._reduce = lambda: self.Gm.svd_reduce(*reduce_params) elif reduction_method == 'simple': self._reduce = lambda: self.Gm.simple_reduce(*reduce_params) elif reduction_method == 'restart': self._reduce = lambda: self.Gm.restart_reduce(*reduce_params) else: raise ValueError("Unknown rank reduction method '%s'" % reduction_method) def setup(self, x, F, func): GenericBroyden.setup(self, x, F, func) self.Gm = LowRankMatrix(-self.alpha, self.shape[0], self.dtype) def todense(self): return inv(self.Gm) def solve(self, f, tol=0): r = self.Gm.matvec(f) if not np.isfinite(r).all(): # singular; reset the Jacobian approximation self.setup(self.last_x, self.last_f, self.func) return self.Gm.matvec(f) def matvec(self, f): return self.Gm.solve(f) def rsolve(self, f, tol=0): return self.Gm.rmatvec(f) def rmatvec(self, f): return self.Gm.rsolve(f) def _update(self, x, f, dx, df, dx_norm, df_norm): self._reduce() # reduce first to preserve secant condition v = self.Gm.rmatvec(dx) c = dx - self.Gm.matvec(df) d = v / vdot(df, v) self.Gm.append(c, d) class BroydenSecond(BroydenFirst): """ Find a root of a function, using Broyden\'s second Jacobian approximation. This method is also known as \"Broyden's bad method\". Parameters ---------- %(params_basic)s %(broyden_params)s %(params_extra)s Notes ----- This algorithm implements the inverse Jacobian Quasi-Newton update .. math:: H_+ = H + (dx - H df) df^\dagger / ( df^\dagger df) corresponding to Broyden's second method. References ---------- .. [vR] B.A. van der Rotten, PhD thesis, \"A limited memory Broyden method to solve high-dimensional systems of nonlinear equations\". Mathematisch Instituut, Universiteit Leiden, The Netherlands (2003). http://www.math.leidenuniv.nl/scripties/Rotten.pdf """ def _update(self, x, f, dx, df, dx_norm, df_norm): self._reduce() # reduce first to preserve secant condition v = df c = dx - self.Gm.matvec(df) d = v / df_norm**2 self.Gm.append(c, d) #------------------------------------------------------------------------------ # Broyden-like (restricted memory) #------------------------------------------------------------------------------ class Anderson(GenericBroyden): """ Find a root of a function, using (extended) Anderson mixing. The Jacobian is formed by for a 'best' solution in the space spanned by last `M` vectors. As a result, only a MxM matrix inversions and MxN multiplications are required. [Ey]_ Parameters ---------- %(params_basic)s alpha : float, optional Initial guess for the Jacobian is (-1/alpha). M : float, optional Number of previous vectors to retain. Defaults to 5. w0 : float, optional Regularization parameter for numerical stability. Compared to unity, good values of the order of 0.01. %(params_extra)s References ---------- .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996). 
""" # Note: # # Anderson method maintains a rank M approximation of the inverse Jacobian, # # J^-1 v ~ -v*alpha + (dX + alpha dF) A^-1 dF^H v # A = W + dF^H dF # W = w0^2 diag(dF^H dF) # # so that for w0 = 0 the secant condition applies for last M iterates, ie., # # J^-1 df_j = dx_j # # for all j = 0 ... M-1. # # Moreover, (from Sherman-Morrison-Woodbury formula) # # J v ~ [ b I - b^2 C (I + b dF^H A^-1 C)^-1 dF^H ] v # C = (dX + alpha dF) A^-1 # b = -1/alpha # # and after simplification # # J v ~ -v/alpha + (dX/alpha + dF) (dF^H dX - alpha W)^-1 dF^H v # def __init__(self, alpha=None, w0=0.01, M=5): GenericBroyden.__init__(self) self.alpha = alpha self.M = M self.dx = [] self.df = [] self.gamma = None self.w0 = w0 def solve(self, f, tol=0): dx = -self.alpha*f n = len(self.dx) if n == 0: return dx df_f = np.empty(n, dtype=f.dtype) for k in xrange(n): df_f[k] = vdot(self.df[k], f) try: gamma = solve(self.a, df_f) except LinAlgError: # singular; reset the Jacobian approximation del self.dx[:] del self.df[:] return dx for m in xrange(n): dx += gamma[m]*(self.dx[m] + self.alpha*self.df[m]) return dx def matvec(self, f): dx = -f/self.alpha n = len(self.dx) if n == 0: return dx df_f = np.empty(n, dtype=f.dtype) for k in xrange(n): df_f[k] = vdot(self.df[k], f) b = np.empty((n, n), dtype=f.dtype) for i in xrange(n): for j in xrange(n): b[i,j] = vdot(self.df[i], self.dx[j]) if i == j and self.w0 != 0: b[i,j] -= vdot(self.df[i], self.df[i])*self.w0**2*self.alpha gamma = solve(b, df_f) for m in xrange(n): dx += gamma[m]*(self.df[m] + self.dx[m]/self.alpha) return dx def _update(self, x, f, dx, df, dx_norm, df_norm): if self.M == 0: return self.dx.append(dx) self.df.append(df) while len(self.dx) > self.M: self.dx.pop(0) self.df.pop(0) n = len(self.dx) a = np.zeros((n, n), dtype=f.dtype) for i in xrange(n): for j in xrange(i, n): if i == j: wd = self.w0**2 else: wd = 0 a[i,j] = (1+wd)*vdot(self.df[i], self.df[j]) a += np.triu(a, 1).T.conj() self.a = a #------------------------------------------------------------------------------ # Simple iterations #------------------------------------------------------------------------------ class DiagBroyden(GenericBroyden): """ Find a root of a function, using diagonal Broyden Jacobian approximation. The Jacobian approximation is derived from previous iterations, by retaining only the diagonal of Broyden matrices. .. warning:: This algorithm may be useful for specific problems, but whether it will work may depend strongly on the problem. Parameters ---------- %(params_basic)s alpha : float, optional Initial guess for the Jacobian is (-1/alpha). %(params_extra)s """ def __init__(self, alpha=None): GenericBroyden.__init__(self) self.alpha = alpha def setup(self, x, F, func): GenericBroyden.setup(self, x, F, func) self.d = np.ones((self.shape[0],), dtype=self.dtype) / self.alpha def solve(self, f, tol=0): return -f / self.d def matvec(self, f): return -f * self.d def rsolve(self, f, tol=0): return -f / self.d.conj() def rmatvec(self, f): return -f * self.d.conj() def todense(self): return np.diag(-self.d) def _update(self, x, f, dx, df, dx_norm, df_norm): self.d -= (df + self.d*dx)*dx/dx_norm**2 class LinearMixing(GenericBroyden): """ Find a root of a function, using a scalar Jacobian approximation. .. warning:: This algorithm may be useful for specific problems, but whether it will work may depend strongly on the problem. Parameters ---------- %(params_basic)s alpha : float, optional The Jacobian approximation is (-1/alpha). 
%(params_extra)s """ def __init__(self, alpha=None): GenericBroyden.__init__(self) self.alpha = alpha def solve(self, f, tol=0): return -f*self.alpha def matvec(self, f): return -f/self.alpha def rsolve(self, f, tol=0): return -f*np.conj(self.alpha) def rmatvec(self, f): return -f/np.conj(self.alpha) def todense(self): return np.diag(-np.ones(self.shape[0])/self.alpha) def _update(self, x, f, dx, df, dx_norm, df_norm): pass class ExcitingMixing(GenericBroyden): """ Find a root of a function, using a tuned diagonal Jacobian approximation. The Jacobian matrix is diagonal and is tuned on each iteration. .. warning:: This algorithm may be useful for specific problems, but whether it will work may depend strongly on the problem. Parameters ---------- %(params_basic)s alpha : float, optional Initial Jacobian approximation is (-1/alpha). alphamax : float, optional The entries of the diagonal Jacobian are kept in the range ``[alpha, alphamax]``. %(params_extra)s """ def __init__(self, alpha=None, alphamax=1.0): GenericBroyden.__init__(self) self.alpha = alpha self.alphamax = alphamax self.beta = None def setup(self, x, F, func): GenericBroyden.setup(self, x, F, func) self.beta = self.alpha * np.ones((self.shape[0],), dtype=self.dtype) def solve(self, f, tol=0): return -f*self.beta def matvec(self, f): return -f/self.beta def rsolve(self, f, tol=0): return -f*self.beta.conj() def rmatvec(self, f): return -f/self.beta.conj() def todense(self): return np.diag(-1/self.beta) def _update(self, x, f, dx, df, dx_norm, df_norm): incr = f*self.last_f > 0 self.beta[incr] += self.alpha self.beta[~incr] = self.alpha np.clip(self.beta, 0, self.alphamax, out=self.beta) #------------------------------------------------------------------------------ # Iterative/Krylov approximated Jacobians #------------------------------------------------------------------------------ class KrylovJacobian(Jacobian): r""" Find a root of a function, using Krylov approximation for inverse Jacobian. This method is suitable for solving large-scale problems. Parameters ---------- %(params_basic)s rdiff : float, optional Relative step size to use in numerical differentiation. method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function Krylov method to use to approximate the Jacobian. Can be a string, or a function implementing the same interface as the iterative solvers in `scipy.sparse.linalg`. The default is `scipy.sparse.linalg.lgmres`. inner_M : LinearOperator or InverseJacobian Preconditioner for the inner Krylov iteration. Note that you can use also inverse Jacobians as (adaptive) preconditioners. For example, >>> jac = BroydenFirst() >>> kjac = KrylovJacobian(inner_M=jac.inverse). If the preconditioner has a method named 'update', it will be called as ``update(x, f)`` after each nonlinear step, with ``x`` giving the current point, and ``f`` the current function value. inner_tol, inner_maxiter, ... Parameters to pass on to the \"inner\" Krylov solver. See `scipy.sparse.linalg.gmres` for details. outer_k : int, optional Size of the subspace kept across LGMRES nonlinear iterations. See `scipy.sparse.linalg.lgmres` for details. %(params_extra)s See Also -------- scipy.sparse.linalg.gmres scipy.sparse.linalg.lgmres Notes ----- This function implements a Newton-Krylov solver. The basic idea is to compute the inverse of the Jacobian with an iterative Krylov method. These methods require only evaluating the Jacobian-vector products, which are conveniently approximated by numerical differentiation: .. 
math:: J v \approx (f(x + \omega*v/|v|) - f(x)) / \omega Due to the use of iterative matrix inverses, these methods can deal with large nonlinear problems. Scipy's `scipy.sparse.linalg` module offers a selection of Krylov solvers to choose from. The default here is `lgmres`, which is a variant of restarted GMRES iteration that reuses some of the information obtained in the previous Newton steps to invert Jacobians in subsequent steps. For a review on Newton-Krylov methods, see for example [KK]_, and for the LGMRES sparse inverse method, see [BJM]_. References ---------- .. [KK] D.A. Knoll and D.E. Keyes, J. Comp. Phys. 193, 357 (2003). .. [BJM] A.H. Baker and E.R. Jessup and T. Manteuffel, SIAM J. Matrix Anal. Appl. 26, 962 (2005). """ def __init__(self, rdiff=None, method='lgmres', inner_maxiter=20, inner_M=None, outer_k=10, **kw): self.preconditioner = inner_M self.rdiff = rdiff self.method = dict( bicgstab=scipy.sparse.linalg.bicgstab, gmres=scipy.sparse.linalg.gmres, lgmres=scipy.sparse.linalg.lgmres, cgs=scipy.sparse.linalg.cgs, minres=scipy.sparse.linalg.minres, ).get(method, method) self.method_kw = dict(maxiter=inner_maxiter, M=self.preconditioner) if self.method is scipy.sparse.linalg.gmres: # Replace GMRES's outer iteration with Newton steps self.method_kw['restrt'] = inner_maxiter self.method_kw['maxiter'] = 1 elif self.method is scipy.sparse.linalg.lgmres: self.method_kw['outer_k'] = outer_k # Replace LGMRES's outer iteration with Newton steps self.method_kw['maxiter'] = 1 # Carry LGMRES's `outer_v` vectors across nonlinear iterations self.method_kw.setdefault('outer_v', []) # But don't carry the corresponding Jacobian*v products, in case # the Jacobian changes a lot in the nonlinear step # # XXX: some trust-region inspired ideas might be more efficient... # See eg. Brown & Saad. But needs to be implemented separately # since it's not an inexact Newton method. self.method_kw.setdefault('store_outer_Av', False) for key, value in kw.items(): if not key.startswith('inner_'): raise ValueError("Unknown parameter %s" % key) self.method_kw[key[6:]] = value def _update_diff_step(self): mx = abs(self.x0).max() mf = abs(self.f0).max() self.omega = self.rdiff * max(1, mx) / max(1, mf) def matvec(self, v): nv = norm(v) if nv == 0: return 0*v sc = self.omega / nv r = (self.func(self.x0 + sc*v) - self.f0) / sc if not np.all(np.isfinite(r)) and np.all(np.isfinite(v)): raise ValueError('Function returned non-finite results') return r def solve(self, rhs, tol=0): sol, info = self.method(self.op, rhs, tol=tol, **self.method_kw) return sol def update(self, x, f): self.x0 = x self.f0 = f self._update_diff_step() # Update also the preconditioner, if possible if self.preconditioner is not None: if hasattr(self.preconditioner, 'update'): self.preconditioner.update(x, f) def setup(self, x, f, func): Jacobian.setup(self, x, f, func) self.x0 = x self.f0 = f self.op = scipy.sparse.linalg.aslinearoperator(self) if self.rdiff is None: self.rdiff = np.finfo(x.dtype).eps ** (1./2) self._update_diff_step() # Setup also the preconditioner, if possible if self.preconditioner is not None: if hasattr(self.preconditioner, 'setup'): self.preconditioner.setup(x, f, func) #------------------------------------------------------------------------------ # Wrapper functions #------------------------------------------------------------------------------ def _nonlin_wrapper(name, jac): """ Construct a solver wrapper with given name and jacobian approx. 
It inspects the keyword arguments of ``jac.__init__``, and allows to use the same arguments in the wrapper function, in addition to the keyword arguments of `nonlin_solve` """ import inspect args, varargs, varkw, defaults = inspect.getargspec(jac.__init__) kwargs = zip(args[-len(defaults):], defaults) kw_str = ", ".join(["%s=%r" % (k, v) for k, v in kwargs]) if kw_str: kw_str = ", " + kw_str kwkw_str = ", ".join(["%s=%s" % (k, k) for k, v in kwargs]) if kwkw_str: kwkw_str = kwkw_str + ", " # Construct the wrapper function so that it's keyword arguments # are visible in pydoc.help etc. wrapper = """ def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None, f_tol=None, f_rtol=None, x_tol=None, x_rtol=None, tol_norm=None, line_search='armijo', callback=None, **kw): jac = %(jac)s(%(kwkw)s **kw) return nonlin_solve(F, xin, jac, iter, verbose, maxiter, f_tol, f_rtol, x_tol, x_rtol, tol_norm, line_search, callback) """ wrapper = wrapper % dict(name=name, kw=kw_str, jac=jac.__name__, kwkw=kwkw_str) ns = {} ns.update(globals()) exec wrapper in ns func = ns[name] func.__doc__ = jac.__doc__ _set_doc(func) return func broyden1 = _nonlin_wrapper('broyden1', BroydenFirst) broyden2 = _nonlin_wrapper('broyden2', BroydenSecond) anderson = _nonlin_wrapper('anderson', Anderson) linearmixing = _nonlin_wrapper('linearmixing', LinearMixing) diagbroyden = _nonlin_wrapper('diagbroyden', DiagBroyden) excitingmixing = _nonlin_wrapper('excitingmixing', ExcitingMixing) newton_krylov = _nonlin_wrapper('newton_krylov', KrylovJacobian) # Deprecated functions @np.deprecate def broyden_generalized(*a, **kw): """Use *anderson(..., w0=0)* instead""" kw.setdefault('w0', 0) return anderson(*a, **kw) @np.deprecate def broyden1_modified(*a, **kw): """Use `broyden1` instead""" return broyden1(*a, **kw) @np.deprecate def broyden_modified(*a, **kw): """Use `anderson` instead""" return anderson(*a, **kw) @np.deprecate def anderson2(*a, **kw): """Use `anderson` instead""" return anderson(*a, **kw) @np.deprecate def broyden3(*a, **kw): """Use `broyden2` instead""" return broyden2(*a, **kw) @np.deprecate def vackar(*a, **kw): """Use `diagbroyden` instead""" return diagbroyden(*a, **kw)
gpl-3.0
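The wrapper functions defined at the end of the file above (broyden1, anderson, newton_krylov, ...) are the public entry points exposed through scipy.optimize. A minimal usage sketch, assuming a SciPy build where these wrappers are available; the system F and its starting point are illustrative, not taken from the source:

import numpy as np
from scipy.optimize import broyden1, newton_krylov

def F(x):
    # a small smooth system with a root near x = [0.739..., 0.739...]
    return np.array([np.cos(x[0]) - x[1],
                     x[0] - np.cos(x[1])])

x_broyden = broyden1(F, np.zeros(2), f_tol=1e-10)      # "good" Broyden update
x_krylov = newton_krylov(F, np.zeros(2), f_tol=1e-10)  # matrix-free Newton-Krylov
print(x_broyden, x_krylov)  # both approximate the same root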
mariocannistra/radio-astronomy
findsessionrange.py
1
1973
#!/usr/bin/python # this source is part of my Hackster.io project: https://www.hackster.io/mariocannistra/radio-astronomy-with-rtl-sdr-raspberrypi-and-amazon-aws-iot-45b617 # this program will determine the overall range of signal strengths received during the whole session. # this program can be run standalone but is usually run at end of session by doscan.py # Its output will be stored in 2 files: # dbminmax.txt and session-overview.png . The first contains two rows of text with just the maximum # and minimum of the whole session. The second contains a chart of all the min and max values for each of # the scan files from glob import glob import numpy as np import radioConfig import subprocess import os import datetime import matplotlib # Force matplotlib to not use any Xwindows backend. matplotlib.use('Agg') import matplotlib.pyplot as plt globmax = -9000 globmin = 9000 sessmin = np.empty(shape=[0, 1]) sessmax = np.empty(shape=[0, 1]) scantimeline = np.empty(shape=[0, 1]) files_in_dir = sorted(glob("*.csv")) for fname in files_in_dir: dbs = np.genfromtxt(fname,dtype='float',delimiter = ',', skip_header=0, skip_footer=0, usecols=(6,),usemask=True) thismin=dbs.min() thismax=dbs.max() scantime=str(fname)[11:17] print scantime,thismin,thismax if thismin < globmin: globmin = thismin if thismax > globmax: globmax = thismax sessmin = np.append(sessmin, thismin) sessmax = np.append(sessmax, thismax) scantimeline = np.append(scantimeline, scantime) mytitle = 'Signal strength range: min %f .. max %f' % (globmin,globmax) print mytitle xs = range(len(scantimeline)) plt.plot(xs,sessmin ) plt.plot(xs,sessmax ) plt.xticks(xs,scantimeline,rotation=70) plt.grid() plt.title(mytitle) #plt.show() plt.savefig('session-overview.png') sessfile = open("dbminmax.txt", "w") sessfile.write(str(globmax)) sessfile.write("\n") sessfile.write(str(globmin)) sessfile.write("\n") sessfile.close()
mit
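The heart of findsessionrange.py is a per-file reduction over column 7 of each rtl_power CSV. A condensed sketch of that single step; the file name below is a placeholder, not one of the session's actual scan files:

import numpy as np

# read the dB column (index 6) of one scan file and report its range
dbs = np.genfromtxt('scan.csv', dtype='float', delimiter=',',
                    usecols=(6,), usemask=True)
print(dbs.min(), dbs.max())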
TAMU-CPT/galaxy-tools
tools/genome_viz/brigaid.py
1
36126
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ AUTHOR Pedro Cerqueira github: @pedrorvc DESCRIPTION This script serves to create xml files contaning the information necessary for the execution of BRIG (Blast Ring Image Generator), reducing the time performing the tedious task of setting up all the information on the GUI and provides a quick way to produce an image. The arguments for this script provide some (but not all) of the available options in BRIG, which were the ones I used to change the most. USAGE: brigaid.py -q reference_sequence.fna -rfd path/to/reference/dir -od path/to/output/dir -of path/to/output/dir/output_file -oi path/to/output/BRIG/output_image -t Image_title -a annotation_file.gbk --genes genes_of_interest.txt --contig-order contig_order.tsv """ import argparse import csv import os import xml.etree.ElementTree as ET from collections import OrderedDict from xml.dom import minidom from Bio import SeqIO from matplotlib import cm def listdir_fullpath(path): """ Gets the full path of the files from a directory Args: path (str): full path to a directory Returns: list containing the full path of every file contained in the input directory """ return [os.path.join(path, f) for f in os.listdir(path)] def ring_attributes(colour, name, position): """ Creates ring attributes. Args: colour (str): color of the ring. name (str): name of the ring. position (str): position of the ring. Returns: ring_attrs (dict): attributes of any regular ring of the BRIG xml. """ ring_attrs = {"colour" : colour, "name": name, "position" : position, "upperInt" : "90", "lowerInt" : "70", "legend" : "yes", "size" : "30", "labels" : "no", "blastType" : "blastn"} return ring_attrs def annotation_ring_attributes(position): """ Creates annotation ring attributes. Args: position (str): position of the ring. Returns: annotation_ring_attrs (dict): attributes of the annotation ring of the BRIG xml. """ annotation_ring_attrs = {"colour" : '172,14,225', "name": 'null', "position" : position, "upperInt" : "70", "lowerInt" : "50", "legend" : "yes", "size" : "30", "labels" : "no", "blastType" : "blastn"} return annotation_ring_attrs def create_feature_attrs(label, colour, decoration, start, stop): """ Create attributes for the Feature SubElements of the annotation ring. Args: label (str): name of the gene/CDS to annotate colour (str): colour of the decoration for the annotation decoration (str): shape of the gene/CDS to annotate, for example, 'clockwise-arrow' start (str): start of the gene/CDS to annotate stop (str): stop of the gene/CDS to annotate Results: feature_element_attrs (dict): attributes of the feature element. feature_range_element_attrs (dict): attributes of the feature range element """ feature_element_attrs = {'label' : label, 'colour' : colour, 'decoration' : decoration} feature_range_element_attrs = {'start' : start, 'stop' : stop} return feature_element_attrs, feature_range_element_attrs def create_annotation_ring_tsv(annotation_ring, annotation_file): """ Uses a tsv file to annotate the reference genome. Args: annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes. annotation_file (str): Full path to the file containing annotations for the reference genome. 
""" with open(annotation_file) as tsvfile: reader = csv.DictReader(tsvfile, dialect='excel-tab') # Obtain the annotations from the file contents for row in reader: start = row['#START'] stop = row['STOP'] label = row['Label'] colour = row['Colour'] decoration = row['Decoration'] # Create xml attributes feature_element_attrs, feature_range_element_attrs = create_feature_attrs(label, colour, decoration, start, stop) # Create xml elements feature_element = ET.SubElement(annotation_ring, 'feature', attrib=feature_element_attrs) feature_range_element = ET.SubElement(feature_element, 'featureRange', attrib=feature_range_element_attrs) def annotation_ring_feature_elements_gbk_concat(annotation_ring, record, genome_size=False): """ Creates the annotation ring feature elements, using a concatenated Genbank annotation file. Args: annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes. record (SeqRecord): Object of BioPython containing the information of the input Genbank. genome_size (bool): Size of genome. Integer when a Genbank divided by contigs is provided. Boolean (False) when a concatenated Genbank is provided. """ #if type(genome_size) == int: # Obtain the features of the Genbank file records for fea in record.features: # Get the start and end position of the genome # Also get the strand if fea.type == 'CDS': start = str(fea.location.start.position) end = str(fea.location.end.position) strand = fea.location.strand # Get the label of the gene or product if 'gene' in fea.qualifiers: label = str(fea.qualifiers['gene'][0]) elif 'product' in fea.qualifiers: product = fea.qualifiers['product'][0] label = str(product) else: continue # Define the decoration of the annotation based on the strand if strand == -1: decoration = 'counterclockwise-arrow' elif strand == 1: decoration = 'clockwise-arrow' # Create xml attributes feature_element_attrs, feature_range_element_attrs = create_feature_attrs(label, "black", decoration, start, end) # Create xml elements feature_element = ET.SubElement(annotation_ring, 'feature', attrib=feature_element_attrs) feature_range_element = ET.SubElement(feature_element, 'featureRange', attrib=feature_range_element_attrs) # If a genome size is provided, get the size of the records if type(genome_size) == int: if fea.type == 'source': size = fea.location.end.position try: size genome_size += size return genome_size except NameError: pass def annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, record, genes, genome_size=False): """ Creates the annotation ring feature elements, using a concatenated Genbank annotation file and specific gene annotations. Args: annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes. record (SeqRecord): Object of BioPython containing the information of the input Genbank. genome_size (bool): Size of genome. Integer when a Genbank divided by contigs is provided. Boolean (False) when a concatenated Genbank is provided. 
""" for f in record.features: if f.type == 'CDS': # Find the 'gene' tag and determine if the gene belongs to the specified genes to be annotated if 'gene' in f.qualifiers and f.qualifiers['gene'][0] in genes: label = f.qualifiers['gene'][0] elif 'product' in f.qualifiers and f.qualifiers['product'][0] in genes: product = f.qualifiers['product'][0] label = product else: continue # Determine the start, stop and strand of the gene start = str(f.location.start.position + genome_size) end = str(f.location.end.position + genome_size) strand = f.location.strand # Define the decoration of the annotation based on the strand if strand == -1: decoration = 'counterclockwise-arrow' elif strand == 1: decoration = 'clockwise-arrow' # Create xml attributes feature_element_attrs, feature_range_element_attrs = create_feature_attrs(label, "black", decoration, start, end) # Create xml elements feature_element = ET.SubElement(annotation_ring, 'feature', attrib=feature_element_attrs) feature_range_element = ET.SubElement(feature_element, 'featureRange', attrib=feature_range_element_attrs) # If a genome size is provided, get the size of the records if type(genome_size) == int: if f.type == "source": size = f.location.end.position try: size genome_size += size return genome_size except NameError: pass def create_annotation_ring_gbk_concat(annotation_ring, annotation_file, genes_of_interest, records): """ Create annotation ring using a concatenated Genbank annotation file. Args: annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes. annotation_file (str): Full path to the file containing annotations for the reference genome. genes_of_interest (str): Full path to the file containing the genes to search for in the Genbank file. records (SeqRecord): Object of BioPython containing the information of the input Genbank. """ if genes_of_interest != []: # Get the genes to serach in the Genbank file with open(genes_of_interest, "r") as f: genes = f.readlines() genes = [gene.rstrip() for gene in genes] # Create feature elements of the annotation ring for seq_record in records: annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, seq_record, genes) else: for seq_record in records: annotation_ring_feature_elements_gbk_concat(annotation_ring, seq_record) def create_annotation_ring_gbk_contigs(annotation_ring, annotation_file, records, genes_of_interest, contig_order): """ Create annotation ring using a Genbank annotation file divided by contigs. Args: annotation_ring: ElementTree SubElement object containing the 'ring' tag and its attributes. annotation_file (str): Full path to the file containing annotations for the reference genome. genes_of_interest (str): Full path to the file containing the genes to search for in the Genbank file. records (SeqRecord): Object of BioPython containing the information of the input Genbank. contig_order (str): Full path to the file containing the order of the contigs. 
""" if contig_order != []: with open(contig_order) as tsvfile: reader = csv.DictReader(tsvfile, dialect='excel-tab') # Create an OrderedDict with the contents of the file # The keys are the order are a number representing the order of the contig # The values are the names of the contigs content_dict = OrderedDict() for r in reader: content_dict[r["order"]] = r["contig"] # Create an OrderedDict with the content of each contig # The keys are the names of the contigs # The values are SeqRecord objects from BipPython seq_records_dict = OrderedDict() for record in records: seq_records_dict[record.id] = record if genes_of_interest != []: with open(genes_of_interest, "r") as f: genes = f.readlines() genes = [gene.rstrip() for gene in genes] genome_size = 0 for i in range(1, len(records)+1): ord_record = seq_records_dict[content_dict[str(i)]] gsize = annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, ord_record, genes, genome_size) genome_size = gsize else: genome_size = 0 for i in range(1, len(records)+1): ord_record = seq_records_dict[content_dict[str(i)]] gsize = annotation_ring_feature_elements_gbk_concat(annotation_ring, ord_record, genome_size) genome_size = gsize else: if genes_of_interest != []: with open(genes_of_interest, "r") as f: genes = f.readlines() genes = [gene.rstrip() for gene in genes] for seq_record in records: annotation_ring_feature_elements_genes_of_interest_gbk_concat(annotation_ring, seq_record, genes) else: for seq_record in records: annotation_ring_feature_elements_gbk_concat(annotation_ring, seq_record) def write_xml(root_elem, output_file): """ Writes a xml file. Args: root_elem is a ElementTree Element object containing all the information required for the output file. output_file (str): full path to the output file """ xml_file = ET.tostring(root_elem, encoding='utf8').decode('utf8') pretty_xml_file = minidom.parseString(xml_file).toprettyxml(indent=' ') output_file = output_file + ".xml" with open(output_file, "w") as f: f.write(pretty_xml_file) ####### Create xml elemnts # Create root element def create_root_element(blast_options, legend_position, query_file, output_folder, image_output_file, title, image_format): """ Creates the root element of the xml file and its attributes. Args: blast_options (str): additional options for blast, for example, -evalue or num_threads legend_position (str): position of the legend on the image query_file (str): full path to the query file output_folder (str): full path to the output folder image_output_file (str): full path to the image output file title (str): title of the output image image_format (str): format of the image output file Returns: root: ElementTree Element object containing the BRIG tag and its attributes """ root_attrs = {"blastOptions" : blast_options, "legendPosition" : legend_position, "queryFile" : query_file, "outputFolder" : output_folder, "blastPlus" : "yes", "outputFile" : os.path.join(output_folder, image_output_file), "title" : title, "imageFormat" : image_format, "queryFastaFile" : query_file, "cgXML" : os.path.join(output_folder + "/scratch", os.path.basename(query_file) + ".xml")} root = ET.Element('BRIG', attrib=root_attrs) return root #### Create root children # Create cgview_settings element def create_cgview_settings_element(root, height, width): """ Creates the cgview_settings element of the xml file and its attributes. Args: root: ElementTree Element object containing the BRIG tag and its attributes. 
height (str): height of the output image in pixels width (str): width of the output image in pixels Returns: cgview_settings: ElementTree SubElement object containing the cgview settings tag and its attributes """ cgview_settings_attrs = {"arrowheadLength" : "medium", "backboneColor" : "black", "backboneRadius" : "600", "backboneThickness" : "medium", "backgroundColor" : "white", "borderColor" : "black", "featureSlotSpacing" : "medium", "featureThickness" : "30", "giveFeaturePositions" : "false", "globalLabel" : "true", "height" : height, "isLinear" : "false", "labelFont" : "SansSerif,plain,25", "labelLineLength" : "medium", "labelLineThickness" : "medium", "labelPlacementQuality" : "best", "labelsToKeep" : "1000", "longTickColor" : "black", "minimumFeatureLength" : "medium", "moveInnerLabelsToOuter" :"true", "origin" : "12", "rulerFont" : "SansSerif,plain,35", "rulerFontColor" : "black", "rulerPadding" : "40", "rulerUnits" : "bases", "shortTickColor" : "black", "shortTickThickness" : "medium", "showBorder" : "false", "showShading" : "true", "showWarning" : "false", "tickDensity" : "0.2333", "tickThickness" : "medium", "titleFont" : "SansSerif,plain,45", "titleFontColor" : "black", "useColoredLabelBackgrounds" : "false", "useInnerLabels" : "true", "warningFont" : "Default,plain,35", "warningFontColor" : "black", "width" : width, "zeroTickColor" : "black", "tickLength" : "medium"} cgview_settings = ET.SubElement(root, 'cgview_settings', attrib=cgview_settings_attrs) return cgview_settings # Create brig_settings element def create_brig_settings_element(root, java_memory): """ Creates the brig_settings element of the xml file and its attributes. Args: root: ElementTree Element object containing the BRIG tag and its attributes. java_memory (str): amount of memory (in bytes) java is allowed to use for BRIG Returns: brig_settings: ElementTree SubElement object containing the brig settings tag and its attributes """ brig_settings_attrs = {"Ring1" : "172,14,225", "Ring2" : "222,149,220", "Ring3" : "161,221,231", "Ring4" : "49,34,221", "Ring5" : "116,152,226", "Ring6" : "224,206,38", "Ring7" : "40,191,140", "Ring8" : "158,223,139", "Ring9" : "226,38,122", "Ring10" :"211,41,77", "defaultUpper" : "70", "defaultLower" : "50", "defaultMinimum" : "50", "genbankFiles" : "gbk,gb,genbank", "fastaFiles" : "fna,faa,fas,fasta,fa", "emblFiles" : "embl", "blastLocation" : "", "divider" : "3", "multiplier" : "3", "memory" : java_memory, "defaultSpacer" : "0"} brig_settings = ET.SubElement(root, "brig_settings", attrib=brig_settings_attrs) return brig_settings ## Create special element def create_special_element(root): """Creates the 'special' element of the xml file and its attributes Args: root: ElementTree Element object containing the BRIG tag and its attributes. Returns: gc_content_special: ElementTree SubElement object containing the 'special' tag and its attributes gc_skew_special: ElementTree SubElement object containing the 'special' tag and its attributes """ gc_content_special = ET.SubElement(root, 'special', attrib={'value' : 'GC Content'}) gc_skew_special = ET.SubElement(root, 'special', attrib={'value' : 'GC Skew'}) return gc_content_special, gc_skew_special # Create reference dir element def create_reference_directory_element(root, reference_directory): """ Creates the 'reference directory' element of the xml file and its attributes. Args: root: ElementTree Element object containing the 'BRIG' tag and its attributes. 
reference_directory (str): full path to the reference directory that contains the fasta files used to build the rings. Returns: ref_file: ElementTree SubElement object containing the 'refFile' tag and its attributes """ ref_dir = ET.SubElement(root, "refDir", attrib={"location" : reference_directory}) # Obtain the full path for all the files in the directory ref_dir_list = listdir_fullpath(reference_directory) for f in ref_dir_list: ref_file = ET.SubElement(ref_dir, "refFile", attrib={"location" : f}) return ref_file # Create the ring where the annotations are defined def create_annotation_ring(root, reference_directory, annotation_file, genes_of_interest, contig_order): """ Creates the ring that will contain the annotations for the reference genome. Args: root: ElementTree Element object containing the 'BRIG' tag and its attributes. reference_directory (str): full path to the reference directory that contains the fasta files used to build the rings. annotation_file (str): Full path to the file containing annotations for the reference genome. genes_of_interest (str): Full path to the file containing a list of specific genes. contig_order (str): Full path to the tab-delimited file containing the order of the contigs. """ # Determine the position of the annotation ring, which will be the position after the last reference genome ring_position = len(os.listdir(reference_directory)) + 2 # Create the annotation ring element annotation_ring = ET.SubElement(root, 'ring', attrib=annotation_ring_attributes(str(ring_position))) # Check for tab-delimited annotation file input if list(SeqIO.parse(annotation_file, "genbank")) == []: create_annotation_ring_tsv(annotation_ring, annotation_file) else: # Get the records of the Genbank file records = [r for r in SeqIO.parse(annotation_file, "genbank")] ### Check if a contig order file has been provided if len(records) > 1: # If more than 1 record exists, then the Genbank file is divided by contigs create_annotation_ring_gbk_contigs(annotation_ring, annotation_file, records, genes_of_interest, contig_order) else: create_annotation_ring_gbk_concat(annotation_ring, annotation_file, genes_of_interest, records) ## Create remaining rings def create_ring_element(root, reference_directory, colormap): """ Creates the ring elements of the xml file, containing the position and color of the rings. Args: root: ElementTree Element object containing the 'BRIG' tag and its attributes. reference_directory (str): full path to the reference directory that contains the fasta files used to build the rings. 
colormap (str): name of the colormap (available in matplotlib) to use for the color of the rings Returns: ring_number_element: ElementTree SubElement object containing the 'ring' tag and its attributes ring_sequence_element: ElementTree SubElement object containing the 'sequence' tag and its attributes """ ref_dir_list = listdir_fullpath(reference_directory) # Gets the colormap from matplotlib with as many colors as the number of files cmap = cm.get_cmap(colormap, len(ref_dir_list)) list_colormap = cmap.colors.tolist() # Remove the fourth element (transparency) because it is not necessary colors_to_use = [] for l in list_colormap: convert = [round(x * 255) for x in l] convert.pop() colors_to_use.append(convert) #reversed_colors_to_use = colors_to_use[::-1] # Check if the user provided an order for the rings has_digit = [os.path.basename(x).split("_")[0].isdigit() for x in ref_dir_list] if True in has_digit: # Obtain the ring positions ring_positions = [os.path.basename(x).split("_")[0] for x in ref_dir_list] # Reverse sort the positions of the rings, because they will be created # in a descending order of their positions ring_positions.sort(reverse=True) ref_dir_list.sort(reverse=True) for ring in range(len(ref_dir_list)): # The ring positions start at 2 due to the special rings (GC Content and GC Skew) ring_position = int(ring_positions[ring]) + 1 # Select a color for the ring ring_color = ",".join([str(e) for e in colors_to_use[ring]]) # Define the name of the ring ring_name = os.path.basename(ref_dir_list[ring]).split("_")[1] # Create the xml elements ring_number_element = ET.SubElement(root, 'ring', ring_attributes(ring_color, ring_name, str(ring_position))) ring_sequence_element = ET.SubElement(ring_number_element, "sequence", attrib={"location" : ref_dir_list[ring]}) else: # Sort files by lowercase ref_dir_list.sort(key=lambda y: y.lower()) # The number of rings starts at 2 due to the GC Content and GC Skew ring_number = len(ref_dir_list) + 1 for ring in range(len(ref_dir_list)): # Select a color for the ring ring_color = ",".join([str(e) for e in colors_to_use[ring]]) # Define the name of the ring ring_name = os.path.basename(ref_dir_list[ring]).split("_")[0] # Create the xml elements ring_number_element = ET.SubElement(root, 'ring', ring_attributes(ring_color, ring_name, str(ring_number))) ring_sequence_element = ET.SubElement(ring_number_element, "sequence", attrib={"location" : ref_dir_list[ring]}) ring_number -= 1 return ring_number_element, ring_sequence_element ## Create special rings def create_special_ring_element(root): """ Create the 'special' ring element and its attributes. Args: root: ElementTree Element object containing the 'BRIG' tag and its attributes. 
Returns: gc_content_location: ElementTree SubElement object containing the 'sequence' tag and its attributes gc_skew_location: ElementTree SubElement object containing the 'sequence' tag and its attributes """ # Create ring attributes gc_content_ring_attrs = ring_attributes('225,0,0', "GC Content", "0") gc_skew_ring_attrs = ring_attributes('225,0,0', "GC Skew", "1") # Add ring element to root gc_skew_ring = ET.SubElement(root, 'ring', attrib=gc_skew_ring_attrs) gc_content_ring = ET.SubElement(root, 'ring', attrib=gc_content_ring_attrs) # Add sequence element to ring gc_content_location = ET.SubElement(gc_content_ring, 'sequence', attrib={'location' : 'GC Content'}) gc_skew_location = ET.SubElement(gc_skew_ring, 'sequence', attrib={'location' : 'GC Skew'}) return gc_content_location, gc_skew_location def main(query_file, reference_directory, output_folder, output_xml, image_output_file, title, annotation_file, genes_of_interest, contig_order, blast_options, legend_position, image_format, height, width, java_memory, colormap): root = create_root_element(blast_options, legend_position, query_file, output_folder, image_output_file, title, image_format) cgview_settings = create_cgview_settings_element(root, height, width) brig_settings = create_brig_settings_element(root, java_memory) special = create_special_element(root) refdir = create_reference_directory_element(root, reference_directory) if annotation_file: create_annotation_ring(root, reference_directory, annotation_file, genes_of_interest, contig_order) rings = create_ring_element(root, reference_directory, colormap) special_ring = create_special_ring_element(root) write_xml(root, output_xml) print("\n File written to {}".format(output_xml)) def parse_arguments(): parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter) parser.add_argument('-q', '--query', type=str, required=True, dest='query_file', help='Path to the query/reference FASTA file.') parser.add_argument('-rfd', '--ref_dir', type=str, required=True, dest='reference_directory', help='Path to the directory where the FASTA files to compare against the reference are located.') parser.add_argument('-od', '--out_dir', type=str, required=True, dest='output_folder', help='Path to the output directory for the results of BRIG.') parser.add_argument('-of', '--out_xml', type=str, required=True, dest='output_file', help='Path to the output of this script.') parser.add_argument('-oi', '--out_img', type=str, required=True, dest='image_output_file', help='Path to the output file of the resulting image of BRIG.') parser.add_argument('-t', '--title', type=str, required=True, dest='title', help='Title of the resulting image from BRIG.') parser.add_argument('-a', '--annotation', type=str, required=False, dest='annotation_file', default=False, help='File containing annotations for the reference genome. ' 'The annoation file can be a tab-delimited file (.tsv) or a Genbank format file (.gbk, .gb)') parser.add_argument('--genes', type=str, required=False, dest='genes_of_interest', default=[], help='File containing a list of specific genes (one gene per line) to search when a Genbank annotation file is provided. ') parser.add_argument('--contig_order', type=str, required=False, dest='contig_order', default=[], help='Tab-delimited file containing the order of the contigs when a Genbank (divided by contigs) annotation file is provided. 
' 'Example: order contig ' '1 Contig8')

    parser.add_argument('-b', '--blast_options', type=str, required=False,
                        dest="blast_options", default="-evalue 0.001 -num_threads 6",
                        help='Options for running BLAST.')

    parser.add_argument('-l', '--legend_pos', type=str, required=False,
                        dest="legend_position", default="middle-right",
                        help='Position of the legend on the resulting image. '
                             'The options available are upper, center or lower, '
                             'paired with left, center or right.')

    parser.add_argument('-if', '--image_format', type=str, required=False,
                        dest="image_format", default="jpg",
                        help='Format of the resulting image file. '
                             'The available options are: jpg, png, svg or svgz.')

    parser.add_argument('-ht', '--height', type=str, required=False,
                        dest="height", default="3000",
                        help='Height (in pixels) of the resulting image.')

    parser.add_argument('-wd', '--width', type=str, required=False,
                        dest="width", default="3000",
                        help='Width (in pixels) of the resulting image.')

    parser.add_argument('-jm', '--java_memory', type=str, required=False,
                        dest="java_memory", default="1500",
                        help='Amount of memory (in bytes) that Java is allowed to use for BRIG.')

    parser.add_argument('-cm', '--colormap', type=str, required=False,
                        dest="colormap", default="viridis",
                        help='Colormap from matplotlib to use for the color of the rings. '
                             'The available options are: viridis, plasma, inferno, magma and cividis. '
                             'More options for colormaps at: '
                             'https://matplotlib.org/users/colormaps.html')

    args = parser.parse_args()

    return [args.query_file, args.reference_directory, args.output_folder,
            args.output_file, args.image_output_file, args.title,
            args.annotation_file, args.genes_of_interest, args.contig_order,
            args.blast_options, args.legend_position, args.image_format,
            args.height, args.width, args.java_memory, args.colormap]


if __name__ == '__main__':

    args = parse_arguments()

    main(args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7],
         args[8], args[9], args[10], args[11], args[12], args[13], args[14],
         args[15])
gpl-3.0
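brigaid.py assembles the BRIG configuration as an ElementTree and round-trips it through minidom for indentation (see write_xml above). A self-contained sketch of that write pattern; the element names below are a stripped-down subset, not the full BRIG schema:

import xml.etree.ElementTree as ET
from xml.dom import minidom

root = ET.Element('BRIG', attrib={'title': 'demo'})
ring = ET.SubElement(root, 'ring', attrib={'position': '0', 'colour': '225,0,0'})
ET.SubElement(ring, 'sequence', attrib={'location': 'GC Content'})

# serialize, then re-parse with minidom purely to get pretty indentation
raw = ET.tostring(root, encoding='utf8').decode('utf8')
print(minidom.parseString(raw).toprettyxml(indent='    '))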
yuanagain/seniorthesis
venv/lib/python2.7/site-packages/matplotlib/tests/test_basic.py
7
1290
from __future__ import (absolute_import, division, print_function, unicode_literals) from matplotlib.externals import six from nose.tools import assert_equal from matplotlib.testing.decorators import knownfailureif from pylab import * def test_simple(): assert_equal(1 + 1, 2) @knownfailureif(True) def test_simple_knownfail(): # Test the known fail mechanism. assert_equal(1 + 1, 3) def test_override_builtins(): ok_to_override = set([ '__name__', '__doc__', '__package__', '__loader__', '__spec__', 'any', 'all', 'sum' ]) # We could use six.moves.builtins here, but that seems # to do a little more than just this. if six.PY3: builtins = sys.modules['builtins'] else: builtins = sys.modules['__builtin__'] overridden = False for key in globals().keys(): if key in dir(builtins): if (globals()[key] != getattr(builtins, key) and key not in ok_to_override): print("'%s' was overridden in globals()." % key) overridden = True assert not overridden if __name__ == '__main__': import nose nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
mit
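The override check in test_basic.py can be reproduced against any star import. A small Python 3 sketch of the same idea, using math (whose pow shadows the builtin); this is an analogy, not the matplotlib test itself:

import builtins

ns = {}
exec('from math import *', ns)
# names that exist among the builtins but were rebound by the star import
shadowed = [name for name in ns
            if name in dir(builtins) and ns[name] is not getattr(builtins, name)]
print(shadowed)  # ['pow'] -- math.pow has replaced builtins.pow in ns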
crichardson17/starburst_atlas
Low_resolution_sims/DustFree_LowRes/Geneva_noRot_cont/Geneva_noRot_cont_age5/UV2.py
33
7365
import csv import matplotlib.pyplot as plt from numpy import * import scipy.interpolate import math from pylab import * from matplotlib.ticker import MultipleLocator, FormatStrFormatter import matplotlib.patches as patches from matplotlib.path import Path import os # ------------------------------------------------------------------------------------------------------ #inputs for file in os.listdir('.'): if file.endswith(".grd"): inputfile = file for file in os.listdir('.'): if file.endswith(".txt"): inputfile2 = file # ------------------------------------------------------------------------------------------------------ #Patches data #for the Kewley and Levesque data verts = [ (1., 7.97712125471966000000), # left, bottom (1., 9.57712125471966000000), # left, top (2., 10.57712125471970000000), # right, top (2., 8.97712125471966000000), # right, bottom (0., 0.), # ignored ] codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY, ] path = Path(verts, codes) # ------------------------ #for the Kewley 01 data verts2 = [ (2.4, 9.243038049), # left, bottom (2.4, 11.0211893), # left, top (2.6, 11.0211893), # right, top (2.6, 9.243038049), # right, bottom (0, 0.), # ignored ] path = Path(verts, codes) path2 = Path(verts2, codes) # ------------------------- #for the Moy et al data verts3 = [ (1., 6.86712125471966000000), # left, bottom (1., 10.18712125471970000000), # left, top (3., 12.18712125471970000000), # right, top (3., 8.86712125471966000000), # right, bottom (0., 0.), # ignored ] path = Path(verts, codes) path3 = Path(verts3, codes) # ------------------------------------------------------------------------------------------------------ #the routine to add patches for others peoples' data onto our plots. def add_patches(ax): patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0) patch2 = patches.PathPatch(path2, facecolor='green', lw=0) patch = patches.PathPatch(path, facecolor='red', lw=0) ax1.add_patch(patch3) ax1.add_patch(patch2) ax1.add_patch(patch) # ------------------------------------------------------------------------------------------------------ #the subplot routine def add_sub_plot(sub_num): numplots = 16 plt.subplot(numplots/4.,4,sub_num) rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear') zi = rbf(xi, yi) contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed') contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5) plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*') plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10) plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10) if sub_num == numplots / 2.: print "half the plots are complete" #axis limits yt_min = 8 yt_max = 23 xt_min = 0 xt_max = 12 plt.ylim(yt_min,yt_max) plt.xlim(xt_min,xt_max) plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10) plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10) if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]: plt.tick_params(labelleft = 'off') else: plt.tick_params(labelleft = 'on') plt.ylabel('Log ($ \phi _{\mathrm{H}} $)') if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]: plt.tick_params(labelbottom = 'off') else: plt.tick_params(labelbottom = 'on') plt.xlabel('Log($n _{\mathrm{H}} $)') if sub_num == 1: plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10) if sub_num == 13: 
plt.yticks(arange(yt_min,yt_max,1),fontsize=10)
        plt.xticks(arange(xt_min,xt_max,1), fontsize = 10)
    if sub_num == 16:
        plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10)
# ---------------------------------------------------
#this is where the grid information (phi and hdens) is read in and saved to grid.
grid = [];
with open(inputfile, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    for row in csvReader:
        grid.append(row);
grid = asarray(grid)
#here is where the data for each line is read in and saved to dataEmissionlines
dataEmissionlines = [];
with open(inputfile2, 'rb') as f:
    csvReader = csv.reader(f,delimiter='\t')
    headers = csvReader.next()
    for row in csvReader:
        dataEmissionlines.append(row);
dataEmissionlines = asarray(dataEmissionlines)
print "import files complete"
# ---------------------------------------------------
#for grid
phi_values = grid[1:len(dataEmissionlines)+1,6]
hdens_values = grid[1:len(dataEmissionlines)+1,7]
#for lines
headers = headers[1:]
Emissionlines = dataEmissionlines[:, 1:]
concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0])))
max_values = zeros((len(Emissionlines[0]),4))
#select the scaling factor
#for 1215
#incident = Emissionlines[1:,4]
#for 4860
incident = Emissionlines[:,57]
#take the ratio of incident and all the lines and put it all in an array concatenated_data
for i in range(len(Emissionlines)):
    for j in range(len(Emissionlines[0])):
        if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0:
            concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10)
        else:
            concatenated_data[i,j] = 0
# for 1215
#for i in range(len(Emissionlines)):
#    for j in range(len(Emissionlines[0])):
#        if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0:
#            concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10)
#        else:
#            concatenated_data[i,j] = 0
#find the maxima to plot onto the contour plots
for j in range(len(concatenated_data[0])):
    max_values[j,0] = max(concatenated_data[:,j])
    max_values[j,1] = argmax(concatenated_data[:,j], axis = 0)
    max_values[j,2] = hdens_values[max_values[j,1]]
    max_values[j,3] = phi_values[max_values[j,1]]
#to round off the maxima
max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ]
print "data arranged"
# ---------------------------------------------------
#Creating the grid to interpolate with for contours.
gridarray = zeros((len(Emissionlines),2))
gridarray[:,0] = hdens_values
gridarray[:,1] = phi_values
x = gridarray[:,0]
y = gridarray[:,1]
#change desired lines here!
line = [18, #1549
        19, #1640
        20, #1665
        21, #1671
        23, #1750
        24, #1860
        25, #1888
        26, #1907
        27, #2297
        28, #2321
        29, #2471
        30, #2326
        31, #2335
        32, #2665
        33, #2798
        34] #2803
#create z array for this plot
z = concatenated_data[:,line[:]]
# ---------------------------------------------------
# Interpolate
print "starting interpolation"
xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10)
xi, yi = meshgrid(xi, yi)
# ---------------------------------------------------
print "interpolation complete; now plotting"
#plot
plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots
levels = arange(10**-1,10, .2)
levels2 = arange(10**-2,10**2, 1)
plt.suptitle("UV Lines Continued", fontsize=14)
# ---------------------------------------------------
for i in range(16):
    add_sub_plot(i)
ax1 = plt.subplot(4,4,1)
add_patches(ax1)
print "complete"
plt.savefig('UV_Lines_cntd.pdf')
plt.clf()
gpl-2.0
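The contour machinery in UV2.py reduces to RBF interpolation of scattered (hdens, phi) samples followed by plt.contour. A minimal standalone sketch with synthetic data standing in for the Cloudy grid output:

import numpy as np
import scipy.interpolate
import matplotlib
matplotlib.use('Agg')  # headless backend, as in the scripts above
import matplotlib.pyplot as plt

rng = np.random.RandomState(0)
x, y = rng.rand(100), rng.rand(100)    # scattered sample coordinates
z = np.sin(6 * x) * np.cos(6 * y)      # synthetic values at those points

rbf = scipy.interpolate.Rbf(x, y, z, function='linear')
xi, yi = np.meshgrid(np.linspace(0, 1, 50), np.linspace(0, 1, 50))
plt.contour(xi, yi, rbf(xi, yi), colors='k')
plt.savefig('rbf_contour.png')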
mlperf/training_results_v0.7
NVIDIA/benchmarks/minigo/implementations/tensorflow/minigo/oneoffs/training_curve.py
8
5964
# Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Used to plot the accuracy of the policy and value networks in predicting professional game moves and results over the course of training. Check FLAGS for default values for what models to load and what sgf files to parse. Usage: python training_curve.py Sample 3 positions from each game python training_curve.py --num_positions=3 Only grab games after 2005 (default is 2000) python training_curve.py --min_year=2005 """ import sys sys.path.insert(0, '.') import os.path import numpy as np import matplotlib.pyplot as plt import pandas as pd from absl import app, flags from tqdm import tqdm import coords from rl_loop import fsdb import oneoff_utils flags.DEFINE_string("sgf_dir", None, "sgf database") flags.DEFINE_string("plot_dir", "data", "Where to save the plots.") flags.DEFINE_integer("min_year", "2000", "Only take sgf games with date >= min_year") flags.DEFINE_string("komi", "7.5", "Only take sgf games with given komi") flags.DEFINE_integer("idx_start", 150, "Only take models after given idx") flags.DEFINE_integer("num_positions", 1, "How many positions from each game to sample from.") flags.DEFINE_integer("eval_every", 5, "Eval every k models to generate the curve") flags.mark_flag_as_required('sgf_dir') FLAGS = flags.FLAGS def batch_run_many(player, positions, batch_size=100): """Used to avoid a memory oveflow issue when running the network on too many positions. 
TODO: This should be a member function of player.network?""" prob_list = [] value_list = [] for idx in range(0, len(positions), batch_size): probs, values = player.network.run_many(positions[idx:idx + batch_size]) prob_list.append(probs) value_list.append(values) return np.concatenate(prob_list, axis=0), np.concatenate(value_list, axis=0) def eval_player(player, positions, moves, results): probs, values = batch_run_many(player, positions) policy_moves = [coords.from_flat(c) for c in np.argmax(probs, axis=1)] top_move_agree = [moves[idx] == policy_moves[idx] for idx in range(len(moves))] square_err = (values - results) ** 2 / 4 return top_move_agree, square_err def sample_positions_from_games(sgf_files, num_positions=1): pos_data = [] move_data = [] result_data = [] move_idxs = [] fail_count = 0 for path in tqdm(sgf_files, desc="loading sgfs", unit="games"): try: positions, moves, results = oneoff_utils.parse_sgf_to_examples(path) except KeyboardInterrupt: raise except Exception as e: print("Parse exception:", e) fail_count += 1 continue # add entire game if num_positions == -1: pos_data.extend(positions) move_data.extend(moves) move_idxs.extend(range(len(positions))) result_data.extend(results) else: for idx in np.random.choice(len(positions), num_positions): pos_data.append(positions[idx]) move_data.append(moves[idx]) result_data.append(results[idx]) move_idxs.append(idx) print("Sampled {} positions, failed to parse {} files".format( len(pos_data), fail_count)) return pos_data, move_data, result_data, move_idxs def get_training_curve_data( model_dir, pos_data, move_data, result_data, idx_start, eval_every): model_paths = oneoff_utils.get_model_paths(model_dir) df = pd.DataFrame() player = None print("Evaluating models {}-{}, eval_every={}".format( idx_start, len(model_paths), eval_every)) for idx in tqdm(range(idx_start, len(model_paths), eval_every)): if player: oneoff_utils.restore_params(model_paths[idx], player) else: player = oneoff_utils.load_player(model_paths[idx]) correct, squared_errors = eval_player( player=player, positions=pos_data, moves=move_data, results=result_data) avg_acc = np.mean(correct) avg_mse = np.mean(squared_errors) print("Model: {}, acc: {:.4f}, mse: {:.4f}".format( model_paths[idx], avg_acc, avg_mse)) df = df.append({"num": idx, "acc": avg_acc, "mse": avg_mse}, ignore_index=True) return df def save_plots(data_dir, df): plt.plot(df["num"], df["acc"]) plt.xlabel("Model idx") plt.ylabel("Accuracy") plt.title("Accuracy in Predicting Professional Moves") plot_path = os.path.join(data_dir, "move_acc.pdf") plt.savefig(plot_path) plt.figure() plt.plot(df["num"], df["mse"]) plt.xlabel("Model idx") plt.ylabel("MSE/4") plt.title("MSE in predicting outcome") plot_path = os.path.join(data_dir, "value_mse.pdf") plt.savefig(plot_path) def main(unusedargv): sgf_files = oneoff_utils.find_and_filter_sgf_files( FLAGS.sgf_dir, FLAGS.min_year, FLAGS.komi) pos_data, move_data, result_data, move_idxs = sample_positions_from_games( sgf_files=sgf_files, num_positions=FLAGS.num_positions) df = get_training_curve_data(fsdb.models_dir(), pos_data, move_data, result_data, FLAGS.idx_start, FLAGS.eval_every) save_plots(FLAGS.plot_dir, df) if __name__ == "__main__": app.run(main)
apache-2.0
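eval_player above boils each checkpoint down to two scalars: the fraction of positions where the policy argmax matches the professional move, and the value head's squared error scaled by 1/4 so an error on [-1, 1] maps into [0, 1]. A toy illustration with made-up predictions:

import numpy as np

moves        = ['D4', 'Q16', 'C3']         # professional moves (hypothetical)
policy_moves = ['D4', 'Q16', 'D17']        # network argmax moves
values       = np.array([0.8, -0.3, 0.5])  # value-head outputs in [-1, 1]
results      = np.array([1.0, -1.0, 1.0])  # actual game outcomes

acc = np.mean([m == p for m, p in zip(moves, policy_moves)])
mse = np.mean((values - results) ** 2 / 4)
print(acc, mse)  # 0.666..., 0.065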
RachitKansal/scikit-learn
examples/model_selection/plot_confusion_matrix.py
244
2496
""" ================ Confusion matrix ================ Example of confusion matrix usage to evaluate the quality of the output of a classifier on the iris data set. The diagonal elements represent the number of points for which the predicted label is equal to the true label, while off-diagonal elements are those that are mislabeled by the classifier. The higher the diagonal values of the confusion matrix the better, indicating many correct predictions. The figures show the confusion matrix with and without normalization by class support size (number of elements in each class). This kind of normalization can be interesting in case of class imbalance to have a more visual interpretation of which class is being misclassified. Here the results are not as good as they could be as our choice for the regularization parameter C was not the best. In real life applications this parameter is usually chosen using :ref:`grid_search`. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm, datasets from sklearn.cross_validation import train_test_split from sklearn.metrics import confusion_matrix # import some data to play with iris = datasets.load_iris() X = iris.data y = iris.target # Split the data into a training set and a test set X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0) # Run classifier, using a model that is too regularized (C too low) to see # the impact on the results classifier = svm.SVC(kernel='linear', C=0.01) y_pred = classifier.fit(X_train, y_train).predict(X_test) def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues): plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(iris.target_names)) plt.xticks(tick_marks, iris.target_names, rotation=45) plt.yticks(tick_marks, iris.target_names) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') # Compute confusion matrix cm = confusion_matrix(y_test, y_pred) np.set_printoptions(precision=2) print('Confusion matrix, without normalization') print(cm) plt.figure() plot_confusion_matrix(cm) # Normalize the confusion matrix by row (i.e by the number of samples # in each class) cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] print('Normalized confusion matrix') print(cm_normalized) plt.figure() plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix') plt.show()
bsd-3-clause
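Row normalization in the example above divides each confusion-matrix row by its class support, turning counts into per-class recall fractions. A tiny worked instance of the same computation:

import numpy as np
from sklearn.metrics import confusion_matrix

y_true = [0, 0, 1, 1, 1, 2]
y_pred = [0, 1, 1, 1, 2, 2]
cm = confusion_matrix(y_true, y_pred)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print(cm)             # raw counts; rows are true classes
print(cm_normalized)  # each row now sums to 1.0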
lthurlow/Network-Grapher
proj/external/matplotlib-1.2.1/build/lib.linux-i686-2.7/matplotlib/transforms.py
2
88425
""" matplotlib includes a framework for arbitrary geometric transformations that is used determine the final position of all elements drawn on the canvas. Transforms are composed into trees of :class:`TransformNode` objects whose actual value depends on their children. When the contents of children change, their parents are automatically invalidated. The next time an invalidated transform is accessed, it is recomputed to reflect those changes. This invalidation/caching approach prevents unnecessary recomputations of transforms, and contributes to better interactive performance. For example, here is a graph of the transform tree used to plot data to the graph: .. image:: ../_static/transforms.png The framework can be used for both affine and non-affine transformations. However, for speed, we want use the backend renderers to perform affine transformations whenever possible. Therefore, it is possible to perform just the affine or non-affine part of a transformation on a set of data. The affine is always assumed to occur after the non-affine. For any transform:: full transform == non-affine part + affine part The backends are not expected to handle non-affine transformations themselves. """ from __future__ import print_function, division import numpy as np from numpy import ma from matplotlib._path import (affine_transform, count_bboxes_overlapping_bbox, update_path_extents) from numpy.linalg import inv from weakref import WeakValueDictionary import warnings try: set except NameError: from sets import Set as set from path import Path DEBUG = False MaskedArray = ma.MaskedArray class TransformNode(object): """ :class:`TransformNode` is the base class for anything that participates in the transform tree and needs to invalidate its parents or be invalidated. This includes classes that are not really transforms, such as bounding boxes, since some transforms depend on bounding boxes to compute their values. """ _gid = 0 # Invalidation may affect only the affine part. If the # invalidation was "affine-only", the _invalid member is set to # INVALID_AFFINE_ONLY INVALID_NON_AFFINE = 1 INVALID_AFFINE = 2 INVALID = INVALID_NON_AFFINE | INVALID_AFFINE # Some metadata about the transform, used to determine whether an # invalidation is affine-only is_affine = False is_bbox = False pass_through = False """ If pass_through is True, all ancestors will always be invalidated, even if 'self' is already invalid. """ def __init__(self, shorthand_name=None): """ Creates a new :class:`TransformNode`. **shorthand_name** - a string representing the "name" of this transform. The name carries no significance other than to improve the readability of ``str(transform)`` when DEBUG=True. """ # Parents are stored in a WeakValueDictionary, so that if the # parents are deleted, references from the children won't keep # them alive. self._parents = WeakValueDictionary() # TransformNodes start out as invalid until their values are # computed for the first time. 
self._invalid = 1 self._shorthand_name = shorthand_name or '' if DEBUG: def __str__(self): # either just return the name of this TransformNode, or it's repr return self._shorthand_name or repr(self) def __getstate__(self): d = self.__dict__.copy() # turn the weakkey dictionary into a normal dictionary d['_parents'] = dict(self._parents.iteritems()) return d def __setstate__(self, data_dict): self.__dict__ = data_dict # turn the normal dictionary back into a WeakValueDictionary self._parents = WeakValueDictionary(self._parents) def __copy__(self, *args): raise NotImplementedError( "TransformNode instances can not be copied. " + "Consider using frozen() instead.") __deepcopy__ = __copy__ def invalidate(self): """ Invalidate this :class:`TransformNode` and triggers an invalidation of its ancestors. Should be called any time the transform changes. """ value = self.INVALID if self.is_affine: value = self.INVALID_AFFINE return self._invalidate_internal(value, invalidating_node=self) def _invalidate_internal(self, value, invalidating_node): """ Called by :meth:`invalidate` and subsequently ascends the transform stack calling each TransformNode's _invalidate_internal method. """ # determine if this call will be an extension to the invalidation # status. If not, then a shortcut means that we needn't invoke an # invalidation up the transform stack as it will already have been # invalidated. # N.B This makes the invalidation sticky, once a transform has been # invalidated as NON_AFFINE, then it will always be invalidated as # NON_AFFINE even when triggered with a AFFINE_ONLY invalidation. # In most cases this is not a problem (i.e. for interactive panning and # zooming) and the only side effect will be on performance. status_changed = self._invalid < value if self.pass_through or status_changed: self._invalid = value for parent in self._parents.values(): parent._invalidate_internal(value=value, invalidating_node=self) def set_children(self, *children): """ Set the children of the transform, to let the invalidation system know which transforms can invalidate this transform. Should be called from the constructor of any transforms that depend on other transforms. """ for child in children: child._parents[id(self)] = self if DEBUG: _set_children = set_children def set_children(self, *children): self._set_children(*children) self._children = children set_children.__doc__ = _set_children.__doc__ def frozen(self): """ Returns a frozen copy of this transform node. The frozen copy will not update when its children change. Useful for storing a previously known state of a transform where ``copy.deepcopy()`` might normally be used. """ return self if DEBUG: def write_graphviz(self, fobj, highlight=[]): """ For debugging purposes. Writes the transform tree rooted at 'self' to a graphviz "dot" format file. This file can be run through the "dot" utility to produce a graph of the transform tree. Affine transforms are marked in blue. Bounding boxes are marked in yellow. 
*fobj*: A Python file-like object Once the "dot" file has been created, it can be turned into a png easily with:: $> dot -Tpng -o $OUTPUT_FILE $DOT_FILE """ seen = set() def recurse(root): if root in seen: return seen.add(root) props = {} label = root.__class__.__name__ if root._invalid: label = '[%s]' % label if root in highlight: props['style'] = 'bold' props['shape'] = 'box' props['label'] = '"%s"' % label props = ' '.join(['%s=%s' % (key, val) for key, val in props.iteritems()]) fobj.write('%s [%s];\n' % (hash(root), props)) if hasattr(root, '_children'): for child in root._children: name = '?' for key, val in root.__dict__.iteritems(): if val is child: name = key break fobj.write('"%s" -> "%s" [label="%s", fontsize=10];\n' % (hash(root), hash(child), name)) recurse(child) fobj.write("digraph G {\n") recurse(self) fobj.write("}\n") class BboxBase(TransformNode): """ This is the base class of all bounding boxes, and provides read-only access to its data. A mutable bounding box is provided by the :class:`Bbox` class. The canonical representation is as two points, with no restrictions on their ordering. Convenience properties are provided to get the left, bottom, right and top edges and width and height, but these are not stored explicitly. """ is_bbox = True is_affine = True #* Redundant: Removed for performance # # def __init__(self): # TransformNode.__init__(self) if DEBUG: def _check(points): if ma.isMaskedArray(points): warnings.warn("Bbox bounds are a masked array.") points = np.asarray(points) if (points[1, 0] - points[0, 0] == 0 or points[1, 1] - points[0, 1] == 0): warnings.warn("Singular Bbox.") _check = staticmethod(_check) def frozen(self): return Bbox(self.get_points().copy()) frozen.__doc__ = TransformNode.__doc__ def __array__(self, *args, **kwargs): return self.get_points() def is_unit(self): """ Returns True if the :class:`Bbox` is the unit bounding box from (0, 0) to (1, 1). """ return list(self.get_points().flatten()) == [0., 0., 1., 1.] def _get_x0(self): return self.get_points()[0, 0] x0 = property(_get_x0, None, None, """ (property) :attr:`x0` is the first of the pair of *x* coordinates that define the bounding box. :attr:`x0` is not guaranteed to be less than :attr:`x1`. If you require that, use :attr:`xmin`.""") def _get_y0(self): return self.get_points()[0, 1] y0 = property(_get_y0, None, None, """ (property) :attr:`y0` is the first of the pair of *y* coordinates that define the bounding box. :attr:`y0` is not guaranteed to be less than :attr:`y1`. If you require that, use :attr:`ymin`.""") def _get_x1(self): return self.get_points()[1, 0] x1 = property(_get_x1, None, None, """ (property) :attr:`x1` is the second of the pair of *x* coordinates that define the bounding box. :attr:`x1` is not guaranteed to be greater than :attr:`x0`. If you require that, use :attr:`xmax`.""") def _get_y1(self): return self.get_points()[1, 1] y1 = property(_get_y1, None, None, """ (property) :attr:`y1` is the second of the pair of *y* coordinates that define the bounding box. :attr:`y1` is not guaranteed to be greater than :attr:`y0`. If you require that, use :attr:`ymax`.""") def _get_p0(self): return self.get_points()[0] p0 = property(_get_p0, None, None, """ (property) :attr:`p0` is the first pair of (*x*, *y*) coordinates that define the bounding box. It is not guaranteed to be the bottom-left corner. 
        For that, use :attr:`min`.""")

    def _get_p1(self):
        return self.get_points()[1]
    p1 = property(_get_p1, None, None, """
        (property) :attr:`p1` is the second pair of (*x*, *y*) coordinates
        that define the bounding box.  It is not guaranteed to be the
        top-right corner.  For that, use :attr:`max`.""")

    def _get_xmin(self):
        return min(self.get_points()[:, 0])
    xmin = property(_get_xmin, None, None, """
        (property) :attr:`xmin` is the left edge of the bounding box.""")

    def _get_ymin(self):
        return min(self.get_points()[:, 1])
    ymin = property(_get_ymin, None, None, """
        (property) :attr:`ymin` is the bottom edge of the bounding box.""")

    def _get_xmax(self):
        return max(self.get_points()[:, 0])
    xmax = property(_get_xmax, None, None, """
        (property) :attr:`xmax` is the right edge of the bounding box.""")

    def _get_ymax(self):
        return max(self.get_points()[:, 1])
    ymax = property(_get_ymax, None, None, """
        (property) :attr:`ymax` is the top edge of the bounding box.""")

    def _get_min(self):
        return [min(self.get_points()[:, 0]),
                min(self.get_points()[:, 1])]
    min = property(_get_min, None, None, """
        (property) :attr:`min` is the bottom-left corner of the bounding
        box.""")

    def _get_max(self):
        return [max(self.get_points()[:, 0]),
                max(self.get_points()[:, 1])]
    max = property(_get_max, None, None, """
        (property) :attr:`max` is the top-right corner of the bounding
        box.""")

    def _get_intervalx(self):
        return self.get_points()[:, 0]
    intervalx = property(_get_intervalx, None, None, """
        (property) :attr:`intervalx` is the pair of *x* coordinates that
        define the bounding box.  It is not guaranteed to be sorted from
        left to right.""")

    def _get_intervaly(self):
        return self.get_points()[:, 1]
    intervaly = property(_get_intervaly, None, None, """
        (property) :attr:`intervaly` is the pair of *y* coordinates that
        define the bounding box.  It is not guaranteed to be sorted from
        bottom to top.""")

    def _get_width(self):
        points = self.get_points()
        return points[1, 0] - points[0, 0]
    width = property(_get_width, None, None, """
        (property) The width of the bounding box.  It may be negative if
        :attr:`x1` < :attr:`x0`.""")

    def _get_height(self):
        points = self.get_points()
        return points[1, 1] - points[0, 1]
    height = property(_get_height, None, None, """
        (property) The height of the bounding box.  It may be negative if
        :attr:`y1` < :attr:`y0`.""")

    def _get_size(self):
        points = self.get_points()
        return points[1] - points[0]
    size = property(_get_size, None, None, """
        (property) The width and height of the bounding box.  May be
        negative, in the same way as :attr:`width` and :attr:`height`.""")

    def _get_bounds(self):
        x0, y0, x1, y1 = self.get_points().flatten()
        return (x0, y0, x1 - x0, y1 - y0)
    bounds = property(_get_bounds, None, None, """
        (property) Returns (:attr:`x0`, :attr:`y0`, :attr:`width`,
        :attr:`height`).""")

    def _get_extents(self):
        return self.get_points().flatten().copy()
    extents = property(_get_extents, None, None, """
        (property) Returns (:attr:`x0`, :attr:`y0`, :attr:`x1`,
        :attr:`y1`).""")

    def get_points(self):
        raise NotImplementedError()

    def containsx(self, x):
        """
        Returns True if *x* is between or equal to :attr:`x0` and
        :attr:`x1`.
        """
        x0, x1 = self.intervalx
        return ((x0 < x1
                 and (x >= x0 and x <= x1))
                or (x >= x1 and x <= x0))

    def containsy(self, y):
        """
        Returns True if *y* is between or equal to :attr:`y0` and
        :attr:`y1`.
        """
        y0, y1 = self.intervaly
        return ((y0 < y1
                 and (y >= y0 and y <= y1))
                or (y >= y1 and y <= y0))

    def contains(self, x, y):
        """
        Returns *True* if (*x*, *y*) is a coordinate inside the bounding
        box or on its edge.
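
        For example, a quick check against the unit box::

            Bbox.from_extents(0., 0., 1., 1.).contains(0.5, 0.5)   # True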
""" return self.containsx(x) and self.containsy(y) def overlaps(self, other): """ Returns True if this bounding box overlaps with the given bounding box *other*. """ ax1, ay1, ax2, ay2 = self._get_extents() bx1, by1, bx2, by2 = other._get_extents() if ax2 < ax1: ax2, ax1 = ax1, ax2 if ay2 < ay1: ay2, ay1 = ay1, ay2 if bx2 < bx1: bx2, bx1 = bx1, bx2 if by2 < by1: by2, by1 = by1, by2 return not ((bx2 < ax1) or (by2 < ay1) or (bx1 > ax2) or (by1 > ay2)) def fully_containsx(self, x): """ Returns True if *x* is between but not equal to :attr:`x0` and :attr:`x1`. """ x0, x1 = self.intervalx return ((x0 < x1 and (x > x0 and x < x1)) or (x > x1 and x < x0)) def fully_containsy(self, y): """ Returns True if *y* is between but not equal to :attr:`y0` and :attr:`y1`. """ y0, y1 = self.intervaly return ((y0 < y1 and (y > y0 and y < y1)) or (y > y1 and y < y0)) def fully_contains(self, x, y): """ Returns True if (*x*, *y*) is a coordinate inside the bounding box, but not on its edge. """ return self.fully_containsx(x) \ and self.fully_containsy(y) def fully_overlaps(self, other): """ Returns True if this bounding box overlaps with the given bounding box *other*, but not on its edge alone. """ ax1, ay1, ax2, ay2 = self._get_extents() bx1, by1, bx2, by2 = other._get_extents() if ax2 < ax1: ax2, ax1 = ax1, ax2 if ay2 < ay1: ay2, ay1 = ay1, ay2 if bx2 < bx1: bx2, bx1 = bx1, bx2 if by2 < by1: by2, by1 = by1, by2 return not ((bx2 <= ax1) or (by2 <= ay1) or (bx1 >= ax2) or (by1 >= ay2)) def transformed(self, transform): """ Return a new :class:`Bbox` object, statically transformed by the given transform. """ return Bbox(transform.transform(self.get_points())) def inverse_transformed(self, transform): """ Return a new :class:`Bbox` object, statically transformed by the inverse of the given transform. """ return Bbox(transform.inverted().transform(self.get_points())) coefs = {'C': (0.5, 0.5), 'SW': (0, 0), 'S': (0.5, 0), 'SE': (1.0, 0), 'E': (1.0, 0.5), 'NE': (1.0, 1.0), 'N': (0.5, 1.0), 'NW': (0, 1.0), 'W': (0, 0.5)} def anchored(self, c, container=None): """ Return a copy of the :class:`Bbox`, shifted to position *c* within a container. *c*: may be either: * a sequence (*cx*, *cy*) where *cx* and *cy* range from 0 to 1, where 0 is left or bottom and 1 is right or top * a string: - 'C' for centered - 'S' for bottom-center - 'SE' for bottom-left - 'E' for left - etc. Optional argument *container* is the box within which the :class:`Bbox` is positioned; it defaults to the initial :class:`Bbox`. """ if container is None: container = self l, b, w, h = container.bounds if isinstance(c, basestring): cx, cy = self.coefs[c] else: cx, cy = c L, B, W, H = self.bounds return Bbox(self._points + [(l + cx * (w - W)) - L, (b + cy * (h - H)) - B]) def shrunk(self, mx, my): """ Return a copy of the :class:`Bbox`, shrunk by the factor *mx* in the *x* direction and the factor *my* in the *y* direction. The lower left corner of the box remains unchanged. Normally *mx* and *my* will be less than 1, but this is not enforced. """ w, h = self.size return Bbox([self._points[0], self._points[0] + [mx * w, my * h]]) def shrunk_to_aspect(self, box_aspect, container=None, fig_aspect=1.0): """ Return a copy of the :class:`Bbox`, shrunk so that it is as large as it can be while having the desired aspect ratio, *box_aspect*. 
        If the box coordinates are relative---that is, fractions of a
        larger box such as a figure---then the physical aspect ratio
        of that figure is specified with *fig_aspect*, so that
        *box_aspect* can also be given as a ratio of the absolute
        dimensions, not the relative dimensions.
        """
        assert box_aspect > 0 and fig_aspect > 0
        if container is None:
            container = self
        w, h = container.size
        H = w * box_aspect / fig_aspect
        if H <= h:
            W = w
        else:
            W = h * fig_aspect / box_aspect
            H = h
        return Bbox([self._points[0],
                     self._points[0] + (W, H)])

    def splitx(self, *args):
        """
        e.g., ``bbox.splitx(f1, f2, ...)``

        Returns a list of new :class:`Bbox` objects formed by
        splitting the original one with vertical lines at fractional
        positions *f1*, *f2*, ...
        """
        boxes = []
        xf = [0] + list(args) + [1]
        x0, y0, x1, y1 = self._get_extents()
        w = x1 - x0
        for xf0, xf1 in zip(xf[:-1], xf[1:]):
            boxes.append(Bbox([[x0 + xf0 * w, y0], [x0 + xf1 * w, y1]]))
        return boxes

    def splity(self, *args):
        """
        e.g., ``bbox.splity(f1, f2, ...)``

        Returns a list of new :class:`Bbox` objects formed by
        splitting the original one with horizontal lines at fractional
        positions *f1*, *f2*, ...
        """
        boxes = []
        yf = [0] + list(args) + [1]
        x0, y0, x1, y1 = self._get_extents()
        h = y1 - y0
        for yf0, yf1 in zip(yf[:-1], yf[1:]):
            boxes.append(Bbox([[x0, y0 + yf0 * h], [x1, y0 + yf1 * h]]))
        return boxes

    def count_contains(self, vertices):
        """
        Count the number of vertices contained in the :class:`Bbox`.

        *vertices* is a Nx2 Numpy array.
        """
        if len(vertices) == 0:
            return 0
        vertices = np.asarray(vertices)
        x0, y0, x1, y1 = self._get_extents()
        dx0 = np.sign(vertices[:, 0] - x0)
        dy0 = np.sign(vertices[:, 1] - y0)
        dx1 = np.sign(vertices[:, 0] - x1)
        dy1 = np.sign(vertices[:, 1] - y1)
        inside = (abs(dx0 + dx1) + abs(dy0 + dy1)) <= 2
        return np.sum(inside)

    def count_overlaps(self, bboxes):
        """
        Count the number of bounding boxes that overlap this one.

        bboxes is a sequence of :class:`BboxBase` objects
        """
        return count_bboxes_overlapping_bbox(self, bboxes)

    def expanded(self, sw, sh):
        """
        Return a new :class:`Bbox` which is this :class:`Bbox` expanded
        around its center by the given factors *sw* and *sh*.
        """
        width = self.width
        height = self.height
        deltaw = (sw * width - width) / 2.0
        deltah = (sh * height - height) / 2.0
        a = np.array([[-deltaw, -deltah], [deltaw, deltah]])
        return Bbox(self._points + a)

    def padded(self, p):
        """
        Return a new :class:`Bbox` that is padded on all four sides by
        the given value.
        """
        points = self.get_points()
        return Bbox(points + [[-p, -p], [p, p]])

    def translated(self, tx, ty):
        """
        Return a copy of the :class:`Bbox`, statically translated by
        *tx* and *ty*.
        """
        return Bbox(self._points + (tx, ty))

    def corners(self):
        """
        Return an array of points which are the four corners of this
        rectangle.  For example, if this :class:`Bbox` is defined by
        the points (*a*, *b*) and (*c*, *d*), :meth:`corners` returns
        (*a*, *b*), (*a*, *d*), (*c*, *b*) and (*c*, *d*).
        """
        l, b, r, t = self.get_points().flatten()
        return np.array([[l, b], [l, t], [r, b], [r, t]])

    def rotated(self, radians):
        """
        Return a new bounding box that bounds this bounding box rotated
        by the given number of radians.  The new bounding box is still
        aligned with the axes, of course.
        """
        corners = self.corners()
        corners_rotated = Affine2D().rotate(radians).transform(corners)
        bbox = Bbox.unit()
        bbox.update_from_data_xy(corners_rotated, ignore=True)
        return bbox

    @staticmethod
    def union(bboxes):
        """
        Return a :class:`Bbox` that contains all of the given bboxes.
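
        For example, combining two disjoint boxes::

            bbox = Bbox.union([Bbox.from_extents(0., 0., 1., 1.),
                               Bbox.from_extents(2., 2., 3., 3.)])
            # bbox.extents is now (0.0, 0.0, 3.0, 3.0)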
""" assert(len(bboxes)) if len(bboxes) == 1: return bboxes[0] x0 = np.inf y0 = np.inf x1 = -np.inf y1 = -np.inf for bbox in bboxes: points = bbox.get_points() xs = points[:, 0] ys = points[:, 1] x0 = min(x0, np.min(xs)) y0 = min(y0, np.min(ys)) x1 = max(x1, np.max(xs)) y1 = max(y1, np.max(ys)) return Bbox.from_extents(x0, y0, x1, y1) class Bbox(BboxBase): """ A mutable bounding box. """ def __init__(self, points, **kwargs): """ *points*: a 2x2 numpy array of the form [[x0, y0], [x1, y1]] If you need to create a :class:`Bbox` object from another form of data, consider the static methods :meth:`unit`, :meth:`from_bounds` and :meth:`from_extents`. """ BboxBase.__init__(self, **kwargs) self._points = np.asarray(points, np.float_) self._minpos = np.array([0.0000001, 0.0000001]) self._ignore = True # it is helpful in some contexts to know if the bbox is a # default or has been mutated; we store the orig points to # support the mutated methods self._points_orig = self._points.copy() if DEBUG: ___init__ = __init__ def __init__(self, points, **kwargs): self._check(points) self.___init__(points, **kwargs) def invalidate(self): self._check(self._points) TransformNode.invalidate(self) _unit_values = np.array([[0.0, 0.0], [1.0, 1.0]], np.float_) @staticmethod def unit(): """ (staticmethod) Create a new unit :class:`Bbox` from (0, 0) to (1, 1). """ return Bbox(Bbox._unit_values.copy()) @staticmethod def from_bounds(x0, y0, width, height): """ (staticmethod) Create a new :class:`Bbox` from *x0*, *y0*, *width* and *height*. *width* and *height* may be negative. """ return Bbox.from_extents(x0, y0, x0 + width, y0 + height) @staticmethod def from_extents(*args): """ (staticmethod) Create a new Bbox from *left*, *bottom*, *right* and *top*. The *y*-axis increases upwards. """ points = np.array(args, dtype=np.float_).reshape(2, 2) return Bbox(points) def __repr__(self): return 'Bbox(%r)' % repr(self._points) def ignore(self, value): """ Set whether the existing bounds of the box should be ignored by subsequent calls to :meth:`update_from_data` or :meth:`update_from_data_xy`. *value*: - When True, subsequent calls to :meth:`update_from_data` will ignore the existing bounds of the :class:`Bbox`. - When False, subsequent calls to :meth:`update_from_data` will include the existing bounds of the :class:`Bbox`. """ self._ignore = value def update_from_data(self, x, y, ignore=None): """ Update the bounds of the :class:`Bbox` based on the passed in data. After updating, the bounds will have positive *width* and *height*; *x0* and *y0* will be the minimal values. *x*: a numpy array of *x*-values *y*: a numpy array of *y*-values *ignore*: - when True, ignore the existing bounds of the :class:`Bbox`. - when False, include the existing bounds of the :class:`Bbox`. - when None, use the last value passed to :meth:`ignore`. """ warnings.warn( "update_from_data requires a memory copy -- please replace with " "update_from_data_xy") xy = np.hstack((x.reshape((len(x), 1)), y.reshape((len(y), 1)))) return self.update_from_data_xy(xy, ignore) def update_from_path(self, path, ignore=None, updatex=True, updatey=True): """ Update the bounds of the :class:`Bbox` based on the passed in data. After updating, the bounds will have positive *width* and *height*; *x0* and *y0* will be the minimal values. *path*: a :class:`~matplotlib.path.Path` instance *ignore*: - when True, ignore the existing bounds of the :class:`Bbox`. - when False, include the existing bounds of the :class:`Bbox`. 
- when None, use the last value passed to :meth:`ignore`. *updatex*: when True, update the x values *updatey*: when True, update the y values """ if ignore is None: ignore = self._ignore if path.vertices.size == 0: return points, minpos, changed = update_path_extents( path, None, self._points, self._minpos, ignore) if changed: self.invalidate() if updatex: self._points[:, 0] = points[:, 0] self._minpos[0] = minpos[0] if updatey: self._points[:, 1] = points[:, 1] self._minpos[1] = minpos[1] def update_from_data_xy(self, xy, ignore=None, updatex=True, updatey=True): """ Update the bounds of the :class:`Bbox` based on the passed in data. After updating, the bounds will have positive *width* and *height*; *x0* and *y0* will be the minimal values. *xy*: a numpy array of 2D points *ignore*: - when True, ignore the existing bounds of the :class:`Bbox`. - when False, include the existing bounds of the :class:`Bbox`. - when None, use the last value passed to :meth:`ignore`. *updatex*: when True, update the x values *updatey*: when True, update the y values """ if len(xy) == 0: return path = Path(xy) self.update_from_path(path, ignore=ignore, updatex=updatex, updatey=updatey) def _set_x0(self, val): self._points[0, 0] = val self.invalidate() x0 = property(BboxBase._get_x0, _set_x0) def _set_y0(self, val): self._points[0, 1] = val self.invalidate() y0 = property(BboxBase._get_y0, _set_y0) def _set_x1(self, val): self._points[1, 0] = val self.invalidate() x1 = property(BboxBase._get_x1, _set_x1) def _set_y1(self, val): self._points[1, 1] = val self.invalidate() y1 = property(BboxBase._get_y1, _set_y1) def _set_p0(self, val): self._points[0] = val self.invalidate() p0 = property(BboxBase._get_p0, _set_p0) def _set_p1(self, val): self._points[1] = val self.invalidate() p1 = property(BboxBase._get_p1, _set_p1) def _set_intervalx(self, interval): self._points[:, 0] = interval self.invalidate() intervalx = property(BboxBase._get_intervalx, _set_intervalx) def _set_intervaly(self, interval): self._points[:, 1] = interval self.invalidate() intervaly = property(BboxBase._get_intervaly, _set_intervaly) def _set_bounds(self, bounds): l, b, w, h = bounds points = np.array([[l, b], [l + w, b + h]], np.float_) if np.any(self._points != points): self._points = points self.invalidate() bounds = property(BboxBase._get_bounds, _set_bounds) def _get_minpos(self): return self._minpos minpos = property(_get_minpos) def _get_minposx(self): return self._minpos[0] minposx = property(_get_minposx) def _get_minposy(self): return self._minpos[1] minposy = property(_get_minposy) def get_points(self): """ Get the points of the bounding box directly as a numpy array of the form: [[x0, y0], [x1, y1]]. """ self._invalid = 0 return self._points def set_points(self, points): """ Set the points of the bounding box directly from a numpy array of the form: [[x0, y0], [x1, y1]]. No error checking is performed, as this method is mainly for internal use. """ if np.any(self._points != points): self._points = points self.invalidate() def set(self, other): """ Set this bounding box from the "frozen" bounds of another :class:`Bbox`. 
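
        For example::

            dst = Bbox.unit()
            dst.set(Bbox.from_extents(0., 0., 2., 2.))
            # dst now has the same bounds as the box passed in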
""" if np.any(self._points != other.get_points()): self._points = other.get_points() self.invalidate() def mutated(self): 'return whether the bbox has changed since init' return self.mutatedx() or self.mutatedy() def mutatedx(self): 'return whether the x-limits have changed since init' return (self._points[0, 0] != self._points_orig[0, 0] or self._points[1, 0] != self._points_orig[1, 0]) def mutatedy(self): 'return whether the y-limits have changed since init' return (self._points[0, 1] != self._points_orig[0, 1] or self._points[1, 1] != self._points_orig[1, 1]) class TransformedBbox(BboxBase): """ A :class:`Bbox` that is automatically transformed by a given transform. When either the child bounding box or transform changes, the bounds of this bbox will update accordingly. """ def __init__(self, bbox, transform, **kwargs): """ *bbox*: a child :class:`Bbox` *transform*: a 2D :class:`Transform` """ assert bbox.is_bbox assert isinstance(transform, Transform) assert transform.input_dims == 2 assert transform.output_dims == 2 BboxBase.__init__(self, **kwargs) self._bbox = bbox self._transform = transform self.set_children(bbox, transform) self._points = None def __repr__(self): return "TransformedBbox(%r, %r)" % (self._bbox, self._transform) def get_points(self): if self._invalid: points = self._transform.transform(self._bbox.get_points()) points = np.ma.filled(points, 0.0) self._points = points self._invalid = 0 return self._points get_points.__doc__ = Bbox.get_points.__doc__ if DEBUG: _get_points = get_points def get_points(self): points = self._get_points() self._check(points) return points class Transform(TransformNode): """ The base class of all :class:`TransformNode` instances that actually perform a transformation. All non-affine transformations should be subclasses of this class. New affine transformations should be subclasses of :class:`Affine2D`. Subclasses of this class should override the following members (at minimum): - :attr:`input_dims` - :attr:`output_dims` - :meth:`transform` - :attr:`is_separable` - :attr:`has_inverse` - :meth:`inverted` (if :attr:`has_inverse` is True) If the transform needs to do something non-standard with :class:`matplotlib.path.Path` objects, such as adding curves where there were once line segments, it should override: - :meth:`transform_path` """ input_dims = None """ The number of input dimensions of this transform. Must be overridden (with integers) in the subclass. """ output_dims = None """ The number of output dimensions of this transform. Must be overridden (with integers) in the subclass. """ has_inverse = False """True if this transform has a corresponding inverse transform.""" is_separable = False """True if this transform is separable in the x- and y- dimensions.""" def __add__(self, other): """ Composes two transforms together such that *self* is followed by *other*. """ if isinstance(other, Transform): return composite_transform_factory(self, other) raise TypeError( "Can not add Transform to object of type '%s'" % type(other)) def __radd__(self, other): """ Composes two transforms together such that *self* is followed by *other*. """ if isinstance(other, Transform): return composite_transform_factory(other, self) raise TypeError( "Can not add Transform to object of type '%s'" % type(other)) def __eq__(self, other): # equality is based on transform object id. Hence: # Transform() != Transform(). # Some classes, such as TransformWrapper & AffineBase, will override. 
return self is other def _iter_break_from_left_to_right(self): """ Returns an iterator breaking down this transform stack from left to right recursively. If self == ((A, N), A) then the result will be an iterator which yields I : ((A, N), A), followed by A : (N, A), followed by (A, N) : (A), but not ((A, N), A) : I. This is equivalent to flattening the stack then yielding ``flat_stack[:i], flat_stack[i:]`` where i=0..(n-1). """ yield IdentityTransform(), self @property def depth(self): """ Returns the number of transforms which have been chained together to form this Transform instance. .. note:: For the special case of a Composite transform, the maximum depth of the two is returned. """ return 1 def contains_branch(self, other): """ Return whether the given transform is a sub-tree of this transform. This routine uses transform equality to identify sub-trees, therefore in many situations it is object id which will be used. For the case where the given transform represents the whole of this transform, returns True. """ if self.depth < other.depth: return False # check that a subtree is equal to other (starting from self) for _, sub_tree in self._iter_break_from_left_to_right(): if sub_tree == other: return True return False def contains_branch_seperately(self, other_transform): """ Returns whether the given branch is a sub-tree of this transform on each seperate dimension. A common use for this method is to identify if a transform is a blended transform containing an axes' data transform. e.g.:: x_isdata, y_isdata = trans.contains_branch_seperately(ax.transData) """ if self.output_dims != 2: raise ValueError('contains_branch_seperately only supports ' 'transforms with 2 output dimensions') # for a non-blended transform each seperate dimension is the same, so # just return the appropriate shape. return [self.contains_branch(other_transform)] * 2 def __sub__(self, other): """ Returns a transform stack which goes all the way down self's transform stack, and then ascends back up other's stack. If it can, this is optimised:: # normally A - B == a + b.inverted() # sometimes, when A contains the tree B there is no need to # descend all the way down to the base of A (via B), instead we # can just stop at B. (A + B) - (B)^-1 == A # similarly, when B contains tree A, we can avoid decending A at # all, basically: A - (A + B) == ((B + A) - A).inverted() or B^-1 For clarity, the result of ``(A + B) - B + B == (A + B)``. """ # we only know how to do this operation if other is a Transform. if not isinstance(other, Transform): return NotImplemented for remainder, sub_tree in self._iter_break_from_left_to_right(): if sub_tree == other: return remainder for remainder, sub_tree in other._iter_break_from_left_to_right(): if sub_tree == self: if not remainder.has_inverse: raise ValueError("The shortcut cannot be computed since " "other's transform includes a non-invertable component.") return remainder.inverted() # if we have got this far, then there was no shortcut possible if other.has_inverse: return self + other.inverted() else: raise ValueError('It is not possible to compute transA - transB ' 'since transB cannot be inverted and there is no ' 'shortcut possible.') def __array__(self, *args, **kwargs): """ Array interface to get at this Transform's affine matrix. """ return self.get_affine().get_matrix() def transform(self, values): """ Performs the transformation on the given array of values. 
Accepts a numpy array of shape (N x :attr:`input_dims`) and returns a numpy array of shape (N x :attr:`output_dims`). """ return self.transform_affine(self.transform_non_affine(values)) def transform_affine(self, values): """ Performs only the affine part of this transformation on the given array of values. ``transform(values)`` is always equivalent to ``transform_affine(transform_non_affine(values))``. In non-affine transformations, this is generally a no-op. In affine transformations, this is equivalent to ``transform(values)``. Accepts a numpy array of shape (N x :attr:`input_dims`) and returns a numpy array of shape (N x :attr:`output_dims`). """ return self.get_affine().transform(values) def transform_non_affine(self, values): """ Performs only the non-affine part of the transformation. ``transform(values)`` is always equivalent to ``transform_affine(transform_non_affine(values))``. In non-affine transformations, this is generally equivalent to ``transform(values)``. In affine transformations, this is always a no-op. Accepts a numpy array of shape (N x :attr:`input_dims`) and returns a numpy array of shape (N x :attr:`output_dims`). """ return values def get_affine(self): """ Get the affine part of this transform. """ return IdentityTransform() def get_matrix(self): """ Get the Affine transformation array for the affine part of this transform. """ return self.get_affine().get_matrix() def transform_point(self, point): """ A convenience function that returns the transformed copy of a single point. The point is given as a sequence of length :attr:`input_dims`. The transformed point is returned as a sequence of length :attr:`output_dims`. """ assert len(point) == self.input_dims return self.transform(np.asarray([point]))[0] def transform_path(self, path): """ Returns a transformed path. *path*: a :class:`~matplotlib.path.Path` instance. In some cases, this transform may insert curves into the path that began as line segments. """ return self.transform_path_affine(self.transform_path_non_affine(path)) def transform_path_affine(self, path): """ Returns a path, transformed only by the affine part of this transform. *path*: a :class:`~matplotlib.path.Path` instance. ``transform_path(path)`` is equivalent to ``transform_path_affine(transform_path_non_affine(values))``. """ return self.get_affine().transform_path_affine(path) def transform_path_non_affine(self, path): """ Returns a path, transformed only by the non-affine part of this transform. *path*: a :class:`~matplotlib.path.Path` instance. ``transform_path(path)`` is equivalent to ``transform_path_affine(transform_path_non_affine(values))``. """ return Path(self.transform_non_affine(path.vertices), path.codes, path._interpolation_steps) def transform_angles(self, angles, pts, radians=False, pushoff=1e-5): """ Performs transformation on a set of angles anchored at specific locations. The *angles* must be a column vector (i.e., numpy array). The *pts* must be a two-column numpy array of x,y positions (angle transforms currently only work in 2D). This array must have the same number of rows as *angles*. *radians* indicates whether or not input angles are given in radians (True) or degrees (False; the default). *pushoff* is the distance to move away from *pts* for determining transformed angles (see discussion of method below). The transformed angles are returned in an array with the same size as *angles*. 
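
        For example, under ``Affine2D().scale(2.0, 1.0)`` an input angle of
        45 degrees maps to ``arctan(0.5)`` (about 26.6 degrees), since the
        *x* component of the angle's direction vector is stretched by the
        transform.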
The generic version of this method uses a very generic algorithm that transforms *pts*, as well as locations very close to *pts*, to find the angle in the transformed system. """ # Must be 2D if self.input_dims != 2 or self.output_dims != 2: raise NotImplementedError('Only defined in 2D') # pts must be array with 2 columns for x,y assert pts.shape[1] == 2 # angles must be a column vector and have same number of # rows as pts assert np.prod(angles.shape) == angles.shape[0] == pts.shape[0] # Convert to radians if desired if not radians: angles = angles / 180.0 * np.pi # Move a short distance away pts2 = pts + pushoff * np.c_[np.cos(angles), np.sin(angles)] # Transform both sets of points tpts = self.transform(pts) tpts2 = self.transform(pts2) # Calculate transformed angles d = tpts2 - tpts a = np.arctan2(d[:, 1], d[:, 0]) # Convert back to degrees if desired if not radians: a = a * 180.0 / np.pi return a def inverted(self): """ Return the corresponding inverse transformation. The return value of this method should be treated as temporary. An update to *self* does not cause a corresponding update to its inverted copy. ``x === self.inverted().transform(self.transform(x))`` """ raise NotImplementedError() class TransformWrapper(Transform): """ A helper class that holds a single child transform and acts equivalently to it. This is useful if a node of the transform tree must be replaced at run time with a transform of a different type. This class allows that replacement to correctly trigger invalidation. Note that :class:`TransformWrapper` instances must have the same input and output dimensions during their entire lifetime, so the child transform may only be replaced with another child transform of the same dimensions. """ pass_through = True def __init__(self, child): """ *child*: A class:`Transform` instance. This child may later be replaced with :meth:`set`. """ assert isinstance(child, Transform) Transform.__init__(self) self.input_dims = child.input_dims self.output_dims = child.output_dims self._set(child) self._invalid = 0 def __eq__(self, other): return self._child.__eq__(other) if DEBUG: def __str__(self): return str(self._child) def __getstate__(self): # only store the child return {'child': self._child} def __setstate__(self, state): # re-initialise the TransformWrapper with the state's child self.__init__(state['child']) def __repr__(self): return "TransformWrapper(%r)" % self._child def frozen(self): return self._child.frozen() frozen.__doc__ = Transform.frozen.__doc__ def _set(self, child): self._child = child self.set_children(child) self.transform = child.transform self.transform_affine = child.transform_affine self.transform_non_affine = child.transform_non_affine self.transform_path = child.transform_path self.transform_path_affine = child.transform_path_affine self.transform_path_non_affine = child.transform_path_non_affine self.get_affine = child.get_affine self.inverted = child.inverted self.get_matrix = child.get_matrix # note we do not wrap other properties here since the transform's # child can be changed with WrappedTransform.set and so checking # is_affine and other such properties may be dangerous. def set(self, child): """ Replace the current child of this transform with another one. The new child must have the same number of input and output dimensions as the current child. 
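
        For example::

            wrapper = TransformWrapper(Affine2D())
            wrapper.set(Affine2D().scale(2.0))   # same dimensions, so allowed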
""" assert child.input_dims == self.input_dims assert child.output_dims == self.output_dims self._set(child) self._invalid = 0 self.invalidate() self._invalid = 0 def _get_is_affine(self): return self._child.is_affine is_affine = property(_get_is_affine) def _get_is_separable(self): return self._child.is_separable is_separable = property(_get_is_separable) def _get_has_inverse(self): return self._child.has_inverse has_inverse = property(_get_has_inverse) class AffineBase(Transform): """ The base class of all affine transformations of any number of dimensions. """ is_affine = True def __init__(self, *args, **kwargs): Transform.__init__(self, *args, **kwargs) self._inverted = None def __array__(self, *args, **kwargs): # optimises the access of the transform matrix vs the superclass return self.get_matrix() @staticmethod def _concat(a, b): """ Concatenates two transformation matrices (represented as numpy arrays) together. """ return np.dot(b, a) def __eq__(self, other): if other.is_affine: return np.all(self.get_matrix() == other.get_matrix()) return NotImplemented def transform(self, values): return self.transform_affine(values) transform.__doc__ = Transform.transform.__doc__ def transform_affine(self, values): raise NotImplementedError('Affine subclasses should override this ' 'method.') transform_affine.__doc__ = Transform.transform_affine.__doc__ def transform_non_affine(self, points): return points transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__ def transform_path(self, path): return self.transform_path_affine(path) transform_path.__doc__ = Transform.transform_path.__doc__ def transform_path_affine(self, path): return Path(self.transform_affine(path.vertices), path.codes, path._interpolation_steps) transform_path_affine.__doc__ = Transform.transform_path_affine.__doc__ def transform_path_non_affine(self, path): return path transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__ def get_affine(self): return self get_affine.__doc__ = Transform.get_affine.__doc__ class Affine2DBase(AffineBase): """ The base class of all 2D affine transformations. 2D affine transformations are performed using a 3x3 numpy array:: a c e b d f 0 0 1 This class provides the read-only interface. For a mutable 2D affine transformation, use :class:`Affine2D`. Subclasses of this class will generally only need to override a constructor and :meth:`get_matrix` that generates a custom 3x3 matrix. 
""" has_inverse = True input_dims = 2 output_dims = 2 def frozen(self): return Affine2D(self.get_matrix().copy()) frozen.__doc__ = AffineBase.frozen.__doc__ def _get_is_separable(self): mtx = self.get_matrix() return mtx[0, 1] == 0.0 and mtx[1, 0] == 0.0 is_separable = property(_get_is_separable) def to_values(self): """ Return the values of the matrix as a sequence (a,b,c,d,e,f) """ mtx = self.get_matrix() return tuple(mtx[:2].swapaxes(0, 1).flatten()) @staticmethod def matrix_from_values(a, b, c, d, e, f): """ (staticmethod) Create a new transformation matrix as a 3x3 numpy array of the form:: a c e b d f 0 0 1 """ return np.array([[a, c, e], [b, d, f], [0.0, 0.0, 1.0]], np.float_) def transform_affine(self, points): mtx = self.get_matrix() if isinstance(points, MaskedArray): tpoints = affine_transform(points.data, mtx) return ma.MaskedArray(tpoints, mask=ma.getmask(points)) return affine_transform(points, mtx) def transform_point(self, point): mtx = self.get_matrix() return affine_transform(point, mtx) transform_point.__doc__ = AffineBase.transform_point.__doc__ if DEBUG: _transform_affine = transform_affine def transform_affine(self, points): # The major speed trap here is just converting to the # points to an array in the first place. If we can use # more arrays upstream, that should help here. if (not ma.isMaskedArray(points) and not isinstance(points, np.ndarray)): warnings.warn( ('A non-numpy array of type %s was passed in for ' + 'transformation. Please correct this.') % type(points)) return self._transform_affine(points) transform_affine.__doc__ = AffineBase.transform_affine.__doc__ def inverted(self): if self._inverted is None or self._invalid: mtx = self.get_matrix() shorthand_name = None if self._shorthand_name: shorthand_name = '(%s)-1' % self._shorthand_name self._inverted = Affine2D(inv(mtx), shorthand_name=shorthand_name) self._invalid = 0 return self._inverted inverted.__doc__ = AffineBase.inverted.__doc__ class Affine2D(Affine2DBase): """ A mutable 2D affine transformation. """ def __init__(self, matrix=None, **kwargs): """ Initialize an Affine transform from a 3x3 numpy float array:: a c e b d f 0 0 1 If *matrix* is None, initialize with the identity transform. """ Affine2DBase.__init__(self, **kwargs) if matrix is None: matrix = np.identity(3) elif DEBUG: matrix = np.asarray(matrix, np.float_) assert matrix.shape == (3, 3) self._mtx = matrix self._invalid = 0 def __repr__(self): return "Affine2D(%s)" % repr(self._mtx) # def __cmp__(self, other): # # XXX redundant. this only tells us eq. # if (isinstance(other, Affine2D) and # (self.get_matrix() == other.get_matrix()).all()): # return 0 # return -1 @staticmethod def from_values(a, b, c, d, e, f): """ (staticmethod) Create a new Affine2D instance from the given values:: a c e b d f 0 0 1 . """ return Affine2D( np.array([a, c, e, b, d, f, 0.0, 0.0, 1.0], np.float_) .reshape((3, 3))) def get_matrix(self): """ Get the underlying transformation matrix as a 3x3 numpy array:: a c e b d f 0 0 1 . """ self._invalid = 0 return self._mtx def set_matrix(self, mtx): """ Set the underlying transformation matrix from a 3x3 numpy array:: a c e b d f 0 0 1 . """ self._mtx = mtx self.invalidate() def set(self, other): """ Set this transformation from the frozen copy of another :class:`Affine2DBase` object. """ assert isinstance(other, Affine2DBase) self._mtx = other.get_matrix() self.invalidate() @staticmethod def identity(): """ (staticmethod) Return a new :class:`Affine2D` object that is the identity transform. 
Unless this transform will be mutated later on, consider using the faster :class:`IdentityTransform` class instead. """ return Affine2D(np.identity(3)) def clear(self): """ Reset the underlying matrix to the identity transform. """ self._mtx = np.identity(3) self.invalidate() return self def rotate(self, theta): """ Add a rotation (in radians) to this transform in place. Returns *self*, so this method can easily be chained with more calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate` and :meth:`scale`. """ a = np.cos(theta) b = np.sin(theta) rotate_mtx = np.array( [[a, -b, 0.0], [b, a, 0.0], [0.0, 0.0, 1.0]], np.float_) self._mtx = np.dot(rotate_mtx, self._mtx) self.invalidate() return self def rotate_deg(self, degrees): """ Add a rotation (in degrees) to this transform in place. Returns *self*, so this method can easily be chained with more calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate` and :meth:`scale`. """ return self.rotate(degrees * np.pi / 180.) def rotate_around(self, x, y, theta): """ Add a rotation (in radians) around the point (x, y) in place. Returns *self*, so this method can easily be chained with more calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate` and :meth:`scale`. """ return self.translate(-x, -y).rotate(theta).translate(x, y) def rotate_deg_around(self, x, y, degrees): """ Add a rotation (in degrees) around the point (x, y) in place. Returns *self*, so this method can easily be chained with more calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate` and :meth:`scale`. """ return self.translate(-x, -y).rotate_deg(degrees).translate(x, y) def translate(self, tx, ty): """ Adds a translation in place. Returns *self*, so this method can easily be chained with more calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate` and :meth:`scale`. """ translate_mtx = np.array( [[1.0, 0.0, tx], [0.0, 1.0, ty], [0.0, 0.0, 1.0]], np.float_) self._mtx = np.dot(translate_mtx, self._mtx) self.invalidate() return self def scale(self, sx, sy=None): """ Adds a scale in place. If *sy* is None, the same scale is applied in both the *x*- and *y*-directions. Returns *self*, so this method can easily be chained with more calls to :meth:`rotate`, :meth:`rotate_deg`, :meth:`translate` and :meth:`scale`. """ if sy is None: sy = sx scale_mtx = np.array( [[sx, 0.0, 0.0], [0.0, sy, 0.0], [0.0, 0.0, 1.0]], np.float_) self._mtx = np.dot(scale_mtx, self._mtx) self.invalidate() return self def _get_is_separable(self): mtx = self.get_matrix() return mtx[0, 1] == 0.0 and mtx[1, 0] == 0.0 is_separable = property(_get_is_separable) class IdentityTransform(Affine2DBase): """ A special class that does on thing, the identity transform, in a fast way. 
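
    For example::

        IdentityTransform().transform_point((1.0, 2.0))   # comes back unchanged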
""" _mtx = np.identity(3) def frozen(self): return self frozen.__doc__ = Affine2DBase.frozen.__doc__ def __repr__(self): return "IdentityTransform()" def get_matrix(self): return self._mtx get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__ def transform(self, points): return points transform.__doc__ = Affine2DBase.transform.__doc__ transform_affine = transform transform_affine.__doc__ = Affine2DBase.transform_affine.__doc__ transform_non_affine = transform transform_non_affine.__doc__ = Affine2DBase.transform_non_affine.__doc__ def transform_path(self, path): return path transform_path.__doc__ = Affine2DBase.transform_path.__doc__ transform_path_affine = transform_path transform_path_affine.__doc__ = Affine2DBase.transform_path_affine.__doc__ transform_path_non_affine = transform_path transform_path_non_affine.__doc__ = Affine2DBase.transform_path_non_affine.__doc__ def get_affine(self): return self get_affine.__doc__ = Affine2DBase.get_affine.__doc__ inverted = get_affine inverted.__doc__ = Affine2DBase.inverted.__doc__ class BlendedGenericTransform(Transform): """ A "blended" transform uses one transform for the *x*-direction, and another transform for the *y*-direction. This "generic" version can handle any given child transform in the *x*- and *y*-directions. """ input_dims = 2 output_dims = 2 is_separable = True pass_through = True def __init__(self, x_transform, y_transform, **kwargs): """ Create a new "blended" transform using *x_transform* to transform the *x*-axis and *y_transform* to transform the *y*-axis. You will generally not call this constructor directly but use the :func:`blended_transform_factory` function instead, which can determine automatically which kind of blended transform to create. """ # Here we ask: "Does it blend?" Transform.__init__(self, **kwargs) self._x = x_transform self._y = y_transform self.set_children(x_transform, y_transform) self._affine = None def __eq__(self, other): # Note, this is an exact copy of BlendedAffine2D.__eq__ if isinstance(other, (BlendedAffine2D, BlendedGenericTransform)): return (self._x == other._x) and (self._y == other._y) elif self._x == self._y: return self._x == other else: return NotImplemented def contains_branch_seperately(self, transform): # Note, this is an exact copy of BlendedAffine2D.contains_branch_seperately return self._x.contains_branch(transform), self._y.contains_branch(transform) @property def depth(self): return max([self._x.depth, self._y.depth]) def contains_branch(self, other): # a blended transform cannot possibly contain a branch from two different transforms. 
return False def _get_is_affine(self): return self._x.is_affine and self._y.is_affine is_affine = property(_get_is_affine) def _get_has_inverse(self): return self._x.has_inverse and self._y.has_inverse has_inverse = property(_get_has_inverse) def frozen(self): return blended_transform_factory(self._x.frozen(), self._y.frozen()) frozen.__doc__ = Transform.frozen.__doc__ def __repr__(self): return "BlendedGenericTransform(%s,%s)" % (self._x, self._y) def transform_non_affine(self, points): if self._x.is_affine and self._y.is_affine: return points x = self._x y = self._y if x == y and x.input_dims == 2: return x.transform_non_affine(points) if x.input_dims == 2: x_points = x.transform_non_affine(points)[:, 0:1] else: x_points = x.transform_non_affine(points[:, 0]) x_points = x_points.reshape((len(x_points), 1)) if y.input_dims == 2: y_points = y.transform_non_affine(points)[:, 1:] else: y_points = y.transform_non_affine(points[:, 1]) y_points = y_points.reshape((len(y_points), 1)) if isinstance(x_points, MaskedArray) or isinstance(y_points, MaskedArray): return ma.concatenate((x_points, y_points), 1) else: return np.concatenate((x_points, y_points), 1) transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__ def inverted(self): return BlendedGenericTransform(self._x.inverted(), self._y.inverted()) inverted.__doc__ = Transform.inverted.__doc__ def get_affine(self): if self._invalid or self._affine is None: if self._x == self._y: self._affine = self._x.get_affine() else: x_mtx = self._x.get_affine().get_matrix() y_mtx = self._y.get_affine().get_matrix() # This works because we already know the transforms are # separable, though normally one would want to set b and # c to zero. mtx = np.vstack((x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0])) self._affine = Affine2D(mtx) self._invalid = 0 return self._affine get_affine.__doc__ = Transform.get_affine.__doc__ class BlendedAffine2D(Affine2DBase): """ A "blended" transform uses one transform for the *x*-direction, and another transform for the *y*-direction. This version is an optimization for the case where both child transforms are of type :class:`Affine2DBase`. """ is_separable = True def __init__(self, x_transform, y_transform, **kwargs): """ Create a new "blended" transform using *x_transform* to transform the *x*-axis and *y_transform* to transform the *y*-axis. Both *x_transform* and *y_transform* must be 2D affine transforms. You will generally not call this constructor directly but use the :func:`blended_transform_factory` function instead, which can determine automatically which kind of blended transform to create. 
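
        For example::

            blend = blended_transform_factory(Affine2D().scale(2.0),
                                              Affine2D().scale(3.0))
            # both children are affine, so a BlendedAffine2D is returned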
""" assert x_transform.is_affine assert y_transform.is_affine assert x_transform.is_separable assert y_transform.is_separable Transform.__init__(self, **kwargs) self._x = x_transform self._y = y_transform self.set_children(x_transform, y_transform) Affine2DBase.__init__(self) self._mtx = None def __eq__(self, other): # Note, this is an exact copy of BlendedGenericTransform.__eq__ if isinstance(other, (BlendedAffine2D, BlendedGenericTransform)): return (self._x == other._x) and (self._y == other._y) elif self._x == self._y: return self._x == other else: return NotImplemented def contains_branch_seperately(self, transform): # Note, this is an exact copy of BlendedTransform.contains_branch_seperately return self._x.contains_branch(transform), self._y.contains_branch(transform) def __repr__(self): return "BlendedAffine2D(%s,%s)" % (self._x, self._y) def get_matrix(self): if self._invalid: if self._x == self._y: self._mtx = self._x.get_matrix() else: x_mtx = self._x.get_matrix() y_mtx = self._y.get_matrix() # This works because we already know the transforms are # separable, though normally one would want to set b and # c to zero. self._mtx = np.vstack((x_mtx[0], y_mtx[1], [0.0, 0.0, 1.0])) self._inverted = None self._invalid = 0 return self._mtx get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__ def blended_transform_factory(x_transform, y_transform): """ Create a new "blended" transform using *x_transform* to transform the *x*-axis and *y_transform* to transform the *y*-axis. A faster version of the blended transform is returned for the case where both child transforms are affine. """ if (isinstance(x_transform, Affine2DBase) and isinstance(y_transform, Affine2DBase)): return BlendedAffine2D(x_transform, y_transform) return BlendedGenericTransform(x_transform, y_transform) class CompositeGenericTransform(Transform): """ A composite transform formed by applying transform *a* then transform *b*. This "generic" version can handle any two arbitrary transformations. """ pass_through = True def __init__(self, a, b, **kwargs): """ Create a new composite transform that is the result of applying transform *a* then transform *b*. You will generally not call this constructor directly but use the :func:`composite_transform_factory` function instead, which can automatically choose the best kind of composite transform instance to create. """ assert a.output_dims == b.input_dims self.input_dims = a.input_dims self.output_dims = b.output_dims Transform.__init__(self, **kwargs) self._a = a self._b = b self.set_children(a, b) is_affine = property(lambda self: self._a.is_affine and self._b.is_affine) def frozen(self): self._invalid = 0 frozen = composite_transform_factory(self._a.frozen(), self._b.frozen()) if not isinstance(frozen, CompositeGenericTransform): return frozen.frozen() return frozen frozen.__doc__ = Transform.frozen.__doc__ def _invalidate_internal(self, value, invalidating_node): # In some cases for a composite transform, an invalidating call to AFFINE_ONLY needs # to be extended to invalidate the NON_AFFINE part too. 
        # These cases are when the right hand transform is non-affine and
        # either:
        # (a) the left hand transform is non affine
        # (b) it is the left hand node which has triggered the invalidation
        if value == Transform.INVALID_AFFINE \
                and not self._b.is_affine \
                and (not self._a.is_affine or invalidating_node is self._a):
            value = Transform.INVALID

        Transform._invalidate_internal(self, value=value,
                                       invalidating_node=invalidating_node)

    def __eq__(self, other):
        if isinstance(other, (CompositeGenericTransform, CompositeAffine2D)):
            return self is other or (self._a == other._a
                                     and self._b == other._b)
        else:
            return False

    def _iter_break_from_left_to_right(self):
        for lh_compliment, rh_compliment in self._a._iter_break_from_left_to_right():
            yield lh_compliment, rh_compliment + self._b
        for lh_compliment, rh_compliment in self._b._iter_break_from_left_to_right():
            yield self._a + lh_compliment, rh_compliment

    @property
    def depth(self):
        return self._a.depth + self._b.depth

    def _get_is_affine(self):
        return self._a.is_affine and self._b.is_affine
    is_affine = property(_get_is_affine)

    def _get_is_separable(self):
        return self._a.is_separable and self._b.is_separable
    is_separable = property(_get_is_separable)

    if DEBUG:
        def __str__(self):
            return '(%s, %s)' % (self._a, self._b)

    def __repr__(self):
        return "CompositeGenericTransform(%r, %r)" % (self._a, self._b)

    def transform_affine(self, points):
        return self.get_affine().transform(points)
    transform_affine.__doc__ = Transform.transform_affine.__doc__

    def transform_non_affine(self, points):
        if self._a.is_affine and self._b.is_affine:
            return points
        elif not self._a.is_affine and self._b.is_affine:
            return self._a.transform_non_affine(points)
        else:
            return self._b.transform_non_affine(
                self._a.transform(points))
    transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__

    def transform_path_non_affine(self, path):
        if self._a.is_affine and self._b.is_affine:
            return path
        elif not self._a.is_affine and self._b.is_affine:
            return self._a.transform_path_non_affine(path)
        else:
            return self._b.transform_path_non_affine(
                self._a.transform_path(path))
    transform_path_non_affine.__doc__ = Transform.transform_path_non_affine.__doc__

    def get_affine(self):
        if not self._b.is_affine:
            return self._b.get_affine()
        else:
            return Affine2D(np.dot(self._b.get_affine().get_matrix(),
                                   self._a.get_affine().get_matrix()))
    get_affine.__doc__ = Transform.get_affine.__doc__

    def inverted(self):
        return CompositeGenericTransform(self._b.inverted(),
                                         self._a.inverted())
    inverted.__doc__ = Transform.inverted.__doc__

    def _get_has_inverse(self):
        return self._a.has_inverse and self._b.has_inverse
    has_inverse = property(_get_has_inverse)


class CompositeAffine2D(Affine2DBase):
    """
    A composite transform formed by applying transform *a* then
    transform *b*.

    This version is an optimization that handles the case where both
    *a* and *b* are 2D affines.
    """
    def __init__(self, a, b, **kwargs):
        """
        Create a new composite transform that is the result of
        applying transform *a* then transform *b*.

        Both *a* and *b* must be instances of :class:`Affine2DBase`.

        You will generally not call this constructor directly but use
        the :func:`composite_transform_factory` function instead,
        which can automatically choose the best kind of composite
        transform instance to create.
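
        For example, the ``+`` operator builds composites through the same
        factory::

            composite = Affine2D().scale(2.0) + Affine2D().translate(1.0, 0.0)
            # both operands are affine, so this is a CompositeAffine2D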
""" assert a.output_dims == b.input_dims self.input_dims = a.input_dims self.output_dims = b.output_dims assert a.is_affine assert b.is_affine Affine2DBase.__init__(self, **kwargs) self._a = a self._b = b self.set_children(a, b) self._mtx = None if DEBUG: def __str__(self): return '(%s, %s)' % (self._a, self._b) @property def depth(self): return self._a.depth + self._b.depth def _iter_break_from_left_to_right(self): for lh_compliment, rh_compliment in self._a._iter_break_from_left_to_right(): yield lh_compliment, rh_compliment + self._b for lh_compliment, rh_compliment in self._b._iter_break_from_left_to_right(): yield self._a + lh_compliment, rh_compliment def __repr__(self): return "CompositeAffine2D(%r, %r)" % (self._a, self._b) def get_matrix(self): if self._invalid: self._mtx = np.dot( self._b.get_matrix(), self._a.get_matrix()) self._inverted = None self._invalid = 0 return self._mtx get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__ def composite_transform_factory(a, b): """ Create a new composite transform that is the result of applying transform a then transform b. Shortcut versions of the blended transform are provided for the case where both child transforms are affine, or one or the other is the identity transform. Composite transforms may also be created using the '+' operator, e.g.:: c = a + b """ # check to see if any of a or b are IdentityTransforms. We use # isinstance here to guarantee that the transforms will *always* # be IdentityTransforms. Since TransformWrappers are mutable, # use of equality here would be wrong. if isinstance(a, IdentityTransform): return b elif isinstance(b, IdentityTransform): return a elif isinstance(a, Affine2D) and isinstance(b, Affine2D): return CompositeAffine2D(a, b) return CompositeGenericTransform(a, b) class BboxTransform(Affine2DBase): """ :class:`BboxTransform` linearly transforms points from one :class:`Bbox` to another :class:`Bbox`. """ is_separable = True def __init__(self, boxin, boxout, **kwargs): """ Create a new :class:`BboxTransform` that linearly transforms points from *boxin* to *boxout*. """ assert boxin.is_bbox assert boxout.is_bbox Affine2DBase.__init__(self, **kwargs) self._boxin = boxin self._boxout = boxout self.set_children(boxin, boxout) self._mtx = None self._inverted = None def __repr__(self): return "BboxTransform(%r, %r)" % (self._boxin, self._boxout) def get_matrix(self): if self._invalid: inl, inb, inw, inh = self._boxin.bounds outl, outb, outw, outh = self._boxout.bounds x_scale = outw / inw y_scale = outh / inh if DEBUG and (x_scale == 0 or y_scale == 0): raise ValueError("Transforming from or to a singular bounding box.") self._mtx = np.array([[x_scale, 0.0 , (-inl*x_scale+outl)], [0.0 , y_scale, (-inb*y_scale+outb)], [0.0 , 0.0 , 1.0 ]], np.float_) self._inverted = None self._invalid = 0 return self._mtx get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__ class BboxTransformTo(Affine2DBase): """ :class:`BboxTransformTo` is a transformation that linearly transforms points from the unit bounding box to a given :class:`Bbox`. """ is_separable = True def __init__(self, boxout, **kwargs): """ Create a new :class:`BboxTransformTo` that linearly transforms points from the unit bounding box to *boxout*. 
""" assert boxout.is_bbox Affine2DBase.__init__(self, **kwargs) self._boxout = boxout self.set_children(boxout) self._mtx = None self._inverted = None def __repr__(self): return "BboxTransformTo(%r)" % (self._boxout) def get_matrix(self): if self._invalid: outl, outb, outw, outh = self._boxout.bounds if DEBUG and (outw == 0 or outh == 0): raise ValueError("Transforming to a singular bounding box.") self._mtx = np.array([[outw, 0.0, outl], [ 0.0, outh, outb], [ 0.0, 0.0, 1.0]], np.float_) self._inverted = None self._invalid = 0 return self._mtx get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__ class BboxTransformToMaxOnly(BboxTransformTo): """ :class:`BboxTransformTo` is a transformation that linearly transforms points from the unit bounding box to a given :class:`Bbox` with a fixed upper left of (0, 0). """ def __repr__(self): return "BboxTransformToMaxOnly(%r)" % (self._boxout) def get_matrix(self): if self._invalid: xmax, ymax = self._boxout.max if DEBUG and (xmax == 0 or ymax == 0): raise ValueError("Transforming to a singular bounding box.") self._mtx = np.array([[xmax, 0.0, 0.0], [ 0.0, ymax, 0.0], [ 0.0, 0.0, 1.0]], np.float_) self._inverted = None self._invalid = 0 return self._mtx get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__ class BboxTransformFrom(Affine2DBase): """ :class:`BboxTransformFrom` linearly transforms points from a given :class:`Bbox` to the unit bounding box. """ is_separable = True def __init__(self, boxin, **kwargs): assert boxin.is_bbox Affine2DBase.__init__(self, **kwargs) self._boxin = boxin self.set_children(boxin) self._mtx = None self._inverted = None def __repr__(self): return "BboxTransformFrom(%r)" % (self._boxin) def get_matrix(self): if self._invalid: inl, inb, inw, inh = self._boxin.bounds if DEBUG and (inw == 0 or inh == 0): raise ValueError("Transforming from a singular bounding box.") x_scale = 1.0 / inw y_scale = 1.0 / inh self._mtx = np.array([[x_scale, 0.0 , (-inl*x_scale)], [0.0 , y_scale, (-inb*y_scale)], [0.0 , 0.0 , 1.0 ]], np.float_) self._inverted = None self._invalid = 0 return self._mtx get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__ class ScaledTranslation(Affine2DBase): """ A transformation that translates by *xt* and *yt*, after *xt* and *yt* have been transformad by the given transform *scale_trans*. """ def __init__(self, xt, yt, scale_trans, **kwargs): Affine2DBase.__init__(self, **kwargs) self._t = (xt, yt) self._scale_trans = scale_trans self.set_children(scale_trans) self._mtx = None self._inverted = None def __repr__(self): return "ScaledTranslation(%r)" % (self._t,) def get_matrix(self): if self._invalid: xt, yt = self._scale_trans.transform_point(self._t) self._mtx = np.array([[1.0, 0.0, xt], [0.0, 1.0, yt], [0.0, 0.0, 1.0]], np.float_) self._invalid = 0 self._inverted = None return self._mtx get_matrix.__doc__ = Affine2DBase.get_matrix.__doc__ class TransformedPath(TransformNode): """ A :class:`TransformedPath` caches a non-affine transformed copy of the :class:`~matplotlib.path.Path`. This cached copy is automatically updated when the non-affine part of the transform changes. .. note:: Paths are considered immutable by this class. Any update to the path's vertices/codes will not trigger a transform recomputation. """ def __init__(self, path, transform): """ Create a new :class:`TransformedPath` from the given :class:`~matplotlib.path.Path` and :class:`Transform`. 
""" assert isinstance(transform, Transform) TransformNode.__init__(self) self._path = path self._transform = transform self.set_children(transform) self._transformed_path = None self._transformed_points = None def _revalidate(self): # only recompute if the invalidation includes the non_affine part of the transform if ((self._invalid & self.INVALID_NON_AFFINE == self.INVALID_NON_AFFINE) or self._transformed_path is None): self._transformed_path = \ self._transform.transform_path_non_affine(self._path) self._transformed_points = \ Path(self._transform.transform_non_affine(self._path.vertices), None, self._path._interpolation_steps) self._invalid = 0 def get_transformed_points_and_affine(self): """ Return a copy of the child path, with the non-affine part of the transform already applied, along with the affine part of the path necessary to complete the transformation. Unlike :meth:`get_transformed_path_and_affine`, no interpolation will be performed. """ self._revalidate() return self._transformed_points, self.get_affine() def get_transformed_path_and_affine(self): """ Return a copy of the child path, with the non-affine part of the transform already applied, along with the affine part of the path necessary to complete the transformation. """ self._revalidate() return self._transformed_path, self.get_affine() def get_fully_transformed_path(self): """ Return a fully-transformed copy of the child path. """ self._revalidate() return self._transform.transform_path_affine(self._transformed_path) def get_affine(self): return self._transform.get_affine() def nonsingular(vmin, vmax, expander=0.001, tiny=1e-15, increasing=True): ''' Modify the endpoints of a range as needed to avoid singularities. *vmin*, *vmax* the initial endpoints. *tiny* threshold for the ratio of the interval to the maximum absolute value of its endpoints. If the interval is smaller than this, it will be expanded. This value should be around 1e-15 or larger; otherwise the interval will be approaching the double precision resolution limit. *expander* fractional amount by which *vmin* and *vmax* are expanded if the original interval is too small, based on *tiny*. *increasing*: [True | False] If True (default), swap *vmin*, *vmax* if *vmin* > *vmax* Returns *vmin*, *vmax*, expanded and/or swapped if necessary. If either input is inf or NaN, or if both inputs are 0, returns -*expander*, *expander*. ''' if (not np.isfinite(vmin)) or (not np.isfinite(vmax)): return -expander, expander swapped = False if vmax < vmin: vmin, vmax = vmax, vmin swapped = True if vmax - vmin <= max(abs(vmin), abs(vmax)) * tiny: if vmax == 0 and vmin == 0: vmin = -expander vmax = expander else: vmin -= expander*abs(vmin) vmax += expander*abs(vmax) if swapped and not increasing: vmin, vmax = vmax, vmin return vmin, vmax def interval_contains(interval, val): a, b = interval return ( ((a < b) and (a <= val and b >= val)) or (b <= val and a >= val)) def interval_contains_open(interval, val): a, b = interval return ( ((a < b) and (a < val and b > val)) or (b < val and a > val)) def offset_copy(trans, fig=None, x=0.0, y=0.0, units='inches'): ''' Return a new transform with an added offset. 
args: trans is any transform kwargs: fig is the current figure; it can be None if units are 'dots' x, y give the offset units is 'inches', 'points' or 'dots' ''' if units == 'dots': return trans + Affine2D().translate(x, y) if fig is None: raise ValueError('For units of inches or points a fig kwarg is needed') if units == 'points': x /= 72.0 y /= 72.0 elif not units == 'inches': raise ValueError('units must be dots, points, or inches') return trans + ScaledTranslation(x, y, fig.dpi_scale_trans)
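

if __name__ == '__main__':
    # Editor's sketch (not part of this module): a minimal use of
    # `offset_copy`, which is commonly wrapped around an Axes transform to
    # nudge text labels a fixed number of points away from the data they
    # annotate. The figure, data, and 5-point offsets below are invented
    # for illustration.
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    xs, ys = [1, 2, 3], [1, 4, 9]
    ax.plot(xs, ys, 'o')
    # Data transform plus a translation of 5 points right and 5 points up.
    trans = offset_copy(ax.transData, fig=fig, x=5, y=5, units='points')
    for x, y in zip(xs, ys):
        ax.text(x, y, '(%d, %d)' % (x, y), transform=trans)
    plt.show()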
mit
jblackburne/scikit-learn
doc/sphinxext/sphinx_gallery/notebook.py
9
3565
# -*- coding: utf-8 -*-
r"""
============================
Parser for Jupyter notebooks
============================

Class that holds the IPython notebook information
"""
# Author: Óscar Nájera
# License: 3-clause BSD

from __future__ import division, absolute_import, print_function
import json
import os
import re
import sys


def ipy_notebook_skeleton():
    """Returns a dictionary with the elements of a Jupyter notebook"""
    py_version = sys.version_info
    notebook_skeleton = {
        "cells": [],
        "metadata": {
            "kernelspec": {
                "display_name": "Python " + str(py_version[0]),
                "language": "python",
                "name": "python" + str(py_version[0])
            },
            "language_info": {
                "codemirror_mode": {
                    "name": "ipython",
                    "version": py_version[0]
                },
                "file_extension": ".py",
                "mimetype": "text/x-python",
                "name": "python",
                "nbconvert_exporter": "python",
                "pygments_lexer": "ipython" + str(py_version[0]),
                "version": '{0}.{1}.{2}'.format(*sys.version_info[:3])
            }
        },
        "nbformat": 4,
        "nbformat_minor": 0
    }
    return notebook_skeleton


def rst2md(text):
    """Converts the RST text from the examples docstrings and comments
    into markdown text for the IPython notebooks"""
    top_heading = re.compile(r'^=+$\s^([\w\s-]+)^=+$', flags=re.M)
    text = re.sub(top_heading, r'# \1', text)

    math_eq = re.compile(r'^\.\. math::((?:.+)?(?:\n+^  .+)*)', flags=re.M)
    text = re.sub(math_eq,
                  lambda match: r'$${0}$$'.format(match.group(1).strip()),
                  text)
    inline_math = re.compile(r':math:`(.+)`')
    text = re.sub(inline_math, r'$\1$', text)
    return text


class Notebook(object):
    """IPython notebook object

    Constructs the file cell-by-cell and writes it at the end"""

    def __init__(self, file_name, target_dir):
        """Declare the skeleton of the notebook

        Parameters
        ----------
        file_name : str
            original script file name; the .py extension is replaced by .ipynb
        target_dir : str
            directory where notebook file is to be saved
        """
        self.file_name = file_name.replace('.py', '.ipynb')
        self.write_file = os.path.join(target_dir, self.file_name)
        self.work_notebook = ipy_notebook_skeleton()
        self.add_code_cell("%matplotlib inline")

    def add_code_cell(self, code):
        """Add a code cell to the notebook

        Parameters
        ----------
        code : str
            Cell content
        """
        code_cell = {
            "cell_type": "code",
            "execution_count": None,
            "metadata": {"collapsed": False},
            "outputs": [],
            "source": [code.strip()]
        }
        self.work_notebook["cells"].append(code_cell)

    def add_markdown_cell(self, text):
        """Add a markdown cell to the notebook

        Parameters
        ----------
        text : str
            Cell content
        """
        markdown_cell = {
            "cell_type": "markdown",
            "metadata": {},
            "source": [rst2md(text)]
        }
        self.work_notebook["cells"].append(markdown_cell)

    def save_file(self):
        """Saves the notebook to a file"""
        with open(self.write_file, 'w') as out_nb:
            json.dump(self.work_notebook, out_nb, indent=2)
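

if __name__ == '__main__':
    # Editor's sketch (not part of sphinx_gallery): exercise the class above
    # end-to-end using only its own methods. The script name and cell
    # contents are invented for illustration.
    nb = Notebook('plot_example.py', '.')
    nb.add_markdown_cell('=======\nExample\n=======\n\nInline math :math:`x^2`.')
    nb.add_code_cell('print(1 + 1)')
    nb.save_file()  # writes ./plot_example.ipynb with three cells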
bsd-3-clause
sonalranjit/SECS
SECS_trace.py
2
1609
__author__ = 'sonal' import numpy as np from mpl_toolkits.basemap import Basemap import matplotlib.pyplot as plt from matplotlib import cm from mpl_toolkits.axes_grid1 import make_axes_locatable import os from math import * def polar_plot(grid, title): #z = grid[:,8] u = grid[:,8] v = grid[:,9] plt.figure(figsize=(18,18)) ax = plt.gca() #m = Basemap(projection='npaeqd',boundinglat=20,lon_0=-100.,resolution='l') m = Basemap(width=8000000, height=8000000, resolution='l', projection='lcc',\ lat_0=60,lon_0=-100.) m.drawcoastlines() m.drawparallels(np.arange(-80.,81,20.),labels=[1,0,0,0],fontsize=10) m.drawmeridians(np.arange(-180.,181.,20.),labels=[0,0,0,1],fontsize=10) x,y =m(grid[:,7],grid[:,6]) sc = m.scatter(x,y,s=abs(u),c=u,marker=',',cmap=cm.jet,alpha=0.9,edgecolors='none') plt.title(title) divider = make_axes_locatable(ax) cax = divider.append_axes("right", size="5%", pad=0.05) cb1 = plt.colorbar(sc,cax=cax) cb1.set_label("mA/m",fontsize=18) plt.savefig('GOCE_asc_EICSu_krigged_201104.png',bbox_inches='tight',pad_inches=0.2) #plt.show() def asc_desc(data): asc = [] desc = [] lat = data[:,6] for i in range(0,len(data)-1): if lat[i+1] >= lat[i]: asc.append(i) else: desc.append(i) return asc, desc SECS_data = np.loadtxt('EICS_201103_krigged.txt') asc_idx, desc_idx= asc_desc(SECS_data) asc_track = SECS_data[asc_idx,:] desc_track = SECS_data[desc_idx,:] polar_plot(asc_track,'GOCE Ascending EICS u component Krigged April, 2011')
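

# Editor's sketch (not part of the original script): sanity-check asc_desc on
# synthetic data. Column 6 is assumed to hold latitude, as in polar_plot
# above; the values are invented. Note that the final row is never classified
# because the loop stops at len(data)-1.
demo = np.zeros((5, 10))
demo[:, 6] = [0., 1., 2., 1., 0.]  # latitude rises, then falls
demo_asc, demo_desc = asc_desc(demo)
print('demo ascending rows: %s' % demo_asc)   # [0, 1]
print('demo descending rows: %s' % demo_desc)  # [2, 3]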
gpl-2.0
rjonnal/zernike
__init__.py
1
20006
"""This module contains functions for Zernike calculations. Mainly the private function _zgen, a generator function for Zernike polynomials. The public functions make use of _zgen to create height or slope maps in a unit pupil, corresponding to individual Zernike terms. Author: Ravi S. Jonnal / Werner Lab, UC Davis Revision: 2.0 / 28 June 2014 """ import numpy as np from matplotlib import pyplot as plt import sys from time import sleep import os USE_CACHE_FILE = False def fact(num): """Implementation of factorial function. """ # Check that the number is an integer. assert(num%1==0) # Check that $num\geq 0$. assert(num>=0) # Compute $num!$ recursively. if num==0 or num==1: return 1 else: return num * fact(num-1) def choose(a,b): """Binomial coefficient, implemented using this module's factorial function. See [here](http://www.encyclopediaofmath.org/index.php/Newton_binomial) for detail. """ assert(a>=b) return fact(a)/(fact(b)*fact(a-b)) def splitEquation(eqStr,width,bookend): if len(eqStr)<=width or len(eqStr)==0: return eqStr else: spaceIndices = [] idx = 0 while idx>-1: idx = eqStr.find(' ',idx+1) spaceIndices.append(idx) spaceIndices = spaceIndices[:-1] idxList = [x for x in spaceIndices if x<width] if len(idxList)==0: return eqStr else: idx = idxList[-1] head = eqStr[:idx] innards = ' ' + bookend + '\n' + bookend tail = splitEquation(eqStr[idx:],width,bookend) test =head + innards + tail return test class Zernike: def __init__(self): if USE_CACHE_FILE: cachedir = './cache/' self._cachefn = os.path.join(cachedir,'zernike_cache.txt') if not os.path.exists(cachedir): os.makedirs(cachedir) try: self._termMatrix = np.loadtxt(self._cachefn).astype(np.int32) except Exception as e: print 'No term cache file. Creating.' self._termMatrix = np.array([]) np.savetxt(self._cachefn,self._termMatrix) # Make a dictionary of precomputed coefficients, using the cache file. # This dictionary will be used to look up values when they exist in # the dictionary, and will recompute them otherwise. self._termDict = {} if USE_CACHE_FILE: for row in self._termMatrix: n,m,kindIndex,s,j,k = row[:6] t1,t2,t3,c,tXexp,tYexp = row[6:] self._termDict[(n,m,kindIndex,s,j,k)] = (t1,t2,t3,c,tXexp,tYexp) # The functions in this class can be asked for phase height, # or partial x or partial y derivatives. 'Kind' refers to # which of these is requested. Numerical encodings for 'kind' # permit some arithmetical simplicity and generality # (utilizing a number associated with the kind in a single # equation, rather than having different sets of equations # for each kind case). self._kindDictionary = {} self._kindDictionary['h'] = 0 self._kindDictionary['dx'] = 1 self._kindDictionary['dy'] = 2 def j2nm(self,j): n = np.ceil((-3+np.sqrt(9+8*j))/2) m = 2*j-n*(n+2) return np.int(n),np.int(m) def nm2j(self,n,m): return np.int(n*(n+1)/2.0+(n+m)/2.0) def _zeqn(self,n,m,kind='h',forceRecompute=False): """Return parameters sufficient for specifying a Zernike term of desired order and azimuthal frequency. Given an order (or degree) n and azimuthal frequency f, and x- and y- rectangular (Cartesian) coordinates, produce parameters necessary for constructing the appropriate Zernike representation. An individual polynomial has the format: $$ Z_n^m = \sqrt{c} \Sigma^j\Sigma^k [a_{jk}X^jY^k] $$ This function returns a tuple ($c$,cdict). 
$c$ is the square of the normalizing coefficient $\sqrt{c}$, and cdict contains key-value pairs (($j$,$k$),$a$), mapping the $X$ and $Y$ exponents ($j$ and $k$, respectively) onto polynomial term coefficients ($a$). The resulting structure can be used to compute the wavefront height or slope for arbitrary pupil coordinates, or to generate string representations of the polynomials. Zernike terms are only defined when n and m have the same parity (both odd or both even). Please see Schwiegerling lecture notes in /doc/supporting_docs/ for eqn. references. Args: n (int): The Zernike order or degree. m (int): The azimuthal frequency. kind (str): 'h', 'dx', or 'dy', for height, partial x derivative (slope) or partial y derivative, respectively. Returns: params (tuple): (c,cdict), with c being the normalizing coefficient c and cdict being the map of exponent pairs onto inner coefficients. """ absm = np.abs(m) kindIndex = self._kindDictionary[kind.lower()] if USE_CACHE_FILE: # open cache file in append mode: self._cacheHandle = file(self._cachefn,'a') # check that n and m are both even or both odd if (float(n-absm))%2.0: errString = 'zernike._zgen error: ' + \ 'parity of n and m are different; n = %d, m = %d'%(n,m) sys.exit(errString) # check that n is non-negative: if n<0: errString = 'zernike._zgen error: ' + \ 'n must be non-negative; n = %d'%n sys.exit(errString) # $|m|$ must be less than or equal to $n$. if abs(m)>n: errString = 'zernike._zgen error: ' + \ '|m| must be less than or equal to n, but n=%d and m=%d.'%(n,m) sys.exit(errString) # These are the squares of the outer coefficients. It's useful # to keep them this way for _convertToString, since we'd # prefer to print the $\sqrt{}$ rather than a truncated irrational # number. if m==0: outerCoef = n+1 else: outerCoef = 2*(n+1) srange = range((n-absm)/2+1) cdict = {} for s in srange: jrange = range(((n-absm)/2)-s+1) for j in jrange: # Subtract 1 from absm to determine range, # only when m<0. if m<0: krange = range((absm-1)/2+1) else: krange = range(absm/2+1) for k in krange: # If m==0, k must also be 0; # see eqn. 13c, 19c, and 20c, each of which # only sum over s and j, not k. if m==0: assert(k==0) # For m==0 cases, n/2 is used in coef denominator. Make # sure that n is even, or else n/2 is not well-defined # because n is an integer. if m==0: assert n%2==0 # Check to see if calculations are cached. # If so, use cached values; if not, recalculate. cached = self._termDict.has_key((n,m,kindIndex,s,j,k)) if cached and not forceRecompute: t1,t2,t3,c,tXexp,tYexp = self._termDict[(n,m,kindIndex,s,j,k)] else: # The coefficient for each term in this # polynomial has the format: $$\frac{t1n}{t1d1 # t1d2 t1d3} t2 t3$$. These six terms are # computed here. t1n = ((-1)**(s+k))*fact(n-s) t1d1 = fact(s) t1d2 = fact((n + absm)/2-s) t1d3 = fact((n - absm)/2-s) t1 = t1n/(t1d1*t1d2*t1d3) t2 = choose((n - absm)/2 - s, j) t3 = choose(absm, 2*k + (m<0)) if kind.lower()=='h': # The (implied) coefficient of the $X^a Y^b$ # term at the end of eqns. 13a-c. c = 1 tXexp = n - 2*(s+j+k) - (m<0) tYexp = 2*(j+k) + (m<0) elif kind.lower()=='dx': # The coefficient of the $X^a Y^b$ term at # the end of eqns. 19a-c. c = (n - 2*(s+j+k) - (m<0)) # Could cacluate explicitly: # $tXexp = X^{(n - 2*(s+j+k)- 1 - (m<0))}$ # # However, piggy-backing on previous # calculation of c speeds things up. tXexp = c - 1 tYexp = 2*(j+k) + (m<0) elif kind.lower()=='dy': # The coefficient of the $X^a Y^b$ term at # the end of eqns. 20a-c. 
c = 2*(j+k) + (m<0) tXexp = n - 2*(s+j+k) - (m<0) tYexp = c - 1 else: errString = 'zernike._zgen error: ' + \ 'invalid kind \'%s\'; should be \'h\', \'dx\', or \'dy\'.'%kind sys.exit(errString) if not cached and USE_CACHE_FILE: self._cacheHandle.write('%d\t'*12%(n,m,kindIndex,s,j,k,t1,t2,t3,c,tXexp,tYexp)+'\n') ct123 = c*t1*t2*t3 # The key for the polynomial dictionary is the pair of X,Y # coefficients. termKey = (tXexp,tYexp) # Leave this term out of the dictionary if its coefficient # is 0. if ct123: # If we already have this term, add to its coefficient. if cdict.has_key(termKey): cdict[termKey] = cdict[termKey] + ct123 # If not, add it to the dictionary. else: cdict[termKey] = ct123 # Remove zeros to speed up computations later. cdict = {key: value for key, value in cdict.items() if value} return (outerCoef,cdict) def _convertToString(self,params): """Return a string representation of a Zernike polynomial. This function takes a tuple, consisting of a squared normalizing coefficient and dictionary of inner coefficients and exponents, provided by _zeqn, and returns a string representation of the polynomial, with LaTeX- style markup. Example: a params of (10, {(3,4): 7, (2,5): -1}) would produce a two-term polynomial '\sqrt{10} [7 X^3 Y^4 - X^2 Y^5]', which could be used in LaTeX, pandoc, markdown, MathJax, or Word with MathType, to produce: $$ \sqrt{10} [7 X^3 Y^4 - X^2 Y^5] $$ Args: params (tuple): A pair consisting of an outer coefficient $c$ and a dictionary mapping tuples (xexp,yexp) of exponents onto the corresponding term coefficients. Returns: string: A string representation of the polynomial. """ c = params[0] cdict = params[1] keys = sorted(cdict.keys(), key=lambda tup: (tup[0]+tup[1],tup[0]))[::-1] outstr = '' firstKey = True for key in keys: coef = cdict[key] if coef>0: sign = '+' else: sign = '-' coef = abs(coef) if coef<0 or not firstKey: outstr = outstr + '%s'%sign if coef>1 or (key[0]==0 and key[1]==0): outstr = outstr + '%d'%coef if key[0]: outstr = outstr + 'X^{%d}'%key[0] if key[1]: outstr = outstr + 'Y^{%d}'%key[1] firstKey = False outstr = outstr + ' ' outstr = outstr.strip() if np.sqrt(float(c))%1.0<.00001: cstr = '%d'%(np.sqrt(c)) else: cstr = '\sqrt{%d}'%(c) if len(outstr): outstr = '%s [%s]'%(cstr,outstr) else: outstr = '%s'%(cstr) return outstr def _convertToSurface(self,params,X,Y,mask=None): """Return a phase map specified by a Zernike polynomial. This function takes a tuple, consisting of a squared normalizing coefficient and dictionary of inner coefficients and exponents, provided by _zeqn, and x- and y- rectangular (Cartesian) coordinates, and produces a phase map. This function works by evaluating the polynomial expressed by params at each coordinate specified by X and Y. Args: params (tuple): A pair consisting of an outer coefficient $c$ and a dictionary mapping tuples (xexp,yexp) of exponents onto the corresponding term coefficients. X (float): A scalar, vector, or matrix of X coordinates in unit pupil. Y (float): A scalar, vector, or matrix of Y coordinates in unit pupil. kind (str): 'h', 'dx', or 'dy', for height, partial x derivative (slope) or partial y derivative, respectively. Returns: float: height, dx, or dy; returned structure same size as X and Y. """ # Check that shapes of X and Y are equal (not necessarily square). 
        if not (X.shape[0]==Y.shape[0] and \
                X.shape[1]==Y.shape[1]):
            errString = 'zernike._convertToSurface error: ' + \
                'X and Y must have the same shape, but X is %d x %d'%(X.shape[0],X.shape[1]) + \
                ' and Y is %d x %d'%(Y.shape[0],Y.shape[1])
            sys.exit(errString)

        if mask is None:
            mask = np.ones(X.shape)

        # params is the (outerCoef, cdict) pair computed by _zeqn and passed
        # in by the caller; normalizer is the outer coefficient $\sqrt{c}$.
        normalizer = np.sqrt(params[0])
        matrix_out = np.zeros(X.shape)

        for item in params[1].items():
            matrix_out = matrix_out + item[1] * X**(item[0][0]) * Y**(item[0][1])

        matrix_out = matrix_out * normalizer
        matrix_out = matrix_out * mask

        return matrix_out

    def getSurface(self,n,m,X,Y,kind='h',mask=None):
        """Return a phase map specified by a Zernike order and
        azimuthal frequency.

        Given an order (or degree) n and azimuthal frequency m, and x-
        and y- rectangular (Cartesian) coordinates, produce a phase map
        of either height, partial x derivative, or partial y
        derivative.

        Zernike terms are only defined when n and m have the same
        parity (both odd or both even).

        The input X and Y values should be located inside a unit
        pupil, such that $$\sqrt{X^2 + Y^2}\leq 1$$

        Please see Schwiegerling lecture notes in
        /doc/supporting_docs/ for eqn. references.

        This function works by calling Zernike._zeqn to calculate the
        coefficients and exponents of the polynomial, and then using
        the supplied X and Y coordinates to produce the height map (or
        partial derivative).

        Args:

          n (int): The Zernike order or degree.

          m (int): The azimuthal frequency.

          X (float): A scalar, vector, or matrix of X coordinates in unit pupil.

          Y (float): A scalar, vector, or matrix of Y coordinates in unit pupil.

          kind (str): 'h', 'dx', or 'dy', for height, partial x
            derivative (slope) or partial y derivative, respectively.

        Returns:

          float: height, dx, or dy; returned structure same size as X
            and Y.

        """

        # Check that shapes of X and Y are equal (not necessarily square).
        if not np.all(X.shape==Y.shape):
            errString = 'zernike.getSurface error: ' + \
                'X and Y must have the same shape, but X is %d x %d'%(X.shape[0],X.shape[1]) + \
                ' and Y is %d x %d'%(Y.shape[0],Y.shape[1])
            sys.exit(errString)

        if mask is None:
            mask = np.ones(X.shape)

        params = self._zeqn(n,m,kind)
        normalizer = np.sqrt(params[0])
        matrix_out = np.zeros(X.shape)

        for item in params[1].items():
            matrix_out = matrix_out + item[1] * X**(item[0][0]) * Y**(item[0][1])

        matrix_out = matrix_out * normalizer
        matrix_out = matrix_out * mask

        return matrix_out

    def getEquationString(self,n,m,kind='h',doubleDollar=False):
        """Return a LaTeX-encoded string for the Zernike polynomial
        specified by order n, frequency m.

        Args:

          n (int): The Zernike order or degree.

          m (int): The azimuthal frequency.

          kind (str): 'h', 'dx', or 'dy', for height, partial x
            derivative (slope) or partial y derivative, respectively.

          doubleDollar (bool): determines how to bookend the
            polynomial string; True causes bookending with '$$', to
            produce "display" math mode, whereas False would produce a
            string suitable for inline use.

        Returns:

          str: a LaTeX representation of the Zernike polynomial
            specified by n, m, and kind.
""" params = self._zeqn(n,m,kind) rightString = self._convertToString(params) if kind.lower()=='h': leftString = 'Z^{%d}_{%d}'%(m,n) elif kind.lower()=='dx': leftString = '\\frac{\delta Z^{%d}_{%d}}{\delta x}'%(m,n) elif kind.lower()=='dy': leftString = '\\frac{\delta Z^{%d}_{%d}}{\delta y}'%(m,n) else: sys.exit('zernike.getEquationString: invalid kind %s'%kind) if doubleDollar: bookend = '$$' else: bookend = '$' return '%s %s = %s %s'%(bookend,leftString,rightString,bookend) def plotPolynomial(self,n,m,kind='h'): """Plot a polynomial surface specified by order n, frequency m, and kind. Args: n (int): The Zernike order or degree. m (int): The azimuthal frequency. kind (str): 'h', 'dx', or 'dy', for height, partial x derivative (slope) or partial y derivative, respectively. Calling function/script required to provide a plotting context (e.g. pyplot.figure). """ from mpl_toolkits.mplot3d import Axes3D N = 64 mask = np.zeros((N,N)) xx,yy = np.meshgrid(np.linspace(-1,1,N),np.linspace(-1,1,N)) d = np.sqrt(xx**2 + yy**2) mask[np.where(d<1)] = 1 surface = self.getSurface(n,m,xx,yy,kind,mask) surface = surface * mask #plt.figure() ax = plt.axes([0,.2,1,.8],projection='3d') surf = ax.plot_wireframe(xx,yy,surface,rstride=1,cstride=1,color='k') ax.view_init(elev=70., azim=40) eqstr = self.getEquationString(n,m,kind) eqstr = splitEquation(eqstr,160,'$') print 'plotting %s'%eqstr plt.axes([0,0,1,.2]) plt.xticks([]) plt.yticks([]) plt.box('off') fontsize = 12 plt.text(0.5,0.5,eqstr,ha='center',va='center',fontsize=fontsize)
gpl-2.0
B3AU/waveTree
examples/ensemble/plot_bias_variance.py
6
7330
""" ============================================================ Single estimator versus bagging: bias-variance decomposition ============================================================ This example illustrates and compares the bias-variance decomposition of the expected mean squared error of a single estimator against a bagging ensemble. In regression, the expected mean squared error of an estimator can be decomposed in terms of bias, variance and noise. On average over datasets of the regression problem, the bias term measures the average amount by which the predictions of the estimator differ from the predictions of the best possible estimator for the problem (i.e., the Bayes model). The variance term measures the variability of the predictions of the estimator when fit over different instances LS of the problem. Finally, the noise measures the irreducible part of the error which is due the variability in the data. The upper left figure illustrates the predictions (in dark red) of a single decision tree trained over a random dataset LS (the blue dots) of a toy 1d regression problem. It also illustrates the predictions (in light red) of other single decision trees trained over other (and different) randomly drawn instances LS of the problem. Intuitively, the variance term here corresponds to the width of the beam of predictions (in light red) of the individual estimators. The larger the variance, the more sensitive are the predictions for `x` to small changes in the training set. The bias term corresponds to the difference between the average prediction of the estimator (in cyan) and the best possible model (in dark blue). On this problem, we can thus observe that the bias is quite low (both the cyan and the blue curves are close to each other) while the variance is large (the red beam is rather wide). The lower left figure plots the pointwise decomposition of the expected mean squared error of a single decision tree. It confirms that the bias term (in blue) is low while the variance is large (in green). It also illustrates the noise part of the error which, as expected, appears to be constant and around `0.01`. The right figures correspond to the same plots but using instead a bagging ensemble of decision trees. In both figures, we can observe that the bias term is larger than in the previous case. In the upper right figure, the difference between the average prediction (in cyan) and the best possible model is larger (e.g., notice the offset around `x=2`). In the lower right figure, the bias curve is also slightly higher than in the lower left figure. In terms of variance however, the beam of predictions is narrower, which suggests that the variance is lower. Indeed, as the lower right figure confirms, the variance term (in green) is lower than for single decision trees. Overall, the bias- variance decomposition is therefore no longer the same. The tradeoff is better for bagging: averaging several decision trees fit on bootstrap copies of the dataset slightly increases the bias term but allows for a larger reduction of the variance, which results in a lower overall mean squared error (compare the red curves int the lower figures). The script output also confirms this intuition. The total error of the bagging ensemble is lower than the total error of a single decision tree, and this difference indeed mainly stems from a reduced variance. For further details on bias-variance decomposition, see section 7.3 of [1]_. References ---------- .. [1] T. Hastie, R. Tibshirani and J. 
Friedman, "Elements of Statistical Learning", Springer, 2009. """ print(__doc__) # Author: Gilles Louppe <g.louppe@gmail.com> # License: BSD 3 clause import numpy as np from matplotlib import pyplot as plt from sklearn.ensemble import BaggingRegressor from sklearn.tree import DecisionTreeRegressor # Settings n_repeat = 50 # Number of iterations for computing expectations n_train = 50 # Size of the training set n_test = 1000 # Size of the test set noise = 0.1 # Standard deviation of the noise np.random.seed(0) # Change this for exploring the bias-variance decomposition of other # estimators. This should work well for estimators with high variance (e.g., # decision trees or KNN), but poorly for estimators with low variance (e.g., # linear models). estimators = [("Tree", DecisionTreeRegressor()), ("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))] n_estimators = len(estimators) # Generate data def f(x): x = x.ravel() return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2) def generate(n_samples, noise, n_repeat=1): X = np.random.rand(n_samples) * 10 - 5 X = np.sort(X) if n_repeat == 1: y = f(X) + np.random.normal(0.0, noise, n_samples) else: y = np.zeros((n_samples, n_repeat)) for i in range(n_repeat): y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples) X = X.reshape((n_samples, 1)) return X, y X_train = [] y_train = [] for i in range(n_repeat): X, y = generate(n_samples=n_train, noise=noise) X_train.append(X) y_train.append(y) X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat) # Loop over estimators to compare for n, (name, estimator) in enumerate(estimators): # Compute predictions y_predict = np.zeros((n_test, n_repeat)) for i in xrange(n_repeat): estimator.fit(X_train[i], y_train[i]) y_predict[:, i] = estimator.predict(X_test) # Bias^2 + Variance + Noise decomposition of the mean squared error y_error = np.zeros(n_test) for i in range(n_repeat): for j in range(n_repeat): y_error += (y_test[:, j] - y_predict[:, i]) ** 2 y_error /= (n_repeat * n_repeat) y_noise = np.var(y_test, axis=1) y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2 y_var = np.var(y_predict, axis=1) print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) " " + {3:.4f} (var) + {4:.4f} (noise)".format(name, np.mean(y_error), np.mean(y_bias), np.mean(y_var), np.mean(y_noise))) # Plot figures plt.subplot(2, n_estimators, n + 1) plt.plot(X_test, f(X_test), "b", label="$f(x)$") plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$") for i in range(n_repeat): if i == 0: plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$") else: plt.plot(X_test, y_predict[:, i], "r", alpha=0.05) plt.plot(X_test, np.mean(y_predict, axis=1), "c", label="$\mathbb{E}_{LS} \^y(x)$") plt.xlim([-5, 5]) plt.title(name) if n == 0: plt.legend(loc="upper left", prop={"size": 11}) plt.subplot(2, n_estimators, n_estimators + n + 1) plt.plot(X_test, y_error, "r", label="$error(x)$") plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"), plt.plot(X_test, y_var, "g", label="$variance(x)$"), plt.plot(X_test, y_noise, "c", label="$noise(x)$") plt.xlim([-5, 5]) plt.ylim([0, 0.1]) if n == 0: plt.legend(loc="upper left", prop={"size": 11}) plt.show()
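

# Editor's sketch (not part of the original example): numerically check the
# pointwise decomposition for the last estimator fit in the loop above. Up to
# Monte-Carlo fluctuations from the finite number of repetitions, one expects
#   error(x) ~= bias^2(x) + variance(x) + noise(x).
print("mean decomposition gap: {0:.4f}".format(
    np.mean(np.abs(y_error - (y_bias + y_var + y_noise)))))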
bsd-3-clause
RuthAngus/LSST-max
code/GP_periodogram.py
1
1066
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from GProtation import make_plot, lnprob, neglnlike
import emcee
import time
import george
from george.kernels import ExpSquaredKernel, ExpSine2Kernel
import scipy.optimize as spo


def GP_periodogram(x, y, yerr, p_init, plims, N):
    """
    This function takes a light curve and attempts to produce a GP
    periodogram. It returns the value of the highest peak.
    The kernel hyperparameters are optimised over a grid of periods.
    This is also a "profile likelihood".
    x, y, yerr: the light curve.
    p_init: the initial guess for the period.
    plims: the (log) boundaries for the grid.
    N: the number of grid points.
    """
    # create the grid
    periods = np.linspace(np.exp(plims[0]), np.exp(plims[1]), N)

    # initial hyperparameters

if __name__ == "__main__":
    # fake data
    x = np.linspace(0, 10, 100)
    p = 2
    err = .1
    y = np.sin(2*np.pi*(1./p)*x) + np.random.randn(100)*err
    yerr = np.ones_like(y) * err

    p_init, plims = 2, np.log([.1, 5])
    GP_periodogram(x, y, yerr, p_init, plims, 10)
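
    # Editor's sketch (not part of the original script): one way the grid
    # evaluation could be finished, assuming the george 0.2-style API
    # imported above (george.GP, gp.compute, gp.lnlikelihood). The kernel
    # choice and the fixed non-period hyperparameters here are invented.
    periods = np.linspace(np.exp(plims[0]), np.exp(plims[1]), 10)
    lnlikes = []
    for per in periods:
        kernel = ExpSquaredKernel(1.0) * ExpSine2Kernel(2.0, per)
        gp = george.GP(kernel)
        gp.compute(x, yerr)
        lnlikes.append(gp.lnlikelihood(y))
    print('best grid period: %s' % periods[np.argmax(lnlikes)])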
mit
locksmithone/qcnsim
tag/20140102/doc/validations/weibull/weibullGenerator.py
3
3655
import numpy #import scipy import matplotlib.pyplot import math def weibullGenerator(scale, shape, start, end, step): ''' Generates Weibull sample lists per the parameters. Returns two lists of X and Y values distributed per Weibull. ''' weibullSamplesY = [] weibullSamplesX = [] for i in numpy.arange(start, end, step): weibullSamplesY.append((shape/scale)*((i/scale)**(shape-1.0))*(math.exp(-(i/scale)**shape))) weibullSamplesX.append(i) return weibullSamplesX, weibullSamplesY def readValuesFromFile(filename): ''' Reads values from a file and returns a list of floats. ''' yValues = [] # Y values to be read from file. fileHandle = open(filename, 'r') # Opens file for reading. #yValues= list(fileHandle) # Read all values into yValues. for line in fileHandle: yValues.append(float(line.rstrip())) fileHandle.close() return yValues # Now construct a map of parameters per Weibull samples. # Key is filename with samples, value is list of lists: # list1 is weibullGenerator parameters to generate a Weibull graph from matplotlib, # list2 is set of parameters to plot the samples from the filename. weibullParameterMap = { 'weibull_Scale1.0_Shape5.0.csv': [[1.0,5.0,0.0,3.0,.01], [0.0,3.0,0.0,2.0]], 'weibull_Scale1.0_Shape1.0.csv': [[1.0,1.0,0.0,3.0,.01], [0.0,3.0,0.0,1.0]], 'weibull_Scale1.0_Shape2.0.csv': [[1.0,2.0,0.0,3.0,.01], [0.0,3.0,0.0,0.9]], 'weibull_Scale1.0_Shape0.5.csv': [[1.0,0.5,0.0,3.0,.01], [0.0,3.0,0.0,5.0]] } # Iterate through dictionary and generate graphs. for filename, parameters in weibullParameterMap.items(): weibullSamplesX, weibullSamplesY = weibullGenerator(parameters[0][0], parameters[0][1], parameters[0][2], parameters[0][3], parameters[0][4]) print("Parameters: ", parameters) matplotlib.pyplot.figure() matplotlib.pyplot.plot(weibullSamplesX, weibullSamplesY) matplotlib.pyplot.grid(True) matplotlib.pyplot.xlabel("x values") matplotlib.pyplot.ylabel("Probability") matplotlib.pyplot.title('Weibull pdf: ' + str(parameters[0])) matplotlib.pyplot.savefig(filename + '_pdf.png') matplotlib.pyplot.show() #matplotlib.pyplot.close() ySamples = readValuesFromFile(filename) matplotlib.pyplot.figure() matplotlib.pyplot.hist(ySamples, bins=300, range=(parameters[1][0], parameters[1][1]), normed=True, color='r') matplotlib.pyplot.axis(parameters[1]) matplotlib.pyplot.grid(True) matplotlib.pyplot.xlabel("x values") matplotlib.pyplot.ylabel("Probability") matplotlib.pyplot.title('Samples from ' + filename) matplotlib.pyplot.savefig(filename + '_sample.png') matplotlib.pyplot.show() #matplotlib.pyplot.close() print("*** Done! ***") ##print (weibullGenerator(1.0,5.0,0.0,3.0,.001)) #weibullSamplesX, weibullSamplesY = weibullGenerator(1.0,5.0,0.0,3.0,.001) #matplotlib.pyplot.figure() ##matplotlib.pyplot.hist(weibullSamplesY, 100) #matplotlib.pyplot.plot(weibullSamplesX, weibullSamplesY) #matplotlib.pyplot.grid(True) #matplotlib.pyplot.show() ##matplotlib.pyplot.close() #print("Done Figure 1.") #ySamples = readValuesFromFile('weibull_Scale1.0_Shape5.0.csv') #print("Done reading.") ##print (ySamples) #matplotlib.pyplot.figure() #matplotlib.pyplot.hist(ySamples, 500) #matplotlib.pyplot.axis([0.0,3.0,0.0,650]) #matplotlib.pyplot.grid(True) #matplotlib.pyplot.show() ##matplotlib.pyplot.close() #print("Done Figure 2.")
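

# Editor's sketch (not part of the original script): cross-check the pdf
# against samples drawn with numpy's built-in generator. numpy.random.weibull
# takes the shape parameter and returns scale-1 samples, so multiply by the
# scale; the parameter values below are invented.
checkX, checkY = weibullGenerator(1.0, 2.0, 0.01, 3.0, 0.01)
checkSamples = 1.0 * numpy.random.weibull(2.0, 100000)
matplotlib.pyplot.figure()
matplotlib.pyplot.hist(checkSamples, bins=100, range=(0.0, 3.0), normed=True, color='r')
matplotlib.pyplot.plot(checkX, checkY, 'k')
matplotlib.pyplot.title('numpy samples vs. weibullGenerator pdf')
matplotlib.pyplot.show()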
lgpl-2.1
ccd-utexas/OLD-MAID
ProEMOnline.py
2
57911
# -*- coding: utf-8 -*-
"""
This script sets an initial layout for the ProEMOnline software. It uses the
PyQtGraph dockarea system and was designed from the dockarea.py example.

Keaton wrote this.
"""
#Import everything you'll need
from __future__ import absolute_import, division
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
import pickle #for saving layouts
from functools import partial
from glob import glob
from scipy import stats
from scipy.optimize import curve_fit
from scipy.fftpack import fft,fftfreq
import pandas as pd
import os
import subprocess
import csv
import sys
import time
import datetime as dt
import dateutil.parser
from astropy.io import fits
from scipy.interpolate import interp1d
import scipy.ndimage.filters as filters
from astropy.stats import biweight_location, biweight_midvariance
from photutils import daofind
from photutils import CircularAperture, CircularAnnulus, aperture_photometry
from pyqtgraph.dockarea import *
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt

# Local modules.
import read_spe

#Return a string of the current time
def timestring():
    date = dt.datetime.now()
    return date.strftime('%Y%m%d_%Hh%Mm%Ss')

#Function to save a screenshot
def saveScreenshot():
    ssfilename=os.path.splitext(spefile)[0]+'_'+timestring()+'.png'
    log("Writing screenshot to file "+ssfilename,2)
    p=QtGui.QPixmap.grabWidget(area)
    writeout = p.save(ssfilename, 'png')
    if not writeout: log("Saving screenshot failed!",3)

#### BEGIN PROGRAM ####
#The organization and behavior of the program are as follows:
#This program operates in four stages.
#Stage 0 - Program Initialized, waiting to open SPE file.
#Stage 1 - SPE file open, stars are being selected
#Stage 2 - Online data reduction and aperture photometry/plotting is being done.
#Stage 3 - End of data acquisition detected. Final data written to file. Timestamps verified. Log saved. Weather/time log data saved.
# -> revert to Stage 0.
stage=0 #start at 0

def stagechange(num):
    global stage
    if num in range(4):
        log("Program stage = "+str(num),1)
        stage=num
    else:
        log("Attempt to change stage to invalid value ("+str(num)+")",3)


#### STAGE 0 ####
#Set up the general GUI aspects
defaultdir = 'D:/sync_to_White_Dwarf_Archive/'#where to search for SPE files

#Set up main window with menu items
class WithMenu(QtGui.QMainWindow):

    def __init__(self):
        super(WithMenu, self).__init__()

        self.initUI()

    def initUI(self):
        #SETUP THE MENUBAR!
        #Note: Exit is protected on Mac. This works on Windows.
exitAction = QtGui.QAction('Exit', self) exitAction.setShortcut('Ctrl+Q') exitAction.setStatusTip('Exit application') exitAction.triggered.connect(QtGui.qApp.quit) #Open SPE openFile = QtGui.QAction('&Open SPE', self) openFile.setShortcut('Ctrl+O') openFile.setStatusTip('Open SPE File') #openFile.setCheckable(True) openFile.triggered.connect(self.openSPE) #Run Photometry runPhot = QtGui.QAction('&Run Photometry', self) runPhot.setShortcut('Ctrl+R') runPhot.setStatusTip('Run Aperture Photometry on Frames') runPhot.triggered.connect(self.run) #Update FT updateFT = QtGui.QAction('&Update FT', self) updateFT.setShortcut('Ctrl+U') updateFT.setStatusTip('Update Fourier Transform with Current Light Curve') updateFT.triggered.connect(self.updateFTfunct) #Run Autoguider autoguide = QtGui.QAction('Feed to &Autoguider', self) autoguide.setShortcut('Ctrl+A') autoguide.setStatusTip('Send most recently acquired frame to Guide82') autoguide.triggered.connect(self.toAutoguider) #Load dark for science frames loadDark = QtGui.QAction('Load Darks', self) loadDark.setStatusTip('Open SPE Calibrations for Dark Subtracting Science Images') loadDark.triggered.connect(self.openDark) #Load dark for flat frames loadDarkForFlats = QtGui.QAction('Load Darks for Flats', self) loadDarkForFlats.setStatusTip('Open SPE Calibrations for Dark Subtracting Flat Images') loadDarkForFlats.triggered.connect(self.openDarkForFlats) #Load flat loadFlat = QtGui.QAction('Load Flats', self) loadFlat.setStatusTip('Open SPE Calibrations for Flatfielding Science Images') loadFlat.triggered.connect(self.openFlat) #Restore points restorePoints = QtGui.QAction('Restore Points', self) restorePoints.setStatusTip('Return All Previously Discarded Points to the Light Curve.') restorePoints.triggered.connect(self.restorePts) #undo recenly selected bad point undo = QtGui.QAction('Undo Bad Point Selection', self) undo.setShortcut('Ctrl+Z') undo.setStatusTip('Return Most Recently Discarded Point to the Light Curve.') undo.triggered.connect(self.undoBad) #Save Layout saveLayout = QtGui.QAction('Save Layout', self) saveLayout.setStatusTip('Save the current dock layout') saveLayout.triggered.connect(self.saveLayout) #Load Layout loadLayout = QtGui.QAction('Load Layout', self) loadLayout.setStatusTip('Load a saved dock layout') loadLayout.triggered.connect(self.loadLayout) #changeSmoothing changeSmoothing = QtGui.QAction('Change Smoothing', self) changeSmoothing.setStatusTip('Change Light Curve Smoothing Parameters.') changeSmoothing.triggered.connect(self.changeSmooth) #save screenshot screenshot = QtGui.QAction('Save Screenshot', self) screenshot.setStatusTip('Save a Screenshot of the Main Window.') savescreenshot = partial(saveScreenshot) screenshot.triggered.connect(savescreenshot) #Menubar menubar = self.menuBar() #File Menu fileMenu = menubar.addMenu('File') fileMenu.addAction(openFile) fileMenu.addAction(runPhot) fileMenu.addAction(updateFT) fileMenu.addAction(autoguide) fileMenu.addAction(exitAction) #Calibrations Menu calibrationsMenu = menubar.addMenu('Calibrations') calibrationsMenu.addAction(loadDark) calibrationsMenu.addAction(loadDarkForFlats) calibrationsMenu.addAction(loadFlat) #Interactions menu interactionsMenu = menubar.addMenu('Interact') interactionsMenu.addAction(restorePoints) interactionsMenu.addAction(undo) self.changeApertureMenu = interactionsMenu.addMenu('Select Aperture Size') self.changeCompStarMenu = interactionsMenu.addMenu('Select Comp Star for Division') interactionsMenu.addAction(changeSmoothing) #Layout Menu layoutMenu 
= menubar.addMenu('Layout') layoutMenu.addAction(saveLayout) layoutMenu.addAction(loadLayout) #Output Menu outputMenu = menubar.addMenu('Output') outputMenu.addAction(screenshot) #Functions to save and load layouts layoutsDir = './layouts/' layoutsExt = '.p' def saveLayout(self): layoutName, ok = QtGui.QInputDialog.getText(self, 'Save layout', 'Enter name for this layout:') if ok: #Save dict in pickle format pickle.dump( area.saveState(), open( self.layoutsDir+layoutName+self.layoutsExt, "wb" ) ) def loadLayout(self): layouts = glob(self.layoutsDir+'*'+self.layoutsExt) if len(layouts) == 0: _ = QtGui.QMessageBox.warning(self,'Load layout','No saved layouts found.') else: layouts = [layout[len(self.layoutsDir):-1*len(self.layoutsExt)] for layout in layouts] layout, ok = QtGui.QInputDialog().getItem(self,'Load layout','Select layout: ',layouts) if ok: state = pickle.load(open(self.layoutsDir+layout+self.layoutsExt, "rb" ) ) area.restoreState(state) #Function to open SPE files to operate on. def openSPE(self): ''' Select a new target SPE file to work on. Open dialog box, select file, verify that it is a SPE file. ''' global defaultdir,rundir fname = str(QtGui.QFileDialog.getOpenFileName(self, 'Open SPE file', defaultdir,filter='Data (*.spe)')) if fname[-4:]=='.spe': log("Opening file "+fname,1) #Set the default directory to a couple levels up from this file rundir = os.path.dirname(fname) defaultdir = os.path.dirname(rundir) #set target log text as filename to start targetEdit.setText(os.path.basename(fname)[:-4]) #This needs to trigger a major chain of events stage1(fname) else: log("Invalid file type (must be SPE).",3) #Update the FT at user's command def updateFTfunct(self): global framenum updateft(i=framenum) def toAutoguider(self): if spefile != '': log("Opening separate program to send incoming data to Guide82.",2) subprocess.Popen(["python",os.path.join(os.path.dirname(os.path.abspath(__file__)),'toAutoguider.py'),spefile]) else: log("Open SPE file first before trying to send data to Guide82.",3) #Load Dark frames def openDark(self): global dark, darkExists, darkExp, darkDark fname = str(QtGui.QFileDialog.getOpenFileName(self, 'Open dark file', defaultdir,filter='Data (*.spe *.fits)')) if fname[-4:]=='.spe': log("Opening dark file "+fname,1) dspe = read_spe.File(fname) num_darks=dspe.get_num_frames() #get all frames in SPE file #stack as 3D numpy array (frames,_)=dspe.get_frame(0) frames=np.array([frames]) for i in range(1,num_darks): (thisframe,_)=dspe.get_frame(i) frames=np.concatenate((frames,[thisframe]),0) dark=np.median(frames,axis=0) darkExists = True log("Mean dark counts: "+str(np.mean(dark))) processframe() displayFrame(autoscale=True,markstars=False) #Write out master dark file as fits #Set up header prihdr = fits.Header() prihdr['OBJECT'] = 'dark' prihdr['IMAGETYP'] = 'dark' prihdr['REDUCED'] = dt.datetime.now().isoformat() prihdr['COMMENT'] = "Reduced by Keaton Bell's OLD MAID Software" if hasattr(dspe, 'footer_metadata'): footer_metadata = BeautifulSoup(dspe.footer_metadata, "xml") ts_begin = footer_metadata.find(name='TimeStamp', event='ExposureStarted').attrs['absoluteTime'] dt_begin = dateutil.parser.parse(ts_begin) prihdr['TICKRATE'] = int(footer_metadata.find(name='TimeStamp', event='ExposureStarted').attrs['resolution']) prihdr['DATE-OBS'] = str(dt_begin.isoformat()) prihdr['XBINNING'] = footer_metadata.find(name="SensorMapping").attrs['xBinning'] prihdr['YBINNING'] = footer_metadata.find(name="SensorMapping").attrs['yBinning'] prihdr['INSTRUME'] = 
footer_metadata.find(name="Camera").attrs['model'] prihdr['TRIGGER'] = footer_metadata.find(name='TriggerResponse').text prihdr['COMMENT'] = "SPE file has footer metadata" darkExp=np.round(float(footer_metadata.find(name='ExposureTime').text)/1000.) if darkExp != exptime: log("Exp times for dark and science frames do not match!",3) log("Exposure time for dark: "+str(darkExp)+" s") prihdr['EXPTIME'] = str(float(footer_metadata.find(name='ExposureTime').text)/1000.) #prihdr['SOFTWARE'] = footer_metadata.find(name='Origin') prihdr['SHUTTER'] = footer_metadata.find(name='Mode').text if footer_metadata.find(name='Mode').text != 'AlwaysClosed': prihdr['WARNING'] = 'Shutter not closed for dark frame.' log("Shutter not closed for dark frame.",3) else: darkDark=True else: prihdr['WARNING'] = "No XML footer metadata." log("No XML footer metadata.",3) #Set up fits object hdu = fits.PrimaryHDU(dark,header=prihdr) darkpath = os.path.dirname(fname) fitsfilename = 'master_'+os.path.basename(fname).split('.spe')[0]+'.fits' log("Writing master dark as "+fitsfilename) hdu.writeto(os.path.join(darkpath, fitsfilename),clobber=True) #Close SPE dspe.close() #option to load as fits elif fname[-5:]=='.fits': log("Opening dark file "+fname,1) hdulist = fits.open(fname) prihdr = hdulist[0].header dark=hdulist[0].data darkExp = np.round(float(prihdr['EXPTIME'])) if darkExp != exptime: log("Exp times for dark and science frames do not match!",3) log("Exposure time for dark: "+str(darkExp)+" s") log("Mean dark counts: "+str(np.mean(dark))) if prihdr['SHUTTER'] != 'AlwaysClosed': prihdr['WARNING'] = 'Shutter not closed for dark frame.' log("Shutter not closed for dark frame.",3) else: darkDark=True darkExists = True processframe() displayFrame(autoscale=True,markstars=False) hdulist.close() else: log("Invalid file type (must be SPE or FITS).",3) #Load Dark frames for flat calibration def openDarkForFlats(self): global darkForFlat, darkForFlatExists, darkForFlatExp, darkForFlatDark fname = str(QtGui.QFileDialog.getOpenFileName(self, 'Open SPE dark for flat calibration', defaultdir,filter='Data (*.spe *.fits)')) if fname[-4:]=='.spe': log("Opening dark file "+fname+" for flat calibration.",1) dspe = read_spe.File(str(fname)) num_darks=dspe.get_num_frames() #get all frames in SPE file #stack as 3D numpy array (frames,_)=dspe.get_frame(0) frames=np.array([frames]) for i in range(1,num_darks): (thisframe,_)=dspe.get_frame(i) frames=np.concatenate((frames,[thisframe]),0) darkForFlat=np.median(frames,axis=0) darkForFlatExists = True log("Mean dark counts for flat: "+str(np.mean(darkForFlat))) #Write out master dark file as fits #Set up header prihdr = fits.Header() prihdr['OBJECT'] = 'dark' prihdr['IMAGETYP'] = 'dark' prihdr['REDUCED'] = dt.datetime.now().isoformat() prihdr['COMMENT'] = "Reduced by Keaton Bell's OLD MAID Software" if hasattr(dspe, 'footer_metadata'): footer_metadata = BeautifulSoup(dspe.footer_metadata, "xml") ts_begin = footer_metadata.find(name='TimeStamp', event='ExposureStarted').attrs['absoluteTime'] dt_begin = dateutil.parser.parse(ts_begin) prihdr['TICKRATE'] = int(footer_metadata.find(name='TimeStamp', event='ExposureStarted').attrs['resolution']) prihdr['DATE-OBS'] = str(dt_begin.isoformat()) prihdr['XBINNING'] = footer_metadata.find(name="SensorMapping").attrs['xBinning'] prihdr['YBINNING'] = footer_metadata.find(name="SensorMapping").attrs['yBinning'] prihdr['INSTRUME'] = footer_metadata.find(name="Camera").attrs['model'] prihdr['TRIGGER'] = footer_metadata.find(name='TriggerResponse').text 
prihdr['COMMENT'] = "SPE file has footer metadata" darkForFlatExp=np.round(float(footer_metadata.find(name='ExposureTime').text)/1000.) log("Exposure time for dark for flat: "+str(darkForFlatExp)+" s") prihdr['EXPTIME'] = str(float(footer_metadata.find(name='ExposureTime').text)/1000.) #prihdr['SOFTWARE'] = footer_metadata.find(name='Origin') prihdr['SHUTTER'] = footer_metadata.find(name='Mode').text if footer_metadata.find(name='Mode').text != 'AlwaysClosed': prihdr['WARNING'] = 'Shutter not closed for dark frame.' log("Shutter not closed for dark frame.",3) else: darkForFlatDark=True else: prihdr['WARNING'] = "No XML footer metadata." log("No XML footer metadata.",3) #Set up fits object hdu = fits.PrimaryHDU(darkForFlat,header=prihdr) darkpath = os.path.dirname(fname) fitsfilename = 'master_'+os.path.basename(fname).split('.spe')[0]+'.fits' log("Writing master dark as "+fitsfilename) hdu.writeto(os.path.join(darkpath, fitsfilename),clobber=True) #Close SPE dspe.close() #Option to load as Fits elif fname[-5:]=='.fits': log("Opening dark file "+fname+" for flat calibration.",1) hdulist = fits.open(fname) prihdr = hdulist[0].header darkForFlat=hdulist[0].data darkForFlatExp = np.round(float(prihdr['EXPTIME'])) log("Exposure time for dark for flat: "+str(darkForFlatExp)+" s") log("Mean dark counts: "+str(np.mean(darkForFlat))) if prihdr['SHUTTER'] != 'AlwaysClosed': prihdr['WARNING'] = 'Shutter not closed for dark frame.' log("Shutter not closed for dark frame for flat.",3) else: darkForFlatDark=True darkForFlatExists = True processframe() displayFrame(autoscale=True,markstars=False) hdulist.close() else: log("Invalid file type (must be SPE or FITS).",3) #Load Flat frames def openFlat(self): global flat, flatExists, flatReduced fname = str(QtGui.QFileDialog.getOpenFileName(self, 'Open SPE flat file', defaultdir,filter='Data (*.spe *.fits)')) if fname[-4:]=='.spe': if darkForFlatExists == False: log("Import dark for reducting flats before importing flat SPE file.",3) else: log("Opening flat file "+fname,1) fspe = read_spe.File(fname) num_flats=fspe.get_num_frames() #get all frames in SPE file #stack as 3D numpy array (frames,_)=fspe.get_frame(0) modes=[] frames = frames - darkForFlat modes.append(stats.mode(frames.flatten())[0][0]) frames=np.array([frames/modes[0]]) for i in range(1,num_flats): (thisframe,_)=fspe.get_frame(i) thisframe = thisframe-darkForFlat #modes.append(stats.mode(thisframe.flatten())[0][0]) modes.append(np.median(thisframe.flatten())) frames=np.concatenate((frames,[thisframe/modes[i]]),0) flat=np.median(frames,axis=0) flatExists=True log("Median flat counts: "+str(np.median(modes))) processframe() displayFrame(autoscale=True,markstars=False) #Write out fits file #Set up header prihdr = fits.Header() prihdr['OBJECT'] = 'flat' prihdr['IMAGETYP'] = 'flat' if hasattr(fspe, 'footer_metadata'): footer_metadata = BeautifulSoup(fspe.footer_metadata, "xml") ts_begin = footer_metadata.find(name='TimeStamp', event='ExposureStarted').attrs['absoluteTime'] dt_begin = dateutil.parser.parse(ts_begin) prihdr['TICKRATE'] = int(footer_metadata.find(name='TimeStamp', event='ExposureStarted').attrs['resolution']) prihdr['DATE-OBS'] = str(dt_begin.isoformat()) prihdr['XBINNING'] = footer_metadata.find(name="SensorMapping").attrs['xBinning'] prihdr['YBINNING'] = footer_metadata.find(name="SensorMapping").attrs['yBinning'] prihdr['INSTRUME'] = footer_metadata.find(name="Camera").attrs['model'] prihdr['TRIGGER'] = footer_metadata.find(name='TriggerResponse').text prihdr['MODE'] = 1 #normalized 
prihdr['COMMENT'] = "SPE file has footer metadata" prihdr['EXPTIME'] = str(float(footer_metadata.find(name='ExposureTime').text)/1000.) flatexptime = np.round(float(footer_metadata.find(name='ExposureTime').text)/1000.) #check that dark exp time matches flat if flatexptime == darkForFlatExp: flatReduced = True else: log("Exp times for dark and flat do not match!",3) if darkForFlatExp == 0: log("Bias being used for flat subtraction.",1) flatReduced=True #prihdr['SOFTWARE'] = footer_metadata.find(name='Origin') prihdr['SHUTTER'] = footer_metadata.find(name='Mode').text prihdr['REDUCED'] = dt.datetime.now().isoformat() else: prihdr['WARNING'] = "No XML footer metadata." log("No XML footer metadata.",3) #Set up fits object #Only write flat if properly dark subtracted: if darkForFlatDark and flatReduced: hdu = fits.PrimaryHDU(flat,header=prihdr) flatpath = os.path.dirname(fname) fitsfilename = 'master_'+os.path.basename(fname).split('.spe')[0]+'.fits' log("Writing master flat as "+fitsfilename) hdu.writeto(os.path.join(flatpath, fitsfilename),clobber=True) #Close SPE fspe.close() #Option to load as Fits elif fname[-5:]=='.fits': log("Opening flat file "+fname,1) hdulist = fits.open(fname) prihdr = hdulist[0].header flat=hdulist[0].data flatExists = True flatmode= float(prihdr["mode"]) if flatmode == 1: #Properly normalized? flatReduced=True else: log("Mode of master flat is "+str(flatmode)+". Not properly normalized?",3) processframe() displayFrame(autoscale=True,markstars=False) hdulist.close() else: log("Invalid file type (must be SPE).",3) #Restore previously "bad" points def restorePts(self): global bad log("Deselecting "+str(len(bad))+" points.") bad=[] updatelcs(i=framenum) #Undo most recently selected "bad" point def undoBad(self): global bad _ = bad.pop() #Set up aperture size menu options def setupApsizeMenu(self): for size in apsizes: self.changeApertureMenu.addAction(str(size)+' pixels',lambda s=size: setApSize(s)) #Set up comp star selection menu options def addCompStarOption(self,i): self.changeCompStarMenu.addAction('Comp Star #'+str(i),lambda s=i: setCompStar(s)) #Change Smoothing parameters def changeSmooth(self): kernel.openKernelDialog() #Run Photometry def run(self): #Do aperture photometry on selected stars global numstars, selectingstars if stage == 1: if len(stars) == 0: log("No stars selected. Select stars before running.",3) else: numstars = len(stars) #Write original coordinates and seeing to phot_coords.orig f = open(rundir+'/phot_coords.orig', 'w') for j,star in enumerate(stars): f.write('{:.2f} {:.2f} {:.2f}\n'.format(star[0],star[1],seeing[j])) f.close() selectingstars=False stage2() #Confirm Quit def closeEvent(self, event): reply = QtGui.QMessageBox.question(self, 'Message', "Really quit?", QtGui.QMessageBox.Yes | QtGui.QMessageBox.No, QtGui.QMessageBox.No) if reply == QtGui.QMessageBox.Yes: event.accept() else: event.ignore() # Make the App have a window and dock area. 
app = QtGui.QApplication([]) win = WithMenu() area = DockArea() win.setCentralWidget(area) win.resize(1500,800) win.setWindowTitle('OLD MAID Software') ## Set up each of the docks (to hold the widgets) d1 = Dock("Observing Log", size=(500,500)) d2 = Dock("Process Log", size=(500,500)) d3 = Dock("Fourier Transform", size=(500,500)) d4 = Dock("Smoothed Light Curve", size=(1000,250)) d5 = Dock("Image", size=(500,500)) d6 = Dock("Divided Light Curve", size=(1000,250)) d7 = Dock("Raw Counts", size=(500,250)) d8 = Dock("Sky Brightness", size=(1000,250)) d9 = Dock("Seeing", size=(1000,250)) #Define initial layout area.addDock(d4, 'left') area.addDock(d1, 'right',d4) area.addDock(d6, 'above', d4) area.addDock(d9, 'bottom', d4) area.addDock(d8, 'above', d9) area.addDock(d7, 'above', d8) area.addDock(d5, 'bottom',d1) area.addDock(d2, 'bottom', d5) area.addDock(d3, 'bottom', d7) area.moveDock(d5,'right',d3) #Define and place widgets into the docks ## First dock holds the Observing Log #Type of widget: Form w1 = pg.LayoutWidget() #Name the form elements observer = QtGui.QLabel('Observer') target = QtGui.QLabel('Target') filt = QtGui.QLabel('Filter') logtext = QtGui.QLabel('Log') #Define the types of fields observerEdit = QtGui.QLineEdit() targetEdit = QtGui.QLineEdit() filtEdit = QtGui.QComboBox() filtEdit.addItems(["BG40","u'","g'","r'","i'","z'","Other"]) logEdit = QtGui.QTextEdit() logEdit.setText("WARNING: None of these log fields are saved!") #Put the fields in the form w1.addWidget(observer, 1, 0) w1.addWidget(observerEdit, 1, 1) w1.addWidget(target, 2, 0) w1.addWidget(targetEdit, 2, 1) w1.addWidget(filt, 3, 0) w1.addWidget(filtEdit, 3, 1) w1.addWidget(logtext, 4, 0) w1.addWidget(logEdit, 4, 1, 6, 1) #Put the widget in the dock d1.addWidget(w1) ## Process Log # Records activity. w2 = pg.LayoutWidget() processLog = QtGui.QTextEdit() processLog.setReadOnly(True) w2.addWidget(processLog, 0, 0, 6, 1) d2.addWidget(w2) # This widget need special functions to get messages: def log(text,level=0): '''log messages to the process log and log file text is the message for the log level indicated how important it is: level=0: Routine background process: gray text; level=1: User influenced action: black text; level=2: Major change: bold black; level=3: Warning message: bold red; ''' text=str(text) colors = ['darkgray','black','black','red'] prefix = ['','','','WARNING: '] fontweight = [50,50,75,75] if level in range(4): processLog.setTextColor(QtGui.QColor(colors[level])) processLog.setFontWeight(fontweight[level]) processLog.append(prefix[level]+text) else: log('Level assigned to message "'+text+'" out of range.',level=3) ## Light Curve # It's a plot w6 = pg.PlotWidget(title="Divided Light Curve",labels={'left': 'rel. flux', 'bottom': 'time (s)'}) # Set up plot components # Raw points s1 = pg.ScatterPlotItem(brush=(255,0,0), pen='w',symbol='o') # Bad (ignored) points #Not currently displayed since it causes scaling issues. #s2 = pg.ScatterPlotItem(brush=(255,0,0), pen='b',symbol='o') # Connecting lines l1 = pg.PlotCurveItem() #Add components to plot widget. 
w6.addItem(s1) #w6.addItem(s2) w6.addItem(l1) #Add widget to dock d6.addWidget(w6) # Make points change color when clicked def clicked(plot, points): global bad for p in points: if p.pos()[0]/exptime in bad: bad.remove(p.pos()[0]/exptime) else: bad.append(p.pos()[0]/exptime) updatelcs(i=framenum) s1.sigClicked.connect(clicked) #s2.sigClicked.connect(clicked) ## Smoothed Light Curve w4 = pg.PlotWidget(title="Smoothed Light Curve",labels={'left': 'smoothed flux', 'bottom': 'time (s)'}) ss1 = pg.ScatterPlotItem(brush=(255,0,0), pen='w',symbol='o') sl1 = pg.PlotCurveItem() w4.addItem(ss1) w4.addItem(sl1) d4.addWidget(w4) ## Raw Star/Sky Counts w7 = pg.PlotWidget(title="Raw Star Counts",labels={'left': 'flux summed in aperture', 'bottom': 'time (s)'}) d7.addWidget(w7) #Hold the individual plot items in this list once they are created: rawcounts=[] ## Sky w8 = pg.PlotWidget(title="Sky Brightness",labels={'left': 'median sky counts', 'bottom': 'time (s)'}) sky = pg.PlotCurveItem() w8.addItem(sky) d8.addWidget(w8) ## Seeing w9 = pg.PlotWidget(title="Seeing",labels={'left': 'FWHM (pixels)', 'bottom': 'time (s)'}) d9.addWidget(w9) gridlines = pg.GridItem() w9.addItem(gridlines) #Hold the individual plot items in this list once they are created: seeingplots = [] ## Fourier Transform w3 = pg.PlotWidget(title="Fourier Transform",labels={'left': 'amplitude (mma)', 'bottom': 'freq (muHz)'}) ft = w3.plot(pen='y') d3.addWidget(w3) ## Image w5 = pg.ImageView() w5.ui.roiBtn.hide() #w5.ui.normBtn.hide() #Causing trouble on windows #Define function for selecting stars. (must be defined before linking the click action) def click(event):#Linked to image click event global stars, seeing if event.button() == 1 and selectingstars: event.accept() pos = event.pos() #x and y are swapped in the GUI! x=pos.x() y=pos.y() #log('Clicked at ({:.2f}, {:.2f})'.format(x,y),level=0) #improve coordinates dx,dy,newseeing = improvecoords(x,y) #round originals so original position *within* pixel doesn't affect answer newcoords=[np.floor(x)+dx,np.floor(y)+dy] stars.append(newcoords) seeing.append(newseeing) #make menuoption for comp star selection if len(stars) > 1: win.addCompStarOption(len(stars)-1) #Mark stars in image display targs.setData([p[0] for p in stars],[p[1] for p in stars]) targs.setPen(pencolors[0:len(stars)]) #Set up plot for raw counts and seeing: rawcounts.append(pg.ScatterPlotItem(pen=pencolors[len(stars)-1],symbol='o',size=1)) seeingplots.append(pg.PlotCurveItem(pen=seeingcolors[len(stars)-1])) log('Star selected at ({:.2f}, {:.2f})'.format(newcoords[0],newcoords[1]),level=1) elif event.button() == 2: event.accept()#Passed on to other functionality if not accepted. print "RIGHT!" w5.getImageItem().mouseClickEvent = click #Function defined below #w5.keyPressEvent = moveCircles # Seems to be the right thing for detecting frame changes, #But I can't connect to it without overriding other behavior. May need to subclass this. #Set up plot for apertures around stars #print QtGui.QColor.colorNames() for available names. 
stringcolors=['red','green','blue','magenta','orange','yellow',
              'darkred','darkgreen','darkblue','darkmagenta','darkorange','darkgoldenrod',
              'hotpink','seagreen','skyblue','salmon','brown','lightyellow']
pencolors = [pg.mkPen(QtGui.QColor(c), width=3) for c in stringcolors]
seeingcolors = [pg.mkPen(QtGui.QColor(c), width=1.5) for c in stringcolors]
targs = pg.ScatterPlotItem(brush=None, pen=pencolors[0],symbol='o',pxMode=False,size=8)
w5.addItem(targs)

#Add widget to dock
d5.addWidget(w5)

## Show the program!
win.show()
win.raise_()
#win.activateWindow()

# I think everything is set up enough to start doing stuff

# Send initial message to process log.
log("ProEMOnline initialized",2)
#log("Development version. Do not trust.",3)
stagechange(0)
log("Open SPE file to begin analysis.",1)

#### STAGE 1 ####
# Stage 1 starts when a SPE file is loaded.
# It's the "getting everything set up" stage
# Since the SPE file is loaded by the menu action, this will be one big
# function that is called on the new image.

#First define all the variables everything will need access to:
#These will be called into action as global variables.

#SPE Filename
spefile = ''
#SPE Data
spe=[]
#SPE file directory
rundir=''
#Does SPE have a footer?
hasFooter=False
#Number of frames in currently read spe file
numframes=0
#Exposure time for science frames
exptime=1. #If it can't be figured out, plots are in terms of frame #
#Dark data
dark = []
darkExists=False
darkExp=0 #exp time should match spe exptime
darkDark=False #shutter closed?
darkForFlat = []
darkForFlatExists=False
darkForFlatExp=0
darkForFlatDark=False
#Flat data
flat = []
flatExists=False
flatReduced=False #proper dark subtracted?
#Flag whether full reductions are being done (*correct* darks and flat)
#Number of last *reduced* (photometry measures) frame
framenum=-1 #none yet
#Flag to indicate whether we are currently selecting stars in the frame:
selectingstars = False
#Number of stars to do photometry on (target first)
numstars = 0 #0 means we haven't selected stars yet.
#Star coords
stars = [] #list of list of list of coords
#Image data:
img=[] #only hold current image to save time
#And another version to look nice
displayimg=[] #only hold current image to save time
#Keep track of "Bad" points
bad=[]
#Elapsed timestamps
rawtimes=[] #start of timestamp
#Search radius (box for now), improve later
pixdist=10 #(super)pixels
#List of median background counts:
backmed=[]
#List of background variances
backvar=[]
#Seeing for each star,frame:
seeing=[]
#Binning
binning=4

def stage1(fname):
    #Load SPE File
    #Access needed global vars
    global spefile,spe,binning,exptime,dark,flat
    #Announce Stage 1
    stagechange(1)
    #Record SPE filename this once
    spefile = fname
    #Read in SPE data
    spe = read_spe.File(spefile)
    binning = 1024/spe.get_frame(0)[0].shape[0]
    log(str(spe.get_num_frames()) + ' frames read in.')
    exptime=getexptime(spe)
    log('Inferred exposure time: '+str(exptime)+' s')
    if hasattr(spe, 'footer_metadata'):
        #log('SPE file has footer.')
        exptime=np.round(float(BeautifulSoup(spe.footer_metadata, "xml").find(name='ExposureTime').text)/1000.)
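        #ExposureTime in the XML footer is stored in ms; when a footer is
        #present this value supersedes the exposure time inferred from frame
        #timestamps by getexptime() below.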
        #log('Exposure time from footer: '+str(exptime)+' s')
    #now display the first frame
    processframe()
    displayFrame(autoscale=True,markstars=False)
    #Load calibration frames and set up
    log("Please load dark, flat, and dark for flat files",1)
    dark = np.zeros(img[0].shape)
    flat = np.ones(img[0].shape)
    #Select stars:
    selectstars()
    #spe.close() #In real version we'll close spe
    win.setupApsizeMenu()

#Determine the exposure time of a SPE file without a footer
def getexptime(thisspe):
    #Input open SPE file
    #don't read lots of frames in large files
    numtoread = min([thisspe.get_num_frames(),11])
    tstamps = np.zeros(numtoread)
    for f in range(numtoread):
        tstamps[f] = thisspe.get_frame(f)[1]['time_stamp_exposure_started']
    timediff = tstamps[1:numtoread]-tstamps[:numtoread-1]
    return np.round(np.median(timediff/1e6))

#Define all the stuff that needs to be done to each incoming frame
def processframe(i=0):
    global img,displayimg,rawtimes,backmed,backvar,framenum
    (thisframe,thistime) = spe.get_frame(i)
    #calibrate (doesn't do anything if calibration frames are not available):
    if darkExists: thisframe=(thisframe-dark)
    if flatExists: thisframe=thisframe/flat
    #read in frame
    img=np.transpose(thisframe)
    backgroundmed,backgroundvar=charbackground()
    #append stuff to global variables
    #Replace if this frame already exists, otherwise append
    if i <= framenum: #replace
        #log('Re-processing frame '+str(i)+' of '+str(framenum))
        rawtimes[i]=thistime['time_stamp_exposure_started']
        backmed[i]=backgroundmed
        backvar[i]=backgroundvar
    else: #append
        #log('Processing frame '+str(i)+' of '+str(framenum))
        rawtimes.append(thistime['time_stamp_exposure_started'])
        backmed.append(backgroundmed)
        backvar.append(backgroundvar)
    #make display image
    newdisplayimg=np.copy(img)
    newdisplayimg[0,0]=0
    imgvals = newdisplayimg.flatten()
    img99percentile = np.percentile(imgvals,99)
    newdisplayimg[newdisplayimg > img99percentile] = img99percentile
    #log("Framenum: "+str(framenum),2)
    #Replace if this frame already exists, otherwise append
    displayimg=newdisplayimg
    framenum=i

#Function to characterize the background to find stellar centroids accurately
#This should be done for each frame as it's read in
def charbackground():
    """Characterize the image background, median and variance
    for frame currently held in img
    """
    backgroundmed = biweight_location(img)
    backgroundvar = biweight_midvariance(img)
    return backgroundmed, backgroundvar

#show the image to the widget
def displayFrame(autoscale=False,markstars=True):
    """Display an RGB image
    i is index to display
    Autoscale optional.
    Return nothing.
    """
    #Make sure i is in range
    if autoscale:
        #lowlevel=np.min(thisimg[thisimg > 0])
        lowlevel=np.min(displayimg)
        if np.sum(displayimg > 0) > 100:
            lowlevel=np.percentile(displayimg[displayimg > 0],3)
        highlevel=np.max(displayimg)-1
        w5.setImage(np.array(displayimg),autoRange=True,levels=[lowlevel,highlevel],)
    else:
        w5.setImage(np.array(displayimg),autoRange=False,autoLevels=False)
    #Draw position circles:
    if markstars and len(stars) > 0:
        targs.setData([p[0] for p in stars[framenum]],[p[1] for p in stars[framenum]])
        targs.setSize(2.*apsizes[apsizeindex])
        targs.setPen(pencolors[0:numstars])

def selectstars():
    '''Select stars in the current frame.

    Click to select any number in the first image.
    Click to select numstars in later images to get following back on track.
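    Selection ends when the Run menu action calls run(), which sets
    selectingstars back to False.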
''' global selectingstars selectingstars = True def gaussian(x, A, sigma): #Define a gaussian for finding FWHM return A*np.exp(-(x)**2/(2.*sigma**2)) def improvecoords(x,y,i=framenum,pixdist=pixdist,fwhm=4.0,sigma=5.): """Improve stellar centroid position from guess value. (one at a time) #return the adjustment than needs to be made in x and y directions #also calculate the FWHM seeing """ #x=(1024/binning)-x #y=(1024/binning)-y #Keep track of motion delta = np.zeros(2) #Get image subregion around guess position #Need to be careful not to ask for out-of-range indexes near a border x0=x-pixdist y0=y-pixdist xdist=2*pixdist ydist=2*pixdist if x0 < 0: #if near the left edge x0 = 0 #subregion from near given position delta[0] += pixdist-x #adjust delta accordingly if y0 < 0: #same in the y direction y0 = 0 delta[1] += pixdist-y if x+pixdist > img.shape[0]: xdist = img.shape[0]-x+pixdist if y+pixdist > img.shape[1]: ydist = img.shape[1]-y+pixdist subdata=img[x0:x0+xdist,y0:y0+ydist] #print subdata.shape sources = daofind(subdata - backmed[i], sigma*backvar[i], fwhm, sharplo=0.1, sharphi=1.5, roundlo=-2.0, roundhi=2.0) #From what I can tell, daofind returns x and y swapped, so fix it returnedx = sources['ycentroid'] returnedy = sources['xcentroid'] thisseeing = np.nan if len(sources) != 0: strongsignal= np.argmax(sources['peak']) delta[0]+=returnedx[strongsignal]-pixdist delta[1]+=returnedy[strongsignal]-pixdist #Fit with a gaussian seeingdata = subdata.flatten() - backmed[i] dist = [] for j in np.arange(subdata.shape[1])+0.5: for k in np.arange(subdata.shape[0])+0.5: dist.append(np.sqrt((returnedy[strongsignal]-k)**2. +(returnedx[strongsignal]-j)**2.)) dist=np.array(dist).flatten()#distance between new coord and pixel centers #plt.scatter(dist,seeingdata) try: #ignores error if max iterations is hit p0=[1000.,4.]#initial guesses popt,_ = curve_fit(gaussian,np.append(dist,dist*-1.),np.append(seeingdata,seeingdata),p0=p0) thisseeing = np.abs(popt[-1])*2.3548 #plt.plot(np.arange(0,10,.1),gaussian(np.arange(0,10,.1),popt[0],popt[1])) except RuntimeError: print "ERROR: gaussian fit did not converge for a star in frame "+str(i) #plt.show() else: delta=np.zeros(2) #also measure the seeing in this step: #check that unique source found ''' if len(sources) == 0: log("Frame #"+str(i),1) log("WARNING: no sources found in searched region near ({:.2f}, {:.2f}).".format(x,y)) #delta = [0,0] in this case else: if len(sources) > 1: log("Frame #"+str(i),1) log("WARNING: non-unique solution found for target near ({:.2f}, {:.2f}).".format(x,y)) log(str(len(sources))+" signals in window. Using brightest.") #Take brightest star found ''' #handle stars that were not found #Move this outside this function """ if [0,0] in delta and follow: meandeltax=np.mean(delta[np.where(delta[:,0] != 0),0]) meandeltay=np.mean(delta[np.where(delta[:,1] != 0),1]) delta[np.where(delta[:,0] == 0)] += [meandeltax,meandeltay] """ return delta[0],delta[1],thisseeing #### STAGE 2 #### #Aperture details (provide a way to change these!) apsizes=np.arange(1,11) apsizeindex=3 r_in = 16. #inner sky annulus radius #change in terms of binning eventually r_out = 24. 
#outer sky annulus radius #change in terms of binning eventually

def setApSize(size):
    global apsizeindex
    log("Aperture size set to "+str(size)+" pixels.",1)
    #log("(Updates on next frame.)")
    if size in apsizes:
        apsizeindex=np.where(apsizes == size)[0][0]
        targs.setSize(2*size)# Currently doesn't update until next click/frame
        if stage > 1:
            updatelcs(i=framenum)

compstar = 1 #which star to divide by
def setCompStar(s):
    global compstar
    compstar = s
    log("Now dividing by comparison star #"+str(s),1)
    updatelcs(framenum)

#Phot results: variables to hold light curves and uncertainties
photresults=np.array([])

#Run the stage 2 loop
def stage2():
    global stars,seeing, spe, stage, hasFooter
    stagechange(2)
    #Add plot items for raw counts panel to plot
    for splot in rawcounts:
        w7.addItem(splot)
    for splot in seeingplots:
        w9.addItem(splot)
    #Make stars array an array of arrays of star coord arrays (yikes)
    # i.e., it needs to get pushed a level deeper
    stars=[stars]
    #same with seeing
    seeing=np.array([seeing])
    #Run photometry on the first frame
    dophot(0)
    updatelcs(i=0)
    updatehack()
    #Start timer that looks for new data
    timer2.start(min(exptime*1000.,5000.))# shorter of exptime and 5 sec
    timer3.start(1.*60*1000)#update every 1 minute
    #This currently freezes up the UI. Need to thread, but not enough time
    #to implement this currently. Use a hack for now
    '''
    #Run the loop:
    fsize_spe_old = 0
    while not hasFooter:
        #Update only if there's new data
        fsize_spe_new = os.path.getsize(spefile)
        if fsize_spe_new > fsize_spe_old:
            spe = read_spe.File(spefile)
            numframes = spe.get_num_frames()
            log('Processing frames '+str(framenum)+'-'+str(numframes),1)
            while framenum < numframes:
                nextframe()
            if hasattr(spe, 'footer_metadata'):
                hasFooter = True
                log('SPE footer detected. Data acquisition complete.',2)
                stagechange(3)
                spe.close()
        fsize_spe_old = fsize_spe_new
    '''

fsize_spe_old = 0#Keep track if the new spe file is larger than the old one
def updatehack():
    global spe, hasFooter, numframes,fsize_spe_old
    #Only look for new data if not currently processing new data
    if not timer.isActive():
        #Update only if there's new data
        fsize_spe_new = os.path.getsize(spefile)
        if fsize_spe_new > fsize_spe_old and stage ==2:
            spe = read_spe.File(spefile)
            numframes = spe.get_num_frames()
            if framenum+1==numframes-1: log('Processing frame '+str(framenum+1))
            else: log('Processing frames '+str(framenum+1)+'-'+str(numframes-1),1)
            timer.start(100)
            #Update plots
            updatelcs(i=framenum)
            if hasattr(spe, 'footer_metadata'):
                hasFooter = True
                timer3.stop()
        fsize_spe_old = fsize_spe_new

def nextframehack():
    #call nextframe until you're caught up
    global framenum,spe
    nextframe()
    updatelcs(i=framenum)
    if framenum >= numframes-1:
        timer.stop()
        updateft(i=framenum)
        if hasFooter:
            log('SPE footer detected. Data acquisition complete.',2)
            stagechange(3)
            log("Image processing complete",2)
            writetimestamps()
            displayFrame(autoscale=True)
            spe.close()

#This timer catches up on photometry
timer = pg.QtCore.QTimer()#set up timer to avoid while loop
timer.timeout.connect(nextframehack)

#This timer checks for new data
timer2 = pg.QtCore.QTimer()
timer2.timeout.connect(updatehack)

#For demo purposes, read in the next frame of the spe file each time this is called
def nextframe():
    global stars, seeing
    #if stage == 2:
    oldcoords = stars[framenum]
    processframe(i=framenum+1) #Frame num increases here.
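    #Re-centroid each star starting from its position in the previous frame,
    #then record the refined coordinates and per-star seeing for this frame.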
newcoords=[] newseeing=[] for coord in oldcoords: dx,dy,thisseeing = improvecoords(coord[0],coord[1],i=framenum) newcoords.append([np.floor(coord[0])+.5+dx,np.floor(coord[1])+.5+dy]) newseeing.append(thisseeing) stars.append(newcoords) seeing = np.append(seeing,[newseeing],axis=0) #Show the frame displayFrame(autoscale=True,markstars=True) #Perform photometry dophot(i=framenum) #Update light curves #updatelcs(i=framenum) #only after all the new photometry is done. def dophot(i): '''Do photometric measurements. Stars have been selected. Do aperture photometry on given frame ''' global photresults #print "dophot(i) called with i="+str(i) #Do the aperture photometry #The aperture_photometry() function can do many stars at once #But you must first do a background subtraction #We're going to save a lot of information in this step: #Total counts and uncertainty for every aperture size for every star #And eventually for every frame... #Note that the photometry package seems to reference x and y coords #as the tranpose of what we've been using. Switch the order here: coords = [star[::-1] for star in stars[i]] thisphotometry = np.zeros((len(coords),len(apsizes))) for n in range(numstars): #Loop through the stars in the image #annulus_aperture = CircularAnnulus(coords[n], r_in=r_in, r_out=r_out) #print aperture_photometry(img[i],annulus_aperture).keys() #background_mean = aperture_photometry(img[i],annulus_aperture)['aperture_sum'][0]/annulus_aperture.area() #NOTE on the above line: This should really be a median! #Issue 161 on photutils https://github.com/astropy/photutils/issues/161 is open as of 09/28/15 gain = 12.63 #? From PI Certificate of Performance for "traditional 5MHz gain." Confirm this value! #loop through aperture sizes for j,size in enumerate(apsizes): aperture = CircularAperture(np.array(coords[n]), r=size) #phot = aperture_photometry(x-background_mean,aperture,error=backgroundvar,gain=gain) #Why am I getting negative numbers? #phot = aperture_photometry(img[i]-np.median(img),aperture) phot = aperture_photometry(img-backmed[i],aperture) thisphotometry[n,j]=phot['aperture_sum'][0] #print "photometry ",thisphotometry if i == 0: photresults = np.array([thisphotometry]) else: #print "photresults dimensions are "+str(photresults.shape) #print "trying to append shape "+str(thisphotometry.shape) photresults = np.append(photresults,[thisphotometry],axis=0) #print "photresults dimensions are "+str(photresults.shape) #yay. This deserves to all be checked very carefully, especially since the gain only affects uncertainty and not overall counts. 
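#The notes in dophot() above point out that the annulus background should
#really be a median rather than a mean (photutils Issue 161). A minimal
#sketch of that idea in plain numpy, kept free of any photutils version
#dependence; the function name and its defaults are illustrative assumptions
#and it is not wired into the photometry loop:
def median_sky_in_annulus(image, x, y, r_inner=r_in, r_outer=r_out):
    """Median counts in an annulus r_inner <= r < r_outer around (x, y).

    (x, y) follow the same pixel convention as the aperture coords in dophot().
    """
    yy, xx = np.indices(image.shape)
    #Squared distance of every pixel center from the given position
    r2 = (xx - x)**2. + (yy - y)**2.
    ring = (r2 >= r_inner**2.) & (r2 < r_outer**2.)
    return np.median(image[ring])
#e.g., skylevel = median_sky_in_annulus(img, xc, yc) for some star position
#(xc, yc); this could stand in for the commented-out background_mean above.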
#Allow different kernel types:
kerneltypes = ['Uniform','Epanechnikov']

#set up a dialog to change the kernel details:
class KernelDialog(QtGui.QDialog):
    def __init__(self, parent=None):
        super(KernelDialog, self).__init__(parent)
        typeLabel = QtGui.QLabel("Kernel &type")
        self.typeEdit = QtGui.QComboBox()
        self.typeEdit.addItems(kerneltypes)
        #self.typeEdit.setCurrentIndex(currentind)
        typeLabel.setBuddy(self.typeEdit)
        widthLabel = QtGui.QLabel("Kernel &width")
        self.widthEdit = QtGui.QSpinBox()
        self.widthEdit.setMinimum(2)
        self.widthEdit.setMaximum(200)
        #self.widthEdit.setValue(currentwidth)
        widthLabel.setBuddy(self.widthEdit)
        self.buttons = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel, QtCore.Qt.Horizontal, self)
        self.buttons.accepted.connect(self.accept)
        self.buttons.rejected.connect(self.reject)
        grid = QtGui.QGridLayout(self)
        grid.addWidget(typeLabel,0,0)
        grid.addWidget(self.typeEdit,0,1)
        grid.addWidget(widthLabel,1,0)
        grid.addWidget(self.widthEdit,1,1)
        grid.addWidget(self.buttons, 3, 0)
        self.setLayout(grid)
        self.setWindowTitle("Define Smoothing Kernel")

    def kernelFormat(self):
        kerneltype=int(self.typeEdit.currentIndex())
        width=int(self.widthEdit.value())
        return (kerneltype,width)

    @staticmethod
    def getKernelFormat(parent = None):
        dialog = KernelDialog(parent)
        result = dialog.exec_()
        kerneltype,width = dialog.kernelFormat()
        return (kerneltype,width, result == QtGui.QDialog.Accepted)

#set up a class that holds all the smoothing kernel information
class smoothingkernel:
    """Holds all smoothing kernel info"""
    kerneltype = 0
    width = 10 #points
    kernel=[]
    types = kerneltypes

    def setkernel(self,kerneltype,width):
        if kerneltype == 1: #Epanechnikov
            u=(2.*np.arange(width)/(float(width)-1.))-1. #u spans [-1,1]
            self.kernel = 0.75*(1.-u**2.)
            self.kernel /= np.sum(self.kernel)
            log("Using "+self.types[kerneltype]+" smoothing kernel of width "+str(width))
        elif kerneltype == 0: #Uniform
            self.kernel = np.ones(width)/float(width)
            log("Using "+self.types[kerneltype]+" smoothing kernel of width "+str(width))

    def openKernelDialog(self):
        dkerneltype,dwidth,daccepted = KernelDialog.getKernelFormat()
        if daccepted and (dkerneltype in range(len(kerneltypes))) and (dwidth > 1):
            self.setkernel(dkerneltype,dwidth)

    def __init__(self):
        self.setkernel(0,10)

#set up the kernel object
kernel=smoothingkernel()

#Update display.
def updatelcs(i):
    #Identify which points to include/exclude, up to frame i
    goodmask=np.ones(i+1, np.bool)
    goodmask[bad] = False
    badmask = np.zeros(i+1, np.bool)
    badmask[bad] = True
    targdivided = photresults[:i+1,0,apsizeindex]/photresults[:i+1,compstar,apsizeindex]
    times = np.arange(i+1)#Multiply by exptime for timestamps
    goodfluxnorm=targdivided[goodmask[:i+1]]/np.abs(np.mean(targdivided[goodmask[:i+1]]))
    s1.setData(exptime*times[goodmask[:i+1]],goodfluxnorm)
    #s2.setData(times[badmask[:i]],targdivided[badmask[:i]])
    l1.setData(exptime*times[goodmask[:i+1]],goodfluxnorm)
    #sl1.setData(times[goodmask[:i]],fluxsmoothed[goodmask[:i]])
    #Raw Counts:
    for j,splot in enumerate(rawcounts):
        splot.setData(exptime*times,photresults[:,j,apsizeindex])
    #Seeing:
    for j,splot in enumerate(seeingplots[::-1]):
        splot.setData(exptime*times,seeing[:,j])
    #Sky brightness
    sky.setData(exptime*times,backmed)

def updateftfromtimer():
    updateft(i=framenum)

def updateft(i=framenum):
    oversample=10
    #Oversampling factor
    goodmask=np.ones(i+1, np.bool)
    goodmask[bad] = False
    targdivided = photresults[:i+1,0,apsizeindex]/photresults[:i+1,compstar,apsizeindex]
    goodfluxnorm=targdivided[goodmask[:i+1]]/np.abs(np.mean(targdivided[goodmask[:i+1]]))
    times = np.arange(i+1)#Multiply by exptime for timestamps
    #Fourier Transform and smoothed lc
    if goodmask.sum() > 2: #This all requires at least a few points
        #Only update once per file read-in
        interped = interp1d(exptime*times[goodmask[:i+1]],goodfluxnorm-1.)
        xnew = np.arange(exptime*min(times[goodmask[:i+1]]),exptime*max(times[goodmask[:i+1]]),exptime)
        ynew = interped(xnew)
        #calculate FT
        amp = 2.*np.abs(fft(ynew,n=len(ynew)*oversample))#FFT
        amp /= float(len(ynew))
        freq = fftfreq(len(amp),d=exptime)
        pos = freq>=0 # keep positive part
        ft.setData(1e6*freq[pos],1e3*amp[pos])
        #Smoothed LC
        #Update if there are enough points:
        if len(ynew) > kernel.width:
            fluxsmoothed=np.convolve(ynew,kernel.kernel,mode='same')
            ss1.setData(xnew,fluxsmoothed)

#This timer recomputes the FT and smoothed lc infrequently
timer3 = pg.QtCore.QTimer()
timer3.timeout.connect(updateftfromtimer)

'''
Not implemented yet!
#To keep the GUI from locking up, computationally intensive processes must
#be done in a thread. Set up that thread here:
class Stage2Thread(QtCore.QThread):
    setTime = QtCore.pyqtSignal(int,int)
    iteration = QtCore.pyqtSignal(threading.Event, int)

    def run(self):
        self.setTime.emit(0,300)
        for i in range(300):
            time.sleep(0.05)
            event = threading.Event()
            self.iteration.emit(event, i)
            event.wait()
'''

#Write timestamps
def writetimestamps():
    fpath_csv = os.path.splitext(spefile)[0]+'_timestamps.csv'
    log("Writing absolute timestamps to file "+fpath_csv,2)
    if hasattr(spe, 'footer_metadata'):
        footer_metadata = BeautifulSoup(spe.footer_metadata, "xml")
        trigger_response = footer_metadata.find(name='TriggerResponse').text
        ts_begin = footer_metadata.find(name='TimeStamp', event='ExposureStarted').attrs['absoluteTime']
        dt_begin = dateutil.parser.parse(ts_begin)
        ticks_per_second = int(footer_metadata.find(name='TimeStamp', event='ExposureStarted').attrs['resolution'])
    else:
        log(("No XML footer metadata.\n" +
             "Unknown trigger response.\n" +
             "Using file creation time as absolute timestamp.\n" +
             "Assuming 1E6 ticks per second."),3)
        trigger_response = ""
        dt_begin = dt.datetime.utcfromtimestamp(os.path.getctime(spefile))
        ticks_per_second = 1E6
    idx_metadata_map = {}
    for idx in xrange(spe.get_num_frames()):
        (frame, metadata) = spe.get_frame(idx)
        idx_metadata_map[idx] = metadata
    df_metadata = pd.DataFrame.from_dict(idx_metadata_map, orient='index')
    df_metadata = df_metadata.set_index(keys='frame_tracking_number')
    df_metadata = df_metadata[['time_stamp_exposure_started', 'time_stamp_exposure_ended']].applymap(lambda x: x / ticks_per_second)
    df_metadata = df_metadata[['time_stamp_exposure_started', 'time_stamp_exposure_ended']].applymap(lambda x : dt_begin + dt.timedelta(seconds=x))
    df_metadata[['diff_time_stamp_exposure_started', 'diff_time_stamp_exposure_ended']] = df_metadata - df_metadata.shift()
    log("Trigger response = {tr}".format(tr=trigger_response))
    log("Absolute timestamp = {dt_begin}".format(dt_begin=dt_begin))
    log("Ticks per second = {tps}".format(tps=ticks_per_second))
    df_metadata.head()
    # Write out as CSV to source directory of SPE file.
    df_metadata.to_csv(fpath_csv, quoting=csv.QUOTE_NONNUMERIC)
    saveScreenshot()

## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__': if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'): if len(sys.argv) > 1: defaultdir = sys.argv[1] QtGui.QApplication.instance().exec_()
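#Typical invocation (an assumption based on the argv handling above, not
#enforced anywhere): python ProEMOnline.py /path/to/night/of/data
#The optional argument only seeds defaultdir for the file-open dialogs.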
mit
rajathkumarmp/numpy
numpy/fft/fftpack.py
72
45497
""" Discrete Fourier Transforms Routines in this module: fft(a, n=None, axis=-1) ifft(a, n=None, axis=-1) rfft(a, n=None, axis=-1) irfft(a, n=None, axis=-1) hfft(a, n=None, axis=-1) ihfft(a, n=None, axis=-1) fftn(a, s=None, axes=None) ifftn(a, s=None, axes=None) rfftn(a, s=None, axes=None) irfftn(a, s=None, axes=None) fft2(a, s=None, axes=(-2,-1)) ifft2(a, s=None, axes=(-2, -1)) rfft2(a, s=None, axes=(-2,-1)) irfft2(a, s=None, axes=(-2, -1)) i = inverse transform r = transform of purely real data h = Hermite transform n = n-dimensional transform 2 = 2-dimensional transform (Note: 2D routines are just nD routines with different default behavior.) The underlying code for these functions is an f2c-translated and modified version of the FFTPACK routines. """ from __future__ import division, absolute_import, print_function __all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn', 'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn'] from numpy.core import (array, asarray, zeros, swapaxes, shape, conjugate, take, sqrt) from . import fftpack_lite as fftpack _fft_cache = {} _real_fft_cache = {} def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti, work_function=fftpack.cfftf, fft_cache=_fft_cache): a = asarray(a) if n is None: n = a.shape[axis] if n < 1: raise ValueError("Invalid number of FFT data points (%d) specified." % n) try: # Thread-safety note: We rely on list.pop() here to atomically # retrieve-and-remove a wsave from the cache. This ensures that no # other thread can get the same wsave while we're using it. wsave = fft_cache.setdefault(n, []).pop() except (IndexError): wsave = init_function(n) if a.shape[axis] != n: s = list(a.shape) if s[axis] > n: index = [slice(None)]*len(s) index[axis] = slice(0, n) a = a[index] else: index = [slice(None)]*len(s) index[axis] = slice(0, s[axis]) s[axis] = n z = zeros(s, a.dtype.char) z[index] = a a = z if axis != -1: a = swapaxes(a, axis, -1) r = work_function(a, wsave) if axis != -1: r = swapaxes(r, axis, -1) # As soon as we put wsave back into the cache, another thread could pick it # up and start using it, so we must not do this until after we're # completely done using it ourselves. fft_cache[n].append(wsave) return r def _unitary(norm): if norm not in (None, "ortho"): raise ValueError("Invalid norm value %s, should be None or \"ortho\"." % norm) return norm is not None def fft(a, n=None, axis=-1, norm=None): """ Compute the one-dimensional discrete Fourier Transform. This function computes the one-dimensional *n*-point discrete Fourier Transform (DFT) with the efficient Fast Fourier Transform (FFT) algorithm [CT]. Parameters ---------- a : array_like Input array, can be complex. n : int, optional Length of the transformed axis of the output. If `n` is smaller than the length of the input, the input is cropped. If it is larger, the input is padded with zeros. If `n` is not given, the length of the input along the axis specified by `axis` is used. axis : int, optional Axis over which to compute the FFT. If not given, the last axis is used. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 Normalization mode (see `numpy.fft`). Default is None. Returns ------- out : complex ndarray The truncated or zero-padded input, transformed along the axis indicated by `axis`, or the last one if `axis` is not specified. Raises ------ IndexError if `axes` is larger than the last axis of `a`. See Also -------- numpy.fft : for definition of the DFT and conventions used. ifft : The inverse of `fft`. fft2 : The two-dimensional FFT. 
fftn : The *n*-dimensional FFT. rfftn : The *n*-dimensional FFT of real input. fftfreq : Frequency bins for given FFT parameters. Notes ----- FFT (Fast Fourier Transform) refers to a way the discrete Fourier Transform (DFT) can be calculated efficiently, by using symmetries in the calculated terms. The symmetry is highest when `n` is a power of 2, and the transform is therefore most efficient for these sizes. The DFT is defined, with the conventions used in this implementation, in the documentation for the `numpy.fft` module. References ---------- .. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the machine calculation of complex Fourier series," *Math. Comput.* 19: 297-301. Examples -------- >>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8)) array([ -3.44505240e-16 +1.14383329e-17j, 8.00000000e+00 -5.71092652e-15j, 2.33482938e-16 +1.22460635e-16j, 1.64863782e-15 +1.77635684e-15j, 9.95839695e-17 +2.33482938e-16j, 0.00000000e+00 +1.66837030e-15j, 1.14383329e-17 +1.22460635e-16j, -1.64863782e-15 +1.77635684e-15j]) >>> import matplotlib.pyplot as plt >>> t = np.arange(256) >>> sp = np.fft.fft(np.sin(t)) >>> freq = np.fft.fftfreq(t.shape[-1]) >>> plt.plot(freq, sp.real, freq, sp.imag) [<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>] >>> plt.show() In this example, real input has an FFT which is Hermitian, i.e., symmetric in the real part and anti-symmetric in the imaginary part, as described in the `numpy.fft` documentation. """ a = asarray(a).astype(complex) if n is None: n = a.shape[axis] output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache) if _unitary(norm): output *= 1 / sqrt(n) return output def ifft(a, n=None, axis=-1, norm=None): """ Compute the one-dimensional inverse discrete Fourier Transform. This function computes the inverse of the one-dimensional *n*-point discrete Fourier transform computed by `fft`. In other words, ``ifft(fft(a)) == a`` to within numerical accuracy. For a general description of the algorithm and definitions, see `numpy.fft`. The input should be ordered in the same way as is returned by `fft`, i.e., ``a[0]`` should contain the zero frequency term, ``a[1:n/2+1]`` should contain the positive-frequency terms, and ``a[n/2+1:]`` should contain the negative-frequency terms, in order of decreasingly negative frequency. See `numpy.fft` for details. Parameters ---------- a : array_like Input array, can be complex. n : int, optional Length of the transformed axis of the output. If `n` is smaller than the length of the input, the input is cropped. If it is larger, the input is padded with zeros. If `n` is not given, the length of the input along the axis specified by `axis` is used. See notes about padding issues. axis : int, optional Axis over which to compute the inverse DFT. If not given, the last axis is used. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 Normalization mode (see `numpy.fft`). Default is None. Returns ------- out : complex ndarray The truncated or zero-padded input, transformed along the axis indicated by `axis`, or the last one if `axis` is not specified. Raises ------ IndexError If `axes` is larger than the last axis of `a`. See Also -------- numpy.fft : An introduction, with definitions and general explanations. fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse ifft2 : The two-dimensional inverse FFT. ifftn : The n-dimensional inverse FFT. 
Notes ----- If the input parameter `n` is larger than the size of the input, the input is padded by appending zeros at the end. Even though this is the common approach, it might lead to surprising results. If a different padding is desired, it must be performed before calling `ifft`. Examples -------- >>> np.fft.ifft([0, 4, 0, 0]) array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) Create and plot a band-limited signal with random phases: >>> import matplotlib.pyplot as plt >>> t = np.arange(400) >>> n = np.zeros((400,), dtype=complex) >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,))) >>> s = np.fft.ifft(n) >>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--') [<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>] >>> plt.legend(('real', 'imaginary')) <matplotlib.legend.Legend object at 0x...> >>> plt.show() """ # The copy may be required for multithreading. a = array(a, copy=True, dtype=complex) if n is None: n = a.shape[axis] unitary = _unitary(norm) output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache) return output * (1 / (sqrt(n) if unitary else n)) def rfft(a, n=None, axis=-1, norm=None): """ Compute the one-dimensional discrete Fourier Transform for real input. This function computes the one-dimensional *n*-point discrete Fourier Transform (DFT) of a real-valued array by means of an efficient algorithm called the Fast Fourier Transform (FFT). Parameters ---------- a : array_like Input array n : int, optional Number of points along transformation axis in the input to use. If `n` is smaller than the length of the input, the input is cropped. If it is larger, the input is padded with zeros. If `n` is not given, the length of the input along the axis specified by `axis` is used. axis : int, optional Axis over which to compute the FFT. If not given, the last axis is used. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 Normalization mode (see `numpy.fft`). Default is None. Returns ------- out : complex ndarray The truncated or zero-padded input, transformed along the axis indicated by `axis`, or the last one if `axis` is not specified. If `n` is even, the length of the transformed axis is ``(n/2)+1``. If `n` is odd, the length is ``(n+1)/2``. Raises ------ IndexError If `axis` is larger than the last axis of `a`. See Also -------- numpy.fft : For definition of the DFT and conventions used. irfft : The inverse of `rfft`. fft : The one-dimensional FFT of general (complex) input. fftn : The *n*-dimensional FFT. rfftn : The *n*-dimensional FFT of real input. Notes ----- When the DFT is computed for purely real input, the output is Hermitian-symmetric, i.e. the negative frequency terms are just the complex conjugates of the corresponding positive-frequency terms, and the negative-frequency terms are therefore redundant. This function does not compute the negative frequency terms, and the length of the transformed axis of the output is therefore ``n//2 + 1``. When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains the zero-frequency term 0*fs, which is real due to Hermitian symmetry. If `n` is even, ``A[-1]`` contains the term representing both positive and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains the largest positive frequency (fs/2*(n-1)/n), and is complex in the general case. If the input `a` contains an imaginary part, it is silently discarded. 
Examples -------- >>> np.fft.fft([0, 1, 0, 0]) array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) >>> np.fft.rfft([0, 1, 0, 0]) array([ 1.+0.j, 0.-1.j, -1.+0.j]) Notice how the final element of the `fft` output is the complex conjugate of the second element, for real input. For `rfft`, this symmetry is exploited to compute only the non-negative frequency terms. """ # The copy may be required for multithreading. a = array(a, copy=True, dtype=float) output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf, _real_fft_cache) if _unitary(norm): output *= 1 / sqrt(a.shape[axis]) return output def irfft(a, n=None, axis=-1, norm=None): """ Compute the inverse of the n-point DFT for real input. This function computes the inverse of the one-dimensional *n*-point discrete Fourier Transform of real input computed by `rfft`. In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical accuracy. (See Notes below for why ``len(a)`` is necessary here.) The input is expected to be in the form returned by `rfft`, i.e. the real zero-frequency term followed by the complex positive frequency terms in order of increasing frequency. Since the discrete Fourier Transform of real input is Hermitian-symmetric, the negative frequency terms are taken to be the complex conjugates of the corresponding positive frequency terms. Parameters ---------- a : array_like The input array. n : int, optional Length of the transformed axis of the output. For `n` output points, ``n//2+1`` input points are necessary. If the input is longer than this, it is cropped. If it is shorter than this, it is padded with zeros. If `n` is not given, it is determined from the length of the input along the axis specified by `axis`. axis : int, optional Axis over which to compute the inverse FFT. If not given, the last axis is used. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 Normalization mode (see `numpy.fft`). Default is None. Returns ------- out : ndarray The truncated or zero-padded input, transformed along the axis indicated by `axis`, or the last one if `axis` is not specified. The length of the transformed axis is `n`, or, if `n` is not given, ``2*(m-1)`` where ``m`` is the length of the transformed axis of the input. To get an odd number of output points, `n` must be specified. Raises ------ IndexError If `axis` is larger than the last axis of `a`. See Also -------- numpy.fft : For definition of the DFT and conventions used. rfft : The one-dimensional FFT of real input, of which `irfft` is inverse. fft : The one-dimensional FFT. irfft2 : The inverse of the two-dimensional FFT of real input. irfftn : The inverse of the *n*-dimensional FFT of real input. Notes ----- Returns the real valued `n`-point inverse discrete Fourier transform of `a`, where `a` contains the non-negative frequency terms of a Hermitian-symmetric sequence. `n` is the length of the result, not the input. If you specify an `n` such that `a` must be zero-padded or truncated, the extra/removed values will be added/removed at high frequencies. One can thus resample a series to `m` points via Fourier interpolation by: ``a_resamp = irfft(rfft(a), m)``. Examples -------- >>> np.fft.ifft([1, -1j, -1, 1j]) array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) >>> np.fft.irfft([1, -1j, -1]) array([ 0., 1., 0., 0.]) Notice how the last term in the input to the ordinary `ifft` is the complex conjugate of the second term, and the output has zero imaginary part everywhere. When calling `irfft`, the negative frequencies are not specified, and the output array is purely real. 
""" # The copy may be required for multithreading. a = array(a, copy=True, dtype=complex) if n is None: n = (a.shape[axis] - 1) * 2 unitary = _unitary(norm) output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb, _real_fft_cache) return output * (1 / (sqrt(n) if unitary else n)) def hfft(a, n=None, axis=-1, norm=None): """ Compute the FFT of a signal which has Hermitian symmetry (real spectrum). Parameters ---------- a : array_like The input array. n : int, optional Length of the transformed axis of the output. For `n` output points, ``n//2+1`` input points are necessary. If the input is longer than this, it is cropped. If it is shorter than this, it is padded with zeros. If `n` is not given, it is determined from the length of the input along the axis specified by `axis`. axis : int, optional Axis over which to compute the FFT. If not given, the last axis is used. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 Normalization mode (see `numpy.fft`). Default is None. Returns ------- out : ndarray The truncated or zero-padded input, transformed along the axis indicated by `axis`, or the last one if `axis` is not specified. The length of the transformed axis is `n`, or, if `n` is not given, ``2*(m-1)`` where ``m`` is the length of the transformed axis of the input. To get an odd number of output points, `n` must be specified. Raises ------ IndexError If `axis` is larger than the last axis of `a`. See also -------- rfft : Compute the one-dimensional FFT for real input. ihfft : The inverse of `hfft`. Notes ----- `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the opposite case: here the signal has Hermitian symmetry in the time domain and is real in the frequency domain. So here it's `hfft` for which you must supply the length of the result if it is to be odd: ``ihfft(hfft(a), len(a)) == a``, within numerical accuracy. Examples -------- >>> signal = np.array([1, 2, 3, 4, 3, 2]) >>> np.fft.fft(signal) array([ 15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j]) >>> np.fft.hfft(signal[:4]) # Input first half of signal array([ 15., -4., 0., -1., 0., -4.]) >>> np.fft.hfft(signal, 6) # Input entire signal and truncate array([ 15., -4., 0., -1., 0., -4.]) >>> signal = np.array([[1, 1.j], [-1.j, 2]]) >>> np.conj(signal.T) - signal # check Hermitian symmetry array([[ 0.-0.j, 0.+0.j], [ 0.+0.j, 0.-0.j]]) >>> freq_spectrum = np.fft.hfft(signal) >>> freq_spectrum array([[ 1., 1.], [ 2., -2.]]) """ # The copy may be required for multithreading. a = array(a, copy=True, dtype=complex) if n is None: n = (a.shape[axis] - 1) * 2 unitary = _unitary(norm) return irfft(conjugate(a), n, axis) * (sqrt(n) if unitary else n) def ihfft(a, n=None, axis=-1, norm=None): """ Compute the inverse FFT of a signal which has Hermitian symmetry. Parameters ---------- a : array_like Input array. n : int, optional Length of the inverse FFT. Number of points along transformation axis in the input to use. If `n` is smaller than the length of the input, the input is cropped. If it is larger, the input is padded with zeros. If `n` is not given, the length of the input along the axis specified by `axis` is used. axis : int, optional Axis over which to compute the inverse FFT. If not given, the last axis is used. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 Normalization mode (see `numpy.fft`). Default is None. Returns ------- out : complex ndarray The truncated or zero-padded input, transformed along the axis indicated by `axis`, or the last one if `axis` is not specified. 
If `n` is even, the length of the transformed axis is ``(n/2)+1``. If `n` is odd, the length is ``(n+1)/2``. See also -------- hfft, irfft Notes ----- `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the opposite case: here the signal has Hermitian symmetry in the time domain and is real in the frequency domain. So here it's `hfft` for which you must supply the length of the result if it is to be odd: ``ihfft(hfft(a), len(a)) == a``, within numerical accuracy. Examples -------- >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4]) >>> np.fft.ifft(spectrum) array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j]) >>> np.fft.ihfft(spectrum) array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j]) """ # The copy may be required for multithreading. a = array(a, copy=True, dtype=float) if n is None: n = a.shape[axis] unitary = _unitary(norm) output = conjugate(rfft(a, n, axis)) return output * (1 / (sqrt(n) if unitary else n)) def _cook_nd_args(a, s=None, axes=None, invreal=0): if s is None: shapeless = 1 if axes is None: s = list(a.shape) else: s = take(a.shape, axes) else: shapeless = 0 s = list(s) if axes is None: axes = list(range(-len(s), 0)) if len(s) != len(axes): raise ValueError("Shape and axes have different lengths.") if invreal and shapeless: s[-1] = (a.shape[axes[-1]] - 1) * 2 return s, axes def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None): a = asarray(a) s, axes = _cook_nd_args(a, s, axes) itl = list(range(len(axes))) itl.reverse() for ii in itl: a = function(a, n=s[ii], axis=axes[ii], norm=norm) return a def fftn(a, s=None, axes=None, norm=None): """ Compute the N-dimensional discrete Fourier Transform. This function computes the *N*-dimensional discrete Fourier Transform over any number of axes in an *M*-dimensional array by means of the Fast Fourier Transform (FFT). Parameters ---------- a : array_like Input array, can be complex. s : sequence of ints, optional Shape (length of each transformed axis) of the output (`s[0]` refers to axis 0, `s[1]` to axis 1, etc.). This corresponds to `n` for `fft(x, n)`. Along any axis, if the given shape is smaller than that of the input, the input is cropped. If it is larger, the input is padded with zeros. if `s` is not given, the shape of the input along the axes specified by `axes` is used. axes : sequence of ints, optional Axes over which to compute the FFT. If not given, the last ``len(s)`` axes are used, or all axes if `s` is also not specified. Repeated indices in `axes` means that the transform over that axis is performed multiple times. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 Normalization mode (see `numpy.fft`). Default is None. Returns ------- out : complex ndarray The truncated or zero-padded input, transformed along the axes indicated by `axes`, or by a combination of `s` and `a`, as explained in the parameters section above. Raises ------ ValueError If `s` and `axes` have different length. IndexError If an element of `axes` is larger than than the number of axes of `a`. See Also -------- numpy.fft : Overall view of discrete Fourier transforms, with definitions and conventions used. ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT. fft : The one-dimensional FFT, with definitions and conventions used. rfftn : The *n*-dimensional FFT of real input. fft2 : The two-dimensional FFT. 
fftshift : Shifts zero-frequency terms to centre of array Notes ----- The output, analogously to `fft`, contains the term for zero frequency in the low-order corner of all axes, the positive frequency terms in the first half of all axes, the term for the Nyquist frequency in the middle of all axes and the negative frequency terms in the second half of all axes, in order of decreasingly negative frequency. See `numpy.fft` for details, definitions and conventions used. Examples -------- >>> a = np.mgrid[:3, :3, :3][0] >>> np.fft.fftn(a, axes=(1, 2)) array([[[ 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j]], [[ 9.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j]], [[ 18.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j]]]) >>> np.fft.fftn(a, (2, 2), axes=(0, 1)) array([[[ 2.+0.j, 2.+0.j, 2.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j]], [[-2.+0.j, -2.+0.j, -2.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j]]]) >>> import matplotlib.pyplot as plt >>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12, ... 2 * np.pi * np.arange(200) / 34) >>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape) >>> FS = np.fft.fftn(S) >>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2)) <matplotlib.image.AxesImage object at 0x...> >>> plt.show() """ return _raw_fftnd(a, s, axes, fft, norm) def ifftn(a, s=None, axes=None, norm=None): """ Compute the N-dimensional inverse discrete Fourier Transform. This function computes the inverse of the N-dimensional discrete Fourier Transform over any number of axes in an M-dimensional array by means of the Fast Fourier Transform (FFT). In other words, ``ifftn(fftn(a)) == a`` to within numerical accuracy. For a description of the definitions and conventions used, see `numpy.fft`. The input, analogously to `ifft`, should be ordered in the same way as is returned by `fftn`, i.e. it should have the term for zero frequency in all axes in the low-order corner, the positive frequency terms in the first half of all axes, the term for the Nyquist frequency in the middle of all axes and the negative frequency terms in the second half of all axes, in order of decreasingly negative frequency. Parameters ---------- a : array_like Input array, can be complex. s : sequence of ints, optional Shape (length of each transformed axis) of the output (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). This corresponds to ``n`` for ``ifft(x, n)``. Along any axis, if the given shape is smaller than that of the input, the input is cropped. If it is larger, the input is padded with zeros. if `s` is not given, the shape of the input along the axes specified by `axes` is used. See notes for issue on `ifft` zero padding. axes : sequence of ints, optional Axes over which to compute the IFFT. If not given, the last ``len(s)`` axes are used, or all axes if `s` is also not specified. Repeated indices in `axes` means that the inverse transform over that axis is performed multiple times. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 Normalization mode (see `numpy.fft`). Default is None. Returns ------- out : complex ndarray The truncated or zero-padded input, transformed along the axes indicated by `axes`, or by a combination of `s` or `a`, as explained in the parameters section above. Raises ------ ValueError If `s` and `axes` have different length. IndexError If an element of `axes` is larger than than the number of axes of `a`. 
See Also -------- numpy.fft : Overall view of discrete Fourier transforms, with definitions and conventions used. fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse. ifft : The one-dimensional inverse FFT. ifft2 : The two-dimensional inverse FFT. ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning of array. Notes ----- See `numpy.fft` for definitions and conventions used. Zero-padding, analogously with `ifft`, is performed by appending zeros to the input along the specified dimension. Although this is the common approach, it might lead to surprising results. If another form of zero padding is desired, it must be performed before `ifftn` is called. Examples -------- >>> a = np.eye(4) >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,)) array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]]) Create and plot an image with band-limited frequency content: >>> import matplotlib.pyplot as plt >>> n = np.zeros((200,200), dtype=complex) >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20))) >>> im = np.fft.ifftn(n).real >>> plt.imshow(im) <matplotlib.image.AxesImage object at 0x...> >>> plt.show() """ return _raw_fftnd(a, s, axes, ifft, norm) def fft2(a, s=None, axes=(-2, -1), norm=None): """ Compute the 2-dimensional discrete Fourier Transform This function computes the *n*-dimensional discrete Fourier Transform over any axes in an *M*-dimensional array by means of the Fast Fourier Transform (FFT). By default, the transform is computed over the last two axes of the input array, i.e., a 2-dimensional FFT. Parameters ---------- a : array_like Input array, can be complex s : sequence of ints, optional Shape (length of each transformed axis) of the output (`s[0]` refers to axis 0, `s[1]` to axis 1, etc.). This corresponds to `n` for `fft(x, n)`. Along each axis, if the given shape is smaller than that of the input, the input is cropped. If it is larger, the input is padded with zeros. if `s` is not given, the shape of the input along the axes specified by `axes` is used. axes : sequence of ints, optional Axes over which to compute the FFT. If not given, the last two axes are used. A repeated index in `axes` means the transform over that axis is performed multiple times. A one-element sequence means that a one-dimensional FFT is performed. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 Normalization mode (see `numpy.fft`). Default is None. Returns ------- out : complex ndarray The truncated or zero-padded input, transformed along the axes indicated by `axes`, or the last two axes if `axes` is not given. Raises ------ ValueError If `s` and `axes` have different length, or `axes` not given and ``len(s) != 2``. IndexError If an element of `axes` is larger than than the number of axes of `a`. See Also -------- numpy.fft : Overall view of discrete Fourier transforms, with definitions and conventions used. ifft2 : The inverse two-dimensional FFT. fft : The one-dimensional FFT. fftn : The *n*-dimensional FFT. fftshift : Shifts zero-frequency terms to the center of the array. For two-dimensional input, swaps first and third quadrants, and second and fourth quadrants. Notes ----- `fft2` is just `fftn` with a different default for `axes`. 
The output, analogously to `fft`, contains the term for zero frequency in the low-order corner of the transformed axes, the positive frequency terms in the first half of these axes, the term for the Nyquist frequency in the middle of the axes and the negative frequency terms in the second half of the axes, in order of decreasingly negative frequency. See `fftn` for details and a plotting example, and `numpy.fft` for definitions and conventions used. Examples -------- >>> a = np.mgrid[:5, :5][0] >>> np.fft.fft2(a) array([[ 50.0 +0.j , 0.0 +0.j , 0.0 +0.j , 0.0 +0.j , 0.0 +0.j ], [-12.5+17.20477401j, 0.0 +0.j , 0.0 +0.j , 0.0 +0.j , 0.0 +0.j ], [-12.5 +4.0614962j , 0.0 +0.j , 0.0 +0.j , 0.0 +0.j , 0.0 +0.j ], [-12.5 -4.0614962j , 0.0 +0.j , 0.0 +0.j , 0.0 +0.j , 0.0 +0.j ], [-12.5-17.20477401j, 0.0 +0.j , 0.0 +0.j , 0.0 +0.j , 0.0 +0.j ]]) """ return _raw_fftnd(a, s, axes, fft, norm) def ifft2(a, s=None, axes=(-2, -1), norm=None): """ Compute the 2-dimensional inverse discrete Fourier Transform. This function computes the inverse of the 2-dimensional discrete Fourier Transform over any number of axes in an M-dimensional array by means of the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a`` to within numerical accuracy. By default, the inverse transform is computed over the last two axes of the input array. The input, analogously to `ifft`, should be ordered in the same way as is returned by `fft2`, i.e. it should have the term for zero frequency in the low-order corner of the two axes, the positive frequency terms in the first half of these axes, the term for the Nyquist frequency in the middle of the axes and the negative frequency terms in the second half of both axes, in order of decreasingly negative frequency. Parameters ---------- a : array_like Input array, can be complex. s : sequence of ints, optional Shape (length of each axis) of the output (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``. Along each axis, if the given shape is smaller than that of the input, the input is cropped. If it is larger, the input is padded with zeros. if `s` is not given, the shape of the input along the axes specified by `axes` is used. See notes for issue on `ifft` zero padding. axes : sequence of ints, optional Axes over which to compute the FFT. If not given, the last two axes are used. A repeated index in `axes` means the transform over that axis is performed multiple times. A one-element sequence means that a one-dimensional FFT is performed. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 Normalization mode (see `numpy.fft`). Default is None. Returns ------- out : complex ndarray The truncated or zero-padded input, transformed along the axes indicated by `axes`, or the last two axes if `axes` is not given. Raises ------ ValueError If `s` and `axes` have different length, or `axes` not given and ``len(s) != 2``. IndexError If an element of `axes` is larger than than the number of axes of `a`. See Also -------- numpy.fft : Overall view of discrete Fourier transforms, with definitions and conventions used. fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse. ifftn : The inverse of the *n*-dimensional FFT. fft : The one-dimensional FFT. ifft : The one-dimensional inverse FFT. Notes ----- `ifft2` is just `ifftn` with a different default for `axes`. See `ifftn` for details and a plotting example, and `numpy.fft` for definition and conventions used. 
    Zero-padding, analogously with `ifft`, is performed by appending zeros to
    the input along the specified dimension.  Although this is the common
    approach, it might lead to surprising results.  If another form of zero
    padding is desired, it must be performed before `ifft2` is called.

    Examples
    --------
    >>> a = 4 * np.eye(4)
    >>> np.fft.ifft2(a)
    array([[ 1.+0.j,  0.+0.j,  0.+0.j,  0.+0.j],
           [ 0.+0.j,  0.+0.j,  0.+0.j,  1.+0.j],
           [ 0.+0.j,  0.+0.j,  1.+0.j,  0.+0.j],
           [ 0.+0.j,  1.+0.j,  0.+0.j,  0.+0.j]])

    """
    return _raw_fftnd(a, s, axes, ifft, norm)


def rfftn(a, s=None, axes=None, norm=None):
    """
    Compute the N-dimensional discrete Fourier Transform for real input.

    This function computes the N-dimensional discrete Fourier Transform over
    any number of axes in an M-dimensional real array by means of the Fast
    Fourier Transform (FFT).  By default, all axes are transformed, with the
    real transform performed over the last axis, while the remaining
    transforms are complex.

    Parameters
    ----------
    a : array_like
        Input array, taken to be real.
    s : sequence of ints, optional
        Shape (length along each transformed axis) to use from the input.
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
        The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
        for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
        Along any axis, if the given shape is smaller than that of the input,
        the input is cropped.  If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
        by `axes` is used.
    axes : sequence of ints, optional
        Axes over which to compute the FFT.  If not given, the last ``len(s)``
        axes are used, or all axes if `s` is also not specified.
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0

        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` and `a`,
        as explained in the parameters section above.
        The length of the last axis transformed will be ``s[-1]//2+1``,
        while the remaining transformed axes will have lengths according to
        `s`, or unchanged from the input.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
         of real input.
    fft : The one-dimensional FFT, with definitions and conventions used.
    rfft : The one-dimensional FFT of real input.
    fftn : The n-dimensional FFT.
    rfft2 : The two-dimensional FFT of real input.

    Notes
    -----
    The transform for real input is performed over the last transformation
    axis, as by `rfft`, then the transform over the remaining axes is
    performed as by `fftn`.  The order of the output is as for `rfft` for the
    final transformation axis, and as for `fftn` for the remaining
    transformation axes.

    See `fft` for details, definitions and conventions used.

    Examples
    --------
    >>> a = np.ones((2, 2, 2))
    >>> np.fft.rfftn(a)
    array([[[ 8.+0.j,  0.+0.j],
            [ 0.+0.j,  0.+0.j]],
           [[ 0.+0.j,  0.+0.j],
            [ 0.+0.j,  0.+0.j]]])

    >>> np.fft.rfftn(a, axes=(2, 0))
    array([[[ 4.+0.j,  0.+0.j],
            [ 4.+0.j,  0.+0.j]],
           [[ 0.+0.j,  0.+0.j],
            [ 0.+0.j,  0.+0.j]]])

    """
    # The copy may be required for multithreading.
    a = array(a, copy=True, dtype=float)
    s, axes = _cook_nd_args(a, s, axes)
    a = rfft(a, s[-1], axes[-1], norm)
    for ii in range(len(axes)-1):
        a = fft(a, s[ii], axes[ii], norm)
    return a


def rfft2(a, s=None, axes=(-2, -1), norm=None):
    """
    Compute the 2-dimensional FFT of a real array.

    Parameters
    ----------
    a : array
        Input array, taken to be real.
    s : sequence of ints, optional
        Shape of the FFT.
    axes : sequence of ints, optional
        Axes over which to compute the FFT.
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0

        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : ndarray
        The result of the real 2-D FFT.

    See Also
    --------
    rfftn : Compute the N-dimensional discrete Fourier Transform for real
            input.

    Notes
    -----
    This is really just `rfftn` with different default behavior.
    For more details see `rfftn`.

    """
    return rfftn(a, s, axes, norm)


def irfftn(a, s=None, axes=None, norm=None):
    """
    Compute the inverse of the N-dimensional FFT of real input.

    This function computes the inverse of the N-dimensional discrete
    Fourier Transform for real input over any number of axes in an
    M-dimensional array by means of the Fast Fourier Transform (FFT).  In
    other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
    accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
    and for the same reason.)

    The input should be ordered in the same way as is returned by `rfftn`,
    i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
    along all the other axes.

    Parameters
    ----------
    a : array_like
        Input array.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
        number of input points used along this axis, except for the last axis,
        where ``s[-1]//2+1`` points of the input are used.
        Along any axis, if the shape indicated by `s` is smaller than that of
        the input, the input is cropped.  If it is larger, the input is padded
        with zeros. If `s` is not given, the shape of the input along the
        axes specified by `axes` is used.
    axes : sequence of ints, optional
        Axes over which to compute the inverse FFT. If not given, the last
        `len(s)` axes are used, or all axes if `s` is also not specified.
        Repeated indices in `axes` mean that the inverse transform over that
        axis is performed multiple times.
    norm : {None, "ortho"}, optional
        .. versionadded:: 1.10.0

        Normalization mode (see `numpy.fft`). Default is None.

    Returns
    -------
    out : ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` or `a`,
        as explained in the parameters section above.
        The length of each transformed axis is as given by the corresponding
        element of `s`, or the length of the input in every axis except for the
        last one if `s` is not given.  In the final transformed axis the length
        of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
        length of the final transformed axis of the input.  To get an odd
        number of output points in the final axis, `s` must be specified.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.

    See Also
    --------
    rfftn : The forward n-dimensional FFT of real input,
            of which `irfftn` is the inverse.
    fft : The one-dimensional FFT, with definitions and conventions used.
    irfft : The inverse of the one-dimensional FFT of real input.
    irfft2 : The inverse of the two-dimensional FFT of real input.
Notes ----- See `fft` for definitions and conventions used. See `rfft` for definitions and conventions used for real input. Examples -------- >>> a = np.zeros((3, 2, 2)) >>> a[0, 0, 0] = 3 * 2 * 2 >>> np.fft.irfftn(a) array([[[ 1., 1.], [ 1., 1.]], [[ 1., 1.], [ 1., 1.]], [[ 1., 1.], [ 1., 1.]]]) """ # The copy may be required for multithreading. a = array(a, copy=True, dtype=complex) s, axes = _cook_nd_args(a, s, axes, invreal=1) for ii in range(len(axes)-1): a = ifft(a, s[ii], axes[ii], norm) a = irfft(a, s[-1], axes[-1], norm) return a def irfft2(a, s=None, axes=(-2, -1), norm=None): """ Compute the 2-dimensional inverse FFT of a real array. Parameters ---------- a : array_like The input array s : sequence of ints, optional Shape of the inverse FFT. axes : sequence of ints, optional The axes over which to compute the inverse fft. Default is the last two axes. norm : {None, "ortho"}, optional .. versionadded:: 1.10.0 Normalization mode (see `numpy.fft`). Default is None. Returns ------- out : ndarray The result of the inverse real 2-D FFT. See Also -------- irfftn : Compute the inverse of the N-dimensional FFT of real input. Notes ----- This is really `irfftn` with different defaults. For more details see `irfftn`. """ return irfftn(a, s, axes, norm)
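A quick round-trip sketch of the shape caveat spelled out in the `irfftn` docstring above: `s` must be passed explicitly to recover an odd-length final axis. This is an illustration, not part of the file, and assumes the functions are used through the public `np.fft` namespace.

import numpy as np

a = np.random.rand(5, 7)                 # odd-length last axis
spec = np.fft.rfftn(a)                   # last axis is halved: shape (5, 4)
back = np.fft.irfftn(spec, s=a.shape)    # pass s to recover the odd length
assert np.allclose(a, back)
print(np.fft.irfftn(spec).shape)         # (5, 6): default assumes 2*(4-1)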
bsd-3-clause
cactusbin/nyt
matplotlib/examples/axes_grid/demo_curvelinear_grid2.py
15
1839
import numpy as np
#from matplotlib.path import Path

import matplotlib.pyplot as plt

from mpl_toolkits.axes_grid.grid_helper_curvelinear import GridHelperCurveLinear
from mpl_toolkits.axes_grid.axislines import Subplot

import mpl_toolkits.axes_grid.angle_helper as angle_helper

def curvelinear_test1(fig):
    """
    Grid for a custom transform.
    """

    def tr(x, y):
        sgn = np.sign(x)
        x, y = np.abs(np.asarray(x)), np.asarray(y)
        return sgn*x**.5, y

    def inv_tr(x, y):
        sgn = np.sign(x)
        x, y = np.asarray(x), np.asarray(y)
        return sgn*x**2, y

    extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
                                                     lon_cycle = None,
                                                     lat_cycle = None,
                                                     lon_minmax = None, #(0, np.inf),
                                                     lat_minmax = None,
                                                     )

    grid_helper = GridHelperCurveLinear((tr, inv_tr),
                                        extreme_finder=extreme_finder)

    ax1 = Subplot(fig, 111, grid_helper=grid_helper)
    # ax1 will have ticks and gridlines defined by the given
    # transform (+ transData of the Axes). Note that the transform of
    # the Axes itself (i.e., transData) is not affected by the given
    # transform.

    fig.add_subplot(ax1)

    ax1.imshow(np.arange(25).reshape(5,5),
               vmax = 50, cmap=plt.cm.gray_r,
               interpolation="nearest",
               origin="lower")

    # tick density
    grid_helper.grid_finder.grid_locator1._nbins = 6
    grid_helper.grid_finder.grid_locator2._nbins = 6


if 1:
    fig = plt.figure(1, figsize=(7, 4))
    fig.clf()

    curvelinear_test1(fig)
    plt.show()
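The custom (tr, inv_tr) pair above is only usable by GridHelperCurveLinear if the two functions really are inverses of each other. A minimal self-contained check (illustrative only, duplicating the two functions so it runs on its own):

import numpy as np

def tr(x, y):
    sgn = np.sign(x)
    x, y = np.abs(np.asarray(x)), np.asarray(y)
    return sgn*x**.5, y

def inv_tr(x, y):
    sgn = np.sign(x)
    x, y = np.asarray(x), np.asarray(y)
    return sgn*x**2, y

x = np.array([-4.0, -1.0, 0.0, 1.0, 4.0])
y = np.zeros_like(x)
u, v = tr(x, y)                 # signed square root, e.g. -4 -> -2
xb, yb = inv_tr(u, v)           # signed square, e.g. -2 -> -4
assert np.allclose(x, xb) and np.allclose(y, yb)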
unlicense
shear/rppy
test_ruger_hti.py
2
2682
# -*- coding: utf-8 -*-
"""
Created on Mon Aug  3 17:24:04 2015

@author: Sean
"""
import rppy
import numpy as np
import matplotlib.pyplot as plt

p1 = 2000
vp1 = 3000
vs1 = 1500
e1 = 0.0
d1 = 0.0
y1 = 0.0

p2 = 2200
vp2 = 4000
vs2 = 2000
y2 = 0.1
d2 = 0.1
e2 = 0.1

theta = 30
phi = np.arange(0, 90, 1)
phit = np.array([1.2500, 4.9342, 8.6184, 11.842, 15.526, 19.211, 22.664,
                 25.888, 28.421, 30.724, 34.638, 38.092, 41.546, 45.461,
                 49.375, 53.289, 56.974, 60.888, 65.493, 69.408, 73.783,
                 79.079, 84.375, 89.211])
exp = np.array([0.19816, 0.19816, 0.19678, 0.19539, 0.19263, 0.19056,
                0.18711, 0.18365, 0.18020, 0.17813, 0.17329, 0.16845,
                0.16431, 0.15878, 0.15326, 0.14842, 0.14359, 0.13875,
                0.13391, 0.12977, 0.12632, 0.12286, 0.12079, 0.12010])

Rpp = np.zeros(np.shape(phi))
Rpo = np.zeros(np.shape(phi))
Rpk = np.zeros(np.shape(phi))

for ind, phiv in enumerate(phi):
    Rpp[ind] = rppy.reflectivity.ruger_hti(vp1, vs1, p1, e1, d1, y1,
                                           vp2, vs2, p2, e2, d2, y2,
                                           theta, phiv)
    Rpo[ind] = rppy.reflectivity.exact_ortho(rppy.reflectivity.Cij(vp1, vs1, p1, 0, 0, 0, e1, d1, y1, 0), p1,
                                             rppy.reflectivity.Cij(vp2, vs2, p2, 0, 0, 0, e2, d2, y2, 0), p2,
                                             0, 0, phiv, theta)
    # the lower medium's anisotropy parameters (e2, d2, y2) are used for
    # every method so the three curves are directly comparable
    Rpk[ind] = rppy.reflectivity.vavrycuk_psencik_hti(vp1, vs1, p1, e1, d1, y1,
                                                      vp2, vs2, p2, e2, d2, y2,
                                                      phiv, theta)

plt.figure(1)
plt.plot(phi, Rpp, phi, Rpo, phi, Rpk)
plt.show()

theta = np.arange(0, 60, 1)
phi = 45

Rpp = np.zeros(np.shape(theta))
Rpo = np.zeros(np.shape(theta))
Rpk = np.zeros(np.shape(theta))
Rpa = np.zeros(np.shape(theta))

for ind, thetav in enumerate(theta):
    Rpp[ind] = rppy.reflectivity.ruger_hti(vp1, vs1, p1, e1, d1, y1,
                                           vp2, vs2, p2, e2, d2, y2,
                                           thetav, phi)
    Rpk[ind] = rppy.reflectivity.vavrycuk_psencik_hti(vp1, vs1, p1, e1, d1, y1,
                                                      vp2, vs2, p2, e2, d2, y2,
                                                      phi, thetav)
Rpo = rppy.reflectivity.zoeppritz(vp1, vs1, p1, vp2, vs2, p2, theta)
Rpa = rppy.reflectivity.aki_richards(vp1, vs1, p1, vp2, vs2, p2, theta)

plt.figure(2)
plt.plot(theta, Rpp, theta, Rpo, theta, Rpk, theta, Rpa)
plt.xlim([0, 60])
plt.ylim([0.125, 0.275])
plt.legend(['Ruger', 'Zoe', 'Vavrycuk', 'A-R'])
plt.show()
bsd-2-clause
andnovar/ggplot
ggplot/scales/scale_colour_gradient.py
12
2017
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)
from .scale import scale
from copy import deepcopy
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap, rgb2hex, ColorConverter


def colors_at_breaks(cmap, breaks=[0, 0.25, 0.5, 0.75, 1.]):
    return [rgb2hex(cmap(bb)[:3]) for bb in breaks]


class scale_colour_gradient(scale):
    """
    Specify a two- or three-point gradient.

    Parameters
    ----------
    name : Name of an existing gradient scheme
    limits : List of the upper and lower bounds of the gradient
    low : Colour at the lower bound of the gradient
    mid : Colour at the middle of the gradient
    high : Colour at the upper bound of the gradient

    Examples
    --------
    >>> from ggplot import *
    >>> diamonds_premium = diamonds[diamonds.cut=='Premium']
    >>> gg = ggplot(diamonds_premium, aes(x='depth', y='carat', colour='price')) + \\
    ...     geom_point()
    >>> print(gg + scale_colour_gradient(low='red', mid='white', high='blue', limits=[4000,6000]) + \\
    ...     ggtitle('With red-blue gradient'))
    >>> print(gg + ggtitle('With standard gradient'))
    """
    VALID_SCALES = ['name', 'limits', 'low', 'mid', 'high']

    def __radd__(self, gg):
        gg = deepcopy(gg)
        if self.name:
            gg.color_label = self.name
        if not (self.limits is None):
            gg.color_limits = self.limits
        color_spectrum = []
        if self.low:
            color_spectrum.append(self.low)
        if self.mid:
            color_spectrum.append(self.mid)
        if self.high:
            color_spectrum.append(self.high)

        if self.low and self.high:
            gradient2n = LinearSegmentedColormap.from_list('gradient2n', color_spectrum)
            plt.cm.register_cmap(cmap=gradient2n)
            # add them back to ggplot
            gg.color_scale = colors_at_breaks(gradient2n)
            gg.colormap = gradient2n

        return gg
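What `__radd__` above ultimately does is delegate to matplotlib's colormap machinery: build a colormap from the low/mid/high anchors, then sample it at fixed breaks. A minimal sketch of that mechanism, independent of ggplot (the 'demo' name is illustrative):

from matplotlib.colors import LinearSegmentedColormap, rgb2hex

cmap = LinearSegmentedColormap.from_list('demo', ['red', 'white', 'blue'])
# five hex colours sampled along the red-white-blue gradient
print([rgb2hex(cmap(b)[:3]) for b in [0, 0.25, 0.5, 0.75, 1.]])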
bsd-2-clause
felipessalvatore/CNNexample
src/tunning/fc.py
1
2217
import os import sys from random import randint import numpy as np import inspect import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe()))) parentdir = os.path.dirname(currentdir) sys.path.insert(0, parentdir) from util import run_test, get_data_4d, get_time from CNN import CNNModel, train_model, check_valid from DataHolder import DataHolder from Config import Config train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels = get_data_4d() my_dataholder = DataHolder(train_dataset, train_labels, valid_dataset, valid_labels, test_dataset, test_labels) FC = [5, 10, 15, 20, 30, 40, 60, 200] number_of_exp = len(FC) results = [] duration = [] info = [] for i, fc in enumerate(FC): print("\n ({0} of {1})".format(i + 1, number_of_exp)) my_config = Config(tunning=True, hidden_nodes_1=3 * fc, hidden_nodes_2=2 * fc, hidden_nodes_3=fc) attrs = vars(my_config) config_info = ["%s: %s" % item for item in attrs.items()] info.append(config_info) my_model = CNNModel(my_config, my_dataholder) train_model(my_model, my_dataholder, 10001, 1000, False) current_dur = get_time(train_model, 10001) score = check_valid(my_model) results.append(score) duration.append(current_dur) best_result = max(list(zip(results, FC, duration, info))) result_string = """In an experiment with {0} fully connected sizes the best one is {1} with valid accuracy = {2}. \nThe training takes {3:.2f} seconds using the following params: \n{4}""".format(number_of_exp, best_result[1], best_result[0], best_result[2], best_result[3]) file = open("final.txt", "w") file.write(result_string) file.close() plt.plot(FC, results) plt.xlabel("hidden_nodes_3") plt.ylabel("valid acc") plt.savefig("fc.png") plt.clf() plt.plot(FC, duration) plt.xlabel("hidden_nodes_3") plt.ylabel("duration (s)") plt.savefig("fc_du.png") plt.clf()
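The `best_result = max(list(zip(...)))` line above works because Python compares tuples lexicographically, so `max` ranks the runs by validation accuracy first and only falls back to the later fields on ties. A tiny standalone illustration:

results = [0.91, 0.95, 0.93]
FC = [5, 10, 15]
duration = [12.0, 14.5, 13.1]
# tuples compare element by element, so the highest accuracy wins
print(max(zip(results, FC, duration)))  # (0.95, 10, 14.5)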
mit
hdmetor/scikit-learn
examples/mixture/plot_gmm_selection.py
248
3223
""" ================================= Gaussian Mixture Model Selection ================================= This example shows that model selection can be performed with Gaussian Mixture Models using information-theoretic criteria (BIC). Model selection concerns both the covariance type and the number of components in the model. In that case, AIC also provides the right result (not shown to save time), but BIC is better suited if the problem is to identify the right model. Unlike Bayesian procedures, such inferences are prior-free. In that case, the model with 2 components and full covariance (which corresponds to the true generative model) is selected. """ print(__doc__) import itertools import numpy as np from scipy import linalg import matplotlib.pyplot as plt import matplotlib as mpl from sklearn import mixture # Number of samples per component n_samples = 500 # Generate random sample, two components np.random.seed(0) C = np.array([[0., -0.1], [1.7, .4]]) X = np.r_[np.dot(np.random.randn(n_samples, 2), C), .7 * np.random.randn(n_samples, 2) + np.array([-6, 3])] lowest_bic = np.infty bic = [] n_components_range = range(1, 7) cv_types = ['spherical', 'tied', 'diag', 'full'] for cv_type in cv_types: for n_components in n_components_range: # Fit a mixture of Gaussians with EM gmm = mixture.GMM(n_components=n_components, covariance_type=cv_type) gmm.fit(X) bic.append(gmm.bic(X)) if bic[-1] < lowest_bic: lowest_bic = bic[-1] best_gmm = gmm bic = np.array(bic) color_iter = itertools.cycle(['k', 'r', 'g', 'b', 'c', 'm', 'y']) clf = best_gmm bars = [] # Plot the BIC scores spl = plt.subplot(2, 1, 1) for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)): xpos = np.array(n_components_range) + .2 * (i - 2) bars.append(plt.bar(xpos, bic[i * len(n_components_range): (i + 1) * len(n_components_range)], width=.2, color=color)) plt.xticks(n_components_range) plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()]) plt.title('BIC score per model') xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\ .2 * np.floor(bic.argmin() / len(n_components_range)) plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14) spl.set_xlabel('Number of components') spl.legend([b[0] for b in bars], cv_types) # Plot the winner splot = plt.subplot(2, 1, 2) Y_ = clf.predict(X) for i, (mean, covar, color) in enumerate(zip(clf.means_, clf.covars_, color_iter)): v, w = linalg.eigh(covar) if not np.any(Y_ == i): continue plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color) # Plot an ellipse to show the Gaussian component angle = np.arctan2(w[0][1], w[0][0]) angle = 180 * angle / np.pi # convert to degrees v *= 4 ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color) ell.set_clip_box(splot.bbox) ell.set_alpha(.5) splot.add_artist(ell) plt.xlim(-10, 10) plt.ylim(-3, 6) plt.xticks(()) plt.yticks(()) plt.title('Selected GMM: full model, 2 components') plt.subplots_adjust(hspace=.35, bottom=.02) plt.show()
bsd-3-clause
sauloal/cnidaria
scripts/venv/lib/python2.7/site-packages/mpl_toolkits/axisartist/axisline_style.py
8
5277
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

import six

from matplotlib.patches import _Style, FancyArrowPatch
from matplotlib.transforms import IdentityTransform
from matplotlib.path import Path
import numpy as np


class _FancyAxislineStyle:
    class SimpleArrow(FancyArrowPatch):
        """
        The artist class that will be returned for SimpleArrow style.
        """
        _ARROW_STYLE = "->"

        def __init__(self, axis_artist, line_path, transform,
                     line_mutation_scale):
            self._axis_artist = axis_artist
            self._line_transform = transform
            self._line_path = line_path
            self._line_mutation_scale = line_mutation_scale

            FancyArrowPatch.__init__(self,
                                     path=self._line_path,
                                     arrowstyle=self._ARROW_STYLE,
                                     arrow_transmuter=None,
                                     patchA=None,
                                     patchB=None,
                                     shrinkA=0.,
                                     shrinkB=0.,
                                     mutation_scale=line_mutation_scale,
                                     mutation_aspect=None,
                                     transform=IdentityTransform(),
                                     )

        def set_line_mutation_scale(self, scale):
            self.set_mutation_scale(scale*self._line_mutation_scale)

        def _extend_path(self, path, mutation_size=10):
            """
            Extend the path to make a room for drawing arrow.
            """
            from matplotlib.bezier import get_cos_sin

            x0, y0 = path.vertices[-2]
            x1, y1 = path.vertices[-1]
            cost, sint = get_cos_sin(x0, y0, x1, y1)

            d = mutation_size * 1.

            x2, y2 = x1 + cost*d, y1+sint*d

            if path.codes is None:
                _path = Path(np.concatenate([path.vertices,
                                             [[x2, y2]]]))
            else:
                _path = Path(np.concatenate([path.vertices,
                                             [[x2, y2]]]),
                             np.concatenate([path.codes,
                                             [Path.LINETO]]))

            return _path

        def set_path(self, path):
            self._line_path = path

        def draw(self, renderer):
            """
            Draw the axis line.
             1) transform the path to the display coordinate.
             2) extend the path to make a room for arrow
             3) update the path of the FancyArrowPatch.
             4) draw
            """
            path_in_disp = self._line_transform.transform_path(self._line_path)
            mutation_size = self.get_mutation_scale()  # line_mutation_scale()
            extended_path = self._extend_path(path_in_disp,
                                              mutation_size=mutation_size)
            self._path_original = extended_path
            FancyArrowPatch.draw(self, renderer)

    class FilledArrow(SimpleArrow):
        """
        The artist class that will be returned for FilledArrow style.
        """
        _ARROW_STYLE = "-|>"


class AxislineStyle(_Style):
    """
    :class:`AxislineStyle` is a container class which defines style classes
    for AxisArtists.

    An instance of any axisline style class is a callable object,
    whose call signature is ::

       __call__(self, axis_artist, path, transform)

    When called, this should return a mpl artist with the following methods
    implemented. ::

      def set_path(self, path):
          # set the path for axisline.

      def set_line_mutation_scale(self, scale):
          # set the scale

      def draw(self, renderer):
          # draw
    """

    _style_list = {}

    class _Base(object):
        # The derived classes are required to be able to be initialized
        # w/o arguments, i.e., all its argument (except self) must have
        # the default values.

        def __init__(self):
            """
            initialization.
            """
            super(AxislineStyle._Base, self).__init__()

        def __call__(self, axis_artist, transform):
            """
            Given the AxisArtist instance, and transform for the path
            (set_path method), return the mpl artist for drawing the axis line.
            """
            return self.new_line(axis_artist, transform)

    class SimpleArrow(_Base):
        """
        A simple arrow.
        """

        ArrowAxisClass = _FancyAxislineStyle.SimpleArrow

        def __init__(self, size=1):
            """
            *size*
              size of the arrow as a fraction of the ticklabel size.
""" self.size = size super(AxislineStyle.SimpleArrow, self).__init__() def new_line(self, axis_artist, transform): linepath = Path([(0,0), (0, 1)]) axisline = self.ArrowAxisClass(axis_artist, linepath, transform, line_mutation_scale=self.size) return axisline _style_list["->"] = SimpleArrow class FilledArrow(SimpleArrow): ArrowAxisClass = _FancyAxislineStyle.FilledArrow _style_list["-|>"] = FilledArrow
mit
mjudsp/Tsallis
examples/plot_isotonic_regression.py
303
1767
""" =================== Isotonic Regression =================== An illustration of the isotonic regression on generated data. The isotonic regression finds a non-decreasing approximation of a function while minimizing the mean squared error on the training data. The benefit of such a model is that it does not assume any form for the target function such as linearity. For comparison a linear regression is also presented. """ print(__doc__) # Author: Nelle Varoquaux <nelle.varoquaux@gmail.com> # Alexandre Gramfort <alexandre.gramfort@inria.fr> # Licence: BSD import numpy as np import matplotlib.pyplot as plt from matplotlib.collections import LineCollection from sklearn.linear_model import LinearRegression from sklearn.isotonic import IsotonicRegression from sklearn.utils import check_random_state n = 100 x = np.arange(n) rs = check_random_state(0) y = rs.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n)) ############################################################################### # Fit IsotonicRegression and LinearRegression models ir = IsotonicRegression() y_ = ir.fit_transform(x, y) lr = LinearRegression() lr.fit(x[:, np.newaxis], y) # x needs to be 2d for LinearRegression ############################################################################### # plot result segments = [[[i, y[i]], [i, y_[i]]] for i in range(n)] lc = LineCollection(segments, zorder=0) lc.set_array(np.ones(len(y))) lc.set_linewidths(0.5 * np.ones(n)) fig = plt.figure() plt.plot(x, y, 'r.', markersize=12) plt.plot(x, y_, 'g.-', markersize=12) plt.plot(x, lr.predict(x[:, np.newaxis]), 'b-') plt.gca().add_collection(lc) plt.legend(('Data', 'Isotonic Fit', 'Linear Fit'), loc='lower right') plt.title('Isotonic regression') plt.show()
bsd-3-clause
ansobolev/regCMPostProc
src/plot.py
1
2816
#!/usr/bin/env python

# RegCM postprocessing tool
# Copyright (C) 2014 Aliou, Addisu, Kanhu, Andrey
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import numpy as np
import matplotlib.pyplot as plt
import cartopy
import cartopy.crs as ccrs
import cartopy.feature as cfeature

from value import Value


class Plotter(object):
    def __init__(self, value):
        self._value = value
        self.lat, self.lon = value.latlon

    def plot(self, coastlines=True, countries=True, places=True, title=None, levels=None):
        if levels is not None:
            l_min, l_max = levels
            # np.linspace sidesteps the integer-step limitation of range()
            # and yields 11 evenly spaced contour levels, endpoints included
            levels = np.linspace(l_min, l_max, 11)

        projection = ccrs.PlateCarree()

        self.fig, self.ax = plt.subplots(subplot_kw={'projection': projection})
        if coastlines:
            self.ax.coastlines('10m')
        if countries:
            countries = cfeature.NaturalEarthFeature(
                scale='110m', category='cultural', name='admin_0_countries')
            self.ax.add_feature(countries, color='r', alpha=0.1)
        if places:
            places = cfeature.NaturalEarthFeature(
                scale='110m', category='cultural', name='populated_places')
            self.ax.add_feature(places, color='b', hatch='o')

        cx = self.ax.contourf(self.lon, self.lat, self._value.data, transform=ccrs.PlateCarree(),
                              cmap='bwr', levels=levels)

        # To mask out OCEAN or LAND
        #ax.add_feature(cfeature.OCEAN)
        #ax.add_feature(cfeature.LAND)

        self.ax.gridlines(crs=ccrs.PlateCarree(), draw_labels=True,
                          linewidth=1, color='blue', alpha=0.5, linestyle='-')

        self.fig.colorbar(cx)

        times = self._value.limits['time']
        plt.title(self._value.title + ' [' + self._value.units + ']\n' +
                  'mean between ' + str(times[0]) + ' and ' + str(times[1]) + '\n')

    def show(self):
        plt.show()

    def save(self, filename, format):
        plt.savefig(filename + '.' + format)

    def close(self):
        plt.close(self.fig)

if __name__ == "__main__":
    pass
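A hypothetical usage sketch of the class above. The `Value` constructor lives elsewhere in this project, so its arguments are assumptions here and the snippet is left commented out rather than presented as runnable:

# v = Value(...)                 # some time-averaged RegCM field
# p = Plotter(v)
# p.plot(levels=(-10, 10), title='Mean 2m temperature anomaly')
# p.save('t2m_anomaly', 'png')   # writes t2m_anomaly.png
# p.show()
# p.close()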
gpl-3.0
wmvanvliet/mne-python
examples/time_frequency/plot_source_power_spectrum.py
19
1959
""" ====================================================== Compute source power spectral density (PSD) in a label ====================================================== Returns an STC file containing the PSD (in dB) of each of the sources within a label. """ # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr> # # License: BSD (3-clause) import matplotlib.pyplot as plt import mne from mne import io from mne.datasets import sample from mne.minimum_norm import read_inverse_operator, compute_source_psd print(__doc__) ############################################################################### # Set parameters data_path = sample.data_path() raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif' fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif' fname_label = data_path + '/MEG/sample/labels/Aud-lh.label' # Setup for reading the raw data raw = io.read_raw_fif(raw_fname, verbose=False) events = mne.find_events(raw, stim_channel='STI 014') inverse_operator = read_inverse_operator(fname_inv) raw.info['bads'] = ['MEG 2443', 'EEG 053'] # picks MEG gradiometers picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True, stim=False, exclude='bads') tmin, tmax = 0, 120 # use the first 120s of data fmin, fmax = 4, 100 # look at frequencies between 4 and 100Hz n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2 label = mne.read_label(fname_label) stc = compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM", tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, pick_ori="normal", n_fft=n_fft, label=label, dB=True) stc.save('psd_dSPM') ############################################################################### # View PSD of sources in label plt.plot(stc.times, stc.data.T) plt.xlabel('Frequency (Hz)') plt.ylabel('PSD (dB)') plt.title('Source Power Spectrum (PSD)') plt.show()
bsd-3-clause
destijl/forensicartifacts
frontend/thirdparty/networkx-1.9/examples/graph/napoleon_russian_campaign.py
44
3216
#!/usr/bin/env python """ Minard's data from Napoleon's 1812-1813 Russian Campaign. http://www.math.yorku.ca/SCS/Gallery/minard/minard.txt """ __author__ = """Aric Hagberg (hagberg@lanl.gov)""" # Copyright (C) 2006 by # Aric Hagberg <hagberg@lanl.gov> # Dan Schult <dschult@colgate.edu> # Pieter Swart <swart@lanl.gov> # All rights reserved. # BSD license. import string import networkx as nx def minard_graph(): data1="""\ 24.0,54.9,340000,A,1 24.5,55.0,340000,A,1 25.5,54.5,340000,A,1 26.0,54.7,320000,A,1 27.0,54.8,300000,A,1 28.0,54.9,280000,A,1 28.5,55.0,240000,A,1 29.0,55.1,210000,A,1 30.0,55.2,180000,A,1 30.3,55.3,175000,A,1 32.0,54.8,145000,A,1 33.2,54.9,140000,A,1 34.4,55.5,127100,A,1 35.5,55.4,100000,A,1 36.0,55.5,100000,A,1 37.6,55.8,100000,A,1 37.7,55.7,100000,R,1 37.5,55.7,98000,R,1 37.0,55.0,97000,R,1 36.8,55.0,96000,R,1 35.4,55.3,87000,R,1 34.3,55.2,55000,R,1 33.3,54.8,37000,R,1 32.0,54.6,24000,R,1 30.4,54.4,20000,R,1 29.2,54.3,20000,R,1 28.5,54.2,20000,R,1 28.3,54.3,20000,R,1 27.5,54.5,20000,R,1 26.8,54.3,12000,R,1 26.4,54.4,14000,R,1 25.0,54.4,8000,R,1 24.4,54.4,4000,R,1 24.2,54.4,4000,R,1 24.1,54.4,4000,R,1""" data2="""\ 24.0,55.1,60000,A,2 24.5,55.2,60000,A,2 25.5,54.7,60000,A,2 26.6,55.7,40000,A,2 27.4,55.6,33000,A,2 28.7,55.5,33000,R,2 29.2,54.2,30000,R,2 28.5,54.1,30000,R,2 28.3,54.2,28000,R,2""" data3="""\ 24.0,55.2,22000,A,3 24.5,55.3,22000,A,3 24.6,55.8,6000,A,3 24.6,55.8,6000,R,3 24.2,54.4,6000,R,3 24.1,54.4,6000,R,3""" cities="""\ 24.0,55.0,Kowno 25.3,54.7,Wilna 26.4,54.4,Smorgoni 26.8,54.3,Moiodexno 27.7,55.2,Gloubokoe 27.6,53.9,Minsk 28.5,54.3,Studienska 28.7,55.5,Polotzk 29.2,54.4,Bobr 30.2,55.3,Witebsk 30.4,54.5,Orscha 30.4,53.9,Mohilow 32.0,54.8,Smolensk 33.2,54.9,Dorogobouge 34.3,55.2,Wixma 34.4,55.5,Chjat 36.0,55.5,Mojaisk 37.6,55.8,Moscou 36.6,55.3,Tarantino 36.5,55.0,Malo-Jarosewii""" c={} for line in cities.split('\n'): x,y,name=line.split(',') c[name]=(float(x),float(y)) g=[] for data in [data1,data2,data3]: G=nx.Graph() i=0 G.pos={} # location G.pop={} # size last=None for line in data.split('\n'): x,y,p,r,n=line.split(',') G.pos[i]=(float(x),float(y)) G.pop[i]=int(p) if last is None: last=i else: G.add_edge(i,last,{r:int(n)}) last=i i=i+1 g.append(G) return g,c if __name__ == "__main__": (g,city)=minard_graph() try: import matplotlib.pyplot as plt plt.figure(1,figsize=(11,5)) plt.clf() colors=['b','g','r'] for G in g: c=colors.pop(0) node_size=[int(G.pop[n]/300.0) for n in G] nx.draw_networkx_edges(G,G.pos,edge_color=c,width=4,alpha=0.5) nx.draw_networkx_nodes(G,G.pos,node_size=node_size,node_color=c,alpha=0.5) nx.draw_networkx_nodes(G,G.pos,node_size=5,node_color='k') for c in city: x,y=city[c] plt.text(x,y+0.1,c) plt.savefig("napoleon_russian_campaign.png") except ImportError: pass
apache-2.0
zarafagroupware/python-zarafa
scripts/z-barplot.py
2
1667
#!/usr/bin/env python import zarafa import matplotlib.pyplot as plt def opt_args(): parser = zarafa.parser('skpc') parser.add_option('--save', dest='save', action='store', help='Save plot to file (png)') return parser.parse_args() def b2m(bytes): return (bytes / 1024) / 1024 def main(): options, args = opt_args() users = list(zarafa.Server(options).users()) width = 0.35 # the width of the bars fig, ax = plt.subplots() ind = range(0, len(users)) data = [b2m(user.store.size) for user in users] rects1 = ax.bar(ind, data, width, color='r') data = [len(list(user.store.folders())) for user in users] rects2 = ax.bar([offset + width for offset in ind], data, width, color='g') data =[sum(folder.count for folder in user.store.folders()) for user in users] rects3 = ax.bar([offset + width * 2 for offset in ind], data, width, color='b') ax.legend( (rects1[0], rects2[0], rects3[0]), ('Store size (Mb)', 'Folders', 'Items') ) ax.set_ylabel('Values') ax.set_title('Store size, Folder, Items per user') ax.set_xticks([offset + width for offset in ind]) ax.set_xticklabels([user.name for user in users]) def autolabel(rects): # attach some text labels for rect in rects: height = rect.get_height() ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d'%int(height), ha='center', va='bottom') autolabel(rects1) autolabel(rects2) autolabel(rects3) if options.save: plt.savefig(options.save) else: plt.show() if __name__ == '__main__': main()
agpl-3.0
pap/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/fontconfig_pattern.py
72
6429
""" A module for parsing and generating fontconfig patterns. See the `fontconfig pattern specification <http://www.fontconfig.org/fontconfig-user.html>`_ for more information. """ # Author : Michael Droettboom <mdroe@stsci.edu> # License : matplotlib license (PSF compatible) # This class is defined here because it must be available in: # - The old-style config framework (:file:`rcsetup.py`) # - The traits-based config framework (:file:`mpltraits.py`) # - The font manager (:file:`font_manager.py`) # It probably logically belongs in :file:`font_manager.py`, but # placing it in any of these places would have created cyclical # dependency problems, or an undesired dependency on traits even # when the traits-based config framework is not used. import re from matplotlib.pyparsing import Literal, ZeroOrMore, \ Optional, Regex, StringEnd, ParseException, Suppress family_punc = r'\\\-:,' family_unescape = re.compile(r'\\([%s])' % family_punc).sub family_escape = re.compile(r'([%s])' % family_punc).sub value_punc = r'\\=_:,' value_unescape = re.compile(r'\\([%s])' % value_punc).sub value_escape = re.compile(r'([%s])' % value_punc).sub class FontconfigPatternParser: """A simple pyparsing-based parser for fontconfig-style patterns. See the `fontconfig pattern specification <http://www.fontconfig.org/fontconfig-user.html>`_ for more information. """ _constants = { 'thin' : ('weight', 'light'), 'extralight' : ('weight', 'light'), 'ultralight' : ('weight', 'light'), 'light' : ('weight', 'light'), 'book' : ('weight', 'book'), 'regular' : ('weight', 'regular'), 'normal' : ('weight', 'normal'), 'medium' : ('weight', 'medium'), 'demibold' : ('weight', 'demibold'), 'semibold' : ('weight', 'semibold'), 'bold' : ('weight', 'bold'), 'extrabold' : ('weight', 'extra bold'), 'black' : ('weight', 'black'), 'heavy' : ('weight', 'heavy'), 'roman' : ('slant', 'normal'), 'italic' : ('slant', 'italic'), 'oblique' : ('slant', 'oblique'), 'ultracondensed' : ('width', 'ultra-condensed'), 'extracondensed' : ('width', 'extra-condensed'), 'condensed' : ('width', 'condensed'), 'semicondensed' : ('width', 'semi-condensed'), 'expanded' : ('width', 'expanded'), 'extraexpanded' : ('width', 'extra-expanded'), 'ultraexpanded' : ('width', 'ultra-expanded') } def __init__(self): family = Regex(r'([^%s]|(\\[%s]))*' % (family_punc, family_punc)) \ .setParseAction(self._family) size = Regex(r"([0-9]+\.?[0-9]*|\.[0-9]+)") \ .setParseAction(self._size) name = Regex(r'[a-z]+') \ .setParseAction(self._name) value = Regex(r'([^%s]|(\\[%s]))*' % (value_punc, value_punc)) \ .setParseAction(self._value) families =(family + ZeroOrMore( Literal(',') + family) ).setParseAction(self._families) point_sizes =(size + ZeroOrMore( Literal(',') + size) ).setParseAction(self._point_sizes) property =( (name + Suppress(Literal('=')) + value + ZeroOrMore( Suppress(Literal(',')) + value) ) | name ).setParseAction(self._property) pattern =(Optional( families) + Optional( Literal('-') + point_sizes) + ZeroOrMore( Literal(':') + property) + StringEnd() ) self._parser = pattern self.ParseException = ParseException def parse(self, pattern): """ Parse the given fontconfig *pattern* and return a dictionary of key/value pairs useful for initializing a :class:`font_manager.FontProperties` object. 
""" props = self._properties = {} try: self._parser.parseString(pattern) except self.ParseException, e: raise ValueError("Could not parse font string: '%s'\n%s" % (pattern, e)) self._properties = None return props def _family(self, s, loc, tokens): return [family_unescape(r'\1', str(tokens[0]))] def _size(self, s, loc, tokens): return [float(tokens[0])] def _name(self, s, loc, tokens): return [str(tokens[0])] def _value(self, s, loc, tokens): return [value_unescape(r'\1', str(tokens[0]))] def _families(self, s, loc, tokens): self._properties['family'] = [str(x) for x in tokens] return [] def _point_sizes(self, s, loc, tokens): self._properties['size'] = [str(x) for x in tokens] return [] def _property(self, s, loc, tokens): if len(tokens) == 1: if tokens[0] in self._constants: key, val = self._constants[tokens[0]] self._properties.setdefault(key, []).append(val) else: key = tokens[0] val = tokens[1:] self._properties.setdefault(key, []).extend(val) return [] parse_fontconfig_pattern = FontconfigPatternParser().parse def generate_fontconfig_pattern(d): """ Given a dictionary of key/value pairs, generates a fontconfig pattern string. """ props = [] families = '' size = '' for key in 'family style variant weight stretch file size'.split(): val = getattr(d, 'get_' + key)() if val is not None and val != []: if type(val) == list: val = [value_escape(r'\\\1', str(x)) for x in val if x is not None] if val != []: val = ','.join(val) props.append(":%s=%s" % (key, val)) return ''.join(props)
agpl-3.0
sevenian3/ChromaStarPy
solartest.py
1
6462
# -*- coding: utf-8 -*- """ Created on Wed Aug 30 10:54:21 2017 @author: ishort """ #plotting: import matplotlib import matplotlib.pyplot as plt #%matplotlib inline import pylab #General file for printing ad hoc quantities #dbgHandle = open("debug.out", 'w') #Get the data dataPath = "SolFluxAtlas2005/" #outPath = absPath + "Outputs/" numStr = "" num = 0.0 wavStr = "" flxStr = "" inLine = "" fields = [" " for i in range(2)] #with open("", 'r', encoding='utf-8') as inputHandle: inFile = dataPath + "fluxspliced.2005" with open(inFile, 'r') as inputHandle: #Expects number of records on first lines, then white space delimited columns of #wavelengths in nm and continuum rectified fluxes inLine = inputHandle.readline() #Special one-line header print(inLine) fields = inLine.split() numStr = fields[0].strip() #first field is number of following records num = int(numStr) waveSun = [0.0 for i in range(num)] fluxSun = [0.0 for i in range(num)] for i in range(num): inLine = inputHandle.readline() fields = inLine.split() wavStr = fields[0].strip(); flxStr = fields[1].strip() waveSun[i] = float(wavStr); fluxSun[i] = float(flxStr) pylab.plot(waveSun, fluxSun, color='black') #Now get the synthetic spectrum pre-computed with ChromaStarPy modelPath = "Outputs/" #outPath = absPath + "Outputs/" numStr = "" num = 0.0 wavStr = "" flxStr = "" inLine = " " #fields = [" " for i in range(2)] """ runVers = "pyLoop" #Model atmosphere teffStr = "5777.0" loggStr = "4.44" logZStr = "0.0" massStarStr = "1.0" xiTStr = "1.0" logHeFeStr = "0.0" logCOStr = "0.0" logAlphaFeStr = "0.0" #Spectrum synthesis lambdaStartStr = "390.0" lambdaStopStr = "400.0" lineThreshStr = "-3.0" voigtThreshStr = "-3.0" logGammaColStr = "0.5" logKapFudgeStr = "0.0" macroVStr = "1.0" rotVStr = "2.0" rotIStr = "90.0" RVStr = "0.0" strucStem = "Teff" + teffStr + "Logg" + loggStr + "Z" + logZStr + "M" + massStarStr+"xiT"+xiTStr + \ "HeFe" + logHeFeStr + "CO" + logCOStr + "AlfFe" + logAlphaFeStr + "v" + runVers strucFile = "struc." + strucStem + ".out" specFile = "spec." 
+ strucStem + "L"+lambdaStartStr+"-"+lambdaStopStr+"xiT"+xiTStr+"LThr"+lineThreshStr+ \ "GamCol"+logGammaColStr+"Mac"+macroVStr+"Rot"+rotVStr+"-"+rotIStr+"RV"+RVStr + ".out" #with open("", 'r', encoding='utf-8') as inputHandle: inFile = modelPath + specFile; """ project = "Project" runVers = "Run" teff = 5777.0 logg = 4.44 log10ZScale = 0.0 lambdaStart = 390.0 lambdaStop = 400.0 fileStem = project + "-"\ + str(round(teff, 7)) + "-" + str(round(logg, 3)) + "-" + str(round(log10ZScale, 3))\ + "-" + str(round(lambdaStart, 5)) + "-" + str(round(lambdaStop, 5))\ + "-" + runVers inFile = modelPath + fileStem + ".spec.txt" invnAir = 1.0 / 1.000277 #// reciprocal of refractive index of air at STP #numStr = fields[0].strip() #first field is number of following records #num = int(numStr) waveMod = [] fluxMod = [] wav = 0.0 #//initialization wavStr = "" lblStr = "" with open(inFile, 'r') as inputHandle: #Expects number of records on first lines, then white space delimited columns of #wavelengths in nm and continuum rectified fluxes inLine = inputHandle.readline() #line of header print(inLine) inLine = inputHandle.readline() print(inLine) fields = inLine.split() #number of line IDs is last field: numLineIdsStr = fields[len(fields)-1] numLineIds = int(numLineIdsStr) - 1 # to be on safe side print("Recovered that there are " + numLineIdsStr + " lines to ID") inLine = inputHandle.readline() print(inLine) fields = inLine.split() #number of wavelengths in spectrum is last field: numWavsStr = fields[len(fields)-1] numWavs = int(numWavsStr) # to be on safe side print("Recovered that there are " + numWavsStr + " wavelengths") #One more line of header inLine = inputHandle.readline() #line of header print(inLine) waveMod = [0.0 for i in range(numWavs)] fluxMod = [0.0 for i in range(numWavs)] #Get the synthetic spectrum for i in range(numWavs): inLine = inputHandle.readline() fields = inLine.split() wavStr = fields[0].strip(); flxStr = fields[1].strip() wav = invnAir * float(wavStr) waveMod[i] = wav fluxMod[i] = float(flxStr) waveIds = [0.0 for i in range(numLineIds)] lblIds = ["" for i in range(numLineIds)] #Get the line IDs #Expects four white-space-delimited fields: # wavelength, element, ion. 
stage, and rounded wavelength #Another line of header for line id section inLine = inputHandle.readline() #line of header print(inLine) for i in range(numLineIds): inLine = inputHandle.readline() fields = inLine.split() wavStr = fields[0].strip() wav = invnAir * float(wavStr) waveIds[i] = wav lblStr = fields[1].strip() + " " + fields[2].strip() + " " + fields[3].strip() lblIds[i] = lblStr """ #If we do NOT know number of records: #for i in inputHandle: #doesn't work - 0 iterations while (inLine != ""): inLine = inputHandle.readline() if not inLine: break #print(inLine) fields = inLine.split() wavStr = fields[0].strip(); flxStr = fields[1].strip() wav = invnAir * float(wavStr) waveMod.append(wav) fluxMod.append(float(flxStr)) """ #plot the spectrum #plt.title('Synthetic spectrum') plt.ylabel('$F_\lambda/F^C_\lambda$') plt.xlabel('$\lambda$ (nm)') xMin = min(waveMod) xMax = max(waveMod) pylab.xlim(xMin, xMax) pylab.ylim(0.0, 1.6) pylab.plot(waveMod, fluxMod, color="gray") #add the line IDs for i in range(numLineIds): if "Ca II" in lblIds[i]: thisLam = waveIds[i] thisLbl = lblIds[i] xPoint = [thisLam, thisLam] yPoint = [1.05, 1.1] pylab.plot(xPoint, yPoint, color='black') pylab.text(thisLam, 1.5, thisLbl, rotation=270) #Save as encapsulated postscript (eps) for LaTex epsName = fileStem + ".eps" plt.savefig(epsName, format='eps', dpi=1000)
mit
OrkoHunter/networkx
examples/graph/atlas.py
54
2609
#!/usr/bin/env python """ Atlas of all graphs of 6 nodes or less. """ __author__ = """Aric Hagberg (hagberg@lanl.gov)""" # Copyright (C) 2004 by # Aric Hagberg <hagberg@lanl.gov> # Dan Schult <dschult@colgate.edu> # Pieter Swart <swart@lanl.gov> # All rights reserved. # BSD license. import networkx as nx from networkx.generators.atlas import * from networkx.algorithms.isomorphism.isomorph import graph_could_be_isomorphic as isomorphic import random def atlas6(): """ Return the atlas of all connected graphs of 6 nodes or less. Attempt to check for isomorphisms and remove. """ Atlas=graph_atlas_g()[0:208] # 208 # remove isolated nodes, only connected graphs are left U=nx.Graph() # graph for union of all graphs in atlas for G in Atlas: zerodegree=[n for n in G if G.degree(n)==0] for n in zerodegree: G.remove_node(n) U=nx.disjoint_union(U,G) # list of graphs of all connected components C=nx.connected_component_subgraphs(U) UU=nx.Graph() # do quick isomorphic-like check, not a true isomorphism checker nlist=[] # list of nonisomorphic graphs for G in C: # check against all nonisomorphic graphs so far if not iso(G,nlist): nlist.append(G) UU=nx.disjoint_union(UU,G) # union the nonisomorphic graphs return UU def iso(G1, glist): """Quick and dirty nonisomorphism checker used to check isomorphisms.""" for G2 in glist: if isomorphic(G1,G2): return True return False if __name__ == '__main__': import networkx as nx G=atlas6() print("graph has %d nodes with %d edges"\ %(nx.number_of_nodes(G),nx.number_of_edges(G))) print(nx.number_connected_components(G),"connected components") try: from networkx import graphviz_layout except ImportError: raise ImportError("This example needs Graphviz and either PyGraphviz or Pydot") import matplotlib.pyplot as plt plt.figure(1,figsize=(8,8)) # layout graphs with positions using graphviz neato pos=nx.graphviz_layout(G,prog="neato") # color nodes the same in each connected subgraph C=nx.connected_component_subgraphs(G) for g in C: c=[random.random()]*nx.number_of_nodes(g) # random color... nx.draw(g, pos, node_size=40, node_color=c, vmin=0.0, vmax=1.0, with_labels=False ) plt.savefig("atlas.png",dpi=75)
bsd-3-clause
herilalaina/scikit-learn
examples/feature_selection/plot_f_test_vs_mi.py
82
1671
""" =========================================== Comparison of F-test and mutual information =========================================== This example illustrates the differences between univariate F-test statistics and mutual information. We consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1], the target depends on them as follows: y = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1), that is the third features is completely irrelevant. The code below plots the dependency of y against individual x_i and normalized values of univariate F-tests statistics and mutual information. As F-test captures only linear dependency, it rates x_1 as the most discriminative feature. On the other hand, mutual information can capture any kind of dependency between variables and it rates x_2 as the most discriminative feature, which probably agrees better with our intuitive perception for this example. Both methods correctly marks x_3 as irrelevant. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.feature_selection import f_regression, mutual_info_regression np.random.seed(0) X = np.random.rand(1000, 3) y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000) f_test, _ = f_regression(X, y) f_test /= np.max(f_test) mi = mutual_info_regression(X, y) mi /= np.max(mi) plt.figure(figsize=(15, 5)) for i in range(3): plt.subplot(1, 3, i + 1) plt.scatter(X[:, i], y, edgecolor='black', s=20) plt.xlabel("$x_{}$".format(i + 1), fontsize=14) if i == 0: plt.ylabel("$y$", fontsize=14) plt.title("F-test={:.2f}, MI={:.2f}".format(f_test[i], mi[i]), fontsize=16) plt.show()
bsd-3-clause
ryandougherty/mwa-capstone
MWA_Tools/build/matplotlib/lib/mpl_examples/misc/font_indexing.py
4
1299
""" A little example that shows how the various indexing into the font tables relate to one another. Mainly for mpl developers.... """ import matplotlib from matplotlib.ft2font import FT2Font, KERNING_DEFAULT, KERNING_UNFITTED, KERNING_UNSCALED #fname = '/usr/share/fonts/sfd/FreeSans.ttf' fname = matplotlib.get_data_path() + '/fonts/ttf/Vera.ttf' font = FT2Font(fname) font.set_charmap(0) codes = font.get_charmap().items() #dsu = [(ccode, glyphind) for ccode, glyphind in codes] #dsu.sort() #for ccode, glyphind in dsu: # try: name = font.get_glyph_name(glyphind) # except RuntimeError: pass # else: print '% 4d % 4d %s %s'%(glyphind, ccode, hex(int(ccode)), name) # make a charname to charcode and glyphind dictionary coded = {} glyphd = {} for ccode, glyphind in codes: name = font.get_glyph_name(glyphind) coded[name] = ccode glyphd[name] = glyphind code = coded['A'] glyph = font.load_char(code) #print glyph.bbox print glyphd['A'], glyphd['V'], coded['A'], coded['V'] print 'AV', font.get_kerning(glyphd['A'], glyphd['V'], KERNING_DEFAULT) print 'AV', font.get_kerning(glyphd['A'], glyphd['V'], KERNING_UNFITTED) print 'AV', font.get_kerning(glyphd['A'], glyphd['V'], KERNING_UNSCALED) print 'AV', font.get_kerning(glyphd['A'], glyphd['T'], KERNING_UNSCALED)
gpl-2.0
scipy/scipy
scipy/odr/models.py
19
7660
""" Collection of Model instances for use with the odrpack fitting package. """ import numpy as np from scipy.odr.odrpack import Model __all__ = ['Model', 'exponential', 'multilinear', 'unilinear', 'quadratic', 'polynomial'] def _lin_fcn(B, x): a, b = B[0], B[1:] b.shape = (b.shape[0], 1) return a + (x*b).sum(axis=0) def _lin_fjb(B, x): a = np.ones(x.shape[-1], float) res = np.concatenate((a, x.ravel())) res.shape = (B.shape[-1], x.shape[-1]) return res def _lin_fjd(B, x): b = B[1:] b = np.repeat(b, (x.shape[-1],)*b.shape[-1], axis=0) b.shape = x.shape return b def _lin_est(data): # Eh. The answer is analytical, so just return all ones. # Don't return zeros since that will interfere with # ODRPACK's auto-scaling procedures. if len(data.x.shape) == 2: m = data.x.shape[0] else: m = 1 return np.ones((m + 1,), float) def _poly_fcn(B, x, powers): a, b = B[0], B[1:] b.shape = (b.shape[0], 1) return a + np.sum(b * np.power(x, powers), axis=0) def _poly_fjacb(B, x, powers): res = np.concatenate((np.ones(x.shape[-1], float), np.power(x, powers).flat)) res.shape = (B.shape[-1], x.shape[-1]) return res def _poly_fjacd(B, x, powers): b = B[1:] b.shape = (b.shape[0], 1) b = b * powers return np.sum(b * np.power(x, powers-1), axis=0) def _exp_fcn(B, x): return B[0] + np.exp(B[1] * x) def _exp_fjd(B, x): return B[1] * np.exp(B[1] * x) def _exp_fjb(B, x): res = np.concatenate((np.ones(x.shape[-1], float), x * np.exp(B[1] * x))) res.shape = (2, x.shape[-1]) return res def _exp_est(data): # Eh. return np.array([1., 1.]) class _MultilinearModel(Model): r""" Arbitrary-dimensional linear model This model is defined by :math:`y=\beta_0 + \sum_{i=1}^m \beta_i x_i` Examples -------- We can calculate orthogonal distance regression with an arbitrary dimensional linear model: >>> from scipy import odr >>> x = np.linspace(0.0, 5.0) >>> y = 10.0 + 5.0 * x >>> data = odr.Data(x, y) >>> odr_obj = odr.ODR(data, odr.multilinear) >>> output = odr_obj.run() >>> print(output.beta) [10. 5.] """ def __init__(self): super().__init__( _lin_fcn, fjacb=_lin_fjb, fjacd=_lin_fjd, estimate=_lin_est, meta={'name': 'Arbitrary-dimensional Linear', 'equ': 'y = B_0 + Sum[i=1..m, B_i * x_i]', 'TeXequ': r'$y=\beta_0 + \sum_{i=1}^m \beta_i x_i$'}) multilinear = _MultilinearModel() def polynomial(order): """ Factory function for a general polynomial model. Parameters ---------- order : int or sequence If an integer, it becomes the order of the polynomial to fit. If a sequence of numbers, then these are the explicit powers in the polynomial. A constant term (power 0) is always included, so don't include 0. Thus, polynomial(n) is equivalent to polynomial(range(1, n+1)). Returns ------- polynomial : Model instance Model instance. Examples -------- We can fit an input data using orthogonal distance regression (ODR) with a polynomial model: >>> import matplotlib.pyplot as plt >>> from scipy import odr >>> x = np.linspace(0.0, 5.0) >>> y = np.sin(x) >>> poly_model = odr.polynomial(3) # using third order polynomial model >>> data = odr.Data(x, y) >>> odr_obj = odr.ODR(data, poly_model) >>> output = odr_obj.run() # running ODR fitting >>> poly = np.poly1d(output.beta[::-1]) >>> poly_y = poly(x) >>> plt.plot(x, y, label="input data") >>> plt.plot(x, poly_y, label="polynomial ODR") >>> plt.legend() >>> plt.show() """ powers = np.asarray(order) if powers.shape == (): # Scalar. powers = np.arange(1, powers + 1) powers.shape = (len(powers), 1) len_beta = len(powers) + 1 def _poly_est(data, len_beta=len_beta): # Eh. Ignore data and return all ones. 
return np.ones((len_beta,), float) return Model(_poly_fcn, fjacd=_poly_fjacd, fjacb=_poly_fjacb, estimate=_poly_est, extra_args=(powers,), meta={'name': 'Sorta-general Polynomial', 'equ': 'y = B_0 + Sum[i=1..%s, B_i * (x**i)]' % (len_beta-1), 'TeXequ': r'$y=\beta_0 + \sum_{i=1}^{%s} \beta_i x^i$' % (len_beta-1)}) class _ExponentialModel(Model): r""" Exponential model This model is defined by :math:`y=\beta_0 + e^{\beta_1 x}` Examples -------- We can calculate orthogonal distance regression with an exponential model: >>> from scipy import odr >>> x = np.linspace(0.0, 5.0) >>> y = -10.0 + np.exp(0.5*x) >>> data = odr.Data(x, y) >>> odr_obj = odr.ODR(data, odr.exponential) >>> output = odr_obj.run() >>> print(output.beta) [-10. 0.5] """ def __init__(self): super().__init__(_exp_fcn, fjacd=_exp_fjd, fjacb=_exp_fjb, estimate=_exp_est, meta={'name': 'Exponential', 'equ': 'y= B_0 + exp(B_1 * x)', 'TeXequ': r'$y=\beta_0 + e^{\beta_1 x}$'}) exponential = _ExponentialModel() def _unilin(B, x): return x*B[0] + B[1] def _unilin_fjd(B, x): return np.ones(x.shape, float) * B[0] def _unilin_fjb(B, x): _ret = np.concatenate((x, np.ones(x.shape, float))) _ret.shape = (2,) + x.shape return _ret def _unilin_est(data): return (1., 1.) def _quadratic(B, x): return x*(x*B[0] + B[1]) + B[2] def _quad_fjd(B, x): return 2*x*B[0] + B[1] def _quad_fjb(B, x): _ret = np.concatenate((x*x, x, np.ones(x.shape, float))) _ret.shape = (3,) + x.shape return _ret def _quad_est(data): return (1.,1.,1.) class _UnilinearModel(Model): r""" Univariate linear model This model is defined by :math:`y = \beta_0 x + \beta_1` Examples -------- We can calculate orthogonal distance regression with an unilinear model: >>> from scipy import odr >>> x = np.linspace(0.0, 5.0) >>> y = 1.0 * x + 2.0 >>> data = odr.Data(x, y) >>> odr_obj = odr.ODR(data, odr.unilinear) >>> output = odr_obj.run() >>> print(output.beta) [1. 2.] """ def __init__(self): super().__init__(_unilin, fjacd=_unilin_fjd, fjacb=_unilin_fjb, estimate=_unilin_est, meta={'name': 'Univariate Linear', 'equ': 'y = B_0 * x + B_1', 'TeXequ': '$y = \\beta_0 x + \\beta_1$'}) unilinear = _UnilinearModel() class _QuadraticModel(Model): r""" Quadratic model This model is defined by :math:`y = \beta_0 x^2 + \beta_1 x + \beta_2` Examples -------- We can calculate orthogonal distance regression with a quadratic model: >>> from scipy import odr >>> x = np.linspace(0.0, 5.0) >>> y = 1.0 * x ** 2 + 2.0 * x + 3.0 >>> data = odr.Data(x, y) >>> odr_obj = odr.ODR(data, odr.quadratic) >>> output = odr_obj.run() >>> print(output.beta) [1. 2. 3.] """ def __init__(self): super().__init__( _quadratic, fjacd=_quad_fjd, fjacb=_quad_fjb, estimate=_quad_est, meta={'name': 'Quadratic', 'equ': 'y = B_0*x**2 + B_1*x + B_2', 'TeXequ': '$y = \\beta_0 x^2 + \\beta_1 x + \\beta_2'}) quadratic = _QuadraticModel()
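The same `Model` machinery accepts user-defined functions. A hedged sketch fitting y = B_0 * exp(B_1 * x) — note this differs from the built-in `exponential` above, which fits y = B_0 + exp(B_1 * x):

import numpy as np
from scipy import odr

def f(B, x):
    # model function: first argument is the parameter vector
    return B[0] * np.exp(B[1] * x)

x = np.linspace(0.0, 2.0, 50)
y = 2.0 * np.exp(0.75 * x)
out = odr.ODR(odr.Data(x, y), odr.Model(f), beta0=[1.0, 1.0]).run()
print(out.beta)  # approximately [2.0, 0.75]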
bsd-3-clause
abhijeet-talaulikar/Automatic-Helmet-Detection
K-Fold/Logistic_Regression.py
1
2663
import numpy as np import matplotlib.pyplot as plt from sklearn.metrics import roc_curve, auc from sklearn.model_selection import KFold from sklearn.linear_model import LogisticRegression from sklearn.metrics import * from timeit import default_timer as timer from random import randint from sklearn.feature_selection import * from sklearn.decomposition import PCA helmet_data = np.genfromtxt ('helmet.csv', delimiter=",") face_data = np.genfromtxt ('face.csv', delimiter=",") data_full = np.concatenate((helmet_data, face_data), 0) np.random.shuffle(data_full) #shuffle the tuples #feature reduction (on HOG part) #gain, j = mutual_info_classif(data_full[:, 8:-1], data_full[:, -1], discrete_features='auto', n_neighbors=3, copy=True, random_state=None), 0 #for i in np.arange(len(gain)): # if gain[i] <= 0.001: # data_full = np.delete(data_full, 8+i-j, 1) # j += 1 #data = np.copy(data_full) #principal component analysis pca = PCA(n_components=150) data = pca.fit_transform(data_full[:, 8:-1]) data = np.concatenate((data_full[:, 0:8], data, np.array([data_full[:, -1]]).T), axis=1) precision, recall, f1, accuracy, support, fn, roc_auc = 0, 0, 0, 0, 0, 0, 0 colors = ['cyan', 'indigo', 'seagreen', 'yellow', 'blue', 'darkorange'] k = 10 kf = KFold(n_splits = k) start = timer() for train, test in kf.split(data): X_train, X_test = data[train, 0:-1], data[test, 0:-1] y_train, y_test = data[train, -1], data[test, -1] clf = LogisticRegression().fit(X_train, y_train) y_pred = clf.predict(X_test) #ROC curve y_prob = clf.predict_proba(X_test)[:,1] fpr, tpr, thresholds = roc_curve(y_test, y_prob, pos_label=1) roc_auc += auc(fpr, tpr) plt.plot(fpr, tpr, color=colors[randint(0, len(colors)-1)]) precision += precision_score(y_test, y_pred, average = 'macro') recall += recall_score(y_test, y_pred, average = 'macro') f1 += f1_score(y_test, y_pred, average = 'macro') accuracy += accuracy_score(y_test, y_pred) y = y_test - y_pred fn += sum(y[y > 0]) / len(y_test) end = timer() precision /= k recall /= k f1 /= k accuracy /= k fn /= k print("Precision \t: %s" % round(precision, 4)) print("Recall \t\t: %s" % round(recall, 4)) print("F1 \t\t: %s" % round(f1, 4)) print("Accuracy \t: %s" % round(accuracy, 4)) print("False Neg \t: %s%%" % round(fn * 100, 4)) print("Mean AUC \t: %s" % round(roc_auc / k, 4)) print("\nExecution time: %s ms" % round((end - start) * 1000, 4)) #ROC curve plt.title('Logistic Regression (AUC = %s)' % round(roc_auc, 4)) plt.legend(loc='lower right') plt.plot([0,1],[0,1],'r--') plt.xlim([-0.05,1.0]) plt.ylim([0.0,1.05]) plt.ylabel('True Positive Rate') plt.xlabel('False Positive Rate') plt.show()
gpl-3.0
aabadie/scikit-learn
benchmarks/bench_plot_neighbors.py
101
6469
""" Plot the scaling of the nearest neighbors algorithms with k, D, and N """ from time import time import numpy as np import matplotlib.pyplot as plt from matplotlib import ticker from sklearn import neighbors, datasets def get_data(N, D, dataset='dense'): if dataset == 'dense': np.random.seed(0) return np.random.random((N, D)) elif dataset == 'digits': X = datasets.load_digits().data i = np.argsort(X[0])[::-1] X = X[:, i] return X[:N, :D] else: raise ValueError("invalid dataset: %s" % dataset) def barplot_neighbors(Nrange=2 ** np.arange(1, 11), Drange=2 ** np.arange(7), krange=2 ** np.arange(10), N=1000, D=64, k=5, leaf_size=30, dataset='digits'): algorithms = ('kd_tree', 'brute', 'ball_tree') fiducial_values = {'N': N, 'D': D, 'k': k} #------------------------------------------------------------ # varying N N_results_build = dict([(alg, np.zeros(len(Nrange))) for alg in algorithms]) N_results_query = dict([(alg, np.zeros(len(Nrange))) for alg in algorithms]) for i, NN in enumerate(Nrange): print("N = %i (%i out of %i)" % (NN, i + 1, len(Nrange))) X = get_data(NN, D, dataset) for algorithm in algorithms: nbrs = neighbors.NearestNeighbors(n_neighbors=min(NN, k), algorithm=algorithm, leaf_size=leaf_size) t0 = time() nbrs.fit(X) t1 = time() nbrs.kneighbors(X) t2 = time() N_results_build[algorithm][i] = (t1 - t0) N_results_query[algorithm][i] = (t2 - t1) #------------------------------------------------------------ # varying D D_results_build = dict([(alg, np.zeros(len(Drange))) for alg in algorithms]) D_results_query = dict([(alg, np.zeros(len(Drange))) for alg in algorithms]) for i, DD in enumerate(Drange): print("D = %i (%i out of %i)" % (DD, i + 1, len(Drange))) X = get_data(N, DD, dataset) for algorithm in algorithms: nbrs = neighbors.NearestNeighbors(n_neighbors=k, algorithm=algorithm, leaf_size=leaf_size) t0 = time() nbrs.fit(X) t1 = time() nbrs.kneighbors(X) t2 = time() D_results_build[algorithm][i] = (t1 - t0) D_results_query[algorithm][i] = (t2 - t1) #------------------------------------------------------------ # varying k k_results_build = dict([(alg, np.zeros(len(krange))) for alg in algorithms]) k_results_query = dict([(alg, np.zeros(len(krange))) for alg in algorithms]) X = get_data(N, DD, dataset) for i, kk in enumerate(krange): print("k = %i (%i out of %i)" % (kk, i + 1, len(krange))) for algorithm in algorithms: nbrs = neighbors.NearestNeighbors(n_neighbors=kk, algorithm=algorithm, leaf_size=leaf_size) t0 = time() nbrs.fit(X) t1 = time() nbrs.kneighbors(X) t2 = time() k_results_build[algorithm][i] = (t1 - t0) k_results_query[algorithm][i] = (t2 - t1) plt.figure(figsize=(8, 11)) for (sbplt, vals, quantity, build_time, query_time) in [(311, Nrange, 'N', N_results_build, N_results_query), (312, Drange, 'D', D_results_build, D_results_query), (313, krange, 'k', k_results_build, k_results_query)]: ax = plt.subplot(sbplt, yscale='log') plt.grid(True) tick_vals = [] tick_labels = [] bottom = 10 ** np.min([min(np.floor(np.log10(build_time[alg]))) for alg in algorithms]) for i, alg in enumerate(algorithms): xvals = 0.1 + i * (1 + len(vals)) + np.arange(len(vals)) width = 0.8 c_bar = plt.bar(xvals, build_time[alg] - bottom, width, bottom, color='r') q_bar = plt.bar(xvals, query_time[alg], width, build_time[alg], color='b') tick_vals += list(xvals + 0.5 * width) tick_labels += ['%i' % val for val in vals] plt.text((i + 0.02) / len(algorithms), 0.98, alg, transform=ax.transAxes, ha='left', va='top', bbox=dict(facecolor='w', edgecolor='w', alpha=0.5)) plt.ylabel('Time (s)') 
ax.xaxis.set_major_locator(ticker.FixedLocator(tick_vals)) ax.xaxis.set_major_formatter(ticker.FixedFormatter(tick_labels)) for label in ax.get_xticklabels(): label.set_rotation(-90) label.set_fontsize(10) title_string = 'Varying %s' % quantity descr_string = '' for s in 'NDk': if s == quantity: pass else: descr_string += '%s = %i, ' % (s, fiducial_values[s]) descr_string = descr_string[:-2] plt.text(1.01, 0.5, title_string, transform=ax.transAxes, rotation=-90, ha='left', va='center', fontsize=20) plt.text(0.99, 0.5, descr_string, transform=ax.transAxes, rotation=-90, ha='right', va='center') plt.gcf().suptitle("%s data set" % dataset.capitalize(), fontsize=16) plt.figlegend((c_bar, q_bar), ('construction', 'N-point query'), 'upper right') if __name__ == '__main__': barplot_neighbors(dataset='digits') barplot_neighbors(dataset='dense') plt.show()
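# Hedged aside (illustration only, not part of the original benchmark): a
# minimal standalone timing of one build/query pair, mirroring what each
# stacked bar above measures.
def _single_timing_demo():
    X = get_data(1000, 64, 'dense')
    nbrs = neighbors.NearestNeighbors(n_neighbors=5, algorithm='kd_tree')
    t0 = time()
    nbrs.fit(X)          # construction time (red bars above)
    t1 = time()
    nbrs.kneighbors(X)   # N-point query time (blue bars above)
    t2 = time()
    print("build: %.4f s, query: %.4f s" % (t1 - t0, t2 - t1))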
bsd-3-clause
charman2/rsas
examples/unsteady.py
1
5254
# -*- coding: utf-8 -*-
"""Storage selection (SAS) functions: example with multiple fluxes out
and unsteady flow

Runs the rSAS model for a synthetic dataset with one flux in, multiple
fluxes out, and unsteady flow

Theory is presented in:
Harman, C. J. (2014), Time-variable transit time distributions and transport:
Theory and application to storage-dependent transport of chloride in a watershed,
Water Resour. Res., 51, doi:10.1002/2014WR015707.
"""
from __future__ import division
import rsas
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Initializes the random number generator so we always get the same result
np.random.seed(0)

# =====================================
# Load the input data
# =====================================
data = pd.read_csv('Q1.csv', index_col=0, parse_dates=[1])
# length of the dataset
N = len(data)
# The individual timeseries can be pulled out of the dataframe
S = data['S'].values
J = data['J'].values
Q = data['Q1'].values
C_J = data['C_J'].values - 2
C_Q1 = data['C_Q1'].values
ST_min = data['ST_min'].values
ST_max = data['ST_max'].values

# =========================
# Parameters needed by rsas
# =========================
# The concentration of water older than the start of observations
C_old = ((J*C_J)[J > 0]).sum() / ((J)[J > 0]).sum()

# =========================
# Create the rsas functions
# =========================
S_dead = 10.
#lam = 0.

# Uniform
# Parameters for the rSAS function
Q_rSAS_fun_type = 'uniform'
ST_min = np.zeros(N)
ST_max = S + S_dead
Q_rSAS_fun_parameters = np.c_[ST_min, ST_max]
rSAS_fun_Q1 = rsas.create_function(Q_rSAS_fun_type, Q_rSAS_fun_parameters)
rSAS_fun = [rSAS_fun_Q1]

# Kumaraswami
## Parameters for the rSAS function
#Q_rSAS_fun_type = 'kumaraswami'
#ST_min = np.ones(N) * 0.
#ST_max = S + S_dead
#a = np.maximum(0.01, 2. + lam * (S - S.mean())/S.std())
#b = np.ones(N) * 5.
#Q_rSAS_fun_parameters = np.c_[a, b, ST_min, ST_max]
#rSAS_fun_Q1 = rsas.create_function(Q_rSAS_fun_type, Q_rSAS_fun_parameters)
#rSAS_fun = [rSAS_fun_Q1]

# =================
# Initial condition
# =================
# Unknown initial age distribution, so just set this to zeros
ST_init = np.zeros(N + 1)

# =============
# Run the model
# =============
# Run it
outputs = rsas.solve(J, Q, rSAS_fun, ST_init=ST_init,
                     mode='RK4', dt=1., n_substeps=3,
                     C_J=C_J, C_old=[C_old], verbose=False, debug=False)

# Let's pull these out to make the outputs from rsas crystal clear
# State variables: age-ranked storage of water and solutes
# ROWS of ST, MS are T - ages
# COLUMNS of ST, MS are t - times
# LAYERS of MS are s - solutes
ST = outputs['ST']
MS = outputs['MS'][:, :, 0]
# Timestep-averaged backwards TTD
# ROWS of PQ are T - ages
# COLUMNS of PQ are t - times
# LAYERS of PQ are q - fluxes
PQ1m = outputs['PQ'][:, :, 0]
# Timestep-averaged outflow concentration
# ROWS of C_Q are t - times
# COLUMNS of C_Q are q - fluxes
C_Q1m1 = outputs['C_Q'][:, 0, 0]
# Timestep-averaged solute load out
# ROWS of MQ are T - ages
# COLUMNS of MQ are t - times
# LAYERS of MQ are q - fluxes
# Last dimension of MQ are s - solutes
MQ1m = outputs['MQ'][:, :, 0, 0]

#%%
# ==================================
# Plot the rSAS function
# ==================================
STx = np.linspace(0, S.max() + S_dead, 100)
Omega = np.r_[[rSAS_fun_Q1.cdf_i(STx, i) for i in range(N)]].T
import matplotlib.cm as cm
fig = plt.figure(0)
plt.clf()
for i in range(N):
    plt.plot(STx, Omega[:, i], lw=1, color=cm.jet((S[i] - S.min())/S.ptp()))
plt.ylim((0, 1))
plt.ylabel(r'$\Omega_Q(T)$')
plt.xlabel('age-ranked storage $S_T$')
plt.title('Cumulative rSAS function')

#%%
# ==================================
# Plot the transit time distribution
# ==================================
fig = plt.figure(1)
plt.clf()
plt.plot(PQ1m, lw=1)
plt.ylim((0, 1))
plt.ylabel('$P_Q(T)$')
plt.xlabel('age $T$')
plt.title('Cumulative transit time distribution')

#%%
# =====================================================================
# Outflow concentration estimated using several different TTD
# =====================================================================
# Let's get the instantaneous value of the TTD at the end of each timestep
PQ1i = np.zeros((N + 1, N + 1))
PQ1i[:, 0] = rSAS_fun_Q1.cdf_i(ST[:, 0], 0)
PQ1i[:, 1:] = np.r_[[rSAS_fun_Q1.cdf_i(ST[:, i + 1], i) for i in range(N)]].T

# Use the transit time distribution and input timeseries to estimate
# the output timeseries for the instantaneous and timestep-averaged cases
C_Q1i, C_Q1i_raw, Q1i_observed_fraction = rsas.transport(PQ1i, C_J, C_old)
C_Q1m2, C_Q1m2_raw, Q1m2_observed_fraction = rsas.transport(PQ1m, C_J, C_old)

# Plot the results
fig = plt.figure(2)
plt.clf()
plt.step(data['datetime'], C_Q1m1, 'g', ls='--', label='mean rsas internal', lw=2, where='post')
plt.step(data['datetime'], C_Q1m2, 'b', ls=':', label='mean rsas.transport', lw=2, where='post')
plt.step(data['datetime'], C_Q1m2_raw, '0.5', ls=':', label='mean rsas.transport (obs part)', lw=2, where='post')
plt.plot(data['datetime'], C_Q1i, 'b:o', label='inst. rsas.transport', lw=1)
#plt.plot(data['datetime'], data['C_Q1'], 'r.', label='observed', lw=2)
plt.ylim((-2, 0))
plt.legend(loc=0)
plt.ylabel('Concentration [-]')
plt.xlabel('time')
plt.title('Outflow concentration')
plt.show()
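# Hedged aside (illustration only, not part of the original example): the PQ
# arrays above are *cumulative* TTDs; an incremental (pdf-like) form over age
# is just a first difference down the age axis, which numpy provides directly.
def _ttd_pdf_demo(PQ_cumulative):
    # rows are ages T, columns are times t; returns the per-age-step mass
    # for each time step
    return np.diff(PQ_cumulative, axis=0)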
mit
keflavich/pyspeckit
docs/conf.py
4
12272
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# See astropy.sphinx.conf for which values are set there.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory (if "python setup.py build_sphinx" is used).
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.

import datetime
import os
import sys

try:
    from sphinx_astropy.conf.v1 import *  # noqa
except ImportError:
    print('ERROR: the documentation requires the sphinx-astropy package to be'
          ' installed')
    sys.exit(1)

# Get configuration information from setup.cfg
try:
    from ConfigParser import ConfigParser
except ImportError:
    from configparser import ConfigParser
conf = ConfigParser()

conf.read([os.path.join(os.path.dirname(__file__), '..', 'setup.cfg')])
setup_cfg = dict(conf.items('metadata'))

# -- General configuration ----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.2'

try:
    import numpy
except ImportError:
    print("Failed to import numpy")

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
rootpath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, rootpath)
#import numpydoc
#sys.path.insert(0, os.path.split(numpydoc.__file__)[0])
sys.path.insert(0, rootpath + "/docs/sphinxext/")
sys.path.append(os.path.abspath('sphinxext'))
sys.path.append(os.path.abspath('.'))
print("rootpath: ", rootpath)

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

#sys.path.insert(0, os.path.abspath('.'))

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions += ['edit_on_github', 'edit_on_bitbucket']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

html_sidebars = {'**': ['globaltoc.html', 'localtoc.html', 'relations.html',
                        'sourcelink.html', 'searchbox.html']}

# General information about the project.
project = 'pyspeckit'
copyright = '2011, Adam Ginsburg and coauthors'

# This does not *have* to match the package name, but typically does
project = setup_cfg['package_name']
author = setup_cfg['author']
copyright = '{0}, {1}'.format(
    datetime.datetime.now().year, setup_cfg['author'])

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.

# read the docs mocks
__import__(setup_cfg['package_name'])
package = sys.modules[setup_cfg['package_name']]


class Mock(object):
    def __init__(self, *args, **kwargs):
        pass

    def __call__(self, *args, **kwargs):
        return Mock()

    @classmethod
    def __getattr__(cls, name):
        if name in ('__file__', '__path__'):
            return '/dev/null'
        elif name[0] == name[0].upper():
            return type(name, (), {})
        else:
            return Mock()


MOCK_MODULES = {'matplotlib', 'matplotlib.pyplot', 'matplotlib.figure',
                'matplotlib.widgets', 'matplotlib.cbook', 'pyfits', 'scipy',
                'pytest', 'scipy.interpolate', 'scipy.ndimage', 'pywcs',
                'h5py', 'atpy', 'progressbar'}
for mod_name in MOCK_MODULES:
    if mod_name not in sys.modules:
        sys.modules[mod_name] = Mock()

for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = Mock()

# The short X.Y version.
#import pyspeckit
#version = pyspeckit.__version__
## The full version, including alpha/beta/rc tags.
#release = pyspeckit.__version__
#
# The short X.Y version.
version = package.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = package.__version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_static', '_template']

# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'obj'

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'agogo'
html_style = 'extra.css'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
html_theme_options = dict(
    pagewidth='1000px',
    documentwidth='760px',
    sidebarwidth='200px',
    nosidebar=False,
    headerbg="#666666",
    headercolor1="#000000",
    headercolor2="#000000",
    headerlinkcolor="#FF9522",
    linkcolor="#4a8f43",
    textalign='left',
)

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "images/logo.png"

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "images/logo.ico"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static', '_static/extra.css', '_static/scipy.css',
                    '_static/astropy.css']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'pyspeckitdoc'
htmlhelp_basename = project + 'doc'

# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'pyspeckit.tex', 'pyspeckit Documentation',
     'Adam Ginsburg and coauthors', 'manual'),
]
latex_documents = [('index', project + '.tex', project + ' Documentation',
                    author, 'manual')]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# Try to make autoclass include both __init__ and Class docstrings
autoclass_content = 'both'

# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + ' Documentation',
              [author], 1)]

## -- Options for the edit_on_github extension ----------------------------------------

if eval(setup_cfg.get('edit_on_github')):
    extensions += ['edit_on_github']

    versionmod = __import__(setup_cfg['package_name'] + '.version')
    edit_on_github_project = setup_cfg['github_project']
    if versionmod.version.release:
        edit_on_github_branch = "v" + versionmod.version.version
    else:
        edit_on_github_branch = "master"

    edit_on_github_source_root = ""
    edit_on_github_doc_root = "docs"

edit_on_bitbucket_project = "pyspeckit/pyspeckit"
edit_on_bitbucket_source_root = ""
edit_on_bitbucket_doc_root = "doc"
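# Hedged aside (illustration only, not part of the original conf.py): the Mock
# class above works because Python consults sys.modules before importing, so a
# pre-seeded entry short-circuits the real import. 'some_heavy_dep' below is a
# made-up module name for this sketch:
#
#     sys.modules['some_heavy_dep'] = Mock()
#     import some_heavy_dep            # succeeds without the real package
#     some_heavy_dep.anything().works  # every attribute lookup returns a Mock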
mit
cauchycui/scikit-learn
examples/linear_model/plot_ols_ridge_variance.py
387
2060
#!/usr/bin/python
# -*- coding: utf-8 -*-

"""
=========================================================
Ordinary Least Squares and Ridge Regression Variance
=========================================================
Due to the few points in each dimension and the straight
line that linear regression uses to follow these points
as well as it can, noise on the observations will cause
great variance as shown in the first plot. Every line's
slope can vary quite a bit for each prediction due to the
noise induced in the observations.

Ridge regression is basically minimizing a penalised version
of the least-squared function. The penalising `shrinks` the
value of the regression coefficients. Despite the few data
points in each dimension, the slope of the prediction is much
more stable and the variance in the line itself is greatly
reduced, in comparison to that of the standard linear regression.
"""
print(__doc__)

# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt

from sklearn import linear_model

X_train = np.c_[.5, 1].T
y_train = [.5, 1]
X_test = np.c_[0, 2].T

np.random.seed(0)

classifiers = dict(ols=linear_model.LinearRegression(),
                   ridge=linear_model.Ridge(alpha=.1))

fignum = 1
for name, clf in classifiers.items():
    fig = plt.figure(fignum, figsize=(4, 3))
    plt.clf()
    plt.title(name)

    ax = plt.axes([.12, .12, .8, .8])

    for _ in range(6):
        this_X = .1 * np.random.normal(size=(2, 1)) + X_train
        clf.fit(this_X, y_train)

        ax.plot(X_test, clf.predict(X_test), color='.5')
        ax.scatter(this_X, y_train, s=3, c='.5', marker='o', zorder=10)

    clf.fit(X_train, y_train)
    ax.plot(X_test, clf.predict(X_test), linewidth=2, color='blue')
    ax.scatter(X_train, y_train, s=30, c='r', marker='+', zorder=10)

    ax.set_xticks(())
    ax.set_yticks(())
    ax.set_ylim((0, 1.6))
    ax.set_xlabel('X')
    ax.set_ylabel('y')
    ax.set_xlim(0, 2)
    fignum += 1

plt.show()
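# Hedged aside (illustration only, not part of the original example): the
# "greatly reduced variance" claim can be checked numerically by comparing the
# spread of fitted slopes across many noise draws; this helper is a sketch
# reusing the objects defined above.
def _slope_variance_demo(n_draws=200):
    slopes = {name: [] for name in classifiers}
    for name, clf in classifiers.items():
        for _ in range(n_draws):
            this_X = .1 * np.random.normal(size=(2, 1)) + X_train
            clf.fit(this_X, y_train)
            slopes[name].append(float(clf.coef_[0]))
    for name, s in slopes.items():
        # ridge's slope spread should come out noticeably smaller than ols's
        print("%s slope std: %.3f" % (name, np.std(s)))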
bsd-3-clause
18padx08/PPTex
PPTexEnv_x86_64/lib/python2.7/site-packages/matplotlib/tests/test_transforms.py
9
19984
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

import six
from six.moves import xrange, zip

import unittest

from nose.tools import assert_equal, assert_raises
import numpy.testing as np_test
from numpy.testing import assert_almost_equal
from matplotlib.transforms import Affine2D, BlendedGenericTransform
from matplotlib.path import Path
from matplotlib.scale import LogScale
from matplotlib.testing.decorators import cleanup, image_comparison

import numpy as np

import matplotlib.transforms as mtrans
import matplotlib.pyplot as plt
import matplotlib.path as mpath
import matplotlib.patches as mpatches


@cleanup
def test_non_affine_caching():
    class AssertingNonAffineTransform(mtrans.Transform):
        """
        This transform raises an assertion error when called when it
        shouldn't be and self.raise_on_transform is True.

        """
        input_dims = output_dims = 2
        is_affine = False

        def __init__(self, *args, **kwargs):
            mtrans.Transform.__init__(self, *args, **kwargs)
            self.raise_on_transform = False
            self.underlying_transform = mtrans.Affine2D().scale(10, 10)

        def transform_path_non_affine(self, path):
            if self.raise_on_transform:
                assert False, ('Invalidated affine part of transform '
                               'unnecessarily.')
            return self.underlying_transform.transform_path(path)
        transform_path = transform_path_non_affine

        def transform_non_affine(self, path):
            if self.raise_on_transform:
                assert False, ('Invalidated affine part of transform '
                               'unnecessarily.')
            return self.underlying_transform.transform(path)
        transform = transform_non_affine

    my_trans = AssertingNonAffineTransform()
    ax = plt.axes()
    plt.plot(list(xrange(10)), transform=my_trans + ax.transData)
    plt.draw()
    # enable the transform to raise an exception if its non-affine transform
    # method is triggered again.
    my_trans.raise_on_transform = True
    ax.transAxes.invalidate()
    plt.draw()


@cleanup
def test_external_transform_api():
    class ScaledBy(object):
        def __init__(self, scale_factor):
            self._scale_factor = scale_factor

        def _as_mpl_transform(self, axes):
            return mtrans.Affine2D().scale(self._scale_factor) + axes.transData

    ax = plt.axes()
    line, = plt.plot(list(xrange(10)), transform=ScaledBy(10))
    ax.set_xlim(0, 100)
    ax.set_ylim(0, 100)
    # assert that the top transform of the line is the scale transform.
    np.testing.assert_allclose(line.get_transform()._a.get_matrix(),
                               mtrans.Affine2D().scale(10).get_matrix())


@image_comparison(baseline_images=['pre_transform_data'])
def test_pre_transform_plotting():
    # a catch-all for as many as possible plot layouts which handle
    # pre-transforming the data NOTE: The axis range is important in this
    # plot. It should be x10 what the data suggests it should be
    ax = plt.axes()
    times10 = mtrans.Affine2D().scale(10)

    ax.contourf(np.arange(48).reshape(6, 8),
                transform=times10 + ax.transData)

    ax.pcolormesh(np.linspace(0, 4, 7),
                  np.linspace(5.5, 8, 9),
                  np.arange(48).reshape(8, 6),
                  transform=times10 + ax.transData)

    ax.scatter(np.linspace(0, 10), np.linspace(10, 0),
               transform=times10 + ax.transData)

    x = np.linspace(8, 10, 20)
    y = np.linspace(1, 5, 20)
    u = 2*np.sin(x) + np.cos(y[:, np.newaxis])
    v = np.sin(x) - np.cos(y[:, np.newaxis])

    df = 25. / 30.  # Compatibility factor for old test image
    ax.streamplot(x, y, u, v, transform=times10 + ax.transData,
                  density=(df, df), linewidth=u**2 + v**2)

    # reduce the vector data down a bit for barb and quiver plotting
    x, y = x[::3], y[::3]
    u, v = u[::3, ::3], v[::3, ::3]

    ax.quiver(x, y + 5, u, v, transform=times10 + ax.transData)

    ax.barbs(x - 3, y + 5, u**2, v**2, transform=times10 + ax.transData)


@cleanup
def test_contour_pre_transform_limits():
    ax = plt.axes()
    xs, ys = np.meshgrid(np.linspace(15, 20, 15), np.linspace(12.4, 12.5, 20))
    ax.contourf(xs, ys, np.log(xs * ys),
                transform=mtrans.Affine2D().scale(0.1) + ax.transData)

    expected = np.array([[1.5, 1.24],
                         [2., 1.25]])
    assert_almost_equal(expected, ax.dataLim.get_points())


@cleanup
def test_pcolor_pre_transform_limits():
    # Based on test_contour_pre_transform_limits()
    ax = plt.axes()
    xs, ys = np.meshgrid(np.linspace(15, 20, 15), np.linspace(12.4, 12.5, 20))
    ax.pcolor(xs, ys, np.log(xs * ys),
              transform=mtrans.Affine2D().scale(0.1) + ax.transData)

    expected = np.array([[1.5, 1.24],
                         [2., 1.25]])
    assert_almost_equal(expected, ax.dataLim.get_points())


@cleanup
def test_pcolormesh_pre_transform_limits():
    # Based on test_contour_pre_transform_limits()
    ax = plt.axes()
    xs, ys = np.meshgrid(np.linspace(15, 20, 15), np.linspace(12.4, 12.5, 20))
    ax.pcolormesh(xs, ys, np.log(xs * ys),
                  transform=mtrans.Affine2D().scale(0.1) + ax.transData)

    expected = np.array([[1.5, 1.24],
                         [2., 1.25]])
    assert_almost_equal(expected, ax.dataLim.get_points())


def test_Affine2D_from_values():
    points = np.array([[0, 0],
                       [10, 20],
                       [-1, 0]])

    t = mtrans.Affine2D.from_values(1, 0, 0, 0, 0, 0)
    actual = t.transform(points)
    expected = np.array([[0, 0], [10, 0], [-1, 0]])
    assert_almost_equal(actual, expected)

    t = mtrans.Affine2D.from_values(0, 2, 0, 0, 0, 0)
    actual = t.transform(points)
    expected = np.array([[0, 0], [0, 20], [0, -2]])
    assert_almost_equal(actual, expected)

    t = mtrans.Affine2D.from_values(0, 0, 3, 0, 0, 0)
    actual = t.transform(points)
    expected = np.array([[0, 0], [60, 0], [0, 0]])
    assert_almost_equal(actual, expected)

    t = mtrans.Affine2D.from_values(0, 0, 0, 4, 0, 0)
    actual = t.transform(points)
    expected = np.array([[0, 0], [0, 80], [0, 0]])
    assert_almost_equal(actual, expected)

    t = mtrans.Affine2D.from_values(0, 0, 0, 0, 5, 0)
    actual = t.transform(points)
    expected = np.array([[5, 0], [5, 0], [5, 0]])
    assert_almost_equal(actual, expected)

    t = mtrans.Affine2D.from_values(0, 0, 0, 0, 0, 6)
    actual = t.transform(points)
    expected = np.array([[0, 6], [0, 6], [0, 6]])
    assert_almost_equal(actual, expected)


def test_clipping_of_log():
    # issue 804
    M, L, C = Path.MOVETO, Path.LINETO, Path.CLOSEPOLY
    points = [(0.2, -99), (0.4, -99), (0.4, 20), (0.2, 20), (0.2, -99)]
    codes = [M, L, L, L, C]
    path = Path(points, codes)

    # something like this happens in plotting logarithmic histograms
    trans = BlendedGenericTransform(Affine2D(),
                                    LogScale.Log10Transform('clip'))
    tpath = trans.transform_path_non_affine(path)
    result = tpath.iter_segments(trans.get_affine(),
                                 clip=(0, 0, 100, 100),
                                 simplify=False)

    tpoints, tcodes = list(zip(*result))
    # Because y coordinate -99 is outside the clip zone, the first
    # line segment is effectively removed. That means that the closepoly
    # operation must be replaced by a move to the first point.
    assert np.allclose(tcodes, [M, M, L, L, L])


class NonAffineForTest(mtrans.Transform):
    """
    A class which looks like a non affine transform, but does whatever
    the given transform does (even if it is affine). This is very useful
    for testing NonAffine behaviour with a simple Affine transform.

    """
    is_affine = False
    output_dims = 2
    input_dims = 2

    def __init__(self, real_trans, *args, **kwargs):
        self.real_trans = real_trans
        mtrans.Transform.__init__(self, *args, **kwargs)

    def transform_non_affine(self, values):
        return self.real_trans.transform(values)

    def transform_path_non_affine(self, path):
        return self.real_trans.transform_path(path)


class BasicTransformTests(unittest.TestCase):
    def setUp(self):

        self.ta1 = mtrans.Affine2D(shorthand_name='ta1').rotate(np.pi / 2)
        self.ta2 = mtrans.Affine2D(shorthand_name='ta2').translate(10, 0)
        self.ta3 = mtrans.Affine2D(shorthand_name='ta3').scale(1, 2)

        self.tn1 = NonAffineForTest(mtrans.Affine2D().translate(1, 2),
                                    shorthand_name='tn1')
        self.tn2 = NonAffineForTest(mtrans.Affine2D().translate(1, 2),
                                    shorthand_name='tn2')
        self.tn3 = NonAffineForTest(mtrans.Affine2D().translate(1, 2),
                                    shorthand_name='tn3')

        # creates a transform stack which looks like ((A, (N, A)), A)
        self.stack1 = (self.ta1 + (self.tn1 + self.ta2)) + self.ta3
        # creates a transform stack which looks like (((A, N), A), A)
        self.stack2 = self.ta1 + self.tn1 + self.ta2 + self.ta3
        # creates a transform stack which is a subset of stack2
        self.stack2_subset = self.tn1 + self.ta2 + self.ta3

        # when in debug, the transform stacks can produce dot images:
        # self.stack1.write_graphviz(file('stack1.dot', 'w'))
        # self.stack2.write_graphviz(file('stack2.dot', 'w'))
        # self.stack2_subset.write_graphviz(file('stack2_subset.dot', 'w'))

    def test_transform_depth(self):
        assert_equal(self.stack1.depth, 4)
        assert_equal(self.stack2.depth, 4)
        assert_equal(self.stack2_subset.depth, 3)

    def test_left_to_right_iteration(self):
        stack3 = (self.ta1 + (self.tn1 + (self.ta2 + self.tn2))) + self.ta3
        # stack3.write_graphviz(file('stack3.dot', 'w'))

        target_transforms = [stack3,
                             (self.tn1 + (self.ta2 + self.tn2)) + self.ta3,
                             (self.ta2 + self.tn2) + self.ta3,
                             self.tn2 + self.ta3,
                             self.ta3,
                             ]
        r = [rh for _, rh in stack3._iter_break_from_left_to_right()]
        self.assertEqual(len(r), len(target_transforms))

        for target_stack, stack in zip(target_transforms, r):
            self.assertEqual(target_stack, stack)

    def test_transform_shortcuts(self):
        self.assertEqual(self.stack1 - self.stack2_subset, self.ta1)
        self.assertEqual(self.stack2 - self.stack2_subset, self.ta1)

        assert_equal((self.stack2_subset - self.stack2),
                     self.ta1.inverted(),
                     )
        assert_equal((self.stack2_subset - self.stack2).depth, 1)

        assert_raises(ValueError, self.stack1.__sub__, self.stack2)

        aff1 = self.ta1 + (self.ta2 + self.ta3)
        aff2 = self.ta2 + self.ta3

        self.assertEqual(aff1 - aff2, self.ta1)
        self.assertEqual(aff1 - self.ta2, aff1 + self.ta2.inverted())

        self.assertEqual(self.stack1 - self.ta3,
                         self.ta1 + (self.tn1 + self.ta2))
        self.assertEqual(self.stack2 - self.ta3,
                         self.ta1 + self.tn1 + self.ta2)

        self.assertEqual((self.ta2 + self.ta3) - self.ta3 + self.ta3,
                         self.ta2 + self.ta3)

    def test_contains_branch(self):
        r1 = (self.ta2 + self.ta1)
        r2 = (self.ta2 + self.ta1)
        self.assertEqual(r1, r2)
        self.assertNotEqual(r1, self.ta1)
        self.assertTrue(r1.contains_branch(r2))
        self.assertTrue(r1.contains_branch(self.ta1))
        self.assertFalse(r1.contains_branch(self.ta2))
        self.assertFalse(r1.contains_branch((self.ta2 + self.ta2)))

        self.assertEqual(r1, r2)

        self.assertTrue(self.stack1.contains_branch(self.ta3))
        self.assertTrue(self.stack2.contains_branch(self.ta3))

        self.assertTrue(self.stack1.contains_branch(self.stack2_subset))
        self.assertTrue(self.stack2.contains_branch(self.stack2_subset))

        self.assertFalse(self.stack2_subset.contains_branch(self.stack1))
        self.assertFalse(self.stack2_subset.contains_branch(self.stack2))

        self.assertTrue(self.stack1.contains_branch((self.ta2 + self.ta3)))
        self.assertTrue(self.stack2.contains_branch((self.ta2 + self.ta3)))

        self.assertFalse(self.stack1.contains_branch((self.tn1 + self.ta2)))

    def test_affine_simplification(self):
        # tests that a transform stack calls the non-affine part only as much
        # as absolutely necessary, allowing the best possible optimization
        # with complex transformation stacks.
        points = np.array([[0, 0], [10, 20], [np.nan, 1], [-1, 0]],
                          dtype=np.float64)
        na_pts = self.stack1.transform_non_affine(points)
        all_pts = self.stack1.transform(points)

        na_expected = np.array([[1., 2.], [-19., 12.],
                                [np.nan, np.nan], [1., 1.]], dtype=np.float64)
        all_expected = np.array([[11., 4.], [-9., 24.],
                                 [np.nan, np.nan], [11., 2.]],
                                dtype=np.float64)

        # check we have the expected results from doing the affine part only
        np_test.assert_array_almost_equal(na_pts, na_expected)
        # check we have the expected results from a full transformation
        np_test.assert_array_almost_equal(all_pts, all_expected)
        # check we have the expected results from doing the transformation in
        # two steps
        np_test.assert_array_almost_equal(self.stack1.transform_affine(na_pts),
                                          all_expected)
        # check that getting the affine transformation first, then fully
        # transforming using that yields the same result as before.
        np_test.assert_array_almost_equal(self.stack1.get_affine().transform(na_pts),
                                          all_expected)

        # check that the affine part of stack1 & stack2 are equivalent
        # (i.e. the optimization is working)
        expected_result = (self.ta2 + self.ta3).get_matrix()
        result = self.stack1.get_affine().get_matrix()
        np_test.assert_array_equal(expected_result, result)

        result = self.stack2.get_affine().get_matrix()
        np_test.assert_array_equal(expected_result, result)


class TestTransformPlotInterface(unittest.TestCase):
    def tearDown(self):
        plt.close()

    def test_line_extent_axes_coords(self):
        # a simple line in axes coordinates
        ax = plt.axes()
        ax.plot([0.1, 1.2, 0.8], [0.9, 0.5, 0.8], transform=ax.transAxes)
        np.testing.assert_array_equal(ax.dataLim.get_points(),
                                      np.array([[np.inf, np.inf],
                                                [-np.inf, -np.inf]]))

    def test_line_extent_data_coords(self):
        # a simple line in data coordinates
        ax = plt.axes()
        ax.plot([0.1, 1.2, 0.8], [0.9, 0.5, 0.8], transform=ax.transData)
        np.testing.assert_array_equal(ax.dataLim.get_points(),
                                      np.array([[0.1, 0.5], [1.2, 0.9]]))

    def test_line_extent_compound_coords1(self):
        # a simple line in data coordinates in the y component, and in axes
        # coordinates in the x
        ax = plt.axes()
        trans = mtrans.blended_transform_factory(ax.transAxes, ax.transData)
        ax.plot([0.1, 1.2, 0.8], [35, -5, 18], transform=trans)
        np.testing.assert_array_equal(ax.dataLim.get_points(),
                                      np.array([[np.inf, -5.],
                                                [-np.inf, 35.]]))
        plt.close()

    def test_line_extent_predata_transform_coords(self):
        # a simple line in (offset + data) coordinates
        ax = plt.axes()
        trans = mtrans.Affine2D().scale(10) + ax.transData
        ax.plot([0.1, 1.2, 0.8], [35, -5, 18], transform=trans)
        np.testing.assert_array_equal(ax.dataLim.get_points(),
                                      np.array([[1., -50.], [12., 350.]]))
        plt.close()

    def test_line_extent_compound_coords2(self):
        # a simple line in (offset + data) coordinates in the y component,
        # and in axes coordinates in the x
        ax = plt.axes()
        trans = mtrans.blended_transform_factory(
            ax.transAxes, mtrans.Affine2D().scale(10) + ax.transData)
        ax.plot([0.1, 1.2, 0.8], [35, -5, 18], transform=trans)
        np.testing.assert_array_equal(ax.dataLim.get_points(),
                                      np.array([[np.inf, -50.],
                                                [-np.inf, 350.]]))
        plt.close()

    def test_line_extents_affine(self):
        ax = plt.axes()
        offset = mtrans.Affine2D().translate(10, 10)
        plt.plot(list(xrange(10)), transform=offset + ax.transData)
        expected_data_lim = np.array([[0., 0.], [9., 9.]]) + 10
        np.testing.assert_array_almost_equal(ax.dataLim.get_points(),
                                             expected_data_lim)

    def test_line_extents_non_affine(self):
        ax = plt.axes()
        offset = mtrans.Affine2D().translate(10, 10)
        na_offset = NonAffineForTest(mtrans.Affine2D().translate(10, 10))
        plt.plot(list(xrange(10)), transform=offset + na_offset + ax.transData)
        expected_data_lim = np.array([[0., 0.], [9., 9.]]) + 20
        np.testing.assert_array_almost_equal(ax.dataLim.get_points(),
                                             expected_data_lim)

    def test_pathc_extents_non_affine(self):
        ax = plt.axes()
        offset = mtrans.Affine2D().translate(10, 10)
        na_offset = NonAffineForTest(mtrans.Affine2D().translate(10, 10))
        pth = mpath.Path(np.array([[0, 0], [0, 10], [10, 10], [10, 0]]))
        patch = mpatches.PathPatch(pth,
                                   transform=offset + na_offset + ax.transData)
        ax.add_patch(patch)
        expected_data_lim = np.array([[0., 0.], [10., 10.]]) + 20
        np.testing.assert_array_almost_equal(ax.dataLim.get_points(),
                                             expected_data_lim)

    def test_pathc_extents_affine(self):
        ax = plt.axes()
        offset = mtrans.Affine2D().translate(10, 10)
        pth = mpath.Path(np.array([[0, 0], [0, 10], [10, 10], [10, 0]]))
        patch = mpatches.PathPatch(pth, transform=offset + ax.transData)
        ax.add_patch(patch)
        expected_data_lim = np.array([[0., 0.], [10., 10.]]) + 10
        np.testing.assert_array_almost_equal(ax.dataLim.get_points(),
                                             expected_data_lim)

    def test_line_extents_for_non_affine_transData(self):
        ax = plt.axes(projection='polar')
        # add 10 to the radius of the data
        offset = mtrans.Affine2D().translate(0, 10)

        plt.plot(list(xrange(10)), transform=offset + ax.transData)
        # the data lim of a polar plot is stored in coordinates
        # before a transData transformation, hence the data limits
        # are not what is being shown on the actual plot.
        expected_data_lim = np.array([[0., 0.], [9., 9.]]) + [0, 10]
        np.testing.assert_array_almost_equal(ax.dataLim.get_points(),
                                             expected_data_lim)


def test_bbox_intersection():
    bbox_from_ext = mtrans.Bbox.from_extents
    inter = mtrans.Bbox.intersection

    from numpy.testing import assert_array_equal as assert_a_equal

    def assert_bbox_eq(bbox1, bbox2):
        assert_a_equal(bbox1.bounds, bbox2.bounds)

    r1 = bbox_from_ext(0, 0, 1, 1)
    r2 = bbox_from_ext(0.5, 0.5, 1.5, 1.5)
    r3 = bbox_from_ext(0.5, 0, 0.75, 0.75)
    r4 = bbox_from_ext(0.5, 1.5, 1, 2.5)
    r5 = bbox_from_ext(1, 1, 2, 2)

    # self intersection -> no change
    assert_bbox_eq(inter(r1, r1), r1)
    # simple intersection
    assert_bbox_eq(inter(r1, r2), bbox_from_ext(0.5, 0.5, 1, 1))
    # r1 contains r3
    assert_bbox_eq(inter(r1, r3), r3)
    # no intersection
    assert_equal(inter(r1, r4), None)
    # single point
    assert_bbox_eq(inter(r1, r5), bbox_from_ext(1, 1, 1, 1))


@cleanup
def test_log_transform():
    # Tests that the last line runs without exception (previously the
    # transform would fail if one of the axes was logarithmic).
    fig, ax = plt.subplots()
    ax.set_yscale('log')
    ax.transData.transform((1, 1))


if __name__ == '__main__':
    import nose
    nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
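# Hedged aside (illustration only, not part of the test suite): the core
# pattern exercised above is composing an Affine2D with ax.transData and
# passing the result as an artist's `transform`; this helper shows the
# minimal version of that pattern.
def _compose_transform_demo():
    ax = plt.axes()
    offset = mtrans.Affine2D().translate(10, 10)
    ax.plot([0, 1, 2], [0, 1, 4], transform=offset + ax.transData)
    return ax.dataLim.get_points()  # data limits reflect the offset data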
mit
samuelstjean/dipy
scratch/very_scratch/diffusion_sphere_stats.py
20
18082
import nibabel
import os
import numpy as np
import dipy as dp
#import dipy.core.generalized_q_sampling as dgqs
import dipy.reconst.gqi as dgqs
import dipy.reconst.dti as ddti
import dipy.reconst.recspeed as rp
import dipy.io.pickles as pkl
import scipy as sp
from matplotlib.mlab import find
#import dipy.core.sphere_plots as splots
import dipy.core.sphere_stats as sphats
import dipy.core.geometry as geometry
import get_vertices as gv

#old SimData files
'''
results_SNR030_1fibre
results_SNR030_1fibre+iso
results_SNR030_2fibres_15deg
results_SNR030_2fibres_30deg
results_SNR030_2fibres_60deg
results_SNR030_2fibres_90deg
results_SNR030_2fibres+iso_15deg
results_SNR030_2fibres+iso_30deg
results_SNR030_2fibres+iso_60deg
results_SNR030_2fibres+iso_90deg
results_SNR030_isotropic
'''
#fname='/home/ian/Data/SimData/results_SNR030_1fibre'

'''
file has one row for every voxel, every voxel is repeating 1000 times with
the same noise level, then we have 100 different directions. 1000 * 100 is
the number of all rows.
The 100 conditions are given by 10 polar angles (in degrees) 0, 20, 40, 60,
80, 80, 60, 40, 20 and 0, and each of these with longitude angle 0, 40, 80,
120, 160, 200, 240, 280, 320, 360.
'''

#new complete SimVoxels files
simdata = ['fibres_2_SNR_80_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
           'fibres_2_SNR_60_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
           'fibres_2_SNR_40_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
           'fibres_2_SNR_40_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
           'fibres_2_SNR_20_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
           'fibres_2_SNR_100_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
           'fibres_2_SNR_20_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
           'fibres_2_SNR_40_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
           'fibres_2_SNR_60_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
           'fibres_2_SNR_100_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
           'fibres_1_SNR_60_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
           'fibres_2_SNR_80_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
           'fibres_2_SNR_100_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
           'fibres_2_SNR_100_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
           'fibres_2_SNR_80_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
           'fibres_2_SNR_60_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
           'fibres_2_SNR_40_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
           'fibres_2_SNR_80_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
           'fibres_2_SNR_20_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
           'fibres_2_SNR_60_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
           'fibres_1_SNR_100_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
           'fibres_1_SNR_100_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
           'fibres_2_SNR_20_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
           'fibres_1_SNR_20_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
           'fibres_2_SNR_40_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
           'fibres_2_SNR_20_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
           'fibres_2_SNR_80_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
           'fibres_1_SNR_80_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
           'fibres_2_SNR_20_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
           'fibres_2_SNR_60_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
           'fibres_2_SNR_100_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
           'fibres_2_SNR_80_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
           'fibres_2_SNR_60_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
           'fibres_2_SNR_20_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
           'fibres_2_SNR_100_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
           'fibres_1_SNR_20_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
           'fibres_2_SNR_80_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
           'fibres_1_SNR_80_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
           'fibres_2_SNR_100_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
           'fibres_1_SNR_40_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
           'fibres_1_SNR_60_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
           'fibres_2_SNR_40_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
           'fibres_2_SNR_60_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
           'fibres_2_SNR_40_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
           'fibres_2_SNR_60_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
           'fibres_2_SNR_80_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
           'fibres_1_SNR_40_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
           'fibres_2_SNR_100_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00',
           'fibres_2_SNR_40_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7',
           'fibres_2_SNR_20_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00']

simdir = '/home/ian/Data/SimVoxels/'

def gq_tn_calc_save():
    for simfile in simdata:
        dataname = simfile
        print dataname

        sim_data = np.loadtxt(simdir + dataname)

        marta_table_fname = '/home/ian/Data/SimData/Dir_and_bvals_DSI_marta.txt'
        b_vals_dirs = np.loadtxt(marta_table_fname)

        bvals = b_vals_dirs[:, 0]*1000
        gradients = b_vals_dirs[:, 1:]

        gq = dgqs.GeneralizedQSampling(sim_data, bvals, gradients)
        gqfile = simdir+'gq/'+dataname+'.pkl'
        pkl.save_pickle(gqfile, gq)

        '''
        gq.IN               gq.__doc__          gq.glob_norm_param
        gq.QA               gq.__init__         gq.odf
        gq.__class__        gq.__module__       gq.q2odf_params
        '''

        tn = ddti.Tensor(sim_data, bvals, gradients)
        tnfile = simdir+'tn/'+dataname+'.pkl'
        pkl.save_pickle(tnfile, tn)

        '''
        tn.ADC                 tn.__init__            tn._getevals
        tn.B                   tn.__module__          tn._getevecs
        tn.D                   tn.__new__             tn._getndim
        tn.FA                  tn.__reduce__          tn._getshape
        tn.IN                  tn.__reduce_ex__       tn._setevals
        tn.MD                  tn.__repr__            tn._setevecs
        tn.__class__           tn.__setattr__         tn.adc
        tn.__delattr__         tn.__sizeof__          tn.evals
        tn.__dict__            tn.__str__             tn.evecs
        tn.__doc__             tn.__subclasshook__    tn.fa
        tn.__format__          tn.__weakref__         tn.md
        tn.__getattribute__    tn._evals              tn.ndim
        tn.__getitem__         tn._evecs              tn.shape
        tn.__hash__            tn._getD
        '''

'''
file has one row for every voxel, every voxel is repeating 1000 times with
the same noise level, then we have 100 different directions. 100 * 1000 is
the number of all rows.

At the moment this module is hardwired to the use of the EDS362
spherical mesh. I am assuming (needs testing) that directions 181 to 361
are the antipodal partners of directions 0 to 180. So when counting the
number of different vertices that occur as maximal directions we will
map the indices modulo 181.
'''

def analyze_maxima(indices, max_dirs, subsets):
    '''This calculates the eigenstats for each of the replicated batches
    of the simulation data
    '''
    results = []
    for direction in subsets:
        batch = max_dirs[direction, :, :]
        index_variety = np.array([len(set(np.remainder(indices[direction, :], 181)))])
        #normed_centroid, polar_centroid, centre, b1 = sphats.eigenstats(batch)
        centre, b1 = sphats.eigenstats(batch)
        # make azimuth be in range (0,360) rather than (-180,180)
        centre[1] += 360*(centre[1] < 0)
        #results.append(np.concatenate((normed_centroid, polar_centroid, centre, b1, index_variety)))
        results.append(np.concatenate((centre, b1, index_variety)))
    return results

#dt_first_directions = tn.evecs[:,:,0].reshape((100,1000,3))
# these are the principal directions for the full set of simulations

#gq_tn_calc_save()

#eds=np.load(os.path.join(os.path.dirname(dp.__file__),'core','matrices','evenly_distributed_sphere_362.npz'))
from dipy.data import get_sphere
odf_vertices, odf_faces = get_sphere('symmetric362')
#odf_vertices=eds['vertices']

def run_comparisons(sample_data=35):
    for simfile in [simdata[sample_data]]:
        dataname = simfile
        print dataname

        sim_data = np.loadtxt(simdir + dataname)

        gqfile = simdir+'gq/'+dataname+'.pkl'
        gq = pkl.load_pickle(gqfile)

        tnfile = simdir+'tn/'+dataname+'.pkl'
        tn = pkl.load_pickle(tnfile)

        dt_first_directions_in = odf_vertices[tn.IN]

        dt_indices = tn.IN.reshape((100, 1000))
        dt_results = analyze_maxima(dt_indices,
                                    dt_first_directions_in.reshape((100, 1000, 3)),
                                    range(10, 90))

        gq_indices = np.array(gq.IN[:, 0], dtype='int').reshape((100, 1000))
        gq_first_directions_in = odf_vertices[np.array(gq.IN[:, 0], dtype='int')]
        #print gq_first_directions_in.shape
        gq_results = analyze_maxima(gq_indices,
                                    gq_first_directions_in.reshape((100, 1000, 3)),
                                    range(10, 90))

        #for gqi see example dicoms_2_tracks gq.IN[:,0]

        np.set_printoptions(precision=3, suppress=True, linewidth=200, threshold=5000)

        out = open('/home/ian/Data/SimVoxels/Out/'+'***_'+dataname, 'w')

        #print np.vstack(dt_results).shape, np.vstack(gq_results).shape
        results = np.hstack((np.vstack(dt_results), np.vstack(gq_results)))
        #print results.shape
        #results = np.vstack(dt_results)
        print >> out, results[:, :]
        out.close()

        #up = dt_batch[:,2]>= 0
        #splots.plot_sphere(dt_batch[up], 'batch '+str(direction))
        #splots.plot_lambert(dt_batch[up],'batch '+str(direction), centre)

        #spread = gq.q2odf_params
        # the two lines below depend on the commented-out `spread` above and
        # would raise a NameError, so they are commented out as well
        #e,v = np.linalg.eigh(np.dot(spread,spread.transpose()))
        #effective_dimension = len(find(np.cumsum(e) > 0.05*np.sum(e))) #95%

        #rotated = np.dot(dt_batch,evecs)
        #rot_evals, rot_evecs = np.linalg.eig(np.dot(rotated.T,rotated)/rotated.shape[0])
        #eval_order = np.argsort(rot_evals)
        #rotated = rotated[:,eval_order]
        #up = rotated[:,2]>= 0
        #splot.plot_sphere(rotated[up],'first1000')
        #splot.plot_lambert(rotated[up],'batch '+str(direction))

def run_gq_sims(sample_data=[35, 23, 46, 39, 40, 10, 37, 27, 21, 20]):
    results = []
    out = open('/home/ian/Data/SimVoxels/Out/'+'npa+fa', 'w')
    for j in range(len(sample_data)):
        sample = sample_data[j]
        simfile = simdata[sample]
        dataname = simfile
        print dataname

        sim_data = np.loadtxt(simdir + dataname)

        marta_table_fname = '/home/ian/Data/SimData/Dir_and_bvals_DSI_marta.txt'
        b_vals_dirs = np.loadtxt(marta_table_fname)

        bvals = b_vals_dirs[:, 0]*1000
        gradients = b_vals_dirs[:, 1:]

        for j in np.vstack((np.arange(100)*1000, np.arange(100)*1000+1)).T.ravel():
            # 0,1,1000,1001,2000,2001,...
            s = sim_data[j, :]
            gqs = dp.GeneralizedQSampling(s.reshape((1, 102)), bvals, gradients, Lambda=3.5)
            tn = dp.Tensor(s.reshape((1, 102)), bvals, gradients, fit_method='LS')
            t0, t1, t2, npa = gqs.npa(s, width=5)
            print >> out, dataname, j, npa, tn.fa()[0]

            '''
            for (i,o) in enumerate(gqs.odf(s)):
                print i,o
            for (i,o) in enumerate(gqs.odf_vertices):
                print i,o
            '''

            #o = gqs.odf(s)
            #v = gqs.odf_vertices
            #pole = v[t0[0]]
            #eqv = dgqs.equatorial_zone_vertices(v, pole, 5)
            #print 'Number of equatorial vertices: ', len(eqv)
            #print np.max(o[eqv]),np.min(o[eqv])
            #cos_e_pole = [np.dot(pole.T, v[i]) for i in eqv]
            #print np.min(cos1), np.max(cos1)
            #print 'equatorial max in equatorial vertices:', t1[0] in eqv
            #x = np.cross(v[t0[0]],v[t1[0]])
            #x = x/np.sqrt(np.sum(x**2))
            #print x
            #ptchv = dgqs.patch_vertices(v, x, 5)
            #print len(ptchv)
            #eqp = eqv[np.argmin([np.abs(np.dot(v[t1[0]].T,v[p])) for p in eqv])]
            #print (eqp, o[eqp])
            #print t2[0] in ptchv, t2[0] in eqv
            #print np.dot(pole.T, v[t1[0]]), np.dot(pole.T, v[t2[0]])
            #print ptchv[np.argmin([o[v] for v in ptchv])]

    #gq_indices = np.array(gq.IN[:,0],dtype='int').reshape((100,1000))
    #gq_first_directions_in=odf_vertices[np.array(gq.IN[:,0],dtype='int')]
    #print gq_first_directions_in.shape
    #gq_results = analyze_maxima(gq_indices, gq_first_directions_in.reshape((100,1000,3)),range(100))
    #for gqi see example dicoms_2_tracks gq.IN[:,0]
    #np.set_printoptions(precision=6, suppress=True, linewidth=200, threshold=5000)
    #out = open('/home/ian/Data/SimVoxels/Out/'+'+++_'+dataname,'w')
    #results = np.hstack((np.vstack(dt_results), np.vstack(gq_results)))
    #results = np.vstack(dt_results)
    #print >> out, results[:,:]
    out.close()

def run_small_data():
    #smalldir = '/home/ian/Devel/dipy/dipy/data/'
    smalldir = '/home/eg309/Devel/dipy/dipy/data/'
    # from os.path import join as opj
    # bvals=np.load(opj(os.path.dirname(__file__), \
    #     'data','small_64D.bvals.npy'))
    bvals = np.load(smalldir + 'small_64D.bvals.npy')
    # gradients=np.load(opj(os.path.dirname(__file__), \
    #     'data','small_64D.gradients.npy'))
    gradients = np.load(smalldir + 'small_64D.gradients.npy')
    # img =ni.load(os.path.join(os.path.dirname(__file__),\
    #     'data','small_64D.nii'))
    img = nibabel.load(smalldir + 'small_64D.nii')
    small_data = img.get_data()

    print 'real_data', small_data.shape
    gqsmall = dgqs.GeneralizedQSampling(small_data, bvals, gradients)
    tnsmall = ddti.Tensor(small_data, bvals, gradients)

    x, y, z, a, b = tnsmall.evecs.shape
    evecs = tnsmall.evecs
    xyz = x*y*z
    evecs = evecs.reshape(xyz, 3, 3)
    #vs = np.sign(evecs[:,2,:])
    #print vs.shape
    #print np.hstack((vs,vs,vs)).reshape(1000,3,3).shape
    #evecs = np.hstack((vs,vs,vs)).reshape(1000,3,3)
    #print evecs.shape
    evals = tnsmall.evals
    evals = evals.reshape(xyz, 3)
    #print evals.shape

    #print('GQS in %d' %(t2-t1))

    '''
    eds=np.load(opj(os.path.dirname(__file__),\
        '..','matrices',\
        'evenly_distributed_sphere_362.npz'))
    '''
    from dipy.data import get_sphere
    odf_vertices, odf_faces = get_sphere('symmetric362')
    #odf_vertices=eds['vertices']
    #odf_faces=eds['faces']

    #Yeh et.al, IEEE TMI, 2010
    #calculate the odf using GQI

    scaling = np.sqrt(bvals*0.01506)  # 0.01506 = 6*D where D is the free
    #water diffusion coefficient
    #l_values sqrt(6 D tau) D free water
    #diffusion coefficient and tau included in the b-value

    tmp = np.tile(scaling, (3, 1))
    b_vector = gradients.T*tmp
    Lambda = 1.2  # smoothing parameter - diffusion sampling length

    q2odf_params = np.sinc(np.dot(b_vector.T, odf_vertices.T) * Lambda/np.pi)
    #implements equation no. 9 from Yeh et.al.

    S = small_data.copy()

    x, y, z, g = S.shape
    S = S.reshape(x*y*z, g)

    QA = np.zeros((x*y*z, 5))
    IN = np.zeros((x*y*z, 5))

    FA = tnsmall.fa().reshape(x*y*z)

    fwd = 0

    #Calculate Quantitative Anisotropy and find the peaks and the indices
    #for every voxel

    summary = {}

    summary['vertices'] = odf_vertices
    v = odf_vertices.shape[0]
    summary['faces'] = odf_faces
    f = odf_faces.shape[0]

    for (i, s) in enumerate(S):
        #print 'Volume %d' % i
        istr = str(i)
        summary[istr] = {}
        t0, t1, t2, npa = gqsmall.npa(s, width=5)
        summary[istr]['triple'] = (t0, t1, t2)
        summary[istr]['npa'] = npa
        odf = Q2odf(s, q2odf_params)
        peaks, inds = rp.peak_finding(odf, odf_faces)
        fwd = max(np.max(odf), fwd)
        #peaks = peaks - np.min(odf)
        n_peaks = min(len(peaks), 5)
        peak_heights = [odf[i] for i in inds[:n_peaks]]
        #QA[i][:l] = peaks[:n_peaks]
        IN[i][:n_peaks] = inds[:n_peaks]
        summary[istr]['odf'] = odf
        summary[istr]['peaks'] = peaks
        summary[istr]['inds'] = inds
        summary[istr]['evecs'] = evecs[i, :, :]
        summary[istr]['evals'] = evals[i, :]
        summary[istr]['n_peaks'] = n_peaks
        summary[istr]['peak_heights'] = peak_heights
        # summary[istr]['fa'] = tnsmall.fa()[0]
        summary[istr]['fa'] = FA[i]

    '''
    QA/=fwd
    QA=QA.reshape(x,y,z,5)
    IN=IN.reshape(x,y,z,5)
    '''

    peaks_1 = [i for i in range(1000) if summary[str(i)]['n_peaks'] == 1]
    peaks_2 = [i for i in range(1000) if summary[str(i)]['n_peaks'] == 2]
    peaks_3 = [i for i in range(1000) if summary[str(i)]['n_peaks'] == 3]
    #peaks_2 = [i for i in range(1000) if len(summary[str(i)]['inds'])==2]
    #peaks_3 = [i for i in range(1000) if len(summary[str(i)]['inds'])==3]

    print '#voxels with 1, 2, 3 peaks', len(peaks_1), len(peaks_2), len(peaks_3)

    return FA, summary

def Q2odf(s, q2odf_params):
    ''' construct odf for a voxel '''
    odf = np.dot(s, q2odf_params)
    return odf

#run_comparisons()
#run_gq_sims()
FA, summary = run_small_data()

peaks_1 = [i for i in range(1000) if summary[str(i)]['n_peaks'] == 1]
peaks_2 = [i for i in range(1000) if summary[str(i)]['n_peaks'] == 2]
peaks_3 = [i for i in range(1000) if summary[str(i)]['n_peaks'] == 3]

fa_npa_1 = [[summary[str(i)]['fa'], summary[str(i)]['npa'], summary[str(i)]['peak_heights']] for i in peaks_1]
fa_npa_2 = [[summary[str(i)]['fa'], summary[str(i)]['npa'], summary[str(i)]['peak_heights']] for i in peaks_2]
fa_npa_3 = [[summary[str(i)]['fa'], summary[str(i)]['npa'], summary[str(i)]['peak_heights']] for i in peaks_3]
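# Hedged aside (illustration only, not part of the original script): the
# sphere statistics above come down to comparing estimated peak directions
# against ground-truth fibre directions. For unit vectors the angular error
# is arccos(|dot|), with the absolute value folding antipodal pairs together
# (the same modulo-181 idea used for the EDS362 vertex indices).
def _angular_error_deg(u, v):
    u = u / np.linalg.norm(u)
    v = v / np.linalg.norm(v)
    return np.degrees(np.arccos(np.clip(np.abs(np.dot(u, v)), 0, 1)))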
bsd-3-clause
blab/antibody-response-pulse
bcell-array/code/Virus_Bcell_IgM_IgG_Landscape.py
1
11385
# coding: utf-8

# # Antibody Response Pulse
# https://github.com/blab/antibody-response-pulse
#
# ### B-cells evolution --- cross-reactive antibody response after influenza virus infection or vaccination
# ### Adaptive immune response for repeated infection

# In[1]:

'''
author: Alvason Zhenhua Li
date:   04/09/2015
'''
get_ipython().magic(u'matplotlib inline')

import numpy as np
import matplotlib.pyplot as plt
import os

import alva_machinery_event_OAS_new as alva

AlvaFontSize = 23
AlvaFigSize = (15, 5)
numberingFig = 0

# equation plotting
dir_path = '/Users/al/Desktop/GitHub/antibody-response-pulse/bcell-array/figure'
file_name = 'Virus-Bcell-IgM-IgG'
figure_name = '-equation'
file_suffix = '.png'
save_figure = os.path.join(dir_path, file_name + figure_name + file_suffix)

numberingFig = numberingFig + 1
plt.figure(numberingFig, figsize=(12, 5))
plt.axis('off')
plt.title(r'$ Virus-Bcell-IgM-IgG \ equations \ (antibody-response \ for \ repeated-infection) $',
          fontsize=AlvaFontSize)
plt.text(0, 7.0/9, r'$ \frac{\partial V_n(t)}{\partial t} = +\mu_{v} V_{n}(t)(1 - \frac{V_n(t)}{V_{max}}) - \phi_{m} M_{n}(t) V_{n}(t) - \phi_{g} G_{n}(t) V_{n}(t) $',
         fontsize=1.2*AlvaFontSize)
plt.text(0, 5.0/9, r'$ \frac{\partial B_n(t)}{\partial t} = +\mu_{b}V_{n}(t)(1 - \frac{V_n(t)}{V_{max}}) + (\beta_{m} + \beta_{g}) V_{n}(t) B_{n}(t) - \mu_{b} B_{n}(t) + m_b V_{n}(t)\frac{B_{i-1}(t) - 2B_i(t) + B_{i+1}(t)}{(\Delta i)^2} $',
         fontsize=1.2*AlvaFontSize)
plt.text(0, 3.0/9, r'$ \frac{\partial M_n(t)}{\partial t} = +\xi_{m} B_{n}(t) - \phi_{m} M_{n}(t) V_{n}(t) - \mu_{m} M_{n}(t) $',
         fontsize=1.2*AlvaFontSize)
plt.text(0, 1.0/9, r'$ \frac{\partial G_n(t)}{\partial t} = +\xi_{g} B_{n}(t) - \phi_{g} G_{n}(t) V_{n}(t) - \mu_{g} G_{n}(t) + m_a V_{n}(t)\frac{G_{i-1}(t) - 2G_i(t) + G_{i+1}(t)}{(\Delta i)^2} $',
         fontsize=1.2*AlvaFontSize)

plt.savefig(save_figure, dpi=100)
plt.show()

# define the V-M-G partial differential equations
def dVdt_array(VBMGxt=[], *args):
    # naming
    V = VBMGxt[0]
    B = VBMGxt[1]
    M = VBMGxt[2]
    G = VBMGxt[3]
    x_totalPoint = VBMGxt.shape[1]
    # there are n dSdt
    dV_dt_array = np.zeros(x_totalPoint)
    # each dSdt with the same equation form
    dV_dt_array[:] = +inRateV*V[:]*(1 - V[:]/maxV) - killRateVm*M[:]*V[:] - killRateVg*G[:]*V[:]
    return(dV_dt_array)

def dBdt_array(VBMGxt=[], *args):
    # naming
    V = VBMGxt[0]
    B = VBMGxt[1]
    M = VBMGxt[2]
    G = VBMGxt[3]
    x_totalPoint = VBMGxt.shape[1]
    # there are n dSdt
    dB_dt_array = np.zeros(x_totalPoint)
    # each dSdt with the same equation form
    Bcopy = np.copy(B)
    centerX = Bcopy[:]
    leftX = np.roll(Bcopy[:], 1)
    rightX = np.roll(Bcopy[:], -1)
    leftX[0] = centerX[0]
    rightX[-1] = centerX[-1]
    dB_dt_array[:] = +inRateB*V[:]*(1 - V[:]/maxV) + (actRateBm + alva.event_active + alva.event_OAS_B)*V[:]*B[:] - outRateB*B[:] + mutatRateB*V[:]*(leftX[:] - 2*centerX[:] + rightX[:])/(dx**2)
    return(dB_dt_array)

def dMdt_array(VBMGxt=[], *args):
    # naming
    V = VBMGxt[0]
    B = VBMGxt[1]
    M = VBMGxt[2]
    G = VBMGxt[3]
    x_totalPoint = VBMGxt.shape[1]
    # there are n dSdt
    dM_dt_array = np.zeros(x_totalPoint)
    # each dSdt with the same equation form
    dM_dt_array[:] = +inRateM*B[:] - consumeRateM*M[:]*V[:] - outRateM*M[:]
    return(dM_dt_array)

def dGdt_array(VBMGxt=[], *args):
    # naming
    V = VBMGxt[0]
    B = VBMGxt[1]
    M = VBMGxt[2]
    G = VBMGxt[3]
    x_totalPoint = VBMGxt.shape[1]
    # there are n dSdt
    dG_dt_array = np.zeros(x_totalPoint)
    # each dSdt with the same equation form
    Gcopy = np.copy(G)
    centerX = Gcopy[:]
    leftX = np.roll(Gcopy[:], 1)
    rightX = np.roll(Gcopy[:], -1)
    leftX[0] = centerX[0]
    rightX[-1] = centerX[-1]
    dG_dt_array[:] = +(inRateG + alva.event_OAS)*B[:] - consumeRateG*G[:]*V[:] - outRateG*G[:] + mutatRateA*(leftX[:] - 2*centerX[:] + rightX[:])/(dx**2)
    return(dG_dt_array)


# In[2]:

# setting parameter
timeUnit = 'year'
if timeUnit == 'hour':
    hour = float(1)
    day = float(24)
elif timeUnit == 'day':
    day = float(1)
    hour = float(1)/24
elif timeUnit == 'year':
    year = float(1)
    day = float(1)/365
    hour = float(1)/24/365

maxV = float(50)  # max virus/micro-liter
inRateV = 0.2/hour  # in-rate of virus
killRateVm = 0.0003/hour  # kill-rate of virus by antibody-IgM
killRateVg = killRateVm  # kill-rate of virus by antibody-IgG

inRateB = 0.06/hour  # in-rate of B-cell
outRateB = inRateB/8  # out-rate of B-cell
actRateBm = killRateVm  # activation rate of naive B-cell

inRateM = 0.16/hour  # in-rate of antibody-IgM from naive B-cell
outRateM = inRateM/1  # out-rate of antibody-IgM from naive B-cell
consumeRateM = killRateVm  # consume-rate of antibody-IgM by cleaning virus

inRateG = inRateM/10  # in-rate of antibody-IgG from memory B-cell
outRateG = outRateM/250  # out-rate of antibody-IgG from memory B-cell
consumeRateG = killRateVg  # consume-rate of antibody-IgG by cleaning virus

mutatRateB = 0.00002/hour  # B-cell mutation rate
mutatRateA = 0.0002/hour  # mutation rate

# time boundary and griding condition
minT = float(0)
maxT = float(10*12*30*day)
totalPoint_T = int(6*10**3 + 1)
gT = np.linspace(minT, maxT, totalPoint_T)
spacingT = np.linspace(minT, maxT, num=totalPoint_T, retstep=True)
gT = spacingT[0]
dt = spacingT[1]

# space boundary and griding condition
minX = float(0)
maxX = float(9)
totalPoint_X = int(maxX - minX + 1)
gX = np.linspace(minX, maxX, totalPoint_X)
gridingX = np.linspace(minX, maxX, num=totalPoint_X, retstep=True)
gX = gridingX[0]
dx = gridingX[1]

gV_array = np.zeros([totalPoint_X, totalPoint_T])
gB_array = np.zeros([totalPoint_X, totalPoint_T])
gM_array = np.zeros([totalPoint_X, totalPoint_T])
gG_array = np.zeros([totalPoint_X, totalPoint_T])

# initial output condition
#gV_array[1, 0] = float(2)

# [pre-parameter, post-parameter, recovered-day, OAS+, OAS-, origin_virus]
actRateBg_1st = 0.0002/hour  # activation rate of memory B-cell at 1st time (pre-)
actRateBg_2nd = actRateBg_1st*10  # activation rate of memory B-cell at 2nd time (post-)

origin_virus = int(2)
current_virus = int(6)

event_parameter = np.array([[actRateBg_1st,
                             actRateBg_2nd,
                             14*day,
                             +5/hour,
                             -actRateBm - actRateBg_1st + (actRateBm + actRateBg_1st)/1.3,
                             origin_virus,
                             current_virus]])

# [viral population, starting time] --- first infection
infection_period = 12*30*day
viral_population = np.zeros(int(maxX + 1))
viral_population[origin_virus:current_virus + 1] = 3
infection_starting_time = np.arange(int(maxX + 1))*infection_period
event_1st = np.zeros([int(maxX + 1), 2])
event_1st[:, 0] = viral_population
event_1st[:, 1] = infection_starting_time
print ('event_1st = {:}'.format(event_1st))

# [viral population, starting time] --- 2nd infection
viral_population = np.zeros(int(maxX + 1))
viral_population[origin_virus:current_virus + 1] = 0
infection_starting_time = np.arange(int(maxX + 1))*0
event_2nd = np.zeros([int(maxX + 1), 2])
event_2nd[:, 0] = viral_population
event_2nd[:, 1] = infection_starting_time
print ('event_2nd = {:}'.format(event_2nd))

event_table = np.array([event_parameter, event_1st, event_2nd])

# Runge Kutta numerical solution
pde_array = np.array([dVdt_array, dBdt_array, dMdt_array, dGdt_array])
initial_Out = np.array([gV_array, gB_array, gM_array, gG_array])
gOut_array = alva.AlvaRungeKutta4XT(pde_array, initial_Out, minX,
maxX, totalPoint_X, minT, maxT, totalPoint_T, event_table) # plotting gV = gOut_array[0] gB = gOut_array[1] gM = gOut_array[2] gG = gOut_array[3] numberingFig = numberingFig + 1 for i in range(totalPoint_X): figure_name = '-response-%i'%(i) figure_suffix = '.png' save_figure = os.path.join(dir_path, file_name + figure_name + file_suffix) plt.figure(numberingFig, figsize = AlvaFigSize) plt.plot(gT, gV[i], color = 'red', label = r'$ V_{%i}(t) $'%(i), linewidth = 3.0, alpha = 0.5) plt.plot(gT, gM[i], color = 'blue', label = r'$ IgM_{%i}(t) $'%(i), linewidth = 3.0, alpha = 0.5) plt.plot(gT, gG[i], color = 'green', label = r'$ IgG_{%i}(t) $'%(i), linewidth = 3.0, alpha = 0.5) plt.plot(gT, gM[i] + gG[i], color = 'gray', linewidth = 5.0, alpha = 0.5, linestyle = 'dashed' , label = r'$ IgM_{%i}(t) + IgG_{%i}(t) $'%(i, i)) plt.grid(True, which = 'both') plt.title(r'$ Antibody \ from \ Virus-{%i} $'%(i), fontsize = AlvaFontSize) plt.xlabel(r'$time \ (%s)$'%(timeUnit), fontsize = AlvaFontSize) plt.ylabel(r'$ Neutralization \ \ titer $', fontsize = AlvaFontSize) plt.xlim([minT, maxT]) plt.xticks(fontsize = AlvaFontSize*0.6) plt.yticks(fontsize = AlvaFontSize*0.6) plt.ylim([2**0, 2**12]) plt.yscale('log', basey = 2) plt.legend(loc = (1,0), fontsize = AlvaFontSize) plt.savefig(save_figure, dpi = 100, bbox_inches='tight') plt.show() # In[3]: # Normalization stacked graph numberingFig = numberingFig + 1 plt.figure(numberingFig, figsize = AlvaFigSize) plt.stackplot(gT, gM + gG, alpha = 0.3) plt.title(r'$ Stacked-graph \ of \ Antibody $', fontsize = AlvaFontSize) plt.xlabel(r'$time \ (%s)$'%(timeUnit), fontsize = AlvaFontSize) plt.ylabel(r'$ Neutralization \ \ titer $', fontsize = AlvaFontSize) plt.xticks(fontsize = AlvaFontSize*0.6) plt.yticks(fontsize = AlvaFontSize*0.6) plt.ylim([2**0, 2**12]) plt.yscale('log', basey = 2) plt.grid(True) plt.show() # In[4]: # expected peak of the antibody response totalColor = current_virus - origin_virus + 1 AlvaColor = [plt.get_cmap('rainbow')(float(i)/(totalColor)) for i in range(1, totalColor + 1)] sample_time = 90*day # plotting figure_name = '-landscape' figure_suffix = '.png' save_figure = os.path.join(dir_path, file_name + figure_name + file_suffix) numberingFig = numberingFig + 1 plt.figure(numberingFig, figsize = (12, 9)) for i in range(origin_virus, current_virus + 1): detect_xn = current_virus + 2 - i if detect_xn == origin_virus: virus_label = '$ origin-virus $' elif detect_xn == current_virus: virus_label = '$ current-virus $' else: virus_label = '$ {:}th-virus $'.format(detect_xn - origin_virus + 1) detect_time = int(totalPoint_T/(maxT - minT)*(detect_xn*infection_period + sample_time)) plt.plot(gX, gM[:, detect_time] + gG[:, detect_time], marker = 'o', markersize = 20 , color = AlvaColor[detect_xn - origin_virus], label = virus_label) plt.fill_between(gX, gM[:, detect_time] + gG[:, detect_time], facecolor = AlvaColor[detect_xn - origin_virus] , alpha = 0.5) plt.grid(True, which = 'both') plt.title(r'$ Antibody \ Landscape $', fontsize = AlvaFontSize) plt.xlabel(r'$ Virus \ space \ (Antigenic-distance) $', fontsize = AlvaFontSize) plt.ylabel(r'$ Neutralization \ \ titer $', fontsize = AlvaFontSize) plt.xlim([minX, maxX]) plt.xticks(fontsize = AlvaFontSize) plt.yticks(fontsize = AlvaFontSize) plt.ylim([2**0, 2**9]) plt.yscale('log', basey = 2) plt.legend(loc = (1,0), fontsize = AlvaFontSize) plt.savefig(save_figure, dpi = 100, bbox_inches='tight') plt.show() # In[ ]:
gpl-2.0
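The mutation terms in dBdt_array and dGdt_array above implement a discrete second difference along the antigenic axis, using np.roll plus endpoint overwrites to turn the wrap-around into zero-flux (reflecting) boundaries. The helper below is a minimal, self-contained sketch of that stencil; the function name and test values are illustrative, not from the script.

import numpy as np

def discrete_laplacian(u, dx):
    """Second difference of u along the antigenic axis with
    zero-flux (reflecting) boundaries, as in dBdt_array/dGdt_array."""
    left = np.roll(u, 1)
    right = np.roll(u, -1)
    # np.roll wraps around, so overwrite the wrapped entries to make
    # the boundary reflecting instead of periodic
    left[0] = u[0]
    right[-1] = u[-1]
    return (left - 2.0 * u + right) / dx**2

# usage: mutation spreads B-cells to neighbouring antigenic sites
u = np.array([0.0, 0.0, 3.0, 0.0, 0.0])
print(discrete_laplacian(u, dx=1.0))  # negative at the peak, positive at its neighbours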
only4hj/fast-rcnn
lib/roi_data_layer/minibatch.py
1
22641
# -------------------------------------------------------- # Fast R-CNN # Copyright (c) 2015 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Ross Girshick # -------------------------------------------------------- """Compute minibatch blobs for training a Fast R-CNN network.""" import numpy as np import numpy.random as npr import cv2 from fast_rcnn.config import cfg from utils.blob import prep_im_for_blob, im_list_to_blob from utils.model import last_conv_size from roi_data_layer.roidb import prepare_one_roidb_rpn, prepare_one_roidb_frcnn from roidb import clear_one_roidb def get_minibatch(roidb, num_classes, bbox_means, bbox_stds, proposal_file): """Given a roidb, construct a minibatch sampled from it.""" num_images = len(roidb) # Sample random scales to use for each image in this batch random_scale_inds = npr.randint(0, high=len(cfg.TRAIN.SCALES), size=num_images) assert(cfg.TRAIN.BATCH_SIZE % num_images == 0), \ 'num_images ({}) must divide BATCH_SIZE ({})'. \ format(num_images, cfg.TRAIN.BATCH_SIZE) rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images fg_rois_per_image = np.round(cfg.TRAIN.FG_FRACTION * rois_per_image) # Get the input image blob, formatted for caffe im_blob, im_scales, processed_ims = _get_image_blob(roidb, random_scale_inds) if 'model_to_use' in roidb[0] and roidb[0]['model_to_use'] == 'rpn': conv_h, scale_h = last_conv_size(im_blob.shape[2], cfg.MODEL_NAME) conv_w, scale_w = last_conv_size(im_blob.shape[3], cfg.MODEL_NAME) # Now, build the region of interest and label blobs rois_blob = np.zeros((0, 5), dtype=np.float32) labels_blob = np.zeros((0, 9, conv_h, conv_w), dtype=np.float32) bbox_targets_blob = np.zeros((0, 36, conv_h, conv_w), dtype=np.float32) bbox_loss_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32) all_overlaps = [] for im_i in xrange(num_images): if cfg.TRAIN.LAZY_PREPARING_ROIDB: prepare_one_roidb_rpn(roidb[im_i], processed_ims[im_i].shape[0], processed_ims[im_i].shape[1], im_scales[im_i]) # Normalize bbox_targets if cfg.TRAIN.NORMALIZE_BBOX: bbox_targets = roidb[im_i]['bbox_targets'] cls_inds = np.where(bbox_targets[:, 0] > 0)[0] if cls_inds.size > 0: bbox_targets[cls_inds, 1:] -= bbox_means[0, :] bbox_targets[cls_inds, 1:] /= bbox_stds[0, :] labels, overlaps, im_rois, bbox_targets, bbox_loss \ = _sample_rois_rpn(roidb[im_i], fg_rois_per_image, rois_per_image, num_classes, conv_h, conv_w) # Add to RoIs blob if im_rois != None: batch_ind = im_i * np.ones((im_rois.shape[0], 1)) rois_blob_this_image = np.hstack((batch_ind, im_rois)) rois_blob = np.vstack((rois_blob, rois_blob_this_image)) # Add to labels, bbox targets, and bbox loss blobs labels_blob = np.vstack((labels_blob, labels)) bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets)) bbox_loss_blob = np.vstack((bbox_loss_blob, bbox_loss)) # For debug visualizations #_vis_minibatch_rpn(im_blob, conv_h, conv_w, rois_blob, labels_blob, roidb, bbox_targets_blob, bbox_loss_blob) blobs = {'data': im_blob, 'labels': labels_blob} else: # Now, build the region of interest and label blobs rois_blob = np.zeros((0, 5), dtype=np.float32) labels_blob = np.zeros((0), dtype=np.float32) bbox_targets_blob = np.zeros((0, 4 * num_classes), dtype=np.float32) bbox_loss_blob = np.zeros(bbox_targets_blob.shape, dtype=np.float32) all_overlaps = [] for im_i in xrange(num_images): if cfg.TRAIN.LAZY_PREPARING_ROIDB: prepare_one_roidb_frcnn(roidb[im_i], proposal_file, num_classes) # Normalize bbox_targets if cfg.TRAIN.NORMALIZE_BBOX: bbox_targets = roidb[im_i]['bbox_targets'] 
for cls in xrange(1, num_classes): cls_inds = np.where(bbox_targets[:, 0] == cls)[0] bbox_targets[cls_inds, 1:] -= bbox_means[cls, :] bbox_targets[cls_inds, 1:] /= bbox_stds[cls, :] labels, overlaps, im_rois, bbox_targets, bbox_loss \ = _sample_rois(roidb[im_i], fg_rois_per_image, rois_per_image, num_classes) # Add to RoIs blob rois = _project_im_rois(im_rois, im_scales[im_i]) batch_ind = im_i * np.ones((rois.shape[0], 1)) rois_blob_this_image = np.hstack((batch_ind, rois)) rois_blob = np.vstack((rois_blob, rois_blob_this_image)) # Add to labels, bbox targets, and bbox loss blobs labels_blob = np.hstack((labels_blob, labels)) bbox_targets_blob = np.vstack((bbox_targets_blob, bbox_targets)) bbox_loss_blob = np.vstack((bbox_loss_blob, bbox_loss)) #all_overlaps = np.hstack((all_overlaps, overlaps)) # For debug visualizations #_vis_minibatch(im_blob, rois_blob, labels_blob, all_overlaps) blobs = {'data': im_blob, 'rois': rois_blob, 'labels': labels_blob} if cfg.TRAIN.BBOX_REG: blobs['bbox_targets'] = bbox_targets_blob blobs['bbox_loss_weights'] = bbox_loss_blob return blobs def clear_minibatch(roidb): num_images = len(roidb) for im_i in xrange(num_images): clear_one_roidb(roidb[im_i]) def _sample_rois(roidb, fg_rois_per_image, rois_per_image, num_classes): """Generate a random sample of RoIs comprising foreground and background examples. """ # label = class RoI has max overlap with labels = roidb['max_classes'] overlaps = roidb['max_overlaps'] rois = roidb['boxes'] # Select foreground RoIs as those with >= FG_THRESH overlap fg_inds = np.where(overlaps >= cfg.TRAIN.FG_THRESH)[0] # Guard against the case when an image has fewer than fg_rois_per_image # foreground RoIs fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size) # Sample foreground regions without replacement if fg_inds.size > 0: fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image, replace=False) # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI) bg_inds = np.where((overlaps < cfg.TRAIN.BG_THRESH_HI) & (overlaps >= cfg.TRAIN.BG_THRESH_LO))[0] # Compute number of background RoIs to take from this image (guarding # against there being fewer than desired) bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image bg_rois_per_this_image = np.minimum(bg_rois_per_this_image, bg_inds.size) # Sample foreground regions without replacement if bg_inds.size > 0: bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image, replace=False) # The indices that we're selecting (both fg and bg) keep_inds = np.append(fg_inds, bg_inds) # Select sampled values from various arrays: labels = labels[keep_inds] # Clamp labels for the background RoIs to 0 labels[fg_rois_per_this_image:] = 0 overlaps = overlaps[keep_inds] rois = rois[keep_inds] bbox_targets, bbox_loss_weights = \ _get_bbox_regression_labels(roidb['bbox_targets'][keep_inds, :], num_classes) return labels, overlaps, rois, bbox_targets, bbox_loss_weights def get_img_rect(img_height, img_width, conv_height, conv_width, axis1, axis2, axis3): anchors = np.array([[128*2, 128*1], [128*1, 128*1], [128*1, 128*2], [256*2, 256*1], [256*1, 256*1], [256*1, 256*2], [512*2, 512*1], [512*1, 512*1], [512*1, 512*2]]) scale_width = img_width / conv_width scale_height = img_height / conv_height img_center_x = img_width * axis3 / conv_width + scale_width / 2 img_center_y = img_height * axis2 / conv_height + scale_height / 2 anchor_size = anchors[axis1] img_x1 = img_center_x - anchor_size[0] / 2 img_x2 = img_center_x + anchor_size[0] / 2 img_y1 = img_center_y - anchor_size[1] 
/ 2 img_y2 = img_center_y + anchor_size[1] / 2 return [img_x1, img_y1, img_x2, img_y2] def _sample_rois_rpn(roidb, fg_rois_per_image, rois_per_image, num_classes, union_conv_height, union_conv_width): """Generate a random sample of RoIs comprising foreground and background examples. """ # label = class RoI has max overlap with labels = roidb['max_classes'] new_labels = np.zeros(labels.shape, dtype=np.int16) new_labels.fill(-1) bbox_target = roidb['bbox_targets'] new_bbox_target = np.zeros(bbox_target.shape, dtype=np.float32) conv_width = roidb['conv_width'] conv_height = roidb['conv_height'] # Select foreground RoIs as those with >= FG_THRESH overlap fg_inds = np.where(labels > 0)[0] # Guard against the case when an image has fewer than fg_rois_per_image # foreground RoIs fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size) # Sample foreground regions without replacement if fg_inds.size > 0: fg_inds = npr.choice(fg_inds, size=fg_rois_per_this_image, replace=False) # Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI) bg_inds = np.where(labels == 0)[0] # Compute number of background RoIs to take from this image (guarding # against there being fewer than desired) bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image bg_rois_per_this_image = np.minimum(bg_rois_per_this_image, bg_inds.size) # Sample foreground regions without replacement if bg_inds.size > 0: bg_inds = npr.choice(bg_inds, size=bg_rois_per_this_image, replace=False) new_labels[fg_inds] = 1 new_labels[bg_inds] = 0 if 'rois' in roidb: rois = roidb['rois'][fg_inds] else: rois = None """ print 'labels.shape %s' % labels.shape print 'bbox_target.shape %s' % (bbox_target.shape, ) for fg_ind in fg_inds: print 'label : %s ' % labels[fg_ind] print 'bbox_target : %s ' % bbox_target[fg_ind] axis1 = fg_ind / conv_height / conv_width axis2 = fg_ind / conv_width % conv_height axis3 = fg_ind % conv_width im = cv2.imread(roidb['image']) target_size = cfg.TRAIN.SCALES[0] im, im_scale = prep_im_for_blob(im, 0, target_size, cfg.TRAIN.MAX_SIZE, cfg.TRAIN.MIN_SIZE) img_height = im.shape[2] img_width = im.shape[3] proposal_rects = get_img_rect(img_height, img_width, conv_height, conv_width, axis1, axis2, axis3) for proposal_rect in proposal_rects: plt.imshow(im) for ground_rect in ground_rects: plt.gca().add_patch( plt.Rectangle((ground_rect[0], ground_rect[1]), ground_rect[2] - ground_rect[0], ground_rect[3] - ground_rect[1], fill=False, edgecolor='b', linewidth=3) ) plt.gca().add_patch( plt.Rectangle((proposal_rect[0], proposal_rect[1]), proposal_rect[2] - proposal_rect[0], proposal_rect[3] - proposal_rect[1], fill=False, edgecolor='g', linewidth=3) ) plt.gca().add_patch( plt.Rectangle((pred_rect[0], pred_rect[1]), pred_rect[2] - pred_rect[0], pred_rect[3] - pred_rect[1], fill=False, edgecolor='r', linewidth=3) ) plt.show(block=False) raw_input("") plt.close() """ new_bbox_target[fg_inds] = bbox_target[fg_inds] new_bbox_target, bbox_loss_weights = \ _get_bbox_regression_labels_rpn(new_bbox_target, num_classes, labels) """ print 'label no 1 : %s' % len(np.where(new_labels == 1)[0]) print 'new_bbox_target no 1 : %s' % len(np.where(new_bbox_target != 0)[0]) print 'bbox_loss_weights no 1 : %s' % len(np.where(bbox_loss_weights > 0)[0]) """ new_labels = new_labels.reshape((1, 9, conv_height, conv_width)) new_bbox_target = new_bbox_target.reshape((1, 9, conv_height, conv_width, 4)) new_bbox_target = new_bbox_target.transpose(0, 1, 4, 2, 3) new_bbox_target = new_bbox_target.reshape((1, 36, conv_height, 
conv_width)) bbox_loss_weights = bbox_loss_weights.reshape((1, 9, conv_height, conv_width, 4)) bbox_loss_weights = bbox_loss_weights.transpose(0, 1, 4, 2, 3) bbox_loss_weights = bbox_loss_weights.reshape((1, 36, conv_height, conv_width)) output_labels = np.zeros((1, 9, union_conv_height, union_conv_width)) output_bbox_targets = np.zeros((1, 36, union_conv_height, union_conv_width)) output_bbox_loss_weights = np.zeros((1, 36, union_conv_height, union_conv_width)) output_labels.fill(-1) output_labels[:, :, 0:conv_height, 0:conv_width] = new_labels output_bbox_targets[:, :, 0:conv_height, 0:conv_width] = new_bbox_target output_bbox_loss_weights[:, :, 0:conv_height, 0:conv_width] = bbox_loss_weights """ for fg_ind in fg_inds: if fg_ind == 6510: axis1 = fg_ind / conv_height / conv_width axis2 = fg_ind / conv_width % conv_height axis3 = fg_ind % conv_width print '' print 'conv_size : %s, %s' % (conv_height, conv_width) print 'axis : %s, %s, %s' % (axis1, axis2, axis3) print 'output_labels[%s] : %s' % (fg_ind, output_labels[0, axis1, axis2, axis3]) print 'output_bbox_targets[%s] : %s' % (fg_ind, output_bbox_targets[0, axis1*4:axis1*4+4, axis2, axis3]) print 'output_bbox_loss_weights[%s] : %s' % (fg_ind, output_bbox_loss_weights[0, axis1*4:axis1*4+4, axis2, axis3]) """ """ # Generate positive rois based on index for debugging anchors = [[128*2, 128*1], [128*1, 128*1], [128*1, 128*2], [256*2, 256*1], [256*1, 256*1], [256*1, 256*2], [512*2, 512*1], [512*1, 512*1], [512*1, 512*2]] conv_scale_width = roidb['conv_scale_width'] conv_scale_height = roidb['conv_scale_height'] rois = np.zeros((len(fg_inds), 4), dtype=np.int16) for i, fg_ind in enumerate(fg_inds): center_x = fg_ind % conv_width center_y = (fg_ind - center_x) / conv_width % conv_height anchor = fg_ind / conv_height / conv_width anchor_w = anchors[anchor][0] anchor_h = anchors[anchor][1] x1 = center_x * conv_scale_width - anchor_w / 2 y1 = center_y * conv_scale_height - anchor_h / 2 x2 = x1 + anchor_w y2 = y1 + anchor_h rois[i, :] = x1, y1, x2, y2 """ """ pos_labels = np.where(new_labels == 1) i = 0 for d0, d1, d2, d3 in zip(pos_labels[0], pos_labels[1], pos_labels[2], pos_labels[3]): print '[%s] label : %s, bbox_target : %s, bbox_loss_weights : %s' % (i, new_labels[d0, d1, d2, d3], new_bbox_target[d0, d1*4 : d1*4+4, d2, d3], bbox_loss_weights[d0, d1*4 : d1*4+4, d2, d3]) i += 1 """ """ print 'label no 2 : %s' % len(np.where(output_labels == 1)[0]) print 'new_bbox_target no 2 : %s' % len(np.where(output_bbox_targets != 0)[0]) print 'bbox_loss_weights no 2 : %s' % len(np.where(output_bbox_loss_weights > 0)[0]) """ return output_labels, None, rois, output_bbox_targets, output_bbox_loss_weights def _get_image_blob(roidb, scale_inds): """Builds an input blob from the images in the roidb at the specified scales. 
""" num_images = len(roidb) processed_ims = [] im_scales = [] for i in xrange(num_images): im = cv2.imread(roidb[i]['image']) if roidb[i]['flipped']: im = im[:, ::-1, :] target_size = cfg.TRAIN.SCALES[scale_inds[i]] im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size, cfg.TRAIN.MAX_SIZE, cfg.TRAIN.MIN_SIZE) im_scales.append(im_scale) processed_ims.append(im) # Create a blob to hold the input images blob = im_list_to_blob(processed_ims) return blob, im_scales, processed_ims def _project_im_rois(im_rois, im_scale_factor): """Project image RoIs into the rescaled training image.""" rois = im_rois * im_scale_factor return rois def _get_bbox_regression_labels(bbox_target_data, num_classes): """Bounding-box regression targets are stored in a compact form in the roidb. This function expands those targets into the 4-of-4*K representation used by the network (i.e. only one class has non-zero targets). The loss weights are similarly expanded. Returns: bbox_target_data (ndarray): N x 4K blob of regression targets bbox_loss_weights (ndarray): N x 4K blob of loss weights """ clss = bbox_target_data[:, 0] bbox_targets = np.zeros((clss.size, 4 * num_classes), dtype=np.float32) bbox_loss_weights = np.zeros(bbox_targets.shape, dtype=np.float32) inds = np.where(clss > 0)[0] for ind in inds: cls = clss[ind] start = 4 * cls end = start + 4 bbox_targets[ind, start:end] = bbox_target_data[ind, 1:] bbox_loss_weights[ind, start:end] = [1., 1., 1., 1.] return bbox_targets, bbox_loss_weights def _get_bbox_regression_labels_rpn(bbox_target_data, num_classes, labels): """Bounding-box regression targets are stored in a compact form in the roidb. This function expands those targets into the 4-of-4*K representation used by the network (i.e. only one class has non-zero targets). The loss weights are similarly expanded. Returns: bbox_target_data (ndarray): N x 4K blob of regression targets bbox_loss_weights (ndarray): N x 4K blob of loss weights """ clss = bbox_target_data[:, 0] bbox_targets = np.zeros((clss.size, 4), dtype=np.float32) bbox_loss_weights = np.zeros(bbox_targets.shape, dtype=np.float32) inds = np.where(clss > 0)[0] #print '' #print 'len(inds) : %s' % len(inds) for ind in inds: bbox_targets[ind, :] = bbox_target_data[ind, 1:] bbox_loss_weights[ind, :] = [1., 1., 1., 1.] 
#print 'bbox_targets[ind, :] : %s - %s ' % (bbox_target_data[ind, 0], bbox_targets[ind, :]) return bbox_targets, bbox_loss_weights def _vis_minibatch(im_blob, rois_blob, labels_blob, overlaps): """Visualize a mini-batch for debugging.""" import matplotlib.pyplot as plt for i in xrange(rois_blob.shape[0]): rois = rois_blob[i, :] im_ind = rois[0] roi = rois[1:] im = im_blob[im_ind, :, :, :].transpose((1, 2, 0)).copy() im += cfg.PIXEL_MEANS im = im[:, :, (2, 1, 0)] im = im.astype(np.uint8) cls = labels_blob[i] plt.imshow(im) print 'class: ', cls, ' overlap: ', overlaps[i] plt.gca().add_patch( plt.Rectangle((roi[0], roi[1]), roi[2] - roi[0], roi[3] - roi[1], fill=False, edgecolor='r', linewidth=3) ) plt.show() def _vis_minibatch_rpn(im_blob, conv_h, conv_w, rois_blob, labels_blob, roidb, bbox_targets_blob, bbox_loss_blob): """Visualize a mini-batch for debugging.""" import matplotlib.pyplot as plt for i in xrange(len(roidb)): # DJDJ #if roidb[i]['image'].endswith('000009.jpg') == False: # continue print 'image : %s' % roidb[i]['image'] resized_gt_boxes = roidb[int(i)]['resized_gt_boxes'] im = im_blob[i, :, :, :].transpose((1, 2, 0)).copy() im += cfg.PIXEL_MEANS im = im[:, :, (2, 1, 0)] im = im.astype(np.uint8) for j in range(9): for k in range(labels_blob.shape[2]): for l in range(labels_blob.shape[3]): label = labels_blob[i][j][k][l] if label == -1: continue elif label == 1: color = 'g' elif label == 0: #color = 'y' continue plt.imshow(im) for resized_gt_box in resized_gt_boxes: resized_gt_box = resized_gt_box.astype(np.int) plt.gca().add_patch( plt.Rectangle((resized_gt_box[0], resized_gt_box[1]), resized_gt_box[2] - resized_gt_box[0], resized_gt_box[3] - resized_gt_box[1], fill=False, edgecolor='b', linewidth=3) ) proposal_rects = get_img_rect(im.shape[0], im.shape[1], conv_h, conv_w, j, k, l) plt.gca().add_patch( plt.Rectangle((proposal_rects[0], proposal_rects[1]), proposal_rects[2] - proposal_rects[0], proposal_rects[3] - proposal_rects[1], fill=False, edgecolor=color, linewidth=3) ) plt.show(block=False) raw_input("") plt.close()
mit
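The foreground/background balancing in _sample_rois above keeps at most FG_FRACTION * BATCH_SIZE foreground RoIs (overlap >= FG_THRESH) and fills the remainder with background RoIs whose overlap falls in [BG_THRESH_LO, BG_THRESH_HI). The snippet below is a minimal stand-alone sketch of that sampling logic; the default threshold values are illustrative stand-ins for the cfg.TRAIN settings, which this file does not define.

import numpy as np
import numpy.random as npr

def sample_fg_bg(overlaps, rois_per_image=64, fg_fraction=0.25,
                 fg_thresh=0.5, bg_lo=0.1, bg_hi=0.5):
    """Pick fg/bg RoI indices the way _sample_rois does.
    Threshold defaults are illustrative stand-ins for cfg.TRAIN.*."""
    fg_inds = np.where(overlaps >= fg_thresh)[0]
    bg_inds = np.where((overlaps < bg_hi) & (overlaps >= bg_lo))[0]
    # guard against images with fewer candidates than requested
    n_fg = int(min(round(fg_fraction * rois_per_image), fg_inds.size))
    if n_fg > 0:
        fg_inds = npr.choice(fg_inds, size=n_fg, replace=False)
    n_bg = int(min(rois_per_image - n_fg, bg_inds.size))
    if n_bg > 0:
        bg_inds = npr.choice(bg_inds, size=n_bg, replace=False)
    return fg_inds, bg_inds

overlaps = npr.rand(300)   # fake max-overlap scores for 300 proposals
fg, bg = sample_fg_bg(overlaps)
print(len(fg), len(bg))    # at most 16 fg and 48 bg with these defaults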
rodluger/everest
docs/mcmc.py
1
2721
"""MCMC example for transit fitting.""" import matplotlib.pyplot as pl from everest import Everest, TransitModel import numpy as np import emcee from tqdm import tqdm from corner import corner def lnprior(x): """Return the log prior given parameter vector `x`.""" per, t0, b = x if b < -1 or b > 1: return -np.inf elif per < 7 or per > 10: return -np.inf elif t0 < 1978 or t0 > 1979: return -np.inf else: return 0. def lnlike(x, star): """Return the log likelihood given parameter vector `x`.""" ll = lnprior(x) if np.isinf(ll): return ll, (np.nan, np.nan) per, t0, b = x model = TransitModel('b', per=per, t0=t0, b=b, rhos=10.)(star.time) like, d, vard = star.lnlike(model, full_output=True) ll += like return ll, (d,) # Initialize the everest model star = Everest(201635569) # Set up the MCMC sampler params = ['Period (days)', r't$_0$ (BJD - 2456811)', 'Impact parameter'] blobs = ['Depth (%)'] nsteps = 1000 nburn = 300 nwalk = 10 ndim = len(params) nblobs = len(blobs) sampler = emcee.EnsembleSampler(nwalk, ndim, lnlike, args=[star]) x0 = [[8.368 + 0.01 * np.random.randn(), 1978.4513 + 0.01 * np.random.randn(), 0. + 0.1 * np.random.randn()] for k in range(nwalk)] blobs0 = [[0.] for k in range(nwalk)] # Run! for i in tqdm(sampler.sample(x0, iterations=nsteps, blobs0=blobs0), total=nsteps): pass # Add the blobs to the chain for plotting chain = np.concatenate((sampler.chain, np.array(sampler.blobs).swapaxes(0, 1)), axis=2) # Re-scale the transit time for prettier axes labels chain[:, :, 1] -= 1978. # Take the absolute value of the impact parameter for plotting chain[:, :, 2] = np.abs(chain[:, :, 2]) # Re-scale the transit depth as a percentage chain[:, :, 3] *= 100. # Plot the chains fig1, ax = pl.subplots(ndim + nblobs, figsize=(6, 7)) fig1.suptitle("K2-14b", fontsize=16, fontweight='bold') ax[-1].set_xlabel("Iteration", fontsize=14) for n in range(ndim + nblobs): for k in range(nwalk): ax[n].plot(chain[k, :, n], alpha=0.3, lw=1) ax[n].set_ylabel((params + blobs)[n], fontsize=9) ax[n].margins(0, None) ax[n].axvline(nburn, color='b', alpha=0.5, lw=1, ls='--') fig1.savefig("k2-14b_chains.png", bbox_inches='tight') # Plot the posterior distributions samples = chain[:, nburn:, :].reshape(-1, ndim + nblobs) fig2 = corner(samples, labels=params + blobs) fig2.suptitle("K2-14b", fontsize=16, fontweight='bold') fig2.set_size_inches(6, 6) for ax in fig2.axes: for tick in ax.get_xticklabels() + ax.get_yticklabels(): tick.set_fontsize(7) fig2.savefig("k2-14b_corner.png", bbox_inches='tight')
mit
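The lnprior/lnlike split above is the standard emcee pattern: a hard-bounded flat prior that returns -inf outside the box, added to the data log likelihood. Below is a minimal, self-contained version of the same pattern on synthetic data, with no EVEREST dependency and using the emcee 2-style API the script itself relies on; all numbers are made up for illustration.

import numpy as np
import emcee

# toy data: 100 noisy measurements of a mean we want to recover
data = 3.0 + 0.5 * np.random.randn(100)

def lnprob(theta):
    mu = theta[0]
    if not (0.0 < mu < 10.0):       # flat prior, as in lnprior above
        return -np.inf
    # Gaussian log likelihood with known variance 0.25
    return -0.5 * np.sum((data - mu) ** 2 / 0.25)

nwalk, ndim, nsteps = 10, 1, 500
p0 = 3.0 + 0.1 * np.random.randn(nwalk, ndim)
sampler = emcee.EnsembleSampler(nwalk, ndim, lnprob)
sampler.run_mcmc(p0, nsteps)
print(sampler.chain[:, 250:, 0].mean())   # posterior mean after burn-in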
cxcsds/ciao-contrib
crates_contrib/images.py
1
4630
# # Copyright (C) 2012, 2015, 2016, 2019 # Smithsonian Astrophysical Observatory # # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # """ Image-specific Crates routines. At present there is only one routine - imextent. """ from pytransform import LINEAR2DTransform __all__ = ('imextent', ) def imextent(img, xmin, xmax, ymin, ymax, limits='center'): """Create a linear transform for the image axes. Returns a 2D linear transform object that represents the mapping from "pixel" units (e.g. logical values) to a linearly scaled system (offset and scale change, no rotation). One use of this is to mimic the extent argument from matplotlib's imshow command, as discussed in the examples below. Parameters ---------- img : 2D NumPy array xmin, xmax, ymin, ymax : float The coordinates of the lower-left and upper-right corners of the image in the transformed (non-logical) system. limits : {'center', 'edge'} Do the coordinates (xmin, ..., ymax) refer to the center of the pixels, or their edges. In FITS convention, the bottom-left pixel is centered on 1,1 and the top-right pixel is nx,ny (for a nx by ny grid). With limits='center' xmin,xmax refers to the center of the lower-left pixel (i.e. 1,1 in FITS terminology) whereas with limits='edge' it refers to the bottom-left corner (0.5,0.5 in FITS). Returns ------- tr : pytransform.LINEAR2DTransform The transform object containing the coordinate mapping. Notes ----- The logical coordinate system follows the FITS standard, so the first pixel is (1,1) and not (0,0), and the X axis values are given first. Examples -------- The following example creates a 40 pixel wide by 20 pixel high image, zi, where the X axis goes from 40 to 60 and the Y axis 10 to 20. The imextent call creates a transform object. >>> yi, xi = np.mgrid[10:20:20j, 40:60:40j] >>> zi = 100.0 / np.sqrt((xi - 45.62) ** 2 + (yi - 14.7) ** 2) >>> tr = imextent(zi, 40, 60, 10, 20) The transform object can be used to convert between logical coordinates (where 1,1 refers to the center of the lower-left pixel) and the data coordinates: >>> print(tr.apply([[1,1], [40,20]])) [[40 10] [60 20]] and the invert method goes from data to logical coordinates: >>> print(tr.invert([[45.0, 15.0]])) [[ 10.75 10.5 ]] The following examples use a 4 pixel by 3 pixel image: >>> img = np.arange(0, 12).reshape(3, 4) The default value for the limits argument is 'center', which means that the given coordinates - in this case 10,-10 and 13,-6 - refer to the center of the bottom-left and top-right pixels: >>> tr_cen = imextent(img, 10, 13, -10, -6, limits='center') The alternative is limits='edge', where 10,-10 refers to the bottom-left corner of the image and 13,-6 refers to the top-right corner: >>> tr_edge = imextent(img, 10, 13, -10, -6, limits='edge') >>> print(tr_cen.apply([[1.0, 1.0]])) [[ 10. 
-10.]] >>> print(tr_edge.apply([[1.0, 1.0]])) [[ 10.375 -9.33333333]] """ try: (ny, nx) = img.shape except AttributeError: raise ValueError("First argument has no shape attribute.") dx = (xmax - xmin) * 1.0 dy = (ymax - ymin) * 1.0 if limits == 'center': dx /= (nx - 1.0) dy /= (ny - 1.0) x0 = xmin - dx y0 = ymin - dy elif limits == 'edge': dx /= nx dy /= ny x0 = xmin - dx / 2.0 y0 = ymin - dy / 2.0 else: raise ValueError("limits must be 'center' or 'edge', not '{}'".format(limits)) tr = LINEAR2DTransform() tr.get_parameter('ROTATION').set_value(0.0) tr.get_parameter('SCALE').set_value([dx, dy]) tr.get_parameter('OFFSET').set_value([x0, y0]) return tr
gpl-3.0
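The transform that imextent returns is just an offset plus a per-axis scale. As a sanity check, the plain-NumPy sketch below reproduces the limits='center' mapping from the docstring example; the helper name is ours, not part of the module.

import numpy as np

def logical_to_data(points, xmin, xmax, ymin, ymax, nx, ny):
    """Map FITS logical coords (first pixel centred on 1,1) to data
    coords, mirroring the limits='center' branch of imextent."""
    dx = (xmax - xmin) / (nx - 1.0)
    dy = (ymax - ymin) / (ny - 1.0)
    offset = np.array([xmin - dx, ymin - dy])  # pixel (1,1) -> (xmin, ymin)
    scale = np.array([dx, dy])
    return offset + scale * np.asarray(points)

# the 40x20 grid from the docstring example: x in [40, 60], y in [10, 20]
print(logical_to_data([[1, 1], [40, 20]], 40, 60, 10, 20, nx=40, ny=20))
# -> [[40. 10.] [60. 20.]], matching tr.apply in the docstring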
rasbt/python-machine-learning-book
code/optional-py-scripts/ch05.py
1
19830
# Sebastian Raschka, 2015 (http://sebastianraschka.com) # Python Machine Learning - Code Examples # # Chapter 5 - Compressing Data via Dimensionality Reduction # # S. Raschka. Python Machine Learning. Packt Publishing Ltd., 2015. # GitHub Repo: https://github.com/rasbt/python-machine-learning-book # # License: MIT # https://github.com/rasbt/python-machine-learning-book/blob/master/LICENSE.txt import pandas as pd import numpy as np from sklearn.preprocessing import StandardScaler from sklearn.decomposition import PCA import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap from sklearn.linear_model import LogisticRegression from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA from sklearn.datasets import make_moons from sklearn.datasets import make_circles from sklearn.decomposition import KernelPCA from scipy.spatial.distance import pdist, squareform from scipy import exp from scipy.linalg import eigh from matplotlib.ticker import FormatStrFormatter # for sklearn 0.18's alternative syntax from distutils.version import LooseVersion as Version from sklearn import __version__ as sklearn_version if Version(sklearn_version) < '0.18': from sklearn.grid_search import train_test_split from sklearn.lda import LDA else: from sklearn.model_selection import train_test_split from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA ############################################################################# print(50 * '=') print('Section: Unsupervised dimensionality reduction' ' via principal component analysis') print(50 * '-') df_wine = pd.read_csv('https://archive.ics.uci.edu/ml/' 'machine-learning-databases/wine/wine.data', header=None) df_wine.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash', 'Magnesium', 'Total phenols', 'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins', 'Color intensity', 'Hue', 'OD280/OD315 of diluted wines', 'Proline'] print('Wine data excerpt:\n\n:', df_wine.head()) X, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values X_train, X_test, y_train, y_test = \ train_test_split(X, y, test_size=0.3, random_state=0) sc = StandardScaler() X_train_std = sc.fit_transform(X_train) X_test_std = sc.transform(X_test) cov_mat = np.cov(X_train_std.T) eigen_vals, eigen_vecs = np.linalg.eig(cov_mat) print('\nEigenvalues \n%s' % eigen_vals) ############################################################################# print(50 * '=') print('Section: Total and explained variance') print(50 * '-') tot = sum(eigen_vals) var_exp = [(i / tot) for i in sorted(eigen_vals, reverse=True)] cum_var_exp = np.cumsum(var_exp) plt.bar(range(1, 14), var_exp, alpha=0.5, align='center', label='individual explained variance') plt.step(range(1, 14), cum_var_exp, where='mid', label='cumulative explained variance') plt.ylabel('Explained variance ratio') plt.xlabel('Principal components') plt.legend(loc='best') # plt.tight_layout() # plt.savefig('./figures/pca1.png', dpi=300) plt.show() ############################################################################# print(50 * '=') print('Section: Feature Transformation') print(50 * '-') # Make a list of (eigenvalue, eigenvector) tuples eigen_pairs = [(np.abs(eigen_vals[i]), eigen_vecs[:, i]) for i in range(len(eigen_vals))] # Sort the (eigenvalue, eigenvector) tuples from high to low eigen_pairs.sort(reverse=True) w = np.hstack((eigen_pairs[0][1][:, np.newaxis], eigen_pairs[1][1][:, np.newaxis])) print('Matrix W:\n', w) X_train_pca = X_train_std.dot(w) colors = ['r', 
'b', 'g'] markers = ['s', 'x', 'o'] for l, c, m in zip(np.unique(y_train), colors, markers): plt.scatter(X_train_pca[y_train == l, 0], X_train_pca[y_train == l, 1], c=c, label=l, marker=m) plt.xlabel('PC 1') plt.ylabel('PC 2') plt.legend(loc='lower left') # plt.tight_layout() # plt.savefig('./figures/pca2.png', dpi=300) plt.show() print('Dot product:\n', X_train_std[0].dot(w)) ############################################################################# print(50 * '=') print('Section: Principal component analysis in scikit-learn') print(50 * '-') pca = PCA() X_train_pca = pca.fit_transform(X_train_std) print('Variance explained ratio:\n', pca.explained_variance_ratio_) plt.bar(range(1, 14), pca.explained_variance_ratio_, alpha=0.5, align='center') plt.step(range(1, 14), np.cumsum(pca.explained_variance_ratio_), where='mid') plt.ylabel('Explained variance ratio') plt.xlabel('Principal components') plt.show() pca = PCA(n_components=2) X_train_pca = pca.fit_transform(X_train_std) X_test_pca = pca.transform(X_test_std) plt.scatter(X_train_pca[:, 0], X_train_pca[:, 1]) plt.xlabel('PC 1') plt.ylabel('PC 2') plt.show() def plot_decision_regions(X, y, classifier, resolution=0.02): # setup marker generator and color map markers = ('s', 'x', 'o', '^', 'v') colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan') cmap = ListedColormap(colors[:len(np.unique(y))]) # plot the decision surface x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1 x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution), np.arange(x2_min, x2_max, resolution)) Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T) Z = Z.reshape(xx1.shape) plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap) plt.xlim(xx1.min(), xx1.max()) plt.ylim(xx2.min(), xx2.max()) # plot class samples for idx, cl in enumerate(np.unique(y)): plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8, c=cmap(idx), marker=markers[idx], label=cl) lr = LogisticRegression() lr = lr.fit(X_train_pca, y_train) plot_decision_regions(X_train_pca, y_train, classifier=lr) plt.xlabel('PC 1') plt.ylabel('PC 2') plt.legend(loc='lower left') # plt.tight_layout() # plt.savefig('./figures/pca3.png', dpi=300) plt.show() plot_decision_regions(X_test_pca, y_test, classifier=lr) plt.xlabel('PC 1') plt.ylabel('PC 2') plt.legend(loc='lower left') # plt.tight_layout() # plt.savefig('./figures/pca4.png', dpi=300) plt.show() pca = PCA(n_components=None) X_train_pca = pca.fit_transform(X_train_std) print('Explaind variance ratio:\n', pca.explained_variance_ratio_) ############################################################################# print(50 * '=') print('Section: Supervised data compression via linear discriminant analysis' ' - Computing the scatter matrices') print(50 * '-') np.set_printoptions(precision=4) mean_vecs = [] for label in range(1, 4): mean_vecs.append(np.mean(X_train_std[y_train == label], axis=0)) print('MV %s: %s\n' % (label, mean_vecs[label - 1])) d = 13 # number of features S_W = np.zeros((d, d)) for label, mv in zip(range(1, 4), mean_vecs): class_scatter = np.zeros((d, d)) # scatter matrix for each class for row in X_train_std[y_train == label]: row, mv = row.reshape(d, 1), mv.reshape(d, 1) # make column vectors class_scatter += (row - mv).dot((row - mv).T) S_W += class_scatter # sum class scatter matrices print('Within-class scatter matrix: %sx%s' % (S_W.shape[0], S_W.shape[1])) print('Class label distribution: %s' % np.bincount(y_train)[1:]) d = 13 # number of features S_W = np.zeros((d, d)) for 
label, mv in zip(range(1, 4), mean_vecs): class_scatter = np.cov(X_train_std[y_train == label].T) S_W += class_scatter print('Scaled within-class scatter matrix: %sx%s' % (S_W.shape[0], S_W.shape[1])) mean_overall = np.mean(X_train_std, axis=0) d = 13 # number of features S_B = np.zeros((d, d)) for i, mean_vec in enumerate(mean_vecs): n = X_train[y_train == i + 1, :].shape[0] mean_vec = mean_vec.reshape(d, 1) # make column vector mean_overall = mean_overall.reshape(d, 1) # make column vector S_B += n * (mean_vec - mean_overall).dot((mean_vec - mean_overall).T) print('Between-class scatter matrix: %sx%s' % (S_B.shape[0], S_B.shape[1])) ############################################################################# print(50 * '=') print('Section: Selecting linear discriminants for the new feature subspace') print(50 * '-') eigen_vals, eigen_vecs = np.linalg.eig(np.linalg.inv(S_W).dot(S_B)) # Make a list of (eigenvalue, eigenvector) tuples eigen_pairs = [(np.abs(eigen_vals[i]), eigen_vecs[:, i]) for i in range(len(eigen_vals))] # Sort the (eigenvalue, eigenvector) tuples from high to low eigen_pairs = sorted(eigen_pairs, key=lambda k: k[0], reverse=True) # Visually confirm that the list is correctly sorted by decreasing eigenvalues print('Eigenvalues in decreasing order:\n') for eigen_val in eigen_pairs: print(eigen_val[0]) tot = sum(eigen_vals.real) discr = [(i / tot) for i in sorted(eigen_vals.real, reverse=True)] cum_discr = np.cumsum(discr) plt.bar(range(1, 14), discr, alpha=0.5, align='center', label='individual "discriminability"') plt.step(range(1, 14), cum_discr, where='mid', label='cumulative "discriminability"') plt.ylabel('"discriminability" ratio') plt.xlabel('Linear Discriminants') plt.ylim([-0.1, 1.1]) plt.legend(loc='best') # plt.tight_layout() # plt.savefig('./figures/lda1.png', dpi=300) plt.show() w = np.hstack((eigen_pairs[0][1][:, np.newaxis].real, eigen_pairs[1][1][:, np.newaxis].real)) print('Matrix W:\n', w) ############################################################################# print(50 * '=') print('Section: Projecting samples onto the new feature space') print(50 * '-') X_train_lda = X_train_std.dot(w) colors = ['r', 'b', 'g'] markers = ['s', 'x', 'o'] for l, c, m in zip(np.unique(y_train), colors, markers): plt.scatter(X_train_lda[y_train == l, 0] * (-1), X_train_lda[y_train == l, 1] * (-1), c=c, label=l, marker=m) plt.xlabel('LD 1') plt.ylabel('LD 2') plt.legend(loc='lower right') # plt.tight_layout() # plt.savefig('./figures/lda2.png', dpi=300) plt.show() ############################################################################# print(50 * '=') print('Section: LDA via scikit-learn') print(50 * '-') lda = LDA(n_components=2) X_train_lda = lda.fit_transform(X_train_std, y_train) lr = LogisticRegression() lr = lr.fit(X_train_lda, y_train) plot_decision_regions(X_train_lda, y_train, classifier=lr) plt.xlabel('LD 1') plt.ylabel('LD 2') plt.legend(loc='lower left') # plt.tight_layout() # plt.savefig('./images/lda3.png', dpi=300) plt.show() X_test_lda = lda.transform(X_test_std) plot_decision_regions(X_test_lda, y_test, classifier=lr) plt.xlabel('LD 1') plt.ylabel('LD 2') plt.legend(loc='lower left') # plt.tight_layout() # plt.savefig('./images/lda4.png', dpi=300) plt.show() ############################################################################# print(50 * '=') print('Section: Implementing a kernel principal component analysis in Python') print(50 * '-') def rbf_kernel_pca(X, gamma, n_components): """ RBF kernel PCA implementation. 
Parameters ------------ X: {NumPy ndarray}, shape = [n_samples, n_features] gamma: float Tuning parameter of the RBF kernel n_components: int Number of principal components to return Returns ------------ X_pc: {NumPy ndarray}, shape = [n_samples, k_features] Projected dataset """ # Calculate pairwise squared Euclidean distances # in the MxN dimensional dataset. sq_dists = pdist(X, 'sqeuclidean') # Convert pairwise distances into a square matrix. mat_sq_dists = squareform(sq_dists) # Compute the symmetric kernel matrix. K = exp(-gamma * mat_sq_dists) # Center the kernel matrix. N = K.shape[0] one_n = np.ones((N, N)) / N K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n) # Obtaining eigenpairs from the centered kernel matrix # numpy.eigh returns them in sorted order eigvals, eigvecs = eigh(K) # Collect the top k eigenvectors (projected samples) X_pc = np.column_stack((eigvecs[:, -i] for i in range(1, n_components + 1))) return X_pc ############################################################################# print(50 * '=') print('Section: Example 1: Separating half-moon shapes') print(50 * '-') X, y = make_moons(n_samples=100, random_state=123) plt.scatter(X[y == 0, 0], X[y == 0, 1], color='red', marker='^', alpha=0.5) plt.scatter(X[y == 1, 0], X[y == 1, 1], color='blue', marker='o', alpha=0.5) # plt.tight_layout() # plt.savefig('./figures/half_moon_1.png', dpi=300) plt.show() scikit_pca = PCA(n_components=2) X_spca = scikit_pca.fit_transform(X) fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3)) ax[0].scatter(X_spca[y == 0, 0], X_spca[y == 0, 1], color='red', marker='^', alpha=0.5) ax[0].scatter(X_spca[y == 1, 0], X_spca[y == 1, 1], color='blue', marker='o', alpha=0.5) ax[1].scatter(X_spca[y == 0, 0], np.zeros((50, 1)) + 0.02, color='red', marker='^', alpha=0.5) ax[1].scatter(X_spca[y == 1, 0], np.zeros((50, 1)) - 0.02, color='blue', marker='o', alpha=0.5) ax[0].set_xlabel('PC1') ax[0].set_ylabel('PC2') ax[1].set_ylim([-1, 1]) ax[1].set_yticks([]) ax[1].set_xlabel('PC1') # plt.tight_layout() # plt.savefig('./figures/half_moon_2.png', dpi=300) plt.show() X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2) fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3)) ax[0].scatter(X_kpca[y == 0, 0], X_kpca[y == 0, 1], color='red', marker='^', alpha=0.5) ax[0].scatter(X_kpca[y == 1, 0], X_kpca[y == 1, 1], color='blue', marker='o', alpha=0.5) ax[1].scatter(X_kpca[y == 0, 0], np.zeros((50, 1)) + 0.02, color='red', marker='^', alpha=0.5) ax[1].scatter(X_kpca[y == 1, 0], np.zeros((50, 1)) - 0.02, color='blue', marker='o', alpha=0.5) ax[0].set_xlabel('PC1') ax[0].set_ylabel('PC2') ax[1].set_ylim([-1, 1]) ax[1].set_yticks([]) ax[1].set_xlabel('PC1') ax[0].xaxis.set_major_formatter(FormatStrFormatter('%0.1f')) ax[1].xaxis.set_major_formatter(FormatStrFormatter('%0.1f')) # plt.tight_layout() # plt.savefig('./figures/half_moon_3.png', dpi=300) plt.show() ############################################################################# print(50 * '=') print('Section: Example 2: Separating concentric circles') print(50 * '-') X, y = make_circles(n_samples=1000, random_state=123, noise=0.1, factor=0.2) plt.scatter(X[y == 0, 0], X[y == 0, 1], color='red', marker='^', alpha=0.5) plt.scatter(X[y == 1, 0], X[y == 1, 1], color='blue', marker='o', alpha=0.5) # plt.tight_layout() # plt.savefig('./figures/circles_1.png', dpi=300) plt.show() scikit_pca = PCA(n_components=2) X_spca = scikit_pca.fit_transform(X) fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3)) ax[0].scatter(X_spca[y == 0, 0], 
X_spca[y == 0, 1], color='red', marker='^', alpha=0.5) ax[0].scatter(X_spca[y == 1, 0], X_spca[y == 1, 1], color='blue', marker='o', alpha=0.5) ax[1].scatter(X_spca[y == 0, 0], np.zeros((500, 1)) + 0.02, color='red', marker='^', alpha=0.5) ax[1].scatter(X_spca[y == 1, 0], np.zeros((500, 1)) - 0.02, color='blue', marker='o', alpha=0.5) ax[0].set_xlabel('PC1') ax[0].set_ylabel('PC2') ax[1].set_ylim([-1, 1]) ax[1].set_yticks([]) ax[1].set_xlabel('PC1') # plt.tight_layout() # plt.savefig('./figures/circles_2.png', dpi=300) plt.show() X_kpca = rbf_kernel_pca(X, gamma=15, n_components=2) fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(7, 3)) ax[0].scatter(X_kpca[y == 0, 0], X_kpca[y == 0, 1], color='red', marker='^', alpha=0.5) ax[0].scatter(X_kpca[y == 1, 0], X_kpca[y == 1, 1], color='blue', marker='o', alpha=0.5) ax[1].scatter(X_kpca[y == 0, 0], np.zeros((500, 1)) + 0.02, color='red', marker='^', alpha=0.5) ax[1].scatter(X_kpca[y == 1, 0], np.zeros((500, 1)) - 0.02, color='blue', marker='o', alpha=0.5) ax[0].set_xlabel('PC1') ax[0].set_ylabel('PC2') ax[1].set_ylim([-1, 1]) ax[1].set_yticks([]) ax[1].set_xlabel('PC1') # plt.tight_layout() # plt.savefig('./figures/circles_3.png', dpi=300) plt.show() ############################################################################# print(50 * '=') print('Section: Projecting new data points') print(50 * '-') def rbf_kernel_pca(X, gamma, n_components): """ RBF kernel PCA implementation. Parameters ------------ X: {NumPy ndarray}, shape = [n_samples, n_features] gamma: float Tuning parameter of the RBF kernel n_components: int Number of principal components to return Returns ------------ X_pc: {NumPy ndarray}, shape = [n_samples, k_features] Projected dataset lambdas: list Eigenvalues """ # Calculate pairwise squared Euclidean distances # in the MxN dimensional dataset. sq_dists = pdist(X, 'sqeuclidean') # Convert pairwise distances into a square matrix. mat_sq_dists = squareform(sq_dists) # Compute the symmetric kernel matrix. K = exp(-gamma * mat_sq_dists) # Center the kernel matrix. 
N = K.shape[0] one_n = np.ones((N, N)) / N K = K - one_n.dot(K) - K.dot(one_n) + one_n.dot(K).dot(one_n) # Obtaining eigenpairs from the centered kernel matrix # numpy.eigh returns them in sorted order eigvals, eigvecs = eigh(K) # Collect the top k eigenvectors (projected samples) alphas = np.column_stack((eigvecs[:, -i] for i in range(1, n_components + 1))) # Collect the corresponding eigenvalues lambdas = [eigvals[-i] for i in range(1, n_components + 1)] return alphas, lambdas X, y = make_moons(n_samples=100, random_state=123) alphas, lambdas = rbf_kernel_pca(X, gamma=15, n_components=1) x_new = X[25] print('New data point x_new:', x_new) x_proj = alphas[25] # original projection print('Original projection x_proj:', x_proj) def project_x(x_new, X, gamma, alphas, lambdas): pair_dist = np.array([np.sum((x_new - row)**2) for row in X]) k = np.exp(-gamma * pair_dist) return k.dot(alphas / lambdas) # projection of the "new" datapoint x_reproj = project_x(x_new, X, gamma=15, alphas=alphas, lambdas=lambdas) print('Reprojection x_reproj:', x_reproj) plt.scatter(alphas[y == 0, 0], np.zeros((50)), color='red', marker='^', alpha=0.5) plt.scatter(alphas[y == 1, 0], np.zeros((50)), color='blue', marker='o', alpha=0.5) plt.scatter(x_proj, 0, color='black', label='original projection of point X[25]', marker='^', s=100) plt.scatter(x_reproj, 0, color='green', label='remapped point X[25]', marker='x', s=500) plt.legend(scatterpoints=1) # plt.tight_layout() # plt.savefig('./figures/reproject.png', dpi=300) plt.show() ############################################################################# print(50 * '=') print('Section: Kernel principal component analysis in scikit-learn') print(50 * '-') X, y = make_moons(n_samples=100, random_state=123) scikit_kpca = KernelPCA(n_components=2, kernel='rbf', gamma=15) X_skernpca = scikit_kpca.fit_transform(X) plt.scatter(X_skernpca[y == 0, 0], X_skernpca[y == 0, 1], color='red', marker='^', alpha=0.5) plt.scatter(X_skernpca[y == 1, 0], X_skernpca[y == 1, 1], color='blue', marker='o', alpha=0.5) plt.xlabel('PC1') plt.ylabel('PC2') # plt.tight_layout() # plt.savefig('./figures/scikit_kpca.png', dpi=300) plt.show()
mit
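The manual PCA route above (covariance matrix, eigendecomposition, projection with W) should agree with scikit-learn's PCA up to per-component sign flips, since sklearn uses an SVD of the centered data. The short check below demonstrates that on toy data; the dataset and the sign-alignment trick are illustrative.

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.randn(200, 5) @ rng.randn(5, 5)          # correlated toy data
Xc = X - X.mean(axis=0)

eigvals, eigvecs = np.linalg.eigh(np.cov(Xc.T))  # eigh: ascending order
w = eigvecs[:, ::-1][:, :2]                      # top-2 eigenvectors

proj_manual = Xc @ w
proj_sklearn = PCA(n_components=2).fit_transform(X)

# compare component by component, up to a sign flip
for j in range(2):
    s = np.sign(np.dot(proj_manual[:, j], proj_sklearn[:, j]))
    print(np.allclose(proj_manual[:, j] * s, proj_sklearn[:, j]))  # True, True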
LewBurton/sklearn_pycon2015
notebooks/fig_code/sgd_separator.py
54
1148
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import SGDClassifier
from sklearn.datasets.samples_generator import make_blobs


def plot_sgd_separator():
    # we create 50 separable points
    X, Y = make_blobs(n_samples=50, centers=2,
                      random_state=0, cluster_std=0.60)

    # fit the model
    clf = SGDClassifier(loss="hinge", alpha=0.01,
                        n_iter=200, fit_intercept=True)
    clf.fit(X, Y)

    # plot the line, the points, and the nearest vectors to the plane
    xx = np.linspace(-1, 5, 10)
    yy = np.linspace(-1, 5, 10)

    X1, X2 = np.meshgrid(xx, yy)
    Z = np.empty(X1.shape)
    for (i, j), val in np.ndenumerate(X1):
        x1 = val
        x2 = X2[i, j]
        # decision_function expects a 2D (n_samples, n_features) array,
        # so pass the grid point as a single-row sample
        p = clf.decision_function([[x1, x2]])
        Z[i, j] = p[0]
    levels = [-1.0, 0.0, 1.0]
    linestyles = ['dashed', 'solid', 'dashed']
    colors = 'k'

    ax = plt.axes()
    ax.contour(X1, X2, Z, levels,
               colors=colors, linestyles=linestyles)
    ax.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)

    ax.axis('tight')


if __name__ == '__main__':
    plot_sgd_separator()
    plt.show()
bsd-3-clause
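The per-point ndenumerate loop above can be collapsed into a single vectorized call, since decision_function accepts an (n_samples, n_features) array. The sketch below assumes a recent scikit-learn release, where make_blobs lives under sklearn.datasets and n_iter has been dropped in favor of the default max_iter.

import numpy as np
from sklearn.linear_model import SGDClassifier
from sklearn.datasets import make_blobs

X, Y = make_blobs(n_samples=50, centers=2, random_state=0, cluster_std=0.60)
clf = SGDClassifier(loss="hinge", alpha=0.01, fit_intercept=True).fit(X, Y)

xx, yy = np.meshgrid(np.linspace(-1, 5, 10), np.linspace(-1, 5, 10))
# stack the grid into (100, 2) samples and evaluate in one call
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
print(Z.shape)   # (10, 10), the same values the ndenumerate loop fills in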
sodafree/backend
build/ipython/IPython/frontend/terminal/console/app.py
3
5217
""" A minimal application using the ZMQ-based terminal IPython frontend. This is not a complete console app, as subprocess will not be able to receive input, there is no real readline support, among other limitations. Authors: * Min RK * Paul Ivanov """ #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- import signal import sys import time from IPython.frontend.terminal.ipapp import TerminalIPythonApp, frontend_flags as term_flags from IPython.utils.traitlets import ( Dict, List, Unicode, Int, CaselessStrEnum, CBool, Any ) from IPython.utils.warn import warn,error from IPython.zmq.ipkernel import IPKernelApp from IPython.zmq.session import Session, default_secure from IPython.zmq.zmqshell import ZMQInteractiveShell from IPython.frontend.consoleapp import ( IPythonConsoleApp, app_aliases, app_flags, aliases, app_aliases, flags ) from IPython.frontend.terminal.console.interactiveshell import ZMQTerminalInteractiveShell #----------------------------------------------------------------------------- # Globals #----------------------------------------------------------------------------- _examples = """ ipython console # start the ZMQ-based console ipython console --existing # connect to an existing ipython session """ #----------------------------------------------------------------------------- # Flags and Aliases #----------------------------------------------------------------------------- # copy flags from mixin: flags = dict(flags) # start with mixin frontend flags: frontend_flags = dict(app_flags) # add TerminalIPApp flags: frontend_flags.update(term_flags) # disable quick startup, as it won't propagate to the kernel anyway frontend_flags.pop('quick') # update full dict with frontend flags: flags.update(frontend_flags) # copy flags from mixin aliases = dict(aliases) # start with mixin frontend flags frontend_aliases = dict(app_aliases) # load updated frontend flags into full dict aliases.update(frontend_aliases) # get flags&aliases into sets, and remove a couple that # shouldn't be scrubbed from backend flags: frontend_aliases = set(frontend_aliases.keys()) frontend_flags = set(frontend_flags.keys()) #----------------------------------------------------------------------------- # Classes #----------------------------------------------------------------------------- class ZMQTerminalIPythonApp(TerminalIPythonApp, IPythonConsoleApp): name = "ipython-console" """Start a terminal frontend to the IPython zmq kernel.""" description = """ The IPython terminal-based Console. This launches a Console application inside a terminal. The Console supports various extra features beyond the traditional single-process Terminal IPython shell, such as connecting to an existing ipython session, via: ipython console --existing where the previous session could have been created by another ipython console, an ipython qtconsole, or by opening an ipython notebook. 
""" examples = _examples classes = [ZMQTerminalInteractiveShell] + IPythonConsoleApp.classes flags = Dict(flags) aliases = Dict(aliases) frontend_aliases = Any(frontend_aliases) frontend_flags = Any(frontend_flags) subcommands = Dict() def parse_command_line(self, argv=None): super(ZMQTerminalIPythonApp, self).parse_command_line(argv) self.build_kernel_argv(argv) def init_shell(self): IPythonConsoleApp.initialize(self) # relay sigint to kernel signal.signal(signal.SIGINT, self.handle_sigint) self.shell = ZMQTerminalInteractiveShell.instance(config=self.config, display_banner=False, profile_dir=self.profile_dir, ipython_dir=self.ipython_dir, kernel_manager=self.kernel_manager) def init_gui_pylab(self): # no-op, because we don't want to import matplotlib in the frontend. pass def handle_sigint(self, *args): if self.shell._executing: if self.kernel_manager.has_kernel: # interrupt already gets passed to subprocess by signal handler. # Only if we prevent that should we need to explicitly call # interrupt_kernel, until which time, this would result in a # double-interrupt: # self.kernel_manager.interrupt_kernel() pass else: self.shell.write_err('\n') error("Cannot interrupt kernels we didn't start.\n") else: # raise the KeyboardInterrupt if we aren't waiting for execution, # so that the interact loop advances, and prompt is redrawn, etc. raise KeyboardInterrupt def init_code(self): # no-op in the frontend, code gets run in the backend pass def launch_new_instance(): """Create and run a full blown IPython instance""" app = ZMQTerminalIPythonApp.instance() app.initialize() app.start() if __name__ == '__main__': launch_new_instance()
bsd-3-clause
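The flag/alias plumbing above follows one pattern throughout: copy the mixin's dict, layer the frontend-specific entries on top, and keep the set of frontend keys so backend arguments can be filtered apart later. The toy sketch below shows that pattern with made-up flag names.

# copy-then-layer pattern from the module above, with illustrative flags
base_flags = {'debug': 'turn on debug logging', 'quick': 'skip startup files'}
term_flags = {'no-banner': 'suppress the banner'}

frontend_flags = dict(base_flags)      # start from the mixin's flags
frontend_flags.update(term_flags)      # add terminal-app flags on top
frontend_flags.pop('quick')            # quick won't propagate to the kernel

flags = dict(base_flags)
flags.update(frontend_flags)           # full dict seen by the application

frontend_flag_names = set(frontend_flags)  # keys only, for later filtering
print(sorted(frontend_flag_names))         # ['debug', 'no-banner']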
dudulianangang/vps
EneConsTest.py
1
5969
import sdf
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl

plt.style.use('seaborn-white')
# plt.rcParams['font.family'] = 'sans-serif'
# plt.rcParams['font.sans-serif'] = 'Tahoma'
# # plt.rcParams['font.monospace'] = 'Ubuntu Mono'
plt.rcParams['font.size'] = 16
# plt.rcParams['axes.labelsize'] = 10
# plt.rcParams['axes.labelweight'] = 'bold'
# plt.rcParams['xtick.labelsize'] = 8
# plt.rcParams['ytick.labelsize'] = 8
# plt.rcParams['legend.fontsize'] = 10
# plt.rcParams['figure.titlesize'] = 12

# constants for normalization
n0 = 1.8e20
me = 9.1e-31
qe = 1.6e-19
ep = 8.9e-12
c = 3e8
wp = np.sqrt(n0*qe*qe/me/ep)
ld = c/wp
e0 = me*c*wp/qe
b0 = e0/c
tt = 1/wp
ts = 50*5
te = 1500
pct = 100
en0 = me*c**2
en1 = 0.5*ep*ld**2

# simulation domain
nx = 3500
ny = 3500
lx = 3500
ly = 3500

# figure domain (set by grid)
grid_min_x = 0
grid_max_x = nx
grid_min_y = 0
grid_max_y = ny
Gx = np.linspace(0, lx, nx)
Gy = np.linspace(0, ly, ny)
gx = Gx[grid_min_x:grid_max_x+1]
gy = Gy[grid_min_y:grid_max_y+1]

# figure parameters
# fs = 24
jetcmap = plt.cm.get_cmap("rainbow", 9)  # generate a discrete rainbow map with 9 values
jet_vals = jetcmap(np.arange(9))         # extract those values as an array
jet_vals[0] = [1.0, 1, 1.0, 1]           # change the first value (to white)
newcmap = mpl.colors.LinearSegmentedColormap.from_list("newjet", jet_vals)

# define arrays
EneBmE = np.ones(7)
EneBmI = np.ones(7)
EneBgE = np.ones(7)
EneBgI = np.ones(7)

sex = np.ones(7)
sey = np.ones(7)
sez = np.ones(7)
sbx = np.ones(7)
sby = np.ones(7)
sbz = np.ones(7)

TpeC1 = np.ones(7)
TpeS1 = np.ones(7)
TfeC1 = np.ones(7)
TfeS1 = np.ones(7)
TpeC2 = np.ones(7)
TpeS2 = np.ones(7)
TfeC2 = np.ones(7)
TfeS2 = np.ones(7)

TeC1 = np.ones(7)
TeS1 = np.ones(7)
TeC2 = np.ones(7)
TeS2 = np.ones(7)

time = np.ones(7)

# plot function
file = '/Volumes/yaowp2016/'

folder = 'nj'
for i in range(7):
    ii = i*5
    time[i] = i*ts
    fname = file+folder+'/6'+str(ii).zfill(4)+'.sdf'
    datafile = sdf.read(fname)
    GamBmE = datafile.Particles_Gamma_subset_ele1_ele_bm.data
    GamBmI = datafile.Particles_Gamma_subset_ion1_ion_bm.data
    GamBgE = datafile.Particles_Gamma_subset_ele1_ele_e.data
    GamBgI = datafile.Particles_Gamma_subset_ion1_ion_e.data
    WgtBmE = datafile.Particles_Weight_subset_ele1_ele_bm.data
    WgtBmI = datafile.Particles_Weight_subset_ion1_ion_bm.data
    WgtBgE = datafile.Particles_Weight_subset_ele1_ele_e.data
    WgtBgI = datafile.Particles_Weight_subset_ion1_ion_e.data
    EneBmE[i] = np.sum((GamBmE-1)*en0*np.mean(WgtBmE))*pct
    EneBmI[i] = np.sum((GamBmI-1)*en0*np.mean(WgtBmI))*pct
    EneBgE[i] = np.sum((GamBgE-1)*en0*np.mean(WgtBgE))*pct
    EneBgI[i] = np.sum((GamBgI-1)*en0*np.mean(WgtBgI))*pct

    fname = file+folder+'/'+str(ii).zfill(4)+'.sdf'
    datafile = sdf.read(fname)
    Ex = datafile.Electric_Field_Ex.data
    Ey = datafile.Electric_Field_Ey.data
    Ez = datafile.Electric_Field_Ez.data
    Bx = datafile.Magnetic_Field_Bx.data*c
    By = datafile.Magnetic_Field_By.data*c
    Bz = datafile.Magnetic_Field_Bz.data*c
    sex[i] = np.sum(Ex**2)*en1
    sey[i] = np.sum(Ey**2)*en1
    sez[i] = np.sum(Ez**2)*en1
    sbx[i] = np.sum(Bx**2)*en1
    sby[i] = np.sum(By**2)*en1
    sbz[i] = np.sum(Bz**2)*en1

    TpeC1[i] = EneBmE[i]+EneBmI[i]+EneBgE[i]+EneBgI[i]
    TfeC1[i] = sex[i]+sey[i]+sez[i]+sbx[i]+sby[i]+sbz[i]
    TfeS1[i] = datafile.Total_Field_Energy_in_Simulation__J_.data
    TpeS1[i] = datafile.Total_Particle_Energy_in_Simulation__J_.data

folder = 'nj_non'
for i in range(7):
    ii = i*5
    time[i] = i*ts
    fname = file+folder+'/6'+str(ii).zfill(4)+'.sdf'
    datafile = sdf.read(fname)
    GamBmE = datafile.Particles_Gamma_subset_ele1_ele_bm.data
    GamBmI = datafile.Particles_Gamma_subset_ion1_ion_bm.data
    GamBgE = datafile.Particles_Gamma_subset_ele1_ele_e.data
    GamBgI = datafile.Particles_Gamma_subset_ion1_ion_e.data
    WgtBmE = datafile.Particles_Weight_subset_ele1_ele_bm.data
    WgtBmI = datafile.Particles_Weight_subset_ion1_ion_bm.data
    WgtBgE = datafile.Particles_Weight_subset_ele1_ele_e.data
    WgtBgI = datafile.Particles_Weight_subset_ion1_ion_e.data
    EneBmE[i] = np.sum((GamBmE-1)*en0*np.mean(WgtBmE))*pct
    EneBmI[i] = np.sum((GamBmI-1)*en0*np.mean(WgtBmI))*pct
    EneBgE[i] = np.sum((GamBgE-1)*en0*np.mean(WgtBgE))*pct
    EneBgI[i] = np.sum((GamBgI-1)*en0*np.mean(WgtBgI))*pct

    fname = file+folder+'/'+str(ii).zfill(4)+'.sdf'
    datafile = sdf.read(fname)
    Ex = datafile.Electric_Field_Ex.data
    Ey = datafile.Electric_Field_Ey.data
    Ez = datafile.Electric_Field_Ez.data
    Bx = datafile.Magnetic_Field_Bx.data*c
    By = datafile.Magnetic_Field_By.data*c
    Bz = datafile.Magnetic_Field_Bz.data*c
    sex[i] = np.sum(Ex**2)*en1
    sey[i] = np.sum(Ey**2)*en1
    sez[i] = np.sum(Ez**2)*en1
    sbx[i] = np.sum(Bx**2)*en1
    sby[i] = np.sum(By**2)*en1
    sbz[i] = np.sum(Bz**2)*en1

    TpeC2[i] = EneBmE[i]+EneBmI[i]+EneBgE[i]+EneBgI[i]
    TfeC2[i] = sex[i]+sey[i]+sez[i]+sbx[i]+sby[i]+sbz[i]
    TfeS2[i] = datafile.Total_Field_Energy_in_Simulation__J_.data
    TpeS2[i] = datafile.Total_Particle_Energy_in_Simulation__J_.data

TeC1 = TpeC1+TfeC1
TeS1 = TpeS1+TfeS1
TeC2 = TpeC2+TfeC2
TeS2 = TpeS2+TfeS2

np.save('tpec1.npy', TpeC1)
np.save('tpes1.npy', TpeS1)
np.save('tfec1.npy', TfeC1)
np.save('tfes1.npy', TfeS1)
np.save('tpec2.npy', TpeC2)
np.save('tpes2.npy', TpeS2)
np.save('tfec2.npy', TfeC2)
np.save('tfes2.npy', TfeS2)
np.save('tec1.npy', TeC1)
np.save('tes1.npy', TeS1)
np.save('tec2.npy', TeC2)
np.save('tes2.npy', TeS2)

# plt.figure(figsize=(8,5))
# ax = plt.subplot()
# ax.plot(time, TpeC1,'r-', lw=2, label='tbc-cal')
# ax.plot(time, TpeS1,'r--', lw=2, label='tbc-sys')
# ax.plot(time, TpeC2,'b-', lw=2, label='pbc-cal')
# ax.plot(time, TpeS2,'b--', lw=2, label='pbc-sys')
# plt.xlabel('time($\omega_{pe}^{-1}$)', fontsize=24)
# plt.ylabel('energy($J$)', fontsize=24)
# plt.legend(loc='best', numpoints=1, fancybox=True)
# plt.title('total system energy', fontsize=32, fontstyle='normal')
# plt.show()
# plt.savefig(file+folder+'/plots/'+'TotalEnergyComp.png', bbox_inches='tight')  # n means normalized
# plt.close()
apache-2.0
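The script above leans on a handful of plasma normalization constants. Below is a minimal, self-contained sketch of that normalization step, assuming SI units and the same density as EneConsTest.py; the helper name plasma_norms is illustrative and not part of the original script.

import numpy as np

def plasma_norms(n0=1.8e20, me=9.1e-31, qe=1.6e-19, ep=8.9e-12, c=3e8):
    # electron plasma frequency (rad/s), skin depth (m), and the unit
    # electric/magnetic fields used to normalize the simulation output
    wp = np.sqrt(n0 * qe * qe / (me * ep))
    ld = c / wp
    e0 = me * c * wp / qe
    b0 = e0 / c
    return wp, ld, e0, b0

wp, ld, e0, b0 = plasma_norms()
print(wp, ld, e0, b0)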
pleoni/game-of-life
plot/old/test_perf_mpi/life_perf_compilers.py
1
1863
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from numpy import *
import sys
import datetime

datafile1 = "life_host_icc.out"
datafile2 = "life_host_gnu.out"
datafile3 = "life_host_pgi.out"

if len(sys.argv) > 1:
    datafile = sys.argv[1]

plotfile = "compilers_perf_eurora.png"

data1 = loadtxt(datafile1)
data2 = loadtxt(datafile2)
data3 = loadtxt(datafile3)
today = datetime.date.today()

fig = plt.figure()  # open a new figure
top = fig.add_subplot(211)
bottom = fig.add_subplot(212)

############# TOP

ICC_C1000 = data1[where((data1[:,0]==1) & (data1[:,5]==1000)),:][0]  # mpi 1 - comp 1000
ICC_C0 = data1[where((data1[:,0]==1) & (data1[:,5]==0)),:][0]        # mpi 1 - comp 0
GNU_C1000 = data2[where((data2[:,0]==1) & (data2[:,5]==1000)),:][0]  # mpi 1 - comp 1000
GNU_C0 = data2[where((data2[:,0]==1) & (data2[:,5]==0)),:][0]        # mpi 1 - comp 0
PGI_C1000 = data3[where((data3[:,0]==1) & (data3[:,5]==1000)),:][0]  # mpi 1 - comp 1000
PGI_C0 = data3[where((data3[:,0]==1) & (data3[:,5]==0)),:][0]        # mpi 1 - comp 0

top.set_title(str(today) + ' life_hpc2 on eurora - NCOMP=1000')
top.grid()
top.set_xlabel('Lattice Size')
top.set_ylabel('time')
#top.set_yscale('log')
#top.legend()
top.plot(ICC_C1000[:,3], ICC_C1000[:,8], '-xr',
         GNU_C1000[:,3], GNU_C1000[:,8], '-xg',
         PGI_C1000[:,3], PGI_C1000[:,8], '-xc')
top.legend(('icc', 'gnu', 'pgi'), loc='upper left', shadow=False, prop={'size': 9})

############# BOTTOM

bottom.set_title(str(today) + ' life_hpc2 on eurora - NCOMP=0')
bottom.grid()
bottom.set_xlabel('Lattice size')
bottom.set_ylabel('time')
bottom.plot(ICC_C0[:,3], ICC_C0[:,8], '-xr',
            GNU_C0[:,3], GNU_C0[:,8], '-xg',
            PGI_C0[:,3], PGI_C0[:,8], '-xc')
bottom.legend(('icc', 'gnu', 'pgi'), loc='upper left', shadow=False, prop={'size': 9})

plt.subplots_adjust(hspace=0.5)
plt.savefig(plotfile)
#plt.show()
gpl-2.0
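The row-selection idiom in the plot script above is worth isolating: each .out file is a whitespace-delimited table loaded with loadtxt, and rows are filtered with boolean masks on columns. Below is a small runnable sketch on synthetic data, since the benchmark files are not included; the column layout (0 = MPI ranks, 3 = lattice size, 5 = NCOMP, 8 = runtime) is inferred from the script and is an assumption.

import numpy as np

# Synthetic stand-in for a 'life_host_*.out' benchmark table.
data = np.array([
    [1, 0, 0, 512,  0, 1000, 0, 0, 1.2],
    [1, 0, 0, 1024, 0, 1000, 0, 0, 4.9],
    [1, 0, 0, 512,  0, 0,    0, 0, 0.8],
    [2, 0, 0, 512,  0, 1000, 0, 0, 0.7],
])

# Same idiom as the plot script: keep rows with 1 MPI rank and NCOMP=1000.
rows = data[np.where((data[:, 0] == 1) & (data[:, 5] == 1000)), :][0]
print(rows[:, 3], rows[:, 8])  # lattice sizes vs. runtimes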
DistrictDataLabs/yellowbrick
yellowbrick/classifier/rocauc.py
1
29053
# yellowbrick.classifier.rocauc
# Implements visual ROC/AUC curves for classification evaluation.
#
# Author:   Rebecca Bilbro
# Author:   Benjamin Bengfort
# Author:   Neal Humphrey
# Created:  Tue May 03 18:15:42 2017 -0400
#
# Copyright (C) 2016 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: rocauc.py [5388065] neal@nhumphrey.com $

"""
Implements visual ROC/AUC curves for classification evaluation.
"""

##########################################################################
## Imports
##########################################################################

import numpy as np

from sklearn.metrics import auc, roc_curve
from sklearn.preprocessing import label_binarize
from sklearn.utils.multiclass import type_of_target

from yellowbrick.exceptions import ModelError
from yellowbrick.style.palettes import LINE_COLOR
from yellowbrick.exceptions import YellowbrickValueError
from yellowbrick.classifier.base import ClassificationScoreVisualizer

# Dictionary keys for ROCAUC
MACRO = "macro"
MICRO = "micro"

# Target Type Constants
BINARY = "binary"
MULTICLASS = "multiclass"

##########################################################################
## ROCAUC Visualizer
##########################################################################

class ROCAUC(ClassificationScoreVisualizer):
    """
    Receiver Operating Characteristic (ROC) curves are a measure of a
    classifier's predictive quality that compares and visualizes the tradeoff
    between the model's sensitivity and specificity. The ROC curve displays
    the true positive rate on the Y axis and the false positive rate on the
    X axis on both a global average and per-class basis. The ideal point is
    therefore the top-left corner of the plot: false positives are zero and
    true positives are one.

    This leads to another metric, area under the curve (AUC), a computation
    of the relationship between false positives and true positives. The
    higher the AUC, the better the model generally is. However, it is also
    important to inspect the "steepness" of the curve, as this describes the
    maximization of the true positive rate while minimizing the false
    positive rate. Generalizing "steepness" usually leads to discussions
    about convexity, which we do not get into here.

    Parameters
    ----------
    estimator : estimator
        A scikit-learn estimator that should be a classifier. If the model is
        not a classifier, an exception is raised. If the internal model is not
        fitted, it is fit when the visualizer is fitted, unless otherwise
        specified by ``is_fitted``.

    ax : matplotlib Axes, default: None
        The axes to plot the figure on. If not specified the current axes
        will be used (or generated if required).

    micro : bool, default: True
        Plot the micro-averages ROC curve, computed from the sum of all true
        positives and false positives across all classes. Micro is not
        defined for binary classification problems with estimators with only
        a decision_function method.

    macro : bool, default: True
        Plot the macro-averages ROC curve, which simply takes the average of
        curves across all classes. Macro is not defined for binary
        classification problems with estimators with only a decision_function
        method.

    per_class : bool, default: True
        Plot the ROC curves for each individual class. This should be set
        to false if only the macro or micro average curves are required.
        For true binary classifiers, setting per_class=False will plot the
        positive class ROC curve, and per_class=True will use ``1-P(1)`` to
        compute the curve of the negative class if only a decision_function
        method exists on the estimator.

    binary : bool, default: False
        This argument quickly resets the visualizer for true binary
        classification by updating the micro, macro, and per_class arguments
        to False (do not use in conjunction with those other arguments).
        Note that this is not a true hyperparameter to the visualizer, it
        just collects other parameters into a single, simpler argument.

    classes : list of str, default: None
        The class labels to use for the legend ordered by the index of the
        sorted classes discovered in the ``fit()`` method. Specifying classes
        in this manner is used to change the class names to a more specific
        format or to label encoded integer classes. Some visualizers may also
        use this field to filter the visualization for specific classes. For
        more advanced usage specify an encoder rather than class labels.

    encoder : dict or LabelEncoder, default: None
        A mapping of classes to human readable labels. Often there is a
        mismatch between desired class labels and those contained in the
        target variable passed to ``fit()`` or ``score()``. The encoder
        disambiguates this mismatch ensuring that classes are labeled
        correctly in the visualization.

    is_fitted : bool or str, default="auto"
        Specify if the wrapped estimator is already fitted. If False, the
        estimator will be fit when the visualizer is fit, otherwise, the
        estimator will not be modified. If "auto" (default), a helper method
        will check if the estimator is fitted before fitting it again.

    force_model : bool, default: False
        Do not check to ensure that the underlying estimator is a classifier.
        This will prevent an exception when the visualizer is initialized but
        may result in unexpected or unintended behavior.

    kwargs : dict
        Keyword arguments passed to the visualizer base classes.

    Attributes
    ----------
    classes_ : ndarray of shape (n_classes,)
        The class labels observed while fitting.

    class_count_ : ndarray of shape (n_classes,)
        Number of samples encountered for each class during fitting.

    score_ : float
        An evaluation metric of the classifier on test data produced when
        ``score()`` is called. This metric is between 0 and 1 -- higher
        scores are generally better. For classifiers, this score is usually
        accuracy, but if micro or macro is specified this returns an F1 score.

    target_type_ : string
        Specifies if the detected classification target was binary or
        multiclass.

    Notes
    -----
    ROC curves are typically used in binary classification, and in fact the
    Scikit-Learn ``roc_curve`` metric is only able to perform metrics for
    binary classifiers. As a result it is necessary to binarize the output or
    to use one-vs-rest or one-vs-all strategies of classification. The
    visualizer does its best to handle multiple situations, but exceptions
    can arise from unexpected models or outputs.

    Another important point is the relationship of class labels specified on
    initialization to those drawn on the curves. The classes are not used to
    constrain ordering or filter curves; the ROC computation happens on the
    unique values specified in the target vector to the ``score`` method. To
    ensure the best quality visualization, do not use a LabelEncoder for this
    and do not pass in class labels.

    .. seealso::
        http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html

    .. todo:: Allow the class list to filter the curves on the visualization.
    Examples
    --------
    >>> from yellowbrick.classifier import ROCAUC
    >>> from sklearn.linear_model import LogisticRegression
    >>> from sklearn.model_selection import train_test_split
    >>> data = load_data("occupancy")
    >>> features = ["temp", "relative humidity", "light", "C02", "humidity"]
    >>> X = data[features].values
    >>> y = data.occupancy.values
    >>> X_train, X_test, y_train, y_test = train_test_split(X, y)
    >>> oz = ROCAUC(LogisticRegression())
    >>> oz.fit(X_train, y_train)
    >>> oz.score(X_test, y_test)
    >>> oz.show()
    """

    def __init__(
        self,
        estimator,
        ax=None,
        micro=True,
        macro=True,
        per_class=True,
        binary=False,
        classes=None,
        encoder=None,
        is_fitted="auto",
        force_model=False,
        **kwargs
    ):
        super(ROCAUC, self).__init__(
            estimator,
            ax=ax,
            classes=classes,
            encoder=encoder,
            is_fitted=is_fitted,
            force_model=force_model,
            **kwargs
        )

        # Set the visual parameters for ROCAUC
        # NOTE: the binary flag breaks our API since it's really just a meta
        # parameter for micro, macro, and per_class. We knew this going in,
        # but did it anyway.
        self.binary = binary

        if self.binary:
            self.micro = False
            self.macro = False
            self.per_class = False
        else:
            self.micro = micro
            self.macro = macro
            self.per_class = per_class

    def fit(self, X, y=None):
        """
        Fit the classification model.
        """
        # The target determines what kind of estimator is fit
        ttype = type_of_target(y)
        if ttype.startswith(MULTICLASS):
            self.target_type_ = MULTICLASS
        elif ttype.startswith(BINARY):
            self.target_type_ = BINARY
        else:
            raise YellowbrickValueError(
                (
                    "{} does not support target type '{}', "
                    "please provide a binary or multiclass single-output target"
                ).format(self.__class__.__name__, ttype)
            )

        # Fit the model and return self
        return super(ROCAUC, self).fit(X, y)

    def score(self, X, y=None):
        """
        Generates the predicted target values using the Scikit-Learn
        estimator.

        Parameters
        ----------
        X : ndarray or DataFrame of shape n x m
            A matrix of n instances with m features

        y : ndarray or Series of length n
            An array or series of target or class values

        Returns
        -------
        score_ : float
            Global accuracy unless micro or macro scores are requested.
        """
        # Call super to check if fitted and to compute self.score_
        # NOTE: this sets score to the base score if neither macro nor micro
        super(ROCAUC, self).score(X, y)

        # Compute the predictions for the test data
        y_pred = self._get_y_scores(X)

        if self.target_type_ == BINARY:
            # For binary, per_class must be True to draw micro/macro curves
            if (self.micro or self.macro) and not self.per_class:
                raise ModelError(
                    "no curves will be drawn; "
                    "set per_class=True or micro=False and macro=False."
                )

            # For binary, if predictions are returned in shape (n,), micro
            # and macro curves are not defined
            if (self.micro or self.macro) and len(y_pred.shape) == 1:
                raise ModelError("no curves will be drawn; set binary=True.")

        if self.target_type_ == MULTICLASS:
            # If it's multiclass classification, at least one of micro,
            # macro, or per_class must be True
            if not self.micro and not self.macro and not self.per_class:
                raise YellowbrickValueError(
                    "no curves will be drawn; specify micro, macro, or per_class"
                )

        # Classes may be label encoded so only use what's in y to compute.
        # The self.classes_ attribute will be used as names for labels.
        classes = np.unique(y)
        n_classes = len(classes)

        # Store the false positive rate, true positive rate and curve info.
        self.fpr = dict()
        self.tpr = dict()
        self.roc_auc = dict()

        # If the decision is binary, draw only the ROC curve for the positive class
        if self.target_type_ is BINARY and not self.per_class:
            # In this case predict_proba returns an array of shape (n, 2) which
            # specifies the probabilities of both the negative and positive classes.
            if len(y_pred.shape) == 2 and y_pred.shape[1] == 2:
                self.fpr[BINARY], self.tpr[BINARY], _ = roc_curve(y, y_pred[:, 1])
            else:
                # decision_function returns array of shape (n,), so plot it directly
                self.fpr[BINARY], self.tpr[BINARY], _ = roc_curve(y, y_pred)
            self.roc_auc[BINARY] = auc(self.fpr[BINARY], self.tpr[BINARY])

        # Per-class binary decisions may have to have the negative class curve computed
        elif self.target_type_ is BINARY and self.per_class:
            # draw a curve for class 1 (the positive class)
            if len(y_pred.shape) == 2 and y_pred.shape[1] == 2:
                # predict_proba returns array of shape (n, 2), so use
                # probability of class 1 to compute ROC
                self.fpr[1], self.tpr[1], _ = roc_curve(y, y_pred[:, 1])
            else:
                # decision_function returns array of shape (n,)
                self.fpr[1], self.tpr[1], _ = roc_curve(y, y_pred)
            self.roc_auc[1] = auc(self.fpr[1], self.tpr[1])

            # draw a curve for class 0 (the negative class)
            if len(y_pred.shape) == 2 and y_pred.shape[1] == 2:
                # predict_proba returns array of shape (n, 2), so use
                # probability of class 0 to compute ROC
                self.fpr[0], self.tpr[0], _ = roc_curve(1 - y, y_pred[:, 0])
            else:
                # decision_function returns array of shape (n,).
                # To draw a ROC curve for class 0 we swap the classes 0 and 1
                # in y and reverse the classifier's predictions y_pred.
                self.fpr[0], self.tpr[0], _ = roc_curve(1 - y, -y_pred)
            self.roc_auc[0] = auc(self.fpr[0], self.tpr[0])

        else:
            # Otherwise compute the ROC curve and ROC area for each class
            for i, c in enumerate(classes):
                self.fpr[i], self.tpr[i], _ = roc_curve(y, y_pred[:, i], pos_label=c)
                self.roc_auc[i] = auc(self.fpr[i], self.tpr[i])

        # Compute micro average
        if self.micro:
            self._score_micro_average(y, y_pred, classes, n_classes)

        # Compute macro average
        if self.macro:
            self._score_macro_average(n_classes)

        # Draw the Curves
        self.draw()

        # Set score to micro average if specified
        if self.micro:
            self.score_ = self.roc_auc[MICRO]

        # Set score to macro average if not micro
        if self.macro:
            self.score_ = self.roc_auc[MACRO]

        return self.score_

    def draw(self):
        """
        Renders ROC-AUC plot.
        Called internally by score, possibly more than once.

        Returns
        -------
        ax : the axis with the plotted figure
        """
        colors = self.class_colors_[0 : len(self.classes_)]
        n_classes = len(colors)

        # If it's a binary decision, plot the single ROC curve
        if self.target_type_ == BINARY and not self.per_class:
            self.ax.plot(
                self.fpr[BINARY],
                self.tpr[BINARY],
                label="ROC for binary decision, AUC = {:0.2f}".format(
                    self.roc_auc[BINARY]
                ),
            )

        # If per-class plotting is requested, plot ROC curves for each class
        if self.per_class:
            for i, color in zip(range(n_classes), colors):
                self.ax.plot(
                    self.fpr[i],
                    self.tpr[i],
                    color=color,
                    label="ROC of class {}, AUC = {:0.2f}".format(
                        self.classes_[i], self.roc_auc[i]
                    ),
                )

        # If requested, plot the ROC curve for the micro average
        if self.micro:
            self.ax.plot(
                self.fpr[MICRO],
                self.tpr[MICRO],
                linestyle="--",
                color=self.class_colors_[len(self.classes_) - 1],
                label="micro-average ROC curve, AUC = {:0.2f}".format(
                    self.roc_auc["micro"]
                ),
            )

        # If requested, plot the ROC curve for the macro average
        if self.macro:
            self.ax.plot(
                self.fpr[MACRO],
                self.tpr[MACRO],
                linestyle="--",
                color=self.class_colors_[len(self.classes_) - 1],
                label="macro-average ROC curve, AUC = {:0.2f}".format(
                    self.roc_auc["macro"]
                ),
            )

        # Plot the line of no discrimination to compare the curve to.
        self.ax.plot([0, 1], [0, 1], linestyle=":", c=LINE_COLOR)
        return self.ax

    def finalize(self, **kwargs):
        """
        Sets a title and axis labels of the figures and ensures the axis
        limits are scaled between the valid ROCAUC score values.

        Parameters
        ----------
        kwargs: generic keyword arguments.

        Notes
        -----
        Generally this method is called from show and not directly by the user.
        """
        # Set the title and add the legend
        self.set_title("ROC Curves for {}".format(self.name))
        self.ax.legend(loc="lower right", frameon=True)

        # Set the limits for the ROC/AUC (always between 0 and 1)
        self.ax.set_xlim([0.0, 1.0])
        self.ax.set_ylim([0.0, 1.0])

        # Set x and y axis labels
        self.ax.set_ylabel("True Positive Rate")
        self.ax.set_xlabel("False Positive Rate")

    def _get_y_scores(self, X):
        """
        The ``roc_curve`` metric requires target scores that can either be
        the probability estimates of the positive class, confidence values or
        non-thresholded measure of decisions (as returned by
        "decision_function").

        This method computes the scores by resolving the estimator methods
        that retrieve these values.

        .. todo:: implement confidence values metric.

        Parameters
        ----------
        X : ndarray or DataFrame of shape n x m
            A matrix of n instances with m features -- generally the test
            data that is associated with y_true values.
        """
        # The resolution order of scoring functions
        attrs = ("predict_proba", "decision_function")

        # Return the first resolved function
        for attr in attrs:
            try:
                method = getattr(self.estimator, attr, None)
                if method:
                    return method(X)
            except AttributeError:
                # Some Scikit-Learn estimators have both probability and
                # decision functions but override __getattr__ and raise an
                # AttributeError on access.
                # Note that because of the ordering of our attrs above,
                # estimators with both will *only* ever use probability.
                continue

        # If we've gotten this far, raise an error
        raise ModelError(
            "ROCAUC requires estimators with predict_proba or "
            "decision_function methods."
        )

    def _score_micro_average(self, y, y_pred, classes, n_classes):
        """
        Compute the micro average scores for the ROCAUC curves.
""" # Convert y to binarized array for micro and macro scores y = label_binarize(y, classes=classes) if n_classes == 2: y = np.hstack((1 - y, y)) # Compute micro-average self.fpr[MICRO], self.tpr[MICRO], _ = roc_curve(y.ravel(), y_pred.ravel()) self.roc_auc[MICRO] = auc(self.fpr[MICRO], self.tpr[MICRO]) def _score_macro_average(self, n_classes): """ Compute the macro average scores for the ROCAUC curves. """ # Gather all FPRs all_fpr = np.unique(np.concatenate([self.fpr[i] for i in range(n_classes)])) avg_tpr = np.zeros_like(all_fpr) # Compute the averages per class for i in range(n_classes): avg_tpr += np.interp(all_fpr, self.fpr[i], self.tpr[i]) # Finalize the average avg_tpr /= n_classes # Store the macro averages self.fpr[MACRO] = all_fpr self.tpr[MACRO] = avg_tpr self.roc_auc[MACRO] = auc(self.fpr[MACRO], self.tpr[MACRO]) ########################################################################## ## Quick method for ROCAUC ########################################################################## def roc_auc( estimator, X_train, y_train, X_test=None, y_test=None, ax=None, micro=True, macro=True, per_class=True, binary=False, classes=None, encoder=None, is_fitted="auto", force_model=False, show=True, **kwargs ): """ROCAUC Receiver Operating Characteristic (ROC) curves are a measure of a classifier's predictive quality that compares and visualizes the tradeoff between the models' sensitivity and specificity. The ROC curve displays the true positive rate on the Y axis and the false positive rate on the X axis on both a global average and per-class basis. The ideal point is therefore the top-left corner of the plot: false positives are zero and true positives are one. This leads to another metric, area under the curve (AUC), a computation of the relationship between false positives and true positives. The higher the AUC, the better the model generally is. However, it is also important to inspect the "steepness" of the curve, as this describes the maximization of the true positive rate while minimizing the false positive rate. Generalizing "steepness" usually leads to discussions about convexity, which we do not get into here. Parameters ---------- estimator : estimator A scikit-learn estimator that should be a classifier. If the model is not a classifier, an exception is raised. If the internal model is not fitted, it is fit when the visualizer is fitted, unless otherwise specified by ``is_fitted``. X_train : array-like, 2D The table of instance data or independent variables that describe the outcome of the dependent variable, y. Used to fit the visualizer and also to score the visualizer if test splits are not specified. y_train : array-like, 2D The vector of target data or the dependent variable predicted by X. Used to fit the visualizer and also to score the visualizer if test splits not specified. X_test: array-like, 2D, default: None The table of instance data or independent variables that describe the outcome of the dependent variable, y. Used to score the visualizer if specified. y_test: array-like, 1D, default: None The vector of target data or the dependent variable predicted by X. Used to score the visualizer if specified. ax : matplotlib Axes, default: None The axes to plot the figure on. If not specified the current axes will be used (or generated if required). test_size : float, default=0.2 The percentage of the data to reserve as test data. random_state : int or None, default=None The value to seed the random number generator for shuffling data. 
    micro : bool, default: True
        Plot the micro-averages ROC curve, computed from the sum of all true
        positives and false positives across all classes. Micro is not
        defined for binary classification problems with estimators with only
        a decision_function method.

    macro : bool, default: True
        Plot the macro-averages ROC curve, which simply takes the average of
        curves across all classes. Macro is not defined for binary
        classification problems with estimators with only a decision_function
        method.

    per_class : bool, default: True
        Plot the ROC curves for each individual class. This should be set
        to false if only the macro or micro average curves are required. For
        true binary classifiers, setting per_class=False will plot the
        positive class ROC curve, and per_class=True will use ``1-P(1)`` to
        compute the curve of the negative class if only a decision_function
        method exists on the estimator.

    binary : bool, default: False
        This argument quickly resets the visualizer for true binary
        classification by updating the micro, macro, and per_class arguments
        to False (do not use in conjunction with those other arguments).
        Note that this is not a true hyperparameter to the visualizer, it
        just collects other parameters into a single, simpler argument.

    classes : list of str, default: None
        The class labels to use for the legend ordered by the index of the
        sorted classes discovered in the ``fit()`` method. Specifying classes
        in this manner is used to change the class names to a more specific
        format or to label encoded integer classes. Some visualizers may also
        use this field to filter the visualization for specific classes. For
        more advanced usage specify an encoder rather than class labels.

    encoder : dict or LabelEncoder, default: None
        A mapping of classes to human readable labels. Often there is a
        mismatch between desired class labels and those contained in the
        target variable passed to ``fit()`` or ``score()``. The encoder
        disambiguates this mismatch ensuring that classes are labeled
        correctly in the visualization.

    is_fitted : bool or str, default="auto"
        Specify if the wrapped estimator is already fitted. If False, the
        estimator will be fit when the visualizer is fit, otherwise, the
        estimator will not be modified. If "auto" (default), a helper method
        will check if the estimator is fitted before fitting it again.

    force_model : bool, default: False
        Do not check to ensure that the underlying estimator is a classifier.
        This will prevent an exception when the visualizer is initialized but
        may result in unexpected or unintended behavior.

    show : bool, default: True
        If True, calls ``show()``, which in turn calls ``plt.show()``;
        however, you cannot call ``plt.savefig`` from this signature, nor
        ``clear_figure``. If False, simply calls ``finalize()``.

    kwargs : dict
        Keyword arguments passed to the visualizer base classes.

    Notes
    -----
    ROC curves are typically used in binary classification, and in fact the
    Scikit-Learn ``roc_curve`` metric is only able to perform metrics for
    binary classifiers. As a result it is necessary to binarize the output or
    to use one-vs-rest or one-vs-all strategies of classification. The
    visualizer does its best to handle multiple situations, but exceptions
    can arise from unexpected models or outputs.

    Another important point is the relationship of class labels specified on
    initialization to those drawn on the curves. The classes are not used to
    constrain ordering or filter curves; the ROC computation happens on the
    unique values specified in the target vector to the ``score`` method.
    To ensure the best quality visualization, do not use a LabelEncoder for
    this and do not pass in class labels.

    .. seealso:: https://bit.ly/2IORWO2

    .. todo:: Allow the class list to filter the curves on the visualization.

    Examples
    --------
    >>> from yellowbrick.classifier import ROCAUC
    >>> from sklearn.linear_model import LogisticRegression
    >>> data = load_data("occupancy")
    >>> features = ["temp", "relative humidity", "light", "C02", "humidity"]
    >>> X = data[features].values
    >>> y = data.occupancy.values
    >>> roc_auc(LogisticRegression(), X, y)

    Returns
    -------
    viz : ROCAUC
        Returns the fitted, finalized visualizer object
    """
    # Instantiate the visualizer
    visualizer = ROCAUC(
        estimator=estimator,
        ax=ax,
        micro=micro,
        macro=macro,
        per_class=per_class,
        binary=binary,
        classes=classes,
        encoder=encoder,
        is_fitted=is_fitted,
        force_model=force_model,
        **kwargs
    )

    # Fit and transform the visualizer (calls draw)
    visualizer.fit(X_train, y_train, **kwargs)

    # Score the visualizer with X_test and y_test if provided,
    # X_train, y_train if not provided
    if X_test is not None and y_test is not None:
        visualizer.score(X_test, y_test)
    else:
        visualizer.score(X_train, y_train)

    if show:
        visualizer.show()
    else:
        visualizer.finalize()

    # Return the visualizer
    return visualizer
apache-2.0
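The micro-average computed by _score_micro_average above can be reproduced with scikit-learn alone: binarize the targets, flatten both the indicator matrix and the score matrix, and run one binary ROC over the pooled decisions. A standalone sketch on synthetic data follows; the dataset and model are illustrative, not taken from the file above.

import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import auc, roc_curve
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import label_binarize

X, y = make_classification(n_samples=500, n_classes=3, n_informative=6, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

y_score = LogisticRegression(max_iter=1000).fit(X_train, y_train).predict_proba(X_test)
y_bin = label_binarize(y_test, classes=[0, 1, 2])

# Micro-average: pool every (class, sample) decision into one binary problem.
fpr, tpr, _ = roc_curve(y_bin.ravel(), y_score.ravel())
print("micro-average AUC = {:0.3f}".format(auc(fpr, tpr)))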
Wonjuseo/Project101
others/sine_RNN.py
1
4425
import tensorflow as tf
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle

def sin(x, T=100):
    return np.sin(2.0*np.pi*x/T)

def problem(T=100, ampl=0.05):
    x = np.arange(0, 2*T+1)
    noise = ampl*np.random.uniform(low=-1.0, high=1.0, size=len(x))
    return sin(x) + noise

class EarlyStopping():
    def __init__(self, patience=0, verbose=0):
        self._step = 0
        self._loss = float('inf')
        self.patience = patience
        self.verbose = verbose

    def validate(self, loss):
        if self._loss < loss:
            self._step += 1
            if self._step > self.patience:
                if self.verbose:
                    print('early stopping')
                return True
        else:
            self._step = 0
            self._loss = loss
        return False

def inference(x, n_batch, maxlen=None, n_hidden=None, n_out=None):
    # TensorFlow 1.x / tf.contrib API
    def weight_variable(shape):
        initial = tf.truncated_normal(shape, stddev=0.01)
        return tf.Variable(initial)

    def bias_variable(shape):
        initial = tf.zeros(shape, dtype=tf.float32)
        return tf.Variable(initial)

    cell = tf.contrib.rnn.GRUCell(n_hidden)
    initial_state = cell.zero_state(n_batch, tf.float32)

    state = initial_state
    outputs = []
    with tf.variable_scope('RNN'):
        for t in range(maxlen):
            if t > 0:
                tf.get_variable_scope().reuse_variables()
            (cell_output, state) = cell(x[:, t, :], state)
            outputs.append(cell_output)

    output = outputs[-1]

    V = weight_variable([n_hidden, n_out])
    c = bias_variable([n_out])
    y = tf.matmul(output, V) + c

    return y

def loss(y, t):
    mse = tf.reduce_mean(tf.square(y - t))
    return mse

def training(loss):
    optimizer = tf.train.AdamOptimizer(learning_rate=0.001, beta1=0.9, beta2=0.999)
    train_step = optimizer.minimize(loss)
    return train_step

T = 100
sine_data = problem(T)
length = 2*T
maxlen = 25

data = []
target = []
for i in range(0, length-maxlen+1):
    data.append(sine_data[i:i+maxlen])
    target.append(sine_data[i+maxlen])

X = np.array(data).reshape(len(data), maxlen, 1)  # 1 dimension
Y = np.array(target).reshape(len(data), 1)

# note: the reshaped arrays above are overwritten by the explicit fill below
X = np.zeros((len(data), maxlen, 1), dtype=float)
Y = np.zeros((len(data), 1), dtype=float)

for i, seq in enumerate(data):
    for t, value in enumerate(seq):
        X[i, t, 0] = value
    Y[i, 0] = target[i]

train_data = int(len(data)*0.9)
test_data = len(data) - train_data
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_data)

n_in = len(X[0][0])
n_hidden = 20
n_out = len(Y[0])

x = tf.placeholder(tf.float32, shape=[None, maxlen, n_in])
t = tf.placeholder(tf.float32, shape=[None, n_out])
n_batch = tf.placeholder(tf.int32)

y = inference(x, n_batch, maxlen=maxlen, n_hidden=n_hidden, n_out=n_out)
loss_fun = loss(y, t)
train_step = training(loss_fun)

epochs = 500
batch_size = 10

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

n_batches = train_data // batch_size
early_stopping = EarlyStopping(patience=10, verbose=1)
history = {'val_loss': [], 'val_acc': []}

for epoch in range(epochs):
    X_, Y_ = shuffle(X_train, Y_train)

    for i in range(n_batches):
        start = i*batch_size
        end = start + batch_size
        sess.run(train_step,
                 feed_dict={x: X_[start:end], t: Y_[start:end], n_batch: batch_size})

    val_loss = loss_fun.eval(session=sess,
                             feed_dict={x: X_test, t: Y_test, n_batch: test_data})
    history['val_loss'].append(val_loss)
    print('epochs:', epoch, 'validation_loss:', val_loss)

    #if early_stopping.validate(val_loss):
    #    break

truncate = maxlen
Z = X[:1]

original = [sine_data[i] for i in range(maxlen)]
predicted = [None for i in range(maxlen)]

for i in range(length-maxlen+1):
    z_ = Z[-1:]
    y_ = y.eval(session=sess, feed_dict={x: Z[-1:], n_batch: 1})
    sequence_ = np.concatenate(
        (z_.reshape(maxlen, n_in)[1:], y_), axis=0).reshape(1, maxlen, n_in)
    Z = np.append(Z, sequence_, axis=0)
    predicted.append(y_.reshape(-1))

import matplotlib.pyplot as plt

plt.rc('font', family='serif')
plt.figure()
plt.plot(problem(T, ampl=0), linestyle='dotted', color='#aaaaaa')
plt.plot(original, linestyle='dashed', color='black')
plt.plot(predicted, color='black')
plt.show()
apache-2.0
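sine_RNN.py frames next-step prediction by sliding a window of maxlen samples over the noisy sine and using the following sample as the target. The same preparation takes a few lines of NumPy; make_windows below is a hypothetical helper mirroring the script's loop, not part of the original file.

import numpy as np

def make_windows(series, maxlen=25):
    # each row of X is maxlen consecutive samples; y is the sample that follows
    X = np.array([series[i:i + maxlen] for i in range(len(series) - maxlen)])
    y = np.array([series[i + maxlen] for i in range(len(series) - maxlen)])
    return X[..., np.newaxis], y[:, np.newaxis]  # shapes (n, maxlen, 1), (n, 1)

series = np.sin(2.0 * np.pi * np.arange(0, 201) / 100.0)
X, y = make_windows(series)
print(X.shape, y.shape)  # (176, 25, 1) (176, 1)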
Srisai85/scikit-learn
examples/linear_model/plot_iris_logistic.py
283
1678
 
#!/usr/bin/python
# -*- coding: utf-8 -*-

"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================

Shown below is a logistic-regression classifier's decision boundaries on the
`iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.

"""
print(__doc__)

# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause

import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets

# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features.
Y = iris.target

h = .02  # step size in the mesh

logreg = linear_model.LogisticRegression(C=1e5)

# we create an instance of the logistic regression classifier and fit the data.
logreg.fit(X, Y)

# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])

# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)

# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')

plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())

plt.show()
bsd-3-clause
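The decision-boundary plot in the example above hinges on one idiom: flatten a meshgrid into an (n_points, 2) matrix for predict(), then reshape the predictions back to the grid for pcolormesh. The same step in isolation, with a stand-in predictor so the sketch runs without scikit-learn; the bounds and threshold are arbitrary.

import numpy as np

xx, yy = np.meshgrid(np.arange(0.0, 1.0, 0.25), np.arange(0.0, 1.0, 0.25))
grid_points = np.c_[xx.ravel(), yy.ravel()]   # shape (16, 2), one row per mesh point
fake_predictions = (grid_points.sum(axis=1) > 1.0).astype(int)  # stand-in for clf.predict
Z = fake_predictions.reshape(xx.shape)        # back to (4, 4) for color plotting
print(Z)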
nagordon/mechpy
mechpy/composites.py
1
71681
# coding: utf-8
'''
Module for composite material analysis

Hyer-Stress Analysis of Fiber-Reinforced Composite Materials
Herakovich-Mechanics of Fibrous Composites
Daniel-Engineering Mechanics of Composite Materials
Kollar-Mechanics of Composite Structures

NASA-Basic Mechanics of Laminated Composites
https://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/19950009349.pdf

TODO:
* transverse shear stress reddy pg 136 or daniel pg 139
* include line loads (Qx,Qy) for combined loading
* calculate capability of panel based on margin
'''
#==============================================================================
# Import Modules
#==============================================================================
from __future__ import print_function, division

__author__ = 'Neal Gordon <nealagordon@gmail.com>'
__date__ = '2016-12-02'
__version__ = 0.1

from copy import copy
from numpy import pi, zeros, ones, linspace, arange, array, sin, cos, sqrt
from numpy.linalg import solve, inv

#from scipy import linalg
import numpy as np
#np.set_printoptions(suppress=False, precision=2)   # suppress scientific notation
np.set_printoptions(precision=3, linewidth=200)  #, threshold=np.inf)

import scipy
from scipy.spatial import ConvexHull
#np.set_printoptions(formatter={'float': lambda x: "{:.2f}".format(x)})

import pandas as pd

import sympy as sp
from sympy import Function, dsolve, Eq, Derivative, symbols, pprint
from sympy.plotting import plot3d
#from sympy import cos, sin
#sp.init_printing(use_latex='mathjax')
#sp.init_printing(wrap_line=False, pretty_print=True)

import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (8, 5)
mpl.rcParams['font.size'] = 12
mpl.rcParams['legend.fontsize'] = 14

import matplotlib.pyplot as plt
from matplotlib.pyplot import plot, figure, xlim, ylim, title, legend, \
    grid, show, xlabel, ylabel, tight_layout
from mpl_toolkits.mplot3d import axes3d

# if using ipython console, turn off inline plotting
#mpl.use('Qt5Agg')

# inline plotting
from IPython import get_ipython
#get_ipython().magic('matplotlib inline')

### disable inline plotting
try:
    get_ipython().magic('matplotlib')
except:
    pass

from IPython.display import display

import os

plt.close('all')

#==============================================================================
# Functions
#==============================================================================

def import_matprops(mymaterial=['T300_5208', 'AL_7075']):
    '''
    import material properties
    '''
    matprops = pd.read_csv(os.path.join(os.path.dirname(__file__),
                                        "compositematerials.csv"), index_col=0)

    if mymaterial == [] or mymaterial == '':
        print(matprops.columns.tolist())

    mat = matprops[mymaterial]
    #mat.applymap(lambda x: np.float(x))
    mat = mat.applymap(lambda x: pd.to_numeric(x, errors='ignore'))
    return mat

def Sf(E1, E2, nu12, G12):
    '''transversely isotropic compliance matrix. pg 58 herakovich'''
    nu21 = E2*nu12/E1
    S = array([[1/E1,     -nu21/E2, 0],
               [-nu12/E1, 1/E2,     0],
               [0,        0,        1/G12]])
    return S

def S6f(E1, E2, E3, nu12, nu13, nu23, G12, G13, G23):
    '''
    daniel pg 74
    transversely isotropic compliance matrix.
    For transversely isotropic, E2=E3, nu12=nu13, G12=G13, G23=E2/(2*(1+nu23))
    '''
    S6 = array([[    1/E1, -nu12/E1, -nu12/E1,     0,     0,     0],
                [-nu12/E1,     1/E2, -nu23/E2,     0,     0,     0],
                [-nu12/E1, -nu23/E2,     1/E2,     0,     0,     0],
                [       0,        0,        0, 1/G23,     0,     0],
                [       0,        0,        0,     0, 1/G13,     0],
                [       0,        0,        0,     0,     0, 1/G12]])
    return S6

def C6f(E1, E2, E3, nu12, nu13, nu23, G12, G13, G23):
    ''' daniel pg 74
    transversely isotropic stiffness matrix.
''' C6 = inv(S6f(E1,E2,E3,nu12,nu13,nu23,G12,G13,G23)) return C6 def Qf(E1,E2,nu12,G12): '''transversly isptropic compliance matrix. pg 58 herakovich G12 = E1/(2*(1+nu12)) if isotropic''' nu21 = E2*nu12/E1 Q = array([[E1/(1-nu12*nu21), E2*nu12/(1-nu12*nu21), 0], [ E2*nu12/(1-nu12*nu21), E2/(1-nu12*nu21), 0], [0, 0, G12]]) return Q def T61(th): '''Stress th=ply angle in degrees voight notation for stress tranform. sigma1 = T1 @ sigmax reddy pg 91''' n = sin(th*pi/180) m = cos(th*pi/180) T1 = array( [[m**2, n**2, 0, 0, 0, 2*m*n], [n**2, m**2, 0, 0, 0,-2*m*n], [0, 0, 1, 0, 0, 0], [0, 0, 0, m,-n, 0], [0, 0, 0, n, m, 0], [-m*n, m*n, 0, 0, 0,(m**2-n**2)]]) return T1 def T62(th): '''Strain voight notation for strain transform. epsilon1 = T2 @ epsilonx th=ply angle in degrees reddy pg 91 ''' n = sin(th*pi/180) m = cos(th*pi/180) T2 = array( [[m**2, n**2, 0, 0, 0, m*n], [n**2, m**2, 0, 0, 0,-m*n], [0, 0, 1, 0, 0, 0], [0, 0, 0, m,-n, 0], [0, 0, 0, n, m, 0], [-2*m*n, 2*m*n, 0, 0, 0,(m**2-n**2)]]) return T2 def T1(th): '''Stress Transform for Plane Stress th=ply angle in degrees voight notation for stress tranform. sigma1 = T1 @ sigmax recall T1(th)**-1 == T1(-th)''' n = sin(th*pi/180) m = cos(th*pi/180) T1 = array( [[m**2, n**2, 2*m*n], [n**2, m**2,-2*m*n], [-m*n, m*n,(m**2-n**2)]]) return T1 def T2(th): '''Strain Transform for Plane Stress th=ply angle in degrees voight notation for strain transform. epsilon1 = T2 @ epsilonx''' n = sin(th*pi/180) m = cos(th*pi/180) T2 = array( [[m**2, n**2, m*n], [n**2, m**2,-m*n], [-2*m*n, 2*m*n, (m**2-n**2)]]) return T2 def T1s(th): '''Symbolic Stress Transform for Plane Stress th=ply angle in degrees voight notation for stress tranform. sigma1 = T1 @ sigmax recall T1(th)**-1 == T1(-th)''' n = sp.sin(th*sp.pi/180) m = sp.cos(th*sp.pi/180) T1 = sp.Matrix( [[m**2, n**2, 2*m*n], [n**2, m**2,-2*m*n], [-m*n, m*n,(m**2-n**2)]]) return T1 def T2s(th): '''Symbolic Strain Transform for Plane Stress th=ply angle in degrees voight notation for strain transform. 
epsilon1 = T2 @ epsilonx''' n = sp.sin(th*sp.pi/180) m = sp.cos(th*sp.pi/180) T2 = sp.Matrix( [[m**2, n**2, m*n], [n**2, m**2,-m*n], [-2*m*n, 2*m*n, (m**2-n**2)]]) return T2 def failure_envelope(): # failure envelopes # max stress criteria # 1 direction in first row # 2 direction in second row # failure strength in compression #Fc = matrix([[-1250.0, -600.0], # [-200.0, -120.0]]) # ksi # ##failure strength in tension #Ft = matrix([[1500, 1000] # [50, 30]]) # ksi # ##Failure strength in shear #Fs = matrix( [100, 70] ) # Shear Fc1 = [-1250, -600] # Compression 1 direction Fc2 = [-200, -120] # Compression 2 direction Ft1 = [1500, 1000] # Tension 1 direction Ft2 = [50, 30] # Tension 2 direction Fs = [100, 70] # Shear # F1 = Ft(1); # F2 = Ft(1); # F6 = Fs(1); for c in range(2):# mattype factor = 1.25 # right plot( [Ft1[c], Ft1[c]], [Fc2[c], Ft2[c]]) # left plot( [Fc1[c], Fc1[c]] , [Fc2[c], Ft2[c]]) # top plot( [Fc1[c], Ft1[c]] , [Ft2[c], Ft2[c]]) # bottom plot( [Fc1[c], Ft1[c]] , [Fc2[c], Fc2[c]]) # center horizontal plot( [Fc1[c], Ft1[c]] , [0, 0]) # center vertical plot( [0, 0] , [Fc2[c], Ft2[c]]) #xlim([min(Fc1) max(Ft1)]*factor) #ylim([min(Fc2) max(Ft2)]*factor) xlabel('$\sigma_1,ksi$') ylabel('$\sigma_2,ksi$') title('failure envelope with Max-Stress Criteria') def material_plots(materials = ['Carbon_cloth_AGP3705H']): ''' plotting composite properties Sf(E1,E2,nu12,G12) ''' # plt.rcParams['figure.figsize'] = (10, 8) # plt.rcParams['font.size'] = 14 # plt.rcParams['legend.fontsize'] = 14 plyangle = arange(-45, 45.1, 0.1) h = 1 # lamina thickness layupname='[0]' mat = import_matprops(materials) Ex = mat[materials[0]].E1 Ey = mat[materials[0]].E2 nuxy = mat[materials[0]].nu12 Gxy = mat[materials[0]].G12 # layupname = '[0, 45, 45, 0]' # Ex= 2890983.38 # Ey= 2844063.06 # nuxy= 0.27 # Gxy= 1129326.25 # h = 0.0600 plt.close('all') S = Sf(Ex,Ey,nuxy,Gxy) C = inv(S) C11 = [(inv(T1(th)) @ C @ T2(th))[0,0] for th in plyangle] C22 = [(inv(T1(th)) @ C @ T2(th))[1,1] for th in plyangle] C33 = [(inv(T1(th)) @ C @ T2(th))[2,2] for th in plyangle] C12 = [(inv(T1(th)) @ C @ T2(th))[0,1] for th in plyangle] Exbar = zeros(len(plyangle)) Eybar = zeros(len(plyangle)) Gxybar = zeros(len(plyangle)) Q = Qf(Ex,Ey,nuxy,Gxy) Qbar = zeros((len(plyangle),3,3)) for i,th in enumerate(plyangle): Qbar[i] = solve(T1(th), Q) @ T2(th) #Qbar = [solve(T1(th),Q) @ T2(th) for th in plyangle] Qbar11 = Qbar[:,0,0] Qbar22 = Qbar[:,1,1] Qbar66 = Qbar[:,2,2] Qbar12 = Qbar[:,0,1] Qbar16 = Qbar[:,0,2] Qbar26 = Qbar[:,1,2] Aij = Qbar*h # laminate Stiffness # | Exbar Eybar Gxybar | # A = | vxybar vyxbar etasxbar | # | etaxsbar etaysbar etasybar | # laminate Comnpliance aij = zeros((len(plyangle),3,3)) for i, _Aij in enumerate(Aij): aij[i] = inv(_Aij) # material properties for whole laminate (Daniel, pg183) Exbar = [1/(h*_aij[0,0]) for _aij in aij] Eybar = [1/(h*_aij[1,1]) for _aij in aij] Gxybar = [1/(h*_aij[2,2]) for _aij in aij] # Global Stress s_xy = array([[100], [10], [5]]) # local ply stress s_12 = np.zeros((3,len(plyangle))) for i,th in enumerate(plyangle): #s_12[:,i] = np.transpose(T1(th) @ s_xy)[0] # local stresses s_12[:,[i]] = T1(th) @ s_xy # Plotting figure()#, figsize=(10,8)) plot(plyangle, C11, plyangle, C22, plyangle, C33, plyangle, C12) legend(['$\overline{C}_{11}$','$\overline{C}_{22}$', '$\overline{C}_{44}$', '$\overline{C}_{66}$']) title('Transversly Isotropic Stiffness properties of carbon fiber T300_5208') xlabel("$\Theta$") ylabel('$\overline{C}_{ii}$, ksi') grid() figure()#, figsize=(10,8)) plot(plyangle, Exbar, 
label = r"Modulus: $E_x$") plot(plyangle, Eybar, label = r"Modulus: $E_y$") plot(plyangle, Gxybar, label = r"Modulus: $G_{xy}$") title("Constitutive Properties in various angles") xlabel("$\Theta$") ylabel("modulus, psi") legend() grid() figure()#,figsize=(10,8)) plot(plyangle, s_12[0,:], label = '$\sigma_{11},ksi$' ) plot(plyangle, s_12[1,:], label = '$\sigma_{22},ksi$' ) plot(plyangle, s_12[2,:], label = '$\sigma_{12},ksi$' ) legend(loc='lower left') xlabel("$\Theta$") ylabel("Stress, ksi") grid() # plot plyangle as a function of time figure()#,figsize=(10,8)) plot(plyangle,Qbar11, label = "Qbar11") plot(plyangle,Qbar22, label = "Qbar22") plot(plyangle,Qbar66, label = "Qbar66") legend(loc='lower left') xlabel("$\Theta$") ylabel('Q') grid() # plot plyangle as a function of time figure()#,figsize=(10,8)) plot(plyangle,Qbar12, label = "Qbar12") plot(plyangle,Qbar16, label = "Qbar16") plot(plyangle,Qbar26, label = "Qbar26") legend(loc='lower left') xlabel("$\Theta$") ylabel('Q') grid() titlename = 'Laminate Properties varying angle for {} {}'.format(materials[0], layupname) #df = pd.DataFrame({'plyangle':plyangle, 'Exbar':Exbar, 'Eybar':Eybar,'Gxybar':Gxybar}) #print(df) #df.to_csv(titlename+'.csv') plt.figure(figsize=(9,6)) plot(plyangle, Exbar, label = r"Modulus: $E_x$") plot(plyangle, Eybar, label = r"Modulus: $E_y$") plot(plyangle, Gxybar, label = r"Modulus: $G_{xy}$") title(titlename) xlabel("$\Theta$") ylabel("modulus, psi") legend(loc='best') grid() #plt.savefig(titlename+'.png') show() def laminate_gen(lamthk=1.5, symang=[45,0,90], plyratio=2.0, matrixlayers=False, balancedsymmetric=True): ''' ## function created to quickly create laminates based on given parameters lamthk=1.5 # total #thickness of laminate symang = [45,0,90, 30] #symmertic ply angle plyratio=2.0 # lamina/matrix ratio matrixlayers=False # add matrix layers between lamina plys nonsym=False # symmetric mat = material type, as in different plies, matrix layer, uni tapes, etc #ply ratio can be used to vary the ratio of thickness between a matrix ply and lamina ply. 
if the same thickness is desired, plyratio = 1, if lamina is 2x as thick as matrix plyratio = 2 ''' if matrixlayers: nply = (len(symang)*2+1)*2 nm = nply-len(symang)*2 nf = len(symang)*2 tm = lamthk / (plyratio*nf + nm) tf = tm*plyratio plyangle = zeros(nply//2) mat = 2*ones(nply//2) # orthotropic fiber and matrix = 1, isotropic matrix=2, mat[1:-1:2] = 1 # [2 if x%2 else 1 for x in range(nply//2) ] plyangle[1:-1:2] = symang[:] # make a copy thk = tm*ones(nply//2) thk[2:2:-1] = tf lamang = list(symang) + list(symang[::-1]) plyangle = list(plyangle) + list(plyangle[::-1]) mat = list(mat) + list(mat[::-1]) thk = list(thk) + list(thk[::-1]) else: # no matrix layers, ignore ratio if balancedsymmetric: nply = len(symang)*2 mat = list(3*np.ones(nply)) thk = list(lamthk/nply*np.ones(nply)) lamang = list(symang) + list(symang[::-1]) plyangle = list(symang) + list(symang[::-1]) else: nply = len(symang) mat =[1]*nply thk = list(lamthk/nply*np.ones(nply)) lamang = symang[:] plyangle = symang[:] return thk,plyangle,mat,lamang def make_quasi(n0=4,n45=4): #n0 = 4 #n45 = 13 # #ply0 = [0]*n0 #ply45 = [45]*n45 #plyangle = [] #from itertools import zip_longest #for x,y in zip_longest(ply0,ply45): # if len(plyangle)<min(len(ply0),len(ply45))*2: # plyangle.append(x) # plyangle.append(y) # else: # plyangle.append(x) # plyangle.reverse() # plyangle.append(y) #plyangle = [x for x in plyangle if x is not None] #plyangle ntot = n45+n0 plyangle = [45]*int(n45) for p in [0]*int(n0): plyangle.append(p) plyangle.reverse() return plyangle #@xw.func def laminate_calcs(NM,ek,q0,plyangle,plymatindex,materials,platedim, zoffset,SF,plots,prints): ''' code to compute composite properties, applied mechanical and thermal loads and stress and strain inputs NM # force/moments lbs/in ek # strain, curvature in/in q0 = pressure plyangle # angle for each ply plymatindex # material for each ply materials # list materials used, general outline for computing elastic properties of composites 1) Determine engineering properties of unidirectional laminate. 
E1, E2, nu12, G12 2) Calculate ply stiffnesses Q11, Q22, Q12, Q66 in the principal/local coordinate system 3) Determine Fiber orientation of each ply 4) Calculate the transformed stiffness Qxy in the global coordinate system 5) Determine the through-thicknesses of each ply 6) Determine the laminate stiffness Matrix (ABD) 7) Calculate the laminate compliance matrix by inverting the ABD matrix 8) Calculate the laminate engineering properties # Stress Strain Relationship for a laminate, with Q=reduced stiffness matrix |sx | |Qbar11 Qbar12 Qbar16| |ex +z*kx | |sy |=|Qbar12 Qbar22 Qbar26|=|ey +z*ky | |sxy| |Qbar16 Qbar26 Qbar66| |exy+z*kxy| # Herakovich pg 84 Qbar = inv(T1) @ Q @ T2 == solve(T1, Q) @ T2 transformation reminders - see Herakovich for details sig1 = T1*sigx sigx = inv(T1)*sig1 eps1 = T2*epsx epsx = inv(T2)*epsx sigx = inv(T1)*Q*T2*epsx Qbar = inv(T1)*Q*T2 Sbar = inv(T2)*inv(Q)*T2 Notes, core transverse direction is G13, ribbon direction is G23 a_width = 50 # plate width (inches or meters) b_length = 50 # laminate length, inches or meters ''' #========================================================================== # Initialize python settings #========================================================================== #get_ipython().magic('matplotlib') plt.close('all') plt.rcParams['figure.figsize'] = (12, 8) plt.rcParams['font.size'] = 13 #plt.rcParams['legend.fontsize'] = 14 #========================================================================== # Define composite properties #========================================================================== assert(len(plyangle)==len(plymatindex)) a_width, b_length = platedim # either apply strains or loads , lb/in Nx_, Ny_, Nxy_, Mx_, My_, Mxy_ = NM NMbarapp = array([[Nx_],[Ny_],[Nxy_],[Mx_],[My_],[Mxy_]]) ex_, ey_, exy_, kx_, ky_, kxy_ = ek epsilonbarapp = array([[ex_],[ey_],[exy_],[kx_],[ky_],[kxy_]]) Ti = 0 # initial temperature (C) Tf = 0 # final temperature (C) #SF = 1.0 # safety factor #========================================================================== # Import Material Properties #========================================================================== mat = import_matprops(materials) #mat = import_matprops(['E-Glass Epoxy cloth','rohacell2lb']) # Herakovich alphaf = lambda mat: array([[mat.alpha1], [mat.alpha2], [0]]) ''' to get ply material info, use as follows alpha = alphaf(mat[materials[plymatindex[i]]]) mat[materials[1]].E2 ''' laminatethk = array([mat[materials[i]].plythk for i in plymatindex ]) nply = len(laminatethk) # number of plies H = np.sum(laminatethk) # plate thickness # area = a_width*H z = zeros(nply+1) zmid = zeros(nply) z[0] = -H/2 for i in range(nply): z[i+1] = z[i] + laminatethk[i] zmid[i] = z[i] + laminatethk[i]/2 #========================================================================== # ABD Matrix Compute #========================================================================== # Reduced stiffness matrix for a plane stress ply in principal coordinates # calcluating Q from the Compliance matrix may cause cancE1ation errors A = zeros((3,3)); B = zeros((3,3)); D = zeros((3,3)) for i in range(nply): # = nply Q = Qf(mat[materials[plymatindex[i]]].E1, mat[materials[plymatindex[i]]].E2, mat[materials[plymatindex[i]]].nu12, mat[materials[plymatindex[i]]].G12 ) Qbar = solve(T1(plyangle[i]), Q) @ T2(plyangle[i]) # inv(T1(plyangle[i])) @ Q @ T2(plyangle[i]) A += Qbar*(z[i+1]-z[i]) # coupling stiffness B += (1/2)*Qbar*(z[i+1]**2-z[i]**2) # bending or flexural laminate stiffness relating moments to 
curvatures D += (1/3)*Qbar*(z[i+1]**3-z[i]**3) #Cbar6 = T61 @ C6 @ np.transpose(T61) # laminate stiffness matrix ABD = zeros((6,6)) ABD[0:3,0:3] = A ABD[0:3,3:6] = B + zoffset*A ABD[3:6,0:3] = B + zoffset*A ABD[3:6,3:6] = D + 2*zoffset*B + zoffset**2*A # laminatee compliance abcd = inv(ABD) a = abcd[0:3,0:3] #========================================================================== # Laminate Properties #========================================================================== # effective laminate shear coupling coefficients etasxbar = a[0,2]/a[2,2] etasybar = a[1,2]/a[2,2] etaxsbar = a[2,0]/a[0,0] etaysbar = a[2,1]/a[1,1] # laminate engineer properties Exbar = 1 / (H*a[0,0]) Eybar = 1 / (H*a[1,1]) Gxybar = 1 / (H*a[2,2]) nuxybar = -a[0,1]/a[0,0] nuyxbar = -a[0,1]/a[1,1] # TODO: validate results, does not appear to be correct # strain centers, pg 72, NASA-Basic mechanics of lamianted composites # added divide by zero epsilon z_eps0_x = -B[0,0] / (D[0,0] + 1e-16) z_eps0_y = -B[0,1] / (D[0,1] + 1e-16) z_eps0_xy = -B[0,2] / (D[0,2] + 1e-16) z_sc = -B[2,2] / (D[2,2] +1e-16) # shear center # --------------------- Double Check --------------------- # # Laminate compliance matrix # LamComp = array([ [1/Exbar, -nuyxbar/Eybar, etasxbar/Gxybar], # [-nuxybar/Exbar, 1/Eybar , etasybar/Gxybar], # [etaxsbar/Exbar, etaysbar/Eybar, 1/Gxybar]] ) # # Daniel pg 183 # # combines applied loads and applied strains # strain_laminate = LamComp @ Nxyzapplied[:3]/H + strainxyzapplied[:3] # Nxyz = A @ strain_laminate # stress_laminate = Nxyz/H # -------------------------------------------------------- #========================================================================== # Pressure Load #========================================================================== #========================================================================== # pressure displacement and moments #========================================================================== D11,D12,D22,D66 = D[0,0], D[0,1], D[1,1], D[2,2] B11 = B[0,0] A11, A12 = A[0,0], A[0,1] # reddy pg 247 Navier displacement solution for a simply supported plate s = b_length/a_width x = a_width/2 y = b_length/2 # 5.2.8, reddy, or hyer 13.123 terms = 5 w0 = 0 for m in range(1,terms,2): for n in range(1,terms,2): dmn = pi**4/b_length**4 * (D11*m**4*s**4 + 2*(D12 + 2*D66)*m**2*n**2*s**2 + D22*n**4) alpha = m*pi/a_width beta = n*pi/b_length # for uniformly distributed loads, m,n = 1,3,5,... Qmn = 16*q0/(pi**2*m*n) Wmn = Qmn/dmn w0 += Wmn * sin(alpha*x) * sin(beta*y) w0_simplesupport = w0 # 5.2.12a, reddy # mid span moments Mxq=Myq=Mxyq=0 for m in range(1,terms,2): for n in range(1,terms,2): dmn = pi**4/b_length**4 * (D11*m**4*s**4 + 2*(D12 + 2*D66)*m**2*n**2*s**2 + D22*n**4) alpha = m*pi/a_width beta = n*pi/b_length # for uniformly distributed loads, m,n = 1,3,5,... 
Qmn = 16*q0/(pi**2*m*n) Wmn = Qmn/dmn Mxq += (D11*alpha**2 + D12*beta**2 ) * Wmn * sin(m*pi*x/a_width) * sin(n*pi*y/b_length) Myq += (D12*alpha**2 + D22*beta**2 ) * Wmn * sin(m*pi*x/a_width) * sin(n*pi*y/b_length) Mxyq += alpha*beta*D66 * Wmn * cos(m*pi*x/a_width) * cos(n*pi*y/b_length) Mxyq = -2*Mxyq NMq = [[0],[0],[0],[Mxq],[Myq],[Mxyq]] # hyer, x-pin-pin, y-free-free plate reaction forces, pg 619 # Forces and Moments across the width of the plate A11R = A11*(1-B11**2/(A11*D11)) D11R = D11*(1-B11**2/(A11*D11)) Nxq0 = lambda x: B11/D11 * q0 * a_width**2 /12 Nyq0 = lambda x: B11 * A12*q0 * a_width**2 / (D11*A11R*12) * (6*(x/a_width)**2-1/2) Nxyq0 = lambda x: 0 Mxq0 = lambda x: q0 * a_width**2/8 * (1-4*(x/a_width)**2) Myq0 = lambda x: D12 * q0 * a_width**2 / (D11R*8) * ((1-2*B11**2/(3*A11*D11))-(4*(x/a_width)**2)) Mxyq0 = lambda x: 0 # clamped plate 5.4.11, reddy #w0_clamped = ( 49 * q0*a_width**4 * (x/a_width - (x/a_width)**2 )**2 * (y/b_length - (y/b_length)**2)**2) / (8 * (7*D11+4*(D12 + 2*D66)*s**2 + 7*D22*s**4) ) # reddy, 5.4.12 w0_clamped = 0.00342 * (q0*a_width**4) / (D11+0.5714*(D12+2*D66)*s**2+D22*s**4) # reddy, 5.4.15 #w0_clamped = 0.00348 * (q0*a_width**4) / (D11*b_length**4+0.6047*(D12+2*D66)*s**2+D22*s**4) # reddy 5.4.15, for isotropic D11=D w0_clamped_isotropic = 0.00134*q0*a_width**4/D11 #========================================================================== # Applied Loads and pressure loads #========================================================================== NMbarapptotal = NMbarapp + NMq + ABD @ epsilonbarapp #========================================================================== # Thermal Loads #========================================================================== ''' if the material is isotropic and unconstrained, then no thermal stresses will be experienced. If there are constraints, then the material will experience thermally induced stresses. As with orthotropic materials, various directions will have different stresses, and when stacked in various orientations, stresses can be unintuitive and complicated. Global Thermal strains are subtracted from applied strains # 1) determine the free unrestrained thermal strains in each layer, alphabar ''' dT = Tf-Ti Nhatth= zeros((3,1)) # unit thermal force in global CS Mhatth = zeros((3,1)) # unit thermal moment in global CS alphabar = zeros((3,nply)) # global ply CTE for i in range(nply): # = nply Q = Qf(mat[materials[plymatindex[i]]].E1, mat[materials[plymatindex[i]]].E2, mat[materials[plymatindex[i]]].nu12, mat[materials[plymatindex[i]]].G12 ) alpha = alphaf(mat[materials[plymatindex[i]]]) Qbar = inv(T1(plyangle[i])) @ Q @ T2(plyangle[i]) alphabar[:,[i]] = solve(T2(plyangle[i]), alpha) #alphabar[:,[i]] = inv(T2(plyangle[i])) @ alpha # Convert to global CS Nhatth += Qbar @ (alphabar[:,[i]])*(z[i+1] - z[i]) # Hyer method for calculating thermal unit loads Mhatth += 0.5*Qbar@(alphabar[:,[i]])*(z[i+1]**2-z[i]**2) NMhatth = np.vstack((Nhatth,Mhatth)) NMbarth = NMhatth*dT # resultant thermal loads # Laminate CTE epsilonhatth = abcd@NMhatth # laminate CTE # applied loads and thermal loads epsilonbarapp = abcd @ NMbarapptotal epsilonbarth = abcd @ NMbarth # resultant thermal strains epsilonbartotal = epsilonbarapp + epsilonbarth # Composite respone from applied mechanical loads and strains. Average # properties only. Used to compare results from tensile test. 
#epsilon_laminate = abcd@NMbarapptotal #sigma_laminate = ABD@epsilon_laminate/H epsilon_laminate = epsilonbartotal[:] sigma_laminate = ABD@epsilonbartotal/H alpha_laminate = a@Nhatth # determine thermal load and applied loads or strains Hyer pg 435,452 Nx = NMbarapptotal[0,0]*a_width # units kiloNewtons, total load as would be applied in a tensile test Ny = NMbarapptotal[1,0]*b_length # units kN #========================================================================== # Thermal and mechanical local and global stresses at the ply interface #========================================================================== # Declare variables for plotting epsilon_app = zeros((3,2*nply)) sigma_app = zeros((3,2*nply)) epsilonbar_app = zeros((3,2*nply)) sigmabar_app = zeros((3,2*nply)) epsilon_th = zeros((3,2*nply)) sigma_th = zeros((3,2*nply)) epsilonbar_th = zeros((3,2*nply)) sigmabar_th = zeros((3,2*nply)) epsilon = zeros((3,2*nply)) epsilonbar = zeros((3,2*nply)) sigma = zeros((3,2*nply)) sigmabar = zeros((3,2*nply)) for i,k in enumerate(range(0,2*nply,2)): # stress is calcuated at top and bottom of each ply Q = Qf(mat[materials[plymatindex[i]]].E1, mat[materials[plymatindex[i]]].E2, mat[materials[plymatindex[i]]].nu12, mat[materials[plymatindex[i]]].G12 ) Qbar = inv(T1(plyangle[i])) @ Q @ T2(plyangle[i]) ### transverse shear, herakovich pg 254 #Q44 = mat[materials[plymatindex[i]]].G23 #Q55 = mat[materials[plymatindex[i]]].G13 #Qbar44 = Q44*cos(plyangle[i])**2+Q55*sin(plyangle[i])**2 #Qbar55 = Q55*cos(plyangle[i])**2 + Q44*sin(plyangle[i])**2 #Qbar45 = (Q55-Q44)*cos(plyangle[i])*sin(plyangle[i]) #epsilontransverse = array([[gammayz],[gammaxz]]) #sigmatransverse = array([[Qbar44, Qbar45],[Qbar45, Qbar55]]) @ epsilontransverse # Global stresses and strains, applied load only epsbarapp1 = epsilonbarapp[0:3] + z[i]*epsilonbarapp[3:7] epsbarapp2 = epsilonbarapp[0:3] + z[i+1]*epsilonbarapp[3:7] sigbarapp1 = Qbar @ epsbarapp1 sigbarapp2 = Qbar @ epsbarapp2 # Local stresses and strains, appplied load only epsapp1 = T2(plyangle[i]) @ epsbarapp1 epsapp2 = T2(plyangle[i]) @ epsbarapp2 sigapp1 = Q @ epsapp1 sigapp2 = Q @ epsapp2 # Interface Stresses and Strains epsilon_app[:,k:k+2] = np.column_stack((epsapp1,epsapp2)) epsilonbar_app[:,k:k+2] = np.column_stack((epsbarapp1,epsbarapp2)) sigma_app[:,k:k+2] = np.column_stack((sigapp1,sigapp2)) sigmabar_app[:,k:k+2] = np.column_stack((sigbarapp1,sigbarapp2)) # Global stress and strains, thermal loading only epsbarth1 = epsilonbarth[0:3] + z[i]*epsilonbarth[3:7] - dT*alphabar[:,[i]] epsbarth2 = epsilonbarth[0:3] + z[i+1]*epsilonbarth[3:7] - dT*alphabar[:,[i]] sigbarth1 = Qbar @ epsbarth1 sigbarth2 = Qbar @ epsbarth2 # Local stress and strains, thermal loading only epsth1 = T2(plyangle[i]) @ epsbarth1 epsth2 = T2(plyangle[i]) @ epsbarth2 sigth1 = Q @ epsth1 sigth2 = Q @ epsth2 # Interface Stresses and Strains epsilon_th[:,k:k+2] = np.column_stack((epsth1,epsth2)) epsilonbar_th[:,k:k+2] = np.column_stack((epsbarth1+dT*alphabar[:,[i]],epsbarth2+dT*alphabar[:,[i]])) # remove the local thermal loads for plotting. 
only use local thermal strains for calculating stress sigma_th[:,k:k+2] = np.column_stack((sigth1,sigth2)) sigmabar_th[:,k:k+2] = np.column_stack((sigbarth1,sigbarth2)) # TOTAL global stresses and strains, applied and thermal epsbar1 = epsbarapp1 + epsbarth1 epsbar2 = epsbarapp2 + epsbarth2 sigbar1 = Qbar @ epsbar1 sigbar2 = Qbar @ epsbar2 # TOTAL local stresses and strains , applied and thermal eps1 = T2(plyangle[i]) @ epsbar1 eps2 = T2(plyangle[i]) @ epsbar2 sig1 = Q @ eps1 sig2 = Q @ eps2 # Interface Stresses and Strains epsilon[:,k:k+2] = np.column_stack((eps1,eps2)) epsilonbar[:,k:k+2] = np.column_stack((epsbar1+dT*alphabar[:,[i]],epsbar2+dT*alphabar[:,[i]])) # remove the local thermal loads for plotting. only use local thermal strains for calculating stress sigma[:,k:k+2] = np.column_stack((sig1,sig2)) sigmabar[:,k:k+2] = np.column_stack((sigbar1,sigbar2)) #========================================================================== # Strength Failure Calculations #========================================================================== # Strength Ratio STRENGTHRATIO_MAXSTRESS = zeros((3,2*nply)) # Failure Index FAILUREINDEX_MAXSTRESS = zeros((3,2*nply)) STRENGTHRATIO_TSAIWU = zeros((nply)) for i,k in enumerate(range(0,2*nply,2)): # stress s1 = sigma[0,k] s2 = sigma[1,k] s12 = np.abs(sigma[2,k]) # strength F1 = mat[materials[plymatindex[i]]].F1t if s1 > 0 else mat[materials[plymatindex[i]]].F1c F2 = mat[materials[plymatindex[i]]].F2t if s2 > 0 else mat[materials[plymatindex[i]]].F2c F12 = mat[materials[plymatindex[i]]].F12 # Max Stress failure index ,failure if > 1, then fail, FI = 1/SR FAILUREINDEX_MAXSTRESS[0,k:k+2] = s1 / F1 FAILUREINDEX_MAXSTRESS[1,k:k+2] = s2 / F2 FAILUREINDEX_MAXSTRESS[2,k:k+2] = s12 / F12 # Tsai Wu, failure occures when > 1 F1t = mat[materials[plymatindex[i]]].F1t F1c = mat[materials[plymatindex[i]]].F1c F2t = mat[materials[plymatindex[i]]].F2t F2c = mat[materials[plymatindex[i]]].F2c F12 = mat[materials[plymatindex[i]]].F12 # inhomogeneous Tsai-Wu criterion # from Daniel # http://www2.mae.ufl.edu/haftka/composites/mcdaniel-nonhomogenous.pdf f1 = 1/F1t + 1/F1c f2 = 1/F2t + 1/F2c f11 = -1/(F1t*F1c) f22 = -1/(F2t*F2c) f66 = 1/F12**2 f12 = -0.5*sqrt(f11*f22) #TW = f1*s1 + f2*s2 + f11*s1**2 + f22*s2**2 + f66*s12**2 + 2*f12*s1*s2 # polynomial to solve. 
Added a machine epsilon to avoid divide by zero errors lam1 = f11*s1**2 + f22*s2**2 + f66*s12**2 + 2*f12*s1*s2 + 1e-16 lam2 = f1*s1 + f2*s2 + 1e-16 lam3 = -1 # smallest positive root roots = array([(-lam2+sqrt(lam2**2-4*lam1*lam3)) / (2*lam1) , (-lam2-sqrt(lam2**2-4*lam1*lam3)) / (2*lam1)] ) STRENGTHRATIO_TSAIWU[i] = roots[roots>=0].min() # strength ratio # f1 = 1/F1t - 1/F1c # f2 = 1/F2t - 1/F2c # f11 = 1/(F1t*F1c) # f22 = 1/(F2t*F2c) # f66 = 1/F12**2 # STRENGTHRATIO_TSAIWU[i] = 2 / (f1*s2 + f2*s2 + sqrt((f1*s1+f2*s2)**2+4*(f11*s1**2+f22*s2**2+f66*s12**2))) ### Apply safety factors FAILUREINDEX_MAXSTRESS = FAILUREINDEX_MAXSTRESS * SF STRENGTHRATIO_TSAIWU = STRENGTHRATIO_TSAIWU / SF ### MARGINSAFETY_TSAIWU = STRENGTHRATIO_TSAIWU-1 # margin of safety # strength ratio for max stress, if < 1, then fail, SR = 1/FI STRENGTHRATIO_MAXSTRESS = 1/(FAILUREINDEX_MAXSTRESS+1e-16) # margin of safety based on max stress criteria MARGINSAFETY_MAXSTRESS = STRENGTHRATIO_MAXSTRESS-1 # minimum margin of safety for Max stress failure MARGINSAFETY_MAXSTRESS_min = MARGINSAFETY_MAXSTRESS.min().min() FAILUREINDEX_MAXSTRESS_max = FAILUREINDEX_MAXSTRESS.max().max() # minimum margin of safety of both Tsai-Wu and Max Stress #MARGINSAFETY_MAXSTRESS_min = np.minimum(MARGINSAFETY_MAXSTRESS.min().min(), MARGINSAFETY_TSAIWU.min() ) # find critial values for all failure criteria #MARGINSAFETY_MAXSTRESS = MARGINSAFETY_MAXSTRESS[~np.isinf(MARGINSAFETY_MAXSTRESS)] # remove inf #MARGINSAFETY_TSAIWU = MARGINSAFETY_TSAIWU[~np.isinf(MARGINSAFETY_TSAIWU)] # remove inf #========================================================================== # Buckling Failure Calculations #========================================================================== ''' Buckling of Clamped plates under shear load, reddy, 5.6.17''' k11 = 537.181*D11/a_width**4 + 324.829*(D12+2*D66)/(a_width**2*b_length**2) + 537.181*D22/b_length**4 k12 = 23.107/(a_width*b_length) k22 = 3791.532*D11/a_width**4 + 4227.255*(D12+2*D66)/(a_width**2*b_length**2) + 3791.532*D22/b_length**4 Nxycrit0 = 1/k12*np.sqrt(k11*k22) FI_clamped_shear_buckling = (abs(Nxy_)*SF) / Nxycrit0 # failure if > 1 MS_clamped_shear_buckling = 1/(FI_clamped_shear_buckling+1e-16)-1 '''Kassapoglous pg 126,137 simply supported plate buckling, assumes Nx>0 is compression Nxcrit0 is the axial load that causes buckling Nxycrit0 is the shear load that cause buckling Nxcrit is the axial load part of a combined load that causes buckling Nxycrit is the shear load part of a combined load that causes buckling ''' # no buckling issues if Nx is positive # buckling calcuations assumes Nx compression is positive. 
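# sign-convention note for the block below: the laminate loads are tension-positive,
# while these buckling formulas treat compression as positive, so a tensile
# (positive) Nx_ is zeroed out and a compressive (negative) Nx_ enters as its
# magnitude before being compared against Nxcrit.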
Nx__ = abs(Nx_) if Nx_ < 0 else np.float64(0) Nxy__ = np.float64(0) if Nxy_ == 0 else abs(Nxy_) # assume shear in 1 direction although both directions are ok # Nxy=0 Nxcrit0 = pi**2/a_width**2 * (D11 + 2*(D12 + 2*D66)*a_width**2/b_length**2 + D22*a_width**4/b_length**4) # Nx=0 Nxycrit0 = 9*pi**4*b_length / (32*a_width**3) * (D11 + 2*(D12 + 2*D66)*a_width**2/b_length**2 + D22*a_width**4/b_length**4) FI_Nxy0_buckling, FI_Nx0_buckling, FI_Nx_buckling, FI_Nxy_buckling = 0,0,0,0 if Nx__ == 0 or Nxy__ == 0: FI_Nxy0_buckling = (Nxy__*SF)/Nxycrit0 FI_Nx0_buckling = (Nx__*SF)/Nxcrit0 else: # interaction term k = Nxy__ / Nx__ Nxcrit = min( abs((pi**2/a_width**2) * (D11 + 2*(D12 + 2*D66)*a_width**2/b_length**2 +D22*a_width**4/b_length**4 ) / (2-8192*a_width**2*k**2/(81*b_length**2*pi**4)) * (5 + sqrt(9 + 65536*a_width**2*k**2/(81*pi**4*b_length**2)))) , abs((pi**2/a_width**2) * (D11 + 2*(D12 + 2*D66)*a_width**2/b_length**2 +D22*a_width**4/b_length**4 ) / (2-8192*a_width**2*k**2/(81*b_length**2*pi**4)) * (5 - sqrt(9 + 65536*a_width**2*k**2/(81*pi**4*b_length**2)))) ) Nxycrit = Nxycrit0*sqrt(1-Nxcrit/Nxcrit0) # interactive calc FI_Nx_buckling = (Nx__ *SF)/Nxcrit FI_Nxy_buckling = (Nxy__*SF)/Nxycrit FI_combinedload_simplesupport_buckle = max([FI_Nxy0_buckling, FI_Nx0_buckling, FI_Nx_buckling, FI_Nxy_buckling] ) MS_min_buckling = 1/(FI_combinedload_simplesupport_buckle+1e-16)-1 #========================================================================== # Facesheet Wrinkling #========================================================================== #========================================================================== # principal lamainte stresses #========================================================================== sigma_principal_laminate = np.linalg.eig(array([[sigma_laminate[0,0],sigma_laminate[2,0],0], [sigma_laminate[2,0],sigma_laminate[1,0],0], [0,0,0]]))[0] tauxy_p = sigma_laminate[2,0] sigmax_p = sigma_laminate[0,0] sigmay_p = sigma_laminate[1,0] thetap = 0.5 * np.arctan( 2*tauxy_p / ((sigmax_p-sigmay_p+1e-16))) * 180/np.pi #========================================================================== # Printing Results #========================================================================== if prints: print('--------------- laminate1 Stress analysis of fibers----------') print('(z-) plyangles (z+)'); print(plyangle) print('(z-) plymatindex (z+)'); print(plymatindex) print('ply layers') ; print(z) print('lamiante thickness, H = {:.4f}'.format(H)) #print('x- zero strain laminate center, z_eps0_x = {:.4f}'.format(z_eps0_x)) #print('y- zero strain laminate center, z_eps0_y = {:.4f}'.format(z_eps0_y)) #print('xy-zero strain laminate center, z_eps0_xy = {:.4f}'.format(z_eps0_xy)) #print('shear center laminate center, z_sc = {:.4f}'.format(z_sc)) print('Applied Loads'); print(NM) print('ABD=');print(ABD) print('Ex= {:.2f}'.format(Exbar) ) print('Ey= {:.2f}'.format(Eybar) ) print('nuxy= {:.2f}'.format(nuxybar) ) print('Gxy= {:.2f}'.format(Gxybar) ) print('epsilon_laminate') ; print(epsilon_laminate) print('sigma_laminate') ; print(sigma_laminate) print('sigma_principal_laminate') ; print(sigma_principal_laminate) print('principal_angle = {:.2f} deg'.format(thetap)) print('NMbarapp') ; print(NMbarapp) print('sigma') ; print(sigma) print('\nMax Stress Percent Margin of Safety, failure < 0, minimum = {:.4f}'.format( MARGINSAFETY_MAXSTRESS_min ) ) print(MARGINSAFETY_MAXSTRESS) print('\nTsai-Wu Percent Margin of Safety, failure < 0, minimum = {:.4f}'.format(MARGINSAFETY_TSAIWU.min())) 
print(MARGINSAFETY_TSAIWU) print('\nmaximum failure index = {:.4f}'.format( FAILUREINDEX_MAXSTRESS_max )) print(FAILUREINDEX_MAXSTRESS) print('\nBuckling MS for Nxy only for clamped edges = {:.4f}\n'.format(MS_clamped_shear_buckling)) # print('---- Individual Buckling Failure Index (fail>1) combined loads and simple support -----') # print('FI_Nxy0 = {:.2f}'.format(FI_Nxy0_buckling) ) # print('FI_Nx0 = {:.2f}'.format(FI_Nx0_buckling) ) # print('---- Interactive Buckling Failure Index (fail>1) combined loads and simple support -----') # print('FI_Nx = {:.2f}'.format(FI_Nx_buckling) ) # print('FI_Nxy = {:.2f}'.format(FI_Nxy_buckling) ) # print('---- Buckling Failure Index (fail>1) combined loads and simple support -----') # print(FI_combinedload_simplesupport_buckle) print('buckling combined loads and simple support MS = {:.4f}\n'.format((MS_min_buckling))) print('Mx_midspan = {:.2f}'.format(Mxq) ) print('My_midspan = {:.2f}'.format(Myq) ) print('Mxy_midspan = {:.2f}'.format(Mxyq) ) print('w0_simplesupport = {:.6f}'.format(w0_simplesupport) ) print('w0_clamped = {:.6f}'.format(w0_clamped) ) print('w0_clamped_isotropic= {:.6f}'.format(w0_clamped_isotropic) ) #display(sp.Matrix(sigmabar)) #========================================================================== # Plotting #========================================================================== if plots: windowwidth = 800 windowheight = 450 zplot = zeros(2*nply) for i,k in enumerate(range(0,2*nply,2)): # = nply zplot[k:k+2] = z[i:i+2] #legendlab = ['total','thermal','applied','laminate'] # global stresses and strains mylw = 1.5 #linewidth # Global Stresses and Strains f1, ((ax1,ax2,ax3), (ax4,ax5,ax6)) = plt.subplots(2,3, sharex='row', sharey=True) f1.canvas.set_window_title('Global Stress and Strain of %s laminate' % (plyangle)) stresslabel = ['$\sigma_x$','$\sigma_y$','$\\tau_{xy}$'] strainlabel = ['$\epsilon_x$','$\epsilon_y$','$\gamma_{xy}$'] for i,ax in enumerate([ax1,ax2,ax3]): ## the top axes ax.set_ylabel('thickness,z') ax.set_xlabel(strainlabel[i]) ax.set_title(' Ply Strain '+strainlabel[i]) ax.ticklabel_format(axis='x', style='sci', scilimits=(1,4)) # scilimits=(-2,2)) ax.plot(epsilonbar[i,:], zplot, color='blue', lw=mylw, label='total') ax.plot(epsilonbar_th[i,:], zplot, color='red', lw=mylw, alpha=0.75, linestyle='--', label='thermal') ax.plot(epsilonbar_app[i,:], zplot, color='green', lw=mylw, alpha=0.75,linestyle='-.', label='applied') ax.plot([epsilon_laminate[i], epsilon_laminate[i]],[np.min(z) , np.max(z)], color='black', lw=mylw, label='laminate') ax.grid(True) #ax.set_xticks(linspace( min(ax.get_xticks()) , max(ax.get_xticks()) ,6)) for i,ax in enumerate([ax4,ax5,ax6]): ax.set_ylabel('thickness,z') ax.set_xlabel(stresslabel[i]) ax.set_title(' Ply Stress '+stresslabel[i]) ax.ticklabel_format(axis='x', style='sci', scilimits=(-3,3)) # scilimits=(-2,2)) ax.plot(sigmabar[i,:], zplot, color='blue', lw=mylw, label='total') ax.plot(sigmabar_th[i,:], zplot, color='red', lw=mylw, alpha=0.75,linestyle='--', label='thermal') ax.plot(sigmabar_app[i,:], zplot, color='green', lw=mylw, alpha=0.75,linestyle='-.', label='applied') ax.plot([sigma_laminate[i], sigma_laminate[i]],[np.min(z) , np.max(z)], color='black', lw=mylw, label='laminate') ax.grid(True) leg = legend(fancybox=True) ; leg.get_frame().set_alpha(0.3) tight_layout() try: mngr = plt.get_current_fig_manager() mngr.window.setGeometry(25,50,windowwidth,windowheight) except: pass f1.show() #plt.savefig('global-stresses-strains.png') ### Local Stresses and Strains f2, 
((ax1,ax2,ax3), (ax4,ax5,ax6)) = plt.subplots(2,3, sharex='row', sharey=True) f2.canvas.set_window_title('Local Stress and Strain of %s laminate' % (plyangle)) stresslabel = ['$\sigma_1$','$\sigma_2$','$\\tau_{12}$'] strainlabel = ['$\epsilon_1$','$\epsilon_2$','$\gamma_{12}$'] strengthplot = [ [ [F1t,F1t],[zplot.min(), zplot.max()], [F1c, F1c],[zplot.min(), zplot.max()] ] , [ [F2t,F2t],[zplot.min(), zplot.max()], [F2c, F2c],[zplot.min(), zplot.max()] ] , [ [F12,F12],[zplot.min(), zplot.max()], [-F12,-F12],[zplot.min(), zplot.max()] ] ] for i,ax in enumerate([ax1,ax2,ax3]): ## the top axes ax.set_ylabel('thickness,z') ax.set_xlabel(strainlabel[i]) ax.set_title(' Ply Strain '+strainlabel[i]) ax.ticklabel_format(axis='x', style='sci', scilimits=(1,4)) # scilimits=(-2,2)) ax.plot(epsilon[i,:], zplot, color='blue', lw=mylw, label='total') ax.plot(epsilon_th[i,:], zplot, color='red', lw=mylw, alpha=0.75,linestyle='--', label='thermal') ax.plot(epsilon_app[i,:], zplot, color='green', lw=mylw, alpha=0.75,linestyle='-.', label='applied') ax.plot([epsilon_laminate[i], epsilon_laminate[i]],[np.min(z) , np.max(z)], color='black', lw=mylw, label='laminate') ax.grid(True) for i,ax in enumerate([ax4,ax5,ax6]): ax.set_ylabel('thickness,z') ax.set_xlabel(stresslabel[i]) ax.set_title(' Ply Stress '+stresslabel[i]) ax.ticklabel_format(axis='x', style='sci', scilimits=(-3,3)) # scilimits=(-2,2)) ax.plot(sigma[i,:], zplot, color='blue', lw=mylw, label='total') ax.plot(sigma_th[i,:], zplot, color='red', lw=mylw, alpha=0.75,linestyle='--', label='thermal') ax.plot(sigma_app[i,:], zplot, color='green', lw=mylw, alpha=0.75,linestyle='-.', label='applied') ax.plot([sigma_laminate[i], sigma_laminate[i]],[np.min(z) , np.max(z)], color='black', lw=mylw, label='laminate') ### plots strengths #ax.plot(strengthplot[i][0],strengthplot[i][1], color='yellow', lw=mylw) ax.grid(True) leg = legend(fancybox=True) ; leg.get_frame().set_alpha(0.3) tight_layout() try: mngr = plt.get_current_fig_manager() mngr.window.setGeometry(windowwidth+50,50,windowwidth,windowheight) except: pass f2.show() #plt.savefig('local-stresses-strains.png') ### Failure f3, ((ax1,ax2,ax3)) = plt.subplots(1,3, sharex=True, sharey=True) f3.canvas.set_window_title('Failure Index(failure if > 1), %s laminate' % (plyangle)) stresslabel = ['$\sigma_1/F_1$','$\sigma_2/F_2$','$\\tau_{12}/F_{12}$'] for i,ax in enumerate([ax1,ax2,ax3]): ## the top axes ax.set_ylabel('thickness,z') ax.set_xlabel(stresslabel[i]) #ax.set_title(' Ply Strain at $\epsilon=%f$' % (epsxapp*100)) ax.ticklabel_format(axis='x', style='sci', scilimits=(1,4)) # scilimits=(-2,2)) ax.plot(FAILUREINDEX_MAXSTRESS[i,:], zplot, color='blue', lw=mylw, label='total') ax.grid(True) ax.set_title('Failure Index, fail if > 1') #leg = legend(fancybox=True) ; leg.get_frame().set_alpha(0.3) tight_layout() try: mngr = plt.get_current_fig_manager() mngr.window.setGeometry(25,windowheight+100,windowwidth,windowheight) except: pass f2.show() #plt.savefig('local-stresses-strains.png') ### warpage res = 100 Xplt,Yplt = np.meshgrid(np.linspace(-a_width/2,a_width/2,res), np.linspace(-b_length/2,b_length/2,res)) epsx = epsilon_laminate[0,0] epsy = epsilon_laminate[1,0] epsxy = epsilon_laminate[2,0] kapx = epsilon_laminate[3,0] kapy = epsilon_laminate[4,0] kapxy = epsilon_laminate[5,0] ### dispalcement w = -0.5*(kapx*Xplt**2 + kapy*Yplt**2 + kapxy*Xplt*Yplt) u = epsx*Xplt # pg 451 hyer fig = plt.figure('plate-warpage') ax = fig.gca(projection='3d') ax.plot_surface(Xplt, Yplt, w+zmid[0], cmap=mpl.cm.jet, alpha=0.3) 
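# note: the plotted warpage is the double integral of the constant midplane
# curvatures, w(x,y) = -1/2*(kapx*x**2 + kapy*y**2 + kapxy*x*y), offset by the
# first ply mid-thickness zmid[0]; kapx, kapy, kapxy come from epsilon_laminate[3:6]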
###ax.auto_scale_xyz([-(a_width/2)*1.1, (a_width/2)*1.1], [(b_length/2)*1.1, (b_length/2)*1.1], [-1e10, 1e10]) ax.set_xlabel('plate width,y-direction,in') ax.set_ylabel('plate length,x-direction, in') ax.set_zlabel('warpage,in') #ax.set_zlim(-0.01, 0.04) #mngr = plt.get_current_fig_manager() ; mngr.window.setGeometry(450,550,600, 450) try: mngr = plt.get_current_fig_manager() mngr.window.setGeometry(windowwidth+50,windowheight+100,windowwidth,windowheight) except: pass plt.show() #plt.savefig('plate-warpage') return MARGINSAFETY_MAXSTRESS_min, FAILUREINDEX_MAXSTRESS_max def plate(): ''' composite plate mechanics TODO - results need vetted ''' #========================================================================== # Initialize #========================================================================== get_ipython().magic('matplotlib') plt.close('all') plt.rcParams['figure.figsize'] = (12, 8) plt.rcParams['font.size'] = 13 #plt.rcParams['legend.fontsize'] = 14 #========================================================================== # Import Material Properties #========================================================================== plythk = 0.0025 plyangle = array([0,90,-45,45,0]) * np.pi/180 # angle for each ply nply = len(plyangle) # number of plies laminatethk = np.zeros(nply) + plythk H = sum(laminatethk) # plate thickness # Create z dimensions of laminate z_ = np.linspace(-H/2, H/2, nply+1) a = 20 # plate width; b = 10 # plate height q0_ = 5.7 # plate load; # Transversly isotropic material properties E1 = 150e9 E2 = 12.1e9 nu12 = 0.248 G12 = 4.4e9 nu23 = 0.458 G23 = E2 / (2*(1+nu23)) # Failure Strengths F1t = 1500e6 F1c = -1250e6 F2t = 50e6 F2c = -200e6 F12t = 100e6 F12c = -100e6 Strength = np.array([[F1t, F1c], [F2t, F2c], [F12t, F12c]]) th = sp.symbols('th') # Stiffnes matrix in material coordinates Cijm6 = inv(Sij6) # reduced stiffness in structural Cij = sp.Matrix([[Cij6[0,0], Cij6[0,1], 0], [Cij6[0,1], Cij6[1,1], 0], [0, 0, Cij6[5,5] ]] ) Tij = sp.Matrix([[cos(th)**2, sin(th)**2, 2*sin(th)*cos(th)], [sin(th)**2, cos(th)**2, -2*sin(th)*cos(th)], [-cos(th)*sin(th), sin(th)*cos(th), (cos(th)**2-sin(th)**2)]]) ## Cylindrical Bending of a laminated plate # displacement in w (z direction) from sympy.abc import x f = Function('f') eq = dsolve(2*x*f(x) + (x**2 + f(x)**2)*f(x).diff(x), f(x), hint = '1st_homogeneous_coeff_best', simplify=False) pprint(eq) #============================================================================== th,x,y,z,q0,C1,C2,C3,C4,C5,C6,C7,A11,B11,D11,A16,B16 = symbols('th x y z q0 C1 C2 C3 C4 C5 C6 C7 A11 B11 D11 A16 B16') wfun = Function('wfun') ufun = Function('ufun') ## EQ 4.4.1a eq1 = A11*ufun(x).diff(x,2) - B11*wfun(x).diff(x,3) #eq1 = A11*diff(ufun,x,2) - B11*diff(wfun,x,3); # C5 C1 ## EQ 4.4.1b #eq2 = A16*diff(ufun,x,2) - B16*diff(wfun,x,3); # C5 C1 eq2 = A16*ufun(x).diff(x,2) - B16*wfun(x).diff(x,3) ## EQ 4.4.1c #eq3 = B11*diff(ufun,x,3) - D11*diff(wfun,x,4) + q0; eq3 = B11*ufun(x).diff(x,3) - D11*wfun(x).diff(x,4) + q0 ################## python conversion eded here ################################ # solve eq1 eq2 and eq3 to get the w and u functions # displacement in w (z direction) from eq1,eq2,eq3 wfun = A11*q0*x**4 / (4*(6*B11**2-6*A11*D11)) + C1 + C2*x + C3*x**2 + C4*x**3 # C1 C2 C3 C4 # displacement in u (x direction) from eq1,eq2,eq3 ufun = B11*q0*x**3 / (6*(B11**2-A11*D11)) + C7 + x*C6 + 3*B11*x**2*C5/A11 # C5 C6 C7 # Cij6.evalf(subs={th:plyangle[i]}) * (z_[i+1]**3-z_[i]**3) # cond1 -> w(0)=0 at x(0), roller C1sol = 
sp.solve(wfun.subs(x,0), C1)[0] # = 0 # cond2 -> angle at dw/dx at x(0) is 0, cantilever C2sol = sp.solve(wfun.diff(x).subs(x,0),C2)[0] # = 0 # cond3 -> w(z) = 0 at x(a), roller C4sol1 = sp.solve(wfun.subs({x:a,C1:C1sol,C2:C2sol}),C4)[0] # C3 # cond4 u = 0 at x = 0 C7sol = sp.solve(ufun.subs(x,0),C7)[0] #=0 # u=0 at x = a C5sol1 = sp.solve(ufun.subs({x:a, C7:C7sol}),C5)[0] #C6 # cond 5 EQ 4.4.14a Myy = 0 @ x(a) (Mxx , B11 D11) (Myy, B12 D12) roller no moment C6sol1 = sp.solve( ( ((B11*ufun.diff(x)+0.5*wfun.diff(x)**2 ) - D11*wfun.diff(x,2)).subs({x:a, C1:C1sol, C2:C2sol, C4:C4sol1, C5:C5sol1, C7:C7sol})), C6)[0] # C6 C3 # EQ 4.4.13a, Nxx = 0 @ x(0) roller has no Nxx C6sol2 = sp.solve( ((A11* ufun.diff(x) + 0.5*wfun.diff(x)**2)-B11*wfun.diff(x,2)).subs({x:a, C1:C1sol, C2:C2sol, C4:C4sol1, C5:C5sol1, C7:C7sol}),C6)[0] # C6 C3 C3sol = sp.solve(C6sol1 - C6sol2,C3)[0] C4sol = C4sol1.subs(C3,C3sol) C6sol = sp.simplify(C6sol2.subs(C3,C3sol)) C5sol = sp.simplify(C5sol1.subs(C6,C6sol)) # substitute integration constants with actual values ( _ is actual number) C1_ = copy(C1sol) C2_ = copy(C2sol) C7_ = copy(C7sol) C3_ = C3sol.subs({q0:q0_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]}) C4_ = C4sol.subs({q0:q0_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]}) C5_ = C5sol.subs({q0:q0_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]}) C6_ = C6sol.subs({q0:q0_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]}) # function w(x) vertical displacement w along z with actual values wsol = wfun.subs({q0:q0_, C1:C1_, C2:C2_, C3:C3_, C4:C4_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]}) # function u(x) horizontal displacement u along x with actual values usol = ufun.subs({q0:q0_, C5:C5_, C6:C6_, C7:C7_, A11:Aij[0,0], B11:Bij[0,0], D11:Dij[0,0]}) # 3d plots plot3d(wsol,(x,0,a), (y,0,b)) plt.xlabel('x') plt.ylabel('y') plt.title('Cylindrical Bending - Displacement of a plate with CLPT') ## Strain calculation # eq 3.3.8 (pg 116 reddy (pdf = 138)); von Karman membrane term is 0.5*(dw/dx)**2 epstotal = array([[usol.diff(x) + 0.5*wsol.diff(x)**2 - z*wsol.diff(x,2)],[0],[0]]) epsx = epstotal[0,0] ## Calculating and plotting Stress in each layer res = 8 # accuracy of finding max and min stress xplot = linspace(0,a,res) yplot = linspace(0,b,res) G0 = sp.symbols('G0') Globalminstress = np.zeros((3, nply)) Globalmaxstress = np.zeros((3, nply)) for kstress in range(3): # stress state s_x, s_y, s_xz plt.figure(kstress+1) for klay in range(nply): # loop through all layers thplot = plyangle[klay] zplot = linspace(z_[klay],z_[klay+1],res) stressplot = np.zeros((len(zplot),len(xplot))) ## Calc Stresses if kstress == 2: # Shear stresses G0_ = -sp.integrate(s_stress[0].diff(x),z)+G0 # solve for shear stresses from s_1 s_xz = sp.solve(G0_,G0)[0] # out of plane shear S_xz does not need to be transformed ??
plot3d(s_xz, (x,0, a), (z, z_[klay], z_[klay+1]) ) else: # normal stresses # Cij = reduced structural stiffness in structural coordinates 3x3 # stress in structural coordinates s_stress = Cij.subs(th,thplot) @ epstotal # stress in material coordinates m_stress = Tij.subs(th,thplot) @ s_stress #ezsurf(m_stress(kstress),[0,a,z_(klay),z_(klay+1)]) ## find max stress in each layer ii=0 for i in xplot: jj=0 for j in zplot: if kstress == 2: stressplot[ii,jj] = s_xz.subs({x:i, z:j}) else: stressplot[ii,jj] = m_stress[kstress].subs({x:i, z:j}) jj+=1 ii+=1 Globalminstress[kstress,klay] = np.min(stressplot) Globalmaxstress[kstress,klay] = np.max(stressplot) # plt.title('\sigma_%i' % kstress) ## Plot max stress and failure strength plt.figure() for i in range(3): plt.subplot(1, 3, i+1) plt.bar(range(nply), Globalmaxstress[i,:]) plt.bar(range(nply), Globalminstress[i,:]) plt.scatter(range(nply),np.ones(nply) * Strength[i,0]) plt.scatter(range(nply),np.ones(nply) * Strength[i,1]) plt.xlabel('layer') plt.title('\sigma%i' % i) def plate_navier(): ''' composite plate bending with Navier solution TODO - code needs to be converted from MATLAB ''' ## Plate a*b*h simply supported under q = q0 CLPT pass ''' q0,a,b,m,n,x,y = sp.symbols('q0 a b m n x y') Qmn = 4/(a*b)*sp.integrate( sp.integrate( q0*sp.sin(m*pi*x/a)*sp.sin(n*pi*y/b),(x,0,a)) ,(y,0,b)) dmn = pi**4 / b**4 * (DTij(1,1)*m**4*(b/a)**4 + 2* (DTij(1,2)+2*DTij(6,6)) *m**2*n**2*(b/a)**2 + DTij(2,2)*n**4) Wmn = Qmn/dmn; w0 = Wmn * sin(m*pi*x/a) * sin(n*pi*y/b); w0_ = subs(w0,[q0 a b],[-q0_ a_ b_] ); figure w0sum = 0; for n_ = 1:10 for m_ = 1:10 w0sum = w0sum + subs(w0_,[n m],[n_ m_]); end end w0sum; % xplot = linspace(0,a_,res); % yplot = linspace(0,b_,res); ii=1; for i = xplot jj=1; for j = yplot w0plot(ii,jj) = subs(w0sum,[x y],[i j]); jj=jj+1; end ii=ii+1; end surf(xplot,yplot,w0plot) colorbar set(gca,'PlotBoxAspectRatio',[2 1 1]); xlabel('length a, u(x)') ylabel('length b, v(y)') zlabel('w(z)') ''' class laminate(object): """ IN-WORK - laminate object for composite material analysis """ # constructor def __init__(self, plyangle, matindex, matname): # run when laminate is instantiated # loads materials used self.plyangle = plyangle self.matindex = matindex self.matname = matname self.__mat = self.__import_matprops(matname) # create a simple function to handle CTE properties def __alphaf(self, mat): return array([[mat.alpha1], [mat.alpha2], [0]]) self.laminatethk = array([self.__mat[matname[i]].plythk for i in matindex ]) self.nply = len(self.laminatethk) # number of plies self.H = np.sum(self.laminatethk) # plate thickness # area = a_width*H z = zeros(self.nply+1) zmid = zeros(self.nply) z[0] = -self.H/2 for i in range(self.nply): z[i+1] = z[i] + self.laminatethk[i] zmid[i] = z[i] + self.laminatethk[i]/2 self.z = z self.zmid = zmid self.__abdmatrix() def __Qf(self, E1,E2,nu12,G12): '''transversely isotropic reduced stiffness matrix. pg 58 herakovich G12 = E1/(2*(1+nu12)) if isotropic''' nu21 = E2*nu12/E1 Q = array([[E1/(1-nu12*nu21), E2*nu12/(1-nu12*nu21), 0], [ E2*nu12/(1-nu12*nu21), E2/(1-nu12*nu21), 0], [0, 0, G12]]) return Q def __T1(self, th): '''Stress Transform for Plane Stress th=ply angle in degrees Voigt notation for stress transform. sigma1 = T1 @ sigmax recall T1(th)**-1 == T1(-th)''' n = sin(th*pi/180) m = cos(th*pi/180) T1 = array( [[m**2, n**2, 2*m*n], [n**2, m**2,-2*m*n], [-m*n, m*n,(m**2-n**2)]]) return T1 def __T2(self, th): '''Strain Transform for Plane Stress th=ply angle in degrees Voigt notation for strain transform.
epsilon1 = T2 @ epsilonx''' n = sin(th*pi/180) m = cos(th*pi/180) T2 = array( [[m**2, n**2, m*n], [n**2, m**2,-m*n], [-2*m*n, 2*m*n, (m**2-n**2)]]) return T2 # private method def __abdmatrix(self): '''used within the object but not accessible outside''' #========================================================================== # ABD Matrix Compute #========================================================================== # Reduced stiffness matrix for a plane stress ply in principal coordinates # calcluating Q from the Compliance matrix may cause cancE1ation errors A = zeros((3,3)); B = zeros((3,3)); D = zeros((3,3)) for i in range(self.nply): # = nply Q = self.__Qf(self.__mat[self.matname[self.matindex[i]]].E1, self.__mat[self.matname[self.matindex[i]]].E2, self.__mat[self.matname[self.matindex[i]]].nu12, self.__mat[self.matname[self.matindex[i]]].G12 ) Qbar = inv(self.__T1(self.plyangle[i])) @ Q @ self.__T2(self.plyangle[i]) # solve(T1(plyangle[i]), Q) @ T2(plyangle[i]) A += Qbar*(self.z[i+1]-self.z[i]) # coupling stiffness B += (1/2)*Qbar*(self.z[i+1]**2-self.z[i]**2) # bending or flexural laminate stiffness relating moments to curvatures D += (1/3)*Qbar*(self.z[i+1]**3-self.z[i]**3) # laminate stiffness matrix ABD = zeros((6,6)) ABD[0:3,0:3] = A ABD[0:3,3:6] = B ABD[3:6,0:3] = B ABD[3:6,3:6] = D self.ABD = ABD # method def available_materials(self): '''show the materials available in the library''' matprops = pd.read_csv(os.path.join(os.path.dirname(__file__), "compositematerials.csv"), index_col=0) print('---available materials---') for k in matprops.columns.tolist(): print(k) print('-------------------------') # private method to be used internally def __import_matprops(self, mymaterial=['T300_5208','AL_7075']): ''' import material properties ''' matprops = pd.read_csv(os.path.join(os.path.dirname(__file__), "compositematerials.csv"), index_col=0) if mymaterial==[] or mymaterial=='': print(matprops.columns.tolist()) mat = matprops[mymaterial] #mat.applymap(lambda x:np.float(x)) mat = mat.applymap(lambda x:pd.to_numeric(x, errors='ignore')) return mat def failure_envelope_laminate(Nx,Ny,Nxy,Mx,My,Mxy,q0,mymat,layup): ''' find the miniumu margin give load conditions ''' # create a 45 carbon cloth panel with a 0.5 inch rohacell core _, FAILUREINDEX_MAXSTRESS_max = laminate_calcs(NM=[Nx,Ny,Nxy,Mx,My,Mxy], ek=[0,0,0,0,0,0], q0=q0, plyangle= layup, plymatindex=[0,0,0,0], materials = [mymat], platedim=[10,10], zoffset=0, SF=1.0, plots=0, prints=0) return FAILUREINDEX_MAXSTRESS_max def plot_single_max_failure_loads(mymat='E-Glass Epoxy fabric M10E-3783', mylayup=[0,45,45,0] ): ''' loops through and tries to find a load that is close to 0 and then attempts to find the root (ie margin=0) older version used newton method for root finding scipy.optimize.newton(laminate_min, guess) TODO: Current calculation is stupid using random points to plot. 
fix it by use FI, failure index instead of margin to generate a linear relationship and envelope ''' #laminate_min = lambda N: failure_envelope_laminate(N,0,0,0,0,0,0) loadnamelist = ['Nx','Ny','Nxy','Mx','My','Mxy','q0'] laminate_min_list = [] laminate_min_list.append(lambda N: failure_envelope_laminate(N,0,0,0,0,0,0,mymat,mylayup)) laminate_min_list.append(lambda N: failure_envelope_laminate(0,N,0,0,0,0,0,mymat,mylayup)) laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,N,0,0,0,0,mymat,mylayup)) laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,0,N,0,0,0,mymat,mylayup)) laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,0,0,N,0,0,mymat,mylayup)) laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,0,0,0,N,0,mymat,mylayup)) laminate_min_list.append(lambda N: failure_envelope_laminate(0,0,0,0,0,0,N,mymat,mylayup)) envelope_loads = [] N_t = array([0,1]) N_c = array([0,-1]) for loadname,laminate_min in zip(loadnamelist,laminate_min_list): # tension FI = [laminate_min(N) for N in N_t] m = (FI[1]-FI[0]) / (N_t[1] - N_t[0]) b = FI[1]-m*N_t[1] N_crit_t = (1-b) / m # compression FI = [laminate_min(N) for N in N_c] m = (FI[1]-FI[0]) / (N_c[1] - N_c[0]) b = FI[1]-m*N_c[1] N_crit_c = (1-b) / m envelope_loads.append('{} = {:.1f} , {:.1f}'.format(loadname,N_crit_t, N_crit_c)) print('------------- enveloped loads for {} {} -----------------'.format(mylayup, mymat)) for k in envelope_loads: print(k) # plot envelope Nx_env = [] Nxy_env = [] laminate_min = lambda N: failure_envelope_laminate(N,0,0,0,0,0,0,mymat,mylayup) # compression FI = [laminate_min(N) for N in N_c] m = (FI[1]-FI[0]) / (N_c[1] - N_c[0]) b = FI[1]-m*N_c[1] Nx_env.append( (1-b) / m ) Nxy_env.append( 0 ) # tension FI = [laminate_min(N) for N in N_t] m = (FI[1]-FI[0]) / (N_t[1] - N_t[0]) b = FI[1]-m*N_t[1] Nx_env.append( (1-b) / m ) Nxy_env.append( 0 ) laminate_min = lambda N: failure_envelope_laminate(0,0,N,0,0,0,0,mymat,mylayup) # compression FI = [laminate_min(N) for N in N_c] m = (FI[1]-FI[0]) / (N_c[1] - N_c[0]) b = FI[1]-m*N_c[1] Nxy_env.append( (1-b) / m ) Nx_env.append( 0 ) # tension FI = [laminate_min(N) for N in N_t] m = (FI[1]-FI[0]) / (N_t[1] - N_t[0]) b = FI[1]-m*N_t[1] Nxy_env.append( (1-b) / m ) Nx_env.append( 0 ) laminate_min_Nx_Nxy_func = lambda Nx,Nxy: failure_envelope_laminate(Nx,0,Nxy,0,0,0,0,mymat,mylayup) n = 500 f = 1.25 # < 1 # arr1 = np.random.randint(Nx_env[0]-abs(Nx_env[0]*f),Nx_env[0]+abs(Nx_env[0])*f,n) # arr2 = np.random.randint(Nx_env[1]-abs(Nx_env[1]*f),Nx_env[1]+abs(Nx_env[1])*f,n) # Nx_r = np.concatenate((arr1, arr2)) # # arr1 = np.random.randint(Nxy_env[2]-abs(Nxy_env[2])*f,Nxy_env[2]+abs(Nxy_env[2])*f,n) # arr2 = np.random.randint(Nxy_env[3]-abs(Nxy_env[3])*f,Nxy_env[3]+abs(Nxy_env[3])*f,n) # Nxy_r = np.concatenate((arr1, arr2)) Nx_r = np.random.randint(Nx_env[0]*f,Nx_env[1]*f, n) Nxy_r = np.random.randint(Nxy_env[2]*f,Nxy_env[3]*f, n) for Nx_ri, Nxy_ri in zip(Nx_r, Nxy_r): FI = laminate_min_Nx_Nxy_func(Nx_ri, Nxy_ri) if FI < 1: Nx_env.append(Nx_ri) Nxy_env.append(Nxy_ri) points = array([ [x,xy] for x,xy in zip(Nx_env, Nxy_env)]) hull = scipy.spatial.ConvexHull(points) plot(points[:,0], points[:,1], 'bo') for simplex in hull.simplices: plot(points[simplex, 0], points[simplex, 1], 'k-') xlabel('Nx, lb/in') ylabel('Nxy, lb/in') title('Failure envelope') return envelope_loads def my_laminate_with_loading(): # loads lbs/in Nx = 50 Ny = 0 Nxy = 0 Mx = 0 My = 0 Mxy = 0 q0 = 0 # pressure # Qx = 0 # Qy = 0 a_width = 50 b_length = 3.14*6.75 ## sandwich 
laminate # plyangle= [45,45,0, 45,45], # plymatindex=[0, 0, 1, 0, 0], # create a 45 carbon cloth panel with a 0.5 inch rohacell core laminate_calcs(NM=[Nx,Ny,Nxy,Mx,My,Mxy], ek=[0,0,0,0,0,0], q0=q0, plyangle= [0,60,-60,-60,60,0], plymatindex=[0,0,0,0,0,0], materials = ['E-Glass Epoxy Uni'], platedim=[a_width,b_length], zoffset=0, SF=2.0, plots=0, prints=1) if __name__=='__main__': #plot_single_max_failure_loads() #plot_failure_index() my_laminate_with_loading() #material_plots(['E-Glass Epoxy fabric M10E-3783']) #plate() #plot_Nx_Nxy_failure_envelope(['Carbon_cloth_AGP3705H']) #plot_single_max_failure_loads() # # reload modules # import importlib ; importlib.reload # from composites import laminate # plyangle = [0,45] # matindex = [0,0] # matname = ['graphite-polymer_SI'] # lam1 = laminate(plyangle, matindex, matname) # lam1.ABD
mit
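A brief annotation on the record above: the Tsai-Wu failure block in laminate_calcs reduces to solving lam1*R**2 + lam2*R - 1 = 0 for the strength ratio R. Below is a minimal, self-contained sketch of that root solve; the ply stress and strength numbers in the example call are illustrative only, not taken from the file.

import numpy as np

def tsai_wu_strength_ratio(s1, s2, s12, F1t, F1c, F2t, F2c, F12):
    # compression strengths F1c/F2c are negative, matching the convention above
    f1 = 1/F1t + 1/F1c
    f2 = 1/F2t + 1/F2c
    f11 = -1/(F1t*F1c)
    f22 = -1/(F2t*F2c)
    f66 = 1/F12**2
    f12 = -0.5*np.sqrt(f11*f22)  # common estimate of the interaction term
    # scale the stress state by R until the Tsai-Wu polynomial reaches 1
    lam1 = f11*s1**2 + f22*s2**2 + f66*s12**2 + 2*f12*s1*s2 + 1e-16
    lam2 = f1*s1 + f2*s2 + 1e-16
    roots = np.roots([lam1, lam2, -1.0])
    return roots[roots >= 0].min()  # smallest positive root = strength ratio

# illustrative ply stresses (Pa): R > 1 means margin remains, R < 1 means failure
print(tsai_wu_strength_ratio(400e6, 10e6, 20e6, 1500e6, -1250e6, 50e6, -200e6, 100e6))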
guziy/basemap
setup.py
1
6013
from __future__ import (absolute_import, division, print_function) import glob import io import os import sys from setuptools.dist import Distribution if sys.version_info < (2, 6): raise SystemExit("""matplotlib and the basemap toolkit require Python 2.6 or later.""") # Do not require numpy for just querying the package # Taken from the netcdf-python setup file (which took it from h5py setup file). inc_dirs = [] if any('--' + opt in sys.argv for opt in Distribution.display_option_names + ['help-commands', 'help']) or sys.argv[1] == 'egg_info': from setuptools import setup, Extension else: import numpy # Use numpy versions if they are available. from numpy.distutils.core import setup, Extension # append numpy include dir. inc_dirs.append(numpy.get_include()) def get_install_requirements(path): path = os.path.join(os.path.dirname(__file__), path) with io.open(path, encoding='utf-8') as fp: content = fp.read() return [req for req in content.split("\n") if req != '' and not req.startswith('#')] def checkversion(GEOS_dir): """check geos C-API header file (geos_c.h)""" try: f = open(os.path.join(GEOS_dir, 'include', 'geos_c.h')) except IOError: return None geos_version = None for line in f: if line.startswith('#define GEOS_VERSION'): geos_version = line.split()[2] return geos_version # get location of geos lib from environment variable if it is set. if 'GEOS_DIR' in os.environ: GEOS_dir = os.environ.get('GEOS_DIR') else: # set GEOS_dir manually here if automatic detection fails. GEOS_dir = None user_home = os.path.expanduser('~') geos_search_locations = [user_home, os.path.join(user_home, 'local'), '/usr', '/usr/local', '/sw', '/opt', '/opt/local'] if GEOS_dir is None: # if GEOS_dir not set, check a few standard locations. GEOS_dirs = geos_search_locations for direc in GEOS_dirs: geos_version = checkversion(direc) sys.stdout.write('checking for GEOS lib in %s ....\n' % direc) if geos_version is None or geos_version < '"3.1.1"': continue else: sys.stdout.write('GEOS lib (version %s) found in %s\n' %\ (geos_version[1:-1],direc)) GEOS_dir = direc break else: geos_version = checkversion(GEOS_dir) if GEOS_dir is None: raise SystemExit(""" Can't find geos library in standard locations ('%s'). Please install the corresponding packages using your systems software management system (e.g. for Debian Linux do: 'apt-get install libgeos-3.3.3 libgeos-c1 libgeos-dev' and/or set the environment variable GEOS_DIR to point to the location where geos is installed (for example, if geos_c.h is in /usr/local/include, and libgeos_c is in /usr/local/lib, set GEOS_DIR to /usr/local), or edit the setup.py script manually and set the variable GEOS_dir (right after the line that says "set GEOS_dir manually here".""" % "', '".join(geos_search_locations)) else: geos_include_dirs=[os.path.join(GEOS_dir,'include')] + inc_dirs geos_library_dirs=[os.path.join(GEOS_dir,'lib'),os.path.join(GEOS_dir,'lib64')] packages = ['mpl_toolkits','mpl_toolkits.basemap'] namespace_packages = ['mpl_toolkits'] package_dirs = {'':'lib'} # can't install _geoslib in mpl_toolkits.basemap namespace, # or Basemap objects won't be pickleable. # don't use runtime_library_dirs on windows (workaround # for a distutils bug - http://bugs.python.org/issue2437). 
if sys.platform == 'win32': runtime_lib_dirs = [] else: runtime_lib_dirs = geos_library_dirs extensions = [ Extension("_geoslib",['src/_geoslib.c'], library_dirs=geos_library_dirs, runtime_library_dirs=runtime_lib_dirs, include_dirs=geos_include_dirs, libraries=['geos_c']) ] # Specify all the required mpl data pathout =\ os.path.join('lib',os.path.join('mpl_toolkits',os.path.join('basemap','data'))) datafiles = glob.glob(os.path.join(pathout,'*')) datafiles = [os.path.join('data',os.path.basename(f)) for f in datafiles] package_data = {'mpl_toolkits.basemap':datafiles} install_requires = get_install_requirements("requirements.txt") __version__ = "1.2.1" setup( name = "basemap", version = __version__, description = "Plot data on map projections with matplotlib", long_description = """ An add-on toolkit for matplotlib that lets you plot data on map projections with coastlines, lakes, rivers and political boundaries. See http://matplotlib.org/basemap/users/examples.html for examples of what it can do.""", url = "https://matplotlib.org/basemap/", download_url = "https://github.com/matplotlib/basemap/archive/v{0}rel.tar.gz".format(__version__), author = "Jeff Whitaker", author_email = "jeffrey.s.whitaker@noaa.gov", maintainer = "Ben Root", maintainer_email = "ben.v.root@gmail.com", install_requires = install_requires, platforms = ["any"], license = "OSI Approved", keywords = ["python","plotting","plots","graphs","charts","GIS","mapping","map projections","maps"], classifiers = ["Development Status :: 5 - Production/Stable", "Intended Audience :: Science/Research", "License :: OSI Approved", "Programming Language :: Python", "Programming Language :: Python :: 3", "Topic :: Scientific/Engineering :: Visualization", "Topic :: Software Development :: Libraries :: Python Modules", "Operating System :: OS Independent"], packages = packages, namespace_packages = namespace_packages, package_dir = package_dirs, ext_modules = extensions, package_data = package_data )
gpl-2.0
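A standalone sketch of the GEOS probe this setup.py performs: it scans <GEOS_dir>/include/geos_c.h for the '#define GEOS_VERSION' line. The '/usr/local' fallback below is only an example location; setup.py itself also searches several standard prefixes.

import os

def check_geos_version(geos_dir):
    # returns e.g. '3.3.3' if the geos_c.h header is found, else None
    header = os.path.join(geos_dir, 'include', 'geos_c.h')
    try:
        with open(header) as f:
            for line in f:
                if line.startswith('#define GEOS_VERSION'):
                    return line.split()[2].strip('"')
    except IOError:
        return None
    return None

print(check_geos_version(os.environ.get('GEOS_DIR', '/usr/local')))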
chugunovyar/factoryForBuild
env/lib/python2.7/site-packages/matplotlib/sphinxext/mathmpl.py
12
3822
from __future__ import (absolute_import, division, print_function, unicode_literals) import six import os import sys from hashlib import md5 from docutils import nodes from docutils.parsers.rst import directives import warnings from matplotlib import rcParams from matplotlib.mathtext import MathTextParser rcParams['mathtext.fontset'] = 'cm' mathtext_parser = MathTextParser("Bitmap") # Define LaTeX math node: class latex_math(nodes.General, nodes.Element): pass def fontset_choice(arg): return directives.choice(arg, ['cm', 'stix', 'stixsans']) options_spec = {'fontset': fontset_choice} def math_role(role, rawtext, text, lineno, inliner, options={}, content=[]): i = rawtext.find('`') latex = rawtext[i+1:-1] node = latex_math(rawtext) node['latex'] = latex node['fontset'] = options.get('fontset', 'cm') return [node], [] math_role.options = options_spec def math_directive(name, arguments, options, content, lineno, content_offset, block_text, state, state_machine): latex = ''.join(content) node = latex_math(block_text) node['latex'] = latex node['fontset'] = options.get('fontset', 'cm') return [node] # This uses mathtext to render the expression def latex2png(latex, filename, fontset='cm'): latex = "$%s$" % latex orig_fontset = rcParams['mathtext.fontset'] rcParams['mathtext.fontset'] = fontset if os.path.exists(filename): depth = mathtext_parser.get_depth(latex, dpi=100) else: try: depth = mathtext_parser.to_png(filename, latex, dpi=100) except: warnings.warn("Could not render math expression %s" % latex, Warning) depth = 0 rcParams['mathtext.fontset'] = orig_fontset sys.stdout.write("#") sys.stdout.flush() return depth # LaTeX to HTML translation stuff: def latex2html(node, source): inline = isinstance(node.parent, nodes.TextElement) latex = node['latex'] name = 'math-%s' % md5(latex.encode()).hexdigest()[-10:] destdir = os.path.join(setup.app.builder.outdir, '_images', 'mathmpl') if not os.path.exists(destdir): os.makedirs(destdir) dest = os.path.join(destdir, '%s.png' % name) path = '/'.join((setup.app.builder.imgpath, 'mathmpl')) depth = latex2png(latex, dest, node['fontset']) if inline: cls = '' else: cls = 'class="center" ' if inline and depth != 0: style = 'style="position: relative; bottom: -%dpx"' % (depth + 1) else: style = '' return '<img src="%s/%s.png" %s%s/>' % (path, name, cls, style) def setup(app): setup.app = app # Add visit/depart methods to HTML-Translator: def visit_latex_math_html(self, node): source = self.document.attributes['source'] self.body.append(latex2html(node, source)) def depart_latex_math_html(self, node): pass # Add visit/depart methods to LaTeX-Translator: def visit_latex_math_latex(self, node): inline = isinstance(node.parent, nodes.TextElement) if inline: self.body.append('$%s$' % node['latex']) else: self.body.extend(['\\begin{equation}', node['latex'], '\\end{equation}']) def depart_latex_math_latex(self, node): pass app.add_node(latex_math, html=(visit_latex_math_html, depart_latex_math_html), latex=(visit_latex_math_latex, depart_latex_math_latex)) app.add_role('math', math_role) app.add_directive('math', math_directive, True, (0, 0, 0), **options_spec) metadata = {'parallel_read_safe': True, 'parallel_write_safe': True} return metadata
gpl-3.0
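A quick sketch of what mathmpl delegates to: matplotlib's mathtext renderer. Outside Sphinx the same PNG rendering can be done directly; the output filename below is illustrative, and to_png returning the baseline depth matches the matplotlib versions this module targets.

from matplotlib.mathtext import MathTextParser

parser = MathTextParser("Bitmap")
# render a TeX expression to PNG; the return value is the baseline depth in
# pixels, which latex2html above uses to vertically align inline math
depth = parser.to_png("math-example.png", r"$\frac{x^2}{\sqrt{2\pi}}$", dpi=100)
print("baseline depth:", depth)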
pylayers/pylayers
pylayers/antprop/examples/ex_signature.py
3
3411
#!/usr/bin/python #-*- coding:Utf-8 -*- import matplotlib.pyplot as plt import numpy as np import networkx as nx from pylayers.gis.layout import * from pylayers.antprop.signature import * # load the layout graphs def showr2(L,r2d,tx,rx,k,l): col = ['r','b','g','c','m','k','y'] r = r2d[str(k)] pts = r['pt'] sig = r['sig'] fig,ax = showsig(L,sig[:,:,l],tx,rx) sh = np.shape(pts) x = np.hstack((tx[0],pts[0,:,l],rx[0])) y = np.hstack((tx[1],pts[1,:,l],rx[1])) plt.plot(x,y,col[k]) plt.title(sig[:,:,l]) return fig,ax def showr2d(L,r2d,tx,rx): """ r2d['pt'] : nd,ni,nr """ L.display['thin']=True col = ['r','b','g','c','m','k','y'] fig,ax = L.showGs() for k in r2d: r = r2d[k] pts = r['pt'] sh = np.shape(pts) for r in range(sh[2]): x = np.hstack((tx[0],pts[0,:,r],rx[0])) y = np.hstack((tx[1],pts[1,:,r],rx[1])) plt.plot(x,y,col[eval(k)]) return fig,ax def showsig(L,s,tx,rx): L.display['thin']=True fig,ax = L.showGs() L.display['thin']=False L.display['edlabel']=True fig,ax = L.showGs(fig=fig,ax=ax,edlist=s[0,:],width=4) plt.plot(tx[0],tx[1],'x') plt.plot(rx[0],rx[1],'+') plt.title(str(s[0,:])+str(s[1,:])) L.display['edlabel']=False return fig,ax strucname = 'TA-Office' #strucname = 'defstr' L = Layout(strucname+'.ini') L.boundary() print L.ax try: L.dumpr() except: L.build() L.dumpw() #tx = np.array([8., 8., 1.]) #rx = np.array([30., 11., 2.]) #tx = np.array([1., 0., 1.]) #rx = np.array([8., -1.5, 2.]) #L = Layout('TA-Office.str') #L.build() tx = np.array([20, 8, 1]) rx = np.array([35, 6, 2]) S = Signatures(L, tx, rx) print "Computing signatures" #s1 = S.get_sigslist(tx, rx) s1 = S.run(tx,rx,2) print "Done computing signatures" #print "signatures --> rays " #r2d = S.sigs2rays(s1) r2d = S.rays(s1) ##print "done: signatures --> rays " ## #r22 = r2d['2'] #pt2 = r22['pt'] #sig2 = r22['sig'] #pt2 = np.swapaxes(pt2,0,2) #pt2 = np.swapaxes(pt2,1,2) #tx2 = np.kron(np.ones(2),tx).reshape(2,3,1) #rx2 = np.kron(np.ones(2),rx).reshape(2,3,1) #tx2[:,2,:]=0 #rx2[:,2,:]=0 #pt = np.concatenate((tx2,pt2,rx2),axis=2) #vsi = pt[:, :, 1:] - pt[:,:,:-1] #si = np.sqrt(np.sum(vsi*vsi, axis=1)) #alpha = np.cumsum(si,axis=1) #c = alpha[:,-1].reshape(2,1) #alpha = alpha/c #pt[:,2,1:]= tx[2]+alpha*(rx[2]-tx[2]) # # showr2d(L,r2d,tx,rx) print "2D rays --> 3D rays " #rays3d = S.ray2D3D(r2d) #print "done: 2D rays --> 3D rays " ## #S.show3(rays=rays3d,strucname=strucname) ## ## ## #s = np.array([[5,1,8],[1,1,2]]) #sig = Signature(s) #rsig = sig.sig2ray(L,tx[0:2],rx[0:2]) #sig.ev(L) #M = sig.image(tx[0:2]) #Y = sig.backtrace(tx[0:2],rx[0:2],M) #plt.plot(M[0,:],M[1,:],'ob') #plt.plot(Y[0,:],Y[1,:],'xk') #fig,ax = showr2(L,r2d,tx[0:2],rx[0:2],3,4) #plt.show() #room8 = L.Gt.node[8] #polyg8 = room8['polyg'] #vnodes8 = room8['vnodes'] #udeg1 = [] #udeg2 = [] #for ik, inode in enumerate(vnodes8): # deg = L.Gs.degree(inode) # if vnodes8[0] < 0: # index = ik / 2 # else: # index = (ik - 1) / 2 # if inode < 0: # if deg == 2: # udeg2.append(index) # if deg == 1: # udeg1.append(index) # warning not used #Gv = polyg8.buildGv(show=True,udeg2=udeg2) #L.showGs() #nx.draw_networkx_edges(L.dGv[8],L.Gs.pos,nx.edges(L.dGv[8],nbunch=[47]))
mit
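A generic sketch of the point-stacking pattern used by showr2 and showr2d above: each 2-D ray is drawn by chaining the transmitter, the interaction points, and the receiver into one polyline. The interaction coordinates below are made up for illustration.

import numpy as np
import matplotlib.pyplot as plt

tx = np.array([20.0, 8.0])
rx = np.array([35.0, 6.0])
pts = np.array([[25.0, 30.0],   # x-coordinates of two interaction points
                [10.0,  4.0]])  # y-coordinates of the same points
x = np.hstack((tx[0], pts[0, :], rx[0]))
y = np.hstack((tx[1], pts[1, :], rx[1]))
plt.plot(x, y, 'r')
plt.plot(tx[0], tx[1], 'x')
plt.plot(rx[0], rx[1], '+')
plt.show()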
matbra/bokeh
examples/compat/mpl/listcollection.py
34
1602
from matplotlib.collections import LineCollection import matplotlib.pyplot as plt import numpy as np from bokeh import mpl from bokeh.plotting import output_file, show def make_segments(x, y): ''' Create list of line segments from x and y coordinates. ''' points = np.array([x, y]).T.reshape(-1, 1, 2) segments = np.concatenate([points[:-1], points[1:]], axis=1) return segments def colorline(x, y, colors=None, linewidth=3, alpha=1.0): ''' Plot a line with segments. Optionally, specify segments colors and segments widths. ''' # Make a list of colors cycling through the rgbcmyk series. # You have several ways to input the colors: # colors = ['r','g','b','c','y','m','k'] # colors = ['red','green','blue','cyan','yellow','magenta','black'] # colors = ['#ff0000', '#008000', '#0000ff', '#00bfbf', '#bfbf00', '#bf00bf', '#000000'] # colors = [(1.0, 0.0, 0.0, 1.0), (0.0, 0.5, 0.0, 1.0), (0.0, 0.0, 1.0, 1.0), (0.0, 0.75, 0.75, 1.0), # (0.75, 0.75, 0, 1.0), (0.75, 0, 0.75, 1.0), (0.0, 0.0, 0.0, 1.0)] colors = ['r', 'g', 'b', 'c', 'y', 'm', 'k'] widths = [5, 10, 20, 40, 20, 10, 5] segments = make_segments(x, y) lc = LineCollection(segments, colors=colors, linewidth=widths, alpha=alpha) ax = plt.gca() ax.add_collection(lc) return lc # Colored sine wave x = np.linspace(0, 4 * np.pi, 100) y = np.sin(x) colorline(x, y) plt.title("MPL support for ListCollection in Bokeh") plt.xlim(x.min(), x.max()) plt.ylim(-1.0, 1.0) output_file("listcollection.html") show(mpl.to_bokeh())
bsd-3-clause
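The same segment construction without the bokeh conversion step, as a pure-matplotlib sketch; it mirrors make_segments and colorline from the file above.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection

x = np.linspace(0, 4*np.pi, 100)
y = np.sin(x)
# pair consecutive points into an (N-1, 2, 2) array of line segments
points = np.array([x, y]).T.reshape(-1, 1, 2)
segments = np.concatenate([points[:-1], points[1:]], axis=1)

lc = LineCollection(segments, colors=['r', 'g', 'b', 'c', 'y', 'm', 'k'],
                    linewidth=3)
ax = plt.gca()
ax.add_collection(lc)
ax.set_xlim(x.min(), x.max())
ax.set_ylim(-1.0, 1.0)
plt.show()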
eduardoneira/SistemasDistribuidos_TPFinal
CentroMonitoreoCiudad/FaceRecognizer/modules/old_feature_matcher.py
1
4628
#!/bin/python3 import numpy as np import cv2 import base64 import pdb from tkinter import * from matplotlib import pyplot as plt class FeatureMatcher: __PORC_DISTANCE = 0.7 def __init__(self,feature_extractor='SURF',upright=True,min_match_count=10,threshold=400): self.MIN_MATCH_COUNT = min_match_count self.__create_feature_extractor(feature_extractor,upright,threshold) FLANN_INDEX_KDTREE = 0 index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5) search_params = dict(checks = 200) self.flann = cv2.FlannBasedMatcher(index_params, search_params) def __create_feature_extractor(self,feature_extractor,upright,threshold): if feature_extractor == 'SURF': self.feature_finder = cv2.xfeatures2d.SURF_create(threshold,extended=True) self.feature_finder.setUpright(upright) elif feature_extractor == 'SIFT': self.feature_finder = cv2.xfeatures2d.SIFT_create(edgeThreshold=20,sigma=1.1) elif feature_extractor == 'ORB': self.feature_finder = cv2.ORB_create() else: raise 'Feature extractor no encontrado' def compare(self,img1,img2): self.features_img1 = self.find_features(img1) self.features_img2 = self.find_features(img2) pdb.set_trace() return self.flann.knnMatch(self.features_img1[1],self.features_img2[1],k=2) def compare_base64(self,image1_base64,image2_base64): img1 = self.base64_to_img(image1_base64) img2 = self.base64_to_img(image2_base64) return self.compare(img1,img2) def are_similar(self,img1,img2): self.good_matches = [] for m,n in self.compare(img1,img2): if m.distance < self.__PORC_DISTANCE*n.distance: self.good_matches.append(m) return (len(self.good_matches) > self.MIN_MATCH_COUNT) def find_features(self,img): return self.feature_finder.detectAndCompute(img,None) def bytes_to_img(self,image_bytes): nparr = np.fromstring(image_bytes, np.uint8) return cv2.imdecode(nparr, 0) def base64_to_img(self,image_base64): return self.bytes_to_img(base64.b64decode(image_base64)) def compare_and_draw_base64(self,img1,img2): self.compare_and_draw(self.base64_to_img(img1),self.base64_to_img(img2)) def compare_and_draw(self,img1,img2): # if self.are_similar(img1,img2): # src_pts = np.float32([ self.features_img1[0][m.queryIdx].pt for m in self.good_matches ]).reshape(-1,1,2) # dst_pts = np.float32([ self.features_img2[0][m.trainIdx].pt for m in self.good_matches ]).reshape(-1,1,2) # M, mask = cv2.findHomography(src_pts,dst_pts,cv2.RANSAC,5.0) # matchesMask = mask.ravel().tolist() # h,w = img1.shape # pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2) # dst = cv2.perspectiveTransform(pts,M) # img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3,cv2.LINE_AA) # else: # print("Not enough matches are found - %d/%d" % (len(self.good_matches),self.MIN_MATCH_COUNT)) # matchesMask = None # draw_params = dict(matchColor = (0,255,0), # singlePointColor = (255,0,0), # matchesMask = matchesMask, # flags = 2) # img3 = cv2.drawMatchesKnn(img1,self.features_img1[0],img2,self.features_img2[0],self.good_matches,None,**draw_params) # plt.imshow(img3,'gray'),plt.show() hash1 = self.find_features(img1) hash2 = self.find_features(img2) matches = self.flann.knnMatch(hash1[1],hash2[1],k=2) good = [] for m,n in matches: if m.distance < 0.95*n.distance: good.append(m) print(len(good)) if len(good)>self.MIN_MATCH_COUNT: src_pts = np.float32([ hash1[0][m.queryIdx].pt for m in good ]).reshape(-1,1,2) dst_pts = np.float32([ hash2[0][m.trainIdx].pt for m in good ]).reshape(-1,1,2) M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC,5.0) matchesMask = mask.ravel().tolist() h,w = img1.shape pts = np.float32([ 
[0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2) dst = cv2.perspectiveTransform(pts,M) img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA) else: print( "Not enough matches are found - {}/{}".format(len(good), self.MIN_MATCH_COUNT) ) matchesMask = None draw_params = dict(matchColor = (0,255,0), # draw matches in green color singlePointColor = (255,0,0), matchesMask = matchesMask, # draw only inliers flags = 2) img3 = cv2.drawMatches(img1,hash1[0],img2,hash2[0],good,None,**draw_params) plt.imshow(img3, 'gray'),plt.show()
gpl-3.0
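A condensed sketch of the kNN-match-plus-ratio-test that FeatureMatcher wraps; the image filenames are placeholders, and SIFT here requires an OpenCV build with the contrib modules (as the class above also assumes).

import cv2

img1 = cv2.imread('img1.png', 0)   # placeholder filenames
img2 = cv2.imread('img2.png', 0)
sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)

FLANN_INDEX_KDTREE = 0
flann = cv2.FlannBasedMatcher(dict(algorithm=FLANN_INDEX_KDTREE, trees=5),
                              dict(checks=200))
matches = flann.knnMatch(des1, des2, k=2)

# Lowe's ratio test: keep a match only if it clearly beats the runner-up
good = [m for m, n in matches if m.distance < 0.7*n.distance]
print(len(good), 'good matches')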
DTOcean/dtocean-core
tests/test_data_definitions_simplepie.py
1
2601
import pytest import matplotlib.pyplot as plt from aneris.control.factory import InterfaceFactory from dtocean_core.core import (AutoFileInput, AutoFileOutput, AutoPlot, Core) from dtocean_core.data import CoreMetaData from dtocean_core.data.definitions import SimplePie def test_SimplePie_available(): new_core = Core() all_objs = new_core.control._store._structures assert "SimplePie" in all_objs.keys() def test_SimplePie(): meta = CoreMetaData({"identifier": "test", "structure": "test", "title": "test", "types": ["float"]}) test = SimplePie() raw = {"a": 0, "b": 1} a = test.get_data(raw, meta) b = test.get_value(a) assert b["a"] == 0 assert b["b"] == 1 def test_get_None(): test = SimplePie() result = test.get_value(None) assert result is None @pytest.mark.parametrize("fext", [".csv", ".xls", ".xlsx"]) def test_SimplePie_auto_file(tmpdir, fext): test_path = tmpdir.mkdir("sub").join("test{}".format(fext)) test_path_str = str(test_path) raw = {"a": 0, "b": 1} meta = CoreMetaData({"identifier": "test", "structure": "test", "title": "test", "types": ["float"]}) test = SimplePie() fout_factory = InterfaceFactory(AutoFileOutput) FOutCls = fout_factory(meta, test) fout = FOutCls() fout._path = test_path_str fout.data.result = test.get_data(raw, meta) fout.connect() assert len(tmpdir.listdir()) == 1 fin_factory = InterfaceFactory(AutoFileInput) FInCls = fin_factory(meta, test) fin = FInCls() fin._path = test_path_str fin.connect() result = test.get_data(fin.data.result, meta) assert result["a"] == 0 assert result["b"] == 1 def test_SimplePie_auto_plot(): raw = {"a": 0, "b": 1} meta = CoreMetaData({"identifier": "test", "structure": "test", "title": "test", "types": ["float"]}) test = SimplePie() fout_factory = InterfaceFactory(AutoPlot) PlotCls = fout_factory(meta, test) plot = PlotCls() plot.data.result = test.get_data(raw, meta) plot.meta.result = meta plot.connect() assert len(plt.get_fignums()) == 1 plt.close("all")
gpl-3.0
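The test above follows a write-then-read round-trip pattern; here is a generic, framework-free sketch of the same idea (pandas-based, with made-up data; the .xlsx branch assumes an Excel engine such as openpyxl is installed).

import pandas as pd
import pytest

@pytest.mark.parametrize("fext", [".csv", ".xlsx"])
def test_dict_roundtrip(tmpdir, fext):
    path = str(tmpdir.join("test" + fext))
    raw = {"a": 0.0, "b": 1.0}
    df = pd.DataFrame(list(raw.items()), columns=["key", "value"])
    if fext == ".csv":
        df.to_csv(path, index=False)
        result = pd.read_csv(path)
    else:
        df.to_excel(path, index=False)
        result = pd.read_excel(path)
    # the data must survive the trip to disk and back unchanged
    assert dict(zip(result["key"], result["value"])) == raw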
buntyke/GPy
GPy/core/gp.py
8
37031
# Copyright (c) 2012-2014, GPy authors (see AUTHORS.txt). # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np import sys from .. import kern from .model import Model from .parameterization import ObsAr from .mapping import Mapping from .. import likelihoods from ..inference.latent_function_inference import exact_gaussian_inference, expectation_propagation from .parameterization.variational import VariationalPosterior import logging import warnings from GPy.util.normalizer import MeanNorm logger = logging.getLogger("GP") class GP(Model): """ General purpose Gaussian process model :param X: input observations :param Y: output observations :param kernel: a GPy kernel, defaults to rbf+white :param likelihood: a GPy likelihood :param inference_method: The :class:`~GPy.inference.latent_function_inference.LatentFunctionInference` inference method to use for this GP :rtype: model object :param Norm normalizer: normalize the outputs Y. Prediction will be un-normalized using this normalizer. If normalizer is None, we will normalize using MeanNorm. If normalizer is False, no normalization will be done. .. Note:: Multiple independent outputs are allowed using columns of Y """ def __init__(self, X, Y, kernel, likelihood, mean_function=None, inference_method=None, name='gp', Y_metadata=None, normalizer=False): super(GP, self).__init__(name) assert X.ndim == 2 if isinstance(X, (ObsAr, VariationalPosterior)): self.X = X.copy() else: self.X = ObsAr(X) self.num_data, self.input_dim = self.X.shape assert Y.ndim == 2 logger.info("initializing Y") if normalizer is True: self.normalizer = MeanNorm() elif normalizer is False: self.normalizer = None else: self.normalizer = normalizer if self.normalizer is not None: self.normalizer.scale_by(Y) self.Y_normalized = ObsAr(self.normalizer.normalize(Y)) self.Y = Y elif isinstance(Y, np.ndarray): self.Y = ObsAr(Y) self.Y_normalized = self.Y else: self.Y = Y if Y.shape[0] != self.num_data: #There can be cases where we want inputs than outputs, for example if we have multiple latent #function values warnings.warn("There are more rows in your input data X, \ than in your output data Y, be VERY sure this is what you want") _, self.output_dim = self.Y.shape assert ((Y_metadata is None) or isinstance(Y_metadata, dict)) self.Y_metadata = Y_metadata assert isinstance(kernel, kern.Kern) #assert self.input_dim == kernel.input_dim self.kern = kernel assert isinstance(likelihood, likelihoods.Likelihood) self.likelihood = likelihood #handle the mean function self.mean_function = mean_function if mean_function is not None: assert isinstance(self.mean_function, Mapping) assert mean_function.input_dim == self.input_dim assert mean_function.output_dim == self.output_dim self.link_parameter(mean_function) #find a sensible inference method logger.info("initializing inference method") if inference_method is None: if isinstance(likelihood, likelihoods.Gaussian) or isinstance(likelihood, likelihoods.MixedNoise): inference_method = exact_gaussian_inference.ExactGaussianInference() else: inference_method = expectation_propagation.EP() print("defaulting to ", inference_method, "for latent function inference") self.inference_method = inference_method logger.info("adding kernel and likelihood as parameters") self.link_parameter(self.kern) self.link_parameter(self.likelihood) self.posterior = None # The predictive variable to be used to predict using the posterior object's # woodbury_vector and woodbury_inv is defined as predictive_variable # as long as the posterior has 
        # the right woodbury entries.
        # It is the input variable used for the covariance between
        # X_star and the posterior of the GP.
        # This is usually just a link to self.X (full GP) or self.Z (sparse GP).
        # Make sure to name this variable and the predict functions will "just work"
        # In maths the predictive variable is:
        #     K_{xx} - K_{xp}W_{pp}^{-1}K_{px}
        #     W_{pp} := \texttt{Woodbury inv}
        #     p := _predictive_variable
    @property
    def _predictive_variable(self):
        return self.X

    def set_XY(self, X=None, Y=None):
        """
        Set the input / output data of the model.
        This is useful if we wish to change our existing data but maintain the same model.

        :param X: input observations
        :type X: np.ndarray
        :param Y: output observations
        :type Y: np.ndarray
        """
        self.update_model(False)
        if Y is not None:
            if self.normalizer is not None:
                self.normalizer.scale_by(Y)
                self.Y_normalized = ObsAr(self.normalizer.normalize(Y))
                self.Y = Y
            else:
                self.Y = ObsAr(Y)
                self.Y_normalized = self.Y
        if X is not None:
            if self.X in self.parameters:
                # LVM models
                if isinstance(self.X, VariationalPosterior):
                    assert isinstance(X, type(self.X)), "The given X must have the same type as the X in the model!"
                    self.unlink_parameter(self.X)
                    self.X = X
                    self.link_parameter(self.X)
                else:
                    self.unlink_parameter(self.X)
                    from ..core import Param
                    self.X = Param('latent mean', X)
                    self.link_parameter(self.X)
            else:
                self.X = ObsAr(X)
        self.update_model(True)

    def set_X(self, X):
        """
        Set the input data of the model

        :param X: input observations
        :type X: np.ndarray
        """
        self.set_XY(X=X)

    def set_Y(self, Y):
        """
        Set the output data of the model

        :param Y: output observations
        :type Y: np.ndarray
        """
        self.set_XY(Y=Y)

    def parameters_changed(self):
        """
        Method that is called upon any changes to :class:`~GPy.core.parameterization.param.Param` variables within the model.
        In particular in the GP class this method re-performs inference, recalculating the posterior, the log marginal likelihood and the gradients of the model.

        .. warning::
            This method is not designed to be called manually; the framework is set up to call this method automatically upon changes to parameters. If you call this method yourself, there may be unexpected consequences.
        """
        self.posterior, self._log_marginal_likelihood, self.grad_dict = self.inference_method.inference(self.kern, self.X, self.likelihood, self.Y_normalized, self.mean_function, self.Y_metadata)
        self.likelihood.update_gradients(self.grad_dict['dL_dthetaL'])
        self.kern.update_gradients_full(self.grad_dict['dL_dK'], self.X)
        if self.mean_function is not None:
            self.mean_function.update_gradients(self.grad_dict['dL_dm'], self.X)

    def log_likelihood(self):
        """
        The log marginal likelihood of the model, :math:`p(\mathbf{y})`; this is the objective function of the model being optimised
        """
        return self._log_marginal_likelihood

    def _raw_predict(self, Xnew, full_cov=False, kern=None):
        """
        For making predictions; does not account for normalization or likelihood.

        full_cov is a boolean which defines whether the full covariance matrix
        of the prediction is computed. If full_cov is False (default), only the
        diagonal of the covariance is returned.

        .. math::
            p(f^*|X^*, X, Y) = \int p(f^*|f, X^*)\, p(f|X, Y)\, df
                             = \mathcal{N}\left(f^* \,\middle|\, K_{x^*x}(K_{xx} + \Sigma)^{-1}Y,\;
                               K_{x^*x^*} - K_{x^*x}(K_{xx} + \Sigma)^{-1}K_{xx^*}\right)

            \Sigma := \texttt{Likelihood.variance / Approximate likelihood covariance}
        """
        if kern is None:
            kern = self.kern
        Kx = kern.K(self._predictive_variable, Xnew)
        mu = np.dot(Kx.T, self.posterior.woodbury_vector)
        if len(mu.shape) == 1:
            mu = mu.reshape(-1, 1)
        if full_cov:
            Kxx = kern.K(Xnew)
            if self.posterior.woodbury_inv.ndim == 2:
                var = Kxx - np.dot(Kx.T, np.dot(self.posterior.woodbury_inv, Kx))
            elif self.posterior.woodbury_inv.ndim == 3:
                # Missing data
                var = np.empty((Kxx.shape[0], Kxx.shape[1], self.posterior.woodbury_inv.shape[2]))
                from ..util.linalg import mdot
                for i in range(var.shape[2]):
                    var[:, :, i] = (Kxx - mdot(Kx.T, self.posterior.woodbury_inv[:, :, i], Kx))
        else:
            Kxx = kern.Kdiag(Xnew)
            if self.posterior.woodbury_inv.ndim == 2:
                var = (Kxx - np.sum(np.dot(self.posterior.woodbury_inv.T, Kx) * Kx, 0))[:, None]
            elif self.posterior.woodbury_inv.ndim == 3:
                # Missing data
                var = np.empty((Kxx.shape[0], self.posterior.woodbury_inv.shape[2]))
                for i in range(var.shape[1]):
                    var[:, i] = (Kxx - (np.sum(np.dot(self.posterior.woodbury_inv[:, :, i].T, Kx) * Kx, 0)))
        # add in the mean function
        if self.mean_function is not None:
            mu += self.mean_function.f(Xnew)
        return mu, var

    def predict(self, Xnew, full_cov=False, Y_metadata=None, kern=None):
        """
        Predict the function(s) at the new point(s) Xnew.

        :param Xnew: The points at which to make a prediction
        :type Xnew: np.ndarray (Nnew x self.input_dim)
        :param full_cov: whether to return the full covariance matrix, or just the diagonal
        :type full_cov: bool
        :param Y_metadata: metadata about the predicting point to pass to the likelihood
        :param kern: The kernel to use for prediction (defaults to the model kern).
                     This is useful for examining e.g. subprocesses.
        :returns: (mean, var):
            mean: posterior mean, a Numpy array, Nnew x self.output_dim
            var: posterior variance, a Numpy array, Nnew x 1 if full_cov=False, Nnew x Nnew otherwise

           If full_cov and self.output_dim > 1, the return shape of var is
           Nnew x Nnew x self.output_dim. If self.output_dim == 1, the return
           shape is Nnew x Nnew. This is to allow for different normalizations
           of the output dimensions.

        Note: If you want the predictive quantiles (e.g. 95% confidence
        interval) use :py:func:`~GPy.core.gp.GP.predict_quantiles`.
""" #predict the latent function values mu, var = self._raw_predict(Xnew, full_cov=full_cov, kern=kern) if self.normalizer is not None: mu, var = self.normalizer.inverse_mean(mu), self.normalizer.inverse_variance(var) # now push through likelihood mean, var = self.likelihood.predictive_values(mu, var, full_cov, Y_metadata=Y_metadata) return mean, var def predict_quantiles(self, X, quantiles=(2.5, 97.5), Y_metadata=None, kern=None): """ Get the predictive quantiles around the prediction at X :param X: The points at which to make a prediction :type X: np.ndarray (Xnew x self.input_dim) :param quantiles: tuple of quantiles, default is (2.5, 97.5) which is the 95% interval :type quantiles: tuple :param kern: optional kernel to use for prediction :type predict_kw: dict :returns: list of quantiles for each X and predictive quantiles for interval combination :rtype: [np.ndarray (Xnew x self.output_dim), np.ndarray (Xnew x self.output_dim)] """ m, v = self._raw_predict(X, full_cov=False, kern=kern) if self.normalizer is not None: m, v = self.normalizer.inverse_mean(m), self.normalizer.inverse_variance(v) return self.likelihood.predictive_quantiles(m, v, quantiles, Y_metadata=Y_metadata) def predictive_gradients(self, Xnew): """ Compute the derivatives of the predicted latent function with respect to X* Given a set of points at which to predict X* (size [N*,Q]), compute the derivatives of the mean and variance. Resulting arrays are sized: dmu_dX* -- [N*, Q ,D], where D is the number of output in this GP (usually one). Note that this is not the same as computing the mean and variance of the derivative of the function! dv_dX* -- [N*, Q], (since all outputs have the same variance) :param X: The points at which to get the predictive gradients :type X: np.ndarray (Xnew x self.input_dim) :returns: dmu_dX, dv_dX :rtype: [np.ndarray (N*, Q ,D), np.ndarray (N*,Q) ] """ dmu_dX = np.empty((Xnew.shape[0],Xnew.shape[1],self.output_dim)) for i in range(self.output_dim): dmu_dX[:,:,i] = self.kern.gradients_X(self.posterior.woodbury_vector[:,i:i+1].T, Xnew, self.X) # gradients wrt the diagonal part k_{xx} dv_dX = self.kern.gradients_X(np.eye(Xnew.shape[0]), Xnew) #grads wrt 'Schur' part K_{xf}K_{ff}^{-1}K_{fx} alpha = -2.*np.dot(self.kern.K(Xnew, self.X),self.posterior.woodbury_inv) dv_dX += self.kern.gradients_X(alpha, Xnew, self.X) return dmu_dX, dv_dX def predict_jacobian(self, Xnew, kern=None, full_cov=True): """ Compute the derivatives of the posterior of the GP. Given a set of points at which to predict X* (size [N*,Q]), compute the mean and variance of the derivative. Resulting arrays are sized: dL_dX* -- [N*, Q ,D], where D is the number of output in this GP (usually one). Note that this is the mean and variance of the derivative, not the derivative of the mean and variance! (See predictive_gradients for that) dv_dX* -- [N*, Q], (since all outputs have the same variance) If there is missing data, it is not implemented for now, but there will be one output variance per output dimension. :param X: The points at which to get the predictive gradients. :type X: np.ndarray (Xnew x self.input_dim) :param kern: The kernel to compute the jacobian for. :param boolean full_cov: whether to return the full covariance of the jacobian. :returns: dmu_dX, dv_dX :rtype: [np.ndarray (N*, Q ,D), np.ndarray (N*,Q,(D)) ] Note: We always return sum in input_dim gradients, as the off-diagonals in the input_dim are not needed for further calculations. This is a compromise for increase in speed. 
        Mathematically the jacobian would have another dimension in Q.
        """
        if kern is None:
            kern = self.kern

        mean_jac = np.empty((Xnew.shape[0], Xnew.shape[1], self.output_dim))
        for i in range(self.output_dim):
            mean_jac[:, :, i] = kern.gradients_X(self.posterior.woodbury_vector[:, i:i + 1].T, Xnew, self._predictive_variable)

        dK_dXnew_full = np.empty((self._predictive_variable.shape[0], Xnew.shape[0], Xnew.shape[1]))
        for i in range(self._predictive_variable.shape[0]):
            dK_dXnew_full[i] = kern.gradients_X([[1.]], Xnew, self._predictive_variable[[i]])

        if full_cov:
            dK2_dXdX = kern.gradients_XX([[1.]], Xnew)
        else:
            dK2_dXdX = kern.gradients_XX_diag([[1.]], Xnew)

        def compute_cov_inner(wi):
            if full_cov:
                # full covariance gradients:
                var_jac = dK2_dXdX - np.einsum('qnm,miq->niq', dK_dXnew_full.T.dot(wi), dK_dXnew_full)
            else:
                var_jac = dK2_dXdX - np.einsum('qim,miq->iq', dK_dXnew_full.T.dot(wi), dK_dXnew_full)
            return var_jac

        if self.posterior.woodbury_inv.ndim == 3:
            # Missing data:
            if full_cov:
                var_jac = np.empty((Xnew.shape[0], Xnew.shape[0], Xnew.shape[1], self.output_dim))
                for d in range(self.posterior.woodbury_inv.shape[2]):
                    var_jac[:, :, :, d] = compute_cov_inner(self.posterior.woodbury_inv[:, :, d])
            else:
                var_jac = np.empty((Xnew.shape[0], Xnew.shape[1], self.output_dim))
                for d in range(self.posterior.woodbury_inv.shape[2]):
                    var_jac[:, :, d] = compute_cov_inner(self.posterior.woodbury_inv[:, :, d])
        else:
            var_jac = compute_cov_inner(self.posterior.woodbury_inv)
        return mean_jac, var_jac

    def predict_wishard_embedding(self, Xnew, kern=None, mean=True, covariance=True):
        """
        Predict the Wishart embedding G of the GP. This is the density of the
        input of the GP, defined by the probabilistic function mapping f:

            G = J_mean.T * J_mean + output_dim * J_cov

        :param array-like Xnew: The points at which to evaluate the magnification.
        :param :py:class:`~GPy.kern.Kern` kern: The kernel to use for the magnification.

        Supplying only a part of the learning kernel gives insights into the
        density of the specific kernel part of the input function. E.g. one can
        see how dense the linear part of a kernel is compared to the non-linear
        part etc.
        """
        if kern is None:
            kern = self.kern

        mu_jac, var_jac = self.predict_jacobian(Xnew, kern, full_cov=False)
        mumuT = np.einsum('iqd,ipd->iqp', mu_jac, mu_jac)
        Sigma = np.zeros(mumuT.shape)
        if var_jac.ndim == 3:
            Sigma[(slice(None), ) + np.diag_indices(Xnew.shape[1], 2)] = var_jac.sum(-1)
        else:
            Sigma[(slice(None), ) + np.diag_indices(Xnew.shape[1], 2)] = self.output_dim * var_jac
        G = 0.
        if mean:
            G += mumuT
        if covariance:
            G += Sigma
        return G

    def predict_magnification(self, Xnew, kern=None, mean=True, covariance=True):
        """
        Predict the magnification factor as sqrt(det(G)) for each point N in Xnew.
        """
        G = self.predict_wishard_embedding(Xnew, kern, mean, covariance)
        from ..util.linalg import jitchol
        mag = np.empty(Xnew.shape[0])
        for n in range(Xnew.shape[0]):
            try:
                mag[n] = np.sqrt(np.exp(2 * np.sum(np.log(np.diag(jitchol(G[n, :, :]))))))
            except Exception:
                # fall back to a direct determinant if the Cholesky fails
                mag[n] = np.sqrt(np.linalg.det(G[n, :, :]))
        return mag

    def posterior_samples_f(self, X, size=10, full_cov=True):
        """
        Samples the posterior GP at the points X.

        :param X: The points at which to take the samples.
        :type X: np.ndarray (Nnew x self.input_dim)
        :param size: the number of a posteriori samples.
        :type size: int.
        :param full_cov: whether to return the full covariance matrix, or just the diagonal.
        :type full_cov: bool.
        :returns: fsim: set of simulations
        :rtype: np.ndarray (N x samples)
        """
        m, v = self._raw_predict(X, full_cov=full_cov)
        if self.normalizer is not None:
            m, v = self.normalizer.inverse_mean(m), self.normalizer.inverse_variance(v)

        v = v.reshape(m.size, -1) if len(v.shape) == 3 else v
        if not full_cov:
            fsim = np.random.multivariate_normal(m.flatten(), np.diag(v.flatten()), size).T
        else:
            fsim = np.random.multivariate_normal(m.flatten(), v, size).T
        return fsim

    def posterior_samples(self, X, size=10, full_cov=False, Y_metadata=None):
        """
        Samples the posterior GP at the points X.

        :param X: the points at which to take the samples.
        :type X: np.ndarray (Nnew x self.input_dim.)
        :param size: the number of a posteriori samples.
        :type size: int.
        :param full_cov: whether to return the full covariance matrix, or just the diagonal.
        :type full_cov: bool.
        :param noise_model: for mixed noise likelihood, the noise model to use in the samples.
        :type noise_model: integer.
        :returns: Ysim: set of simulations, a Numpy array (N x samples).
        """
        fsim = self.posterior_samples_f(X, size, full_cov=full_cov)
        Ysim = self.likelihood.samples(fsim, Y_metadata=Y_metadata)
        return Ysim

    def plot_f(self, plot_limits=None, which_data_rows='all',
               which_data_ycols='all', fixed_inputs=[],
               levels=20, samples=0, fignum=None, ax=None, resolution=None,
               plot_raw=True, linecol=None, fillcol=None, Y_metadata=None,
               data_symbol='kx', apply_link=False):
        """
        Plot the GP's view of the world, where the data is normalized and the
        likelihood has not yet been applied.

        This is a call to plot with plot_raw=True. Data will not be plotted in
        this, as the GP's view of the world may live in another space, or other
        units than the data.

        Can plot only part of the data and part of the posterior functions
        using which_data_rows and which_data_ycols.

        :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaults to data limits
        :type plot_limits: np.array
        :param which_data_rows: which of the training data to plot (default all)
        :type which_data_rows: 'all' or a slice object to slice model.X, model.Y
        :param which_data_ycols: when the data has several columns (independent outputs), only plot these
        :type which_data_ycols: 'all' or a list of integers
        :param fixed_inputs: a list of tuples [(i,v), (i,v)...], specifying that input index i should be set to value v.
        :type fixed_inputs: a list of tuples
        :param resolution: the number of intervals to sample the GP on. Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D
        :type resolution: int
        :param levels: for 2D plotting, the number of contour levels to use
        :type levels: int
        :param samples: the number of a posteriori samples to plot
        :type samples: int
        :param fignum: figure to plot on.
        :type fignum: figure number
        :param ax: axes to plot on; if ax is None, create a new figure.
        :type ax: axes handle
        :param linecol: color of line to plot [Tango.colorsHex['darkBlue']]
        :type linecol: color either as Tango.colorsHex object or character ('r' is red, 'g' is green) as is standard in matplotlib
        :param fillcol: color of fill [Tango.colorsHex['lightBlue']]
        :type fillcol: color either as Tango.colorsHex object or character ('r' is red, 'g' is green) as is standard in matplotlib
        :param Y_metadata: additional data associated with Y which may be needed
        :type Y_metadata: dict
        :param data_symbol: symbol as used in matplotlib, by default this is a black cross ('kx')
        :type data_symbol: color either as Tango.colorsHex object or character ('r' is red, 'g' is green) alongside marker type, as is standard in matplotlib.
        :param apply_link: if there is a link function of the likelihood, plot the link(f*) rather than f*
        :type apply_link: boolean
        """
        assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
        from ..plotting.matplot_dep import models_plots
        kw = {}
        if linecol is not None:
            kw['linecol'] = linecol
        if fillcol is not None:
            kw['fillcol'] = fillcol
        return models_plots.plot_fit(self, plot_limits, which_data_rows,
                                     which_data_ycols, fixed_inputs,
                                     levels, samples, fignum, ax, resolution,
                                     plot_raw=plot_raw, Y_metadata=Y_metadata,
                                     data_symbol=data_symbol, apply_link=apply_link, **kw)

    def plot(self, plot_limits=None, which_data_rows='all',
             which_data_ycols='all', fixed_inputs=[],
             levels=20, samples=0, fignum=None, ax=None, resolution=None,
             plot_raw=False, linecol=None, fillcol=None, Y_metadata=None,
             data_symbol='kx', predict_kw=None, plot_training_data=True,
             samples_y=0, apply_link=False):
        """
        Plot the posterior of the GP.
          - In one dimension, the function is plotted with a shaded region identifying two standard deviations.
          - In two dimensions, a contour-plot shows the mean predicted function.
          - In higher dimensions, use fixed_inputs to plot the GP with some of the inputs fixed.

        Can plot only part of the data and part of the posterior functions
        using which_data_rows and which_data_ycols.

        :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaults to data limits
        :type plot_limits: np.array
        :param which_data_rows: which of the training data to plot (default all)
        :type which_data_rows: 'all' or a slice object to slice model.X, model.Y
        :param which_data_ycols: when the data has several columns (independent outputs), only plot these
        :type which_data_ycols: 'all' or a list of integers
        :param fixed_inputs: a list of tuples [(i,v), (i,v)...], specifying that input index i should be set to value v.
        :type fixed_inputs: a list of tuples
        :param resolution: the number of intervals to sample the GP on. Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D
        :type resolution: int
        :param levels: for 2D plotting, the number of contour levels to use
        :type levels: int
        :param samples: the number of a posteriori samples to plot, p(f*|y)
        :type samples: int
        :param fignum: figure to plot on.
        :type fignum: figure number
        :param ax: axes to plot on; if ax is None, create a new figure.
        :type ax: axes handle
        :param linecol: color of line to plot [Tango.colorsHex['darkBlue']]
        :type linecol: color either as Tango.colorsHex object or character ('r' is red, 'g' is green) as is standard in matplotlib
        :param fillcol: color of fill [Tango.colorsHex['lightBlue']]
        :type fillcol: color either as Tango.colorsHex object or character ('r' is red, 'g' is green) as is standard in matplotlib
        :param Y_metadata: additional data associated with Y which may be needed
        :type Y_metadata: dict
        :param data_symbol: symbol as used in matplotlib, by default this is a black cross ('kx')
        :type data_symbol: color either as Tango.colorsHex object or character ('r' is red, 'g' is green) alongside marker type, as is standard in matplotlib.
        :param plot_training_data: whether or not to plot the training points
        :type plot_training_data: boolean
        :param samples_y: the number of a posteriori samples to plot, p(y*|y)
        :type samples_y: int
        :param apply_link: if there is a link function of the likelihood, plot the link(f*) rather than f*, when plotting posterior samples f
        :type apply_link: boolean
        """
        assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
        from ..plotting.matplot_dep import models_plots
        kw = {}
        if linecol is not None:
            kw['linecol'] = linecol
        if fillcol is not None:
            kw['fillcol'] = fillcol
        return models_plots.plot_fit(self, plot_limits, which_data_rows,
                                     which_data_ycols, fixed_inputs,
                                     levels, samples, fignum, ax, resolution,
                                     plot_raw=plot_raw, Y_metadata=Y_metadata,
                                     data_symbol=data_symbol, predict_kw=predict_kw,
                                     plot_training_data=plot_training_data,
                                     samples_y=samples_y, apply_link=apply_link, **kw)

    def plot_data(self, which_data_rows='all',
                  which_data_ycols='all', visible_dims=None,
                  fignum=None, ax=None, data_symbol='kx'):
        """
        Plot the training data
          - For higher dimensions than two, use fixed_inputs to plot the data points with some of the inputs fixed.

        Can plot only part of the data using which_data_rows and which_data_ycols.

        :param which_data_rows: which of the training data to plot (default all)
        :type which_data_rows: 'all' or a slice object to slice model.X, model.Y
        :param which_data_ycols: when the data has several columns (independent outputs), only plot these
        :type which_data_ycols: 'all' or a list of integers
        :param visible_dims: an array specifying the input dimensions to plot (maximum two)
        :type visible_dims: a numpy array
        :param fignum: figure to plot on.
        :type fignum: figure number
        :param ax: axes to plot on; if ax is None, create a new figure.
        :type ax: axes handle
        :param data_symbol: symbol as used in matplotlib, by default this is a black cross ('kx')
        :type data_symbol: color either as Tango.colorsHex object or character ('r' is red, 'g' is green) alongside marker type, as is standard in matplotlib.
        """
        assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
        from ..plotting.matplot_dep import models_plots
        kw = {}
        return models_plots.plot_data(self, which_data_rows,
                                      which_data_ycols, visible_dims,
                                      fignum, ax, data_symbol, **kw)

    def errorbars_trainset(self, which_data_rows='all',
                           which_data_ycols='all', fixed_inputs=[],
                           fignum=None, ax=None, linecol=None,
                           data_symbol='kx', predict_kw=None,
                           plot_training_data=True, lw=None):
        """
        Plot the posterior error bars corresponding to the training data
          - For higher dimensions than two, use fixed_inputs to plot the data points with some of the inputs fixed.

        Can plot only part of the data using which_data_rows and which_data_ycols.

        :param which_data_rows: which of the training data to plot (default all)
        :type which_data_rows: 'all' or a slice object to slice model.X, model.Y
        :param which_data_ycols: when the data has several columns (independent outputs), only plot these
        :type which_data_ycols: 'all' or a list of integers
        :param fixed_inputs: a list of tuples [(i,v), (i,v)...], specifying that input index i should be set to value v.
        :type fixed_inputs: a list of tuples
        :param fignum: figure to plot on.
        :type fignum: figure number
        :param ax: axes to plot on.
        :type ax: axes handle
        :param plot_training_data: whether or not to plot the training points
        :type plot_training_data: boolean
        """
        assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
        from ..plotting.matplot_dep import models_plots
        kw = {}
        if lw is not None:
            kw['lw'] = lw
        return models_plots.errorbars_trainset(self, which_data_rows,
                                               which_data_ycols, fixed_inputs,
                                               fignum, ax, linecol, data_symbol,
                                               predict_kw, plot_training_data, **kw)

    def plot_magnification(self, labels=None, which_indices=None,
                           resolution=50, ax=None, marker='o', s=40,
                           fignum=None, legend=True,
                           plot_limits=None,
                           aspect='auto', updates=False,
                           plot_inducing=True, kern=None, **kwargs):
        import sys
        assert "matplotlib" in sys.modules, "matplotlib package has not been imported."
        from ..plotting.matplot_dep import dim_reduction_plots
        return dim_reduction_plots.plot_magnification(self, labels, which_indices,
                                                      resolution, ax, marker, s,
                                                      fignum, plot_inducing, legend,
                                                      plot_limits, aspect, updates, **kwargs)

    def input_sensitivity(self, summarize=True):
        """
        Returns the sensitivity for each dimension of this model
        """
        return self.kern.input_sensitivity(summarize=summarize)

    def optimize(self, optimizer=None, start=None, **kwargs):
        """
        Optimize the model using self.log_likelihood and
        self.log_likelihood_gradient, as well as self.priors.

        kwargs are passed to the optimizer.
        They can be:

        :param max_f_eval: maximum number of function evaluations
        :type max_f_eval: int
        :param messages: whether to display messages during optimisation
        :type messages: bool
        :param optimizer: which optimizer to use (defaults to self.preferred_optimizer); a range of optimisers can be found in :module:`~GPy.inference.optimization`, they include 'scg', 'lbfgs', 'tnc'.
        :type optimizer: string
        """
        self.inference_method.on_optimization_start()
        try:
            super(GP, self).optimize(optimizer, start, **kwargs)
        except KeyboardInterrupt:
            print("KeyboardInterrupt caught, calling on_optimization_end() to round things up")
            self.inference_method.on_optimization_end()
            raise

    def infer_newX(self, Y_new, optimize=True):
        """
        Infer X for the new observed data *Y_new*.

        :param Y_new: the new observed data for inference
        :type Y_new: numpy.ndarray
        :param optimize: whether to optimize the location of new X (True by default)
        :type optimize: boolean
        :return: a tuple containing the posterior estimation of X and the model that optimizes X
        :rtype: (:class:`~GPy.core.parameterization.variational.VariationalPosterior` and numpy.ndarray, :class:`~GPy.core.model.Model`)
        """
        from ..inference.latent_function_inference.inferenceX import infer_newX
        return infer_newX(self, Y_new, optimize=optimize)

    def log_predictive_density(self, x_test, y_test, Y_metadata=None):
        """
        Calculation of the log predictive density

        .. math::
            p(y_{*}|D) = \int p(y_{*}|f_{*}) \, p(f_{*}|\mu_{*}, \sigma^{2}_{*}) \, df_{*}

        :param x_test: test locations (x_{*})
        :type x_test: (Nx1) array
        :param y_test: test observations (y_{*})
        :type y_test: (Nx1) array
        :param Y_metadata: metadata associated with the test points
        """
        mu_star, var_star = self._raw_predict(x_test)
        return self.likelihood.log_predictive_density(y_test, mu_star, var_star, Y_metadata=Y_metadata)

    def log_predictive_density_sampling(self, x_test, y_test, Y_metadata=None, num_samples=1000):
        """
        Calculation of the log predictive density by sampling

        .. math::
            p(y_{*}|D) = \int p(y_{*}|f_{*}) \, p(f_{*}|\mu_{*}, \sigma^{2}_{*}) \, df_{*}

        :param x_test: test locations (x_{*})
        :type x_test: (Nx1) array
        :param y_test: test observations (y_{*})
        :type y_test: (Nx1) array
        :param Y_metadata: metadata associated with the test points
        :param num_samples: number of samples to use in monte carlo integration
        :type num_samples: int
        """
        mu_star, var_star = self._raw_predict(x_test)
        return self.likelihood.log_predictive_density_sampling(y_test, mu_star, var_star, Y_metadata=Y_metadata, num_samples=num_samples)
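# ----------------------------------------------------------------------
# Illustrative usage sketch (not part of the class above; the data below are
# made up). It shows the typical lifecycle of a GP model -- construct,
# optimize, predict, sample -- via GPRegression, a thin convenience subclass
# of the GP class defined in this module.
def _gp_usage_example():
    import GPy  # imported lazily so this demo does not affect module import
    rng = np.random.RandomState(0)
    X = rng.uniform(-3., 3., (20, 1))                  # 20 one-dimensional inputs
    Y = np.sin(X) + 0.05 * rng.randn(20, 1)            # noisy observations

    m = GPy.models.GPRegression(X, Y, GPy.kern.RBF(input_dim=1))
    m.optimize()                                       # maximize the log marginal likelihood

    X_new = np.linspace(-3., 3., 100)[:, None]
    mean, var = m.predict(X_new)                       # predictive mean/variance through the likelihood
    lower, upper = m.predict_quantiles(X_new)          # default (2.5, 97.5) quantiles
    f_samples = m.posterior_samples_f(X_new, size=5)   # draws from p(f*|y)
    return mean, var, lower, upper, f_samples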
mit
sinhrks/scikit-learn
examples/manifold/plot_lle_digits.py
138
8594
""" ============================================================================= Manifold learning on handwritten digits: Locally Linear Embedding, Isomap... ============================================================================= An illustration of various embeddings on the digits dataset. The RandomTreesEmbedding, from the :mod:`sklearn.ensemble` module, is not technically a manifold embedding method, as it learn a high-dimensional representation on which we apply a dimensionality reduction method. However, it is often useful to cast a dataset into a representation in which the classes are linearly-separable. t-SNE will be initialized with the embedding that is generated by PCA in this example, which is not the default setting. It ensures global stability of the embedding, i.e., the embedding does not depend on random initialization. """ # Authors: Fabian Pedregosa <fabian.pedregosa@inria.fr> # Olivier Grisel <olivier.grisel@ensta.org> # Mathieu Blondel <mathieu@mblondel.org> # Gael Varoquaux # License: BSD 3 clause (C) INRIA 2011 print(__doc__) from time import time import numpy as np import matplotlib.pyplot as plt from matplotlib import offsetbox from sklearn import (manifold, datasets, decomposition, ensemble, discriminant_analysis, random_projection) digits = datasets.load_digits(n_class=6) X = digits.data y = digits.target n_samples, n_features = X.shape n_neighbors = 30 #---------------------------------------------------------------------- # Scale and visualize the embedding vectors def plot_embedding(X, title=None): x_min, x_max = np.min(X, 0), np.max(X, 0) X = (X - x_min) / (x_max - x_min) plt.figure() ax = plt.subplot(111) for i in range(X.shape[0]): plt.text(X[i, 0], X[i, 1], str(digits.target[i]), color=plt.cm.Set1(y[i] / 10.), fontdict={'weight': 'bold', 'size': 9}) if hasattr(offsetbox, 'AnnotationBbox'): # only print thumbnails with matplotlib > 1.0 shown_images = np.array([[1., 1.]]) # just something big for i in range(digits.data.shape[0]): dist = np.sum((X[i] - shown_images) ** 2, 1) if np.min(dist) < 4e-3: # don't show points that are too close continue shown_images = np.r_[shown_images, [X[i]]] imagebox = offsetbox.AnnotationBbox( offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r), X[i]) ax.add_artist(imagebox) plt.xticks([]), plt.yticks([]) if title is not None: plt.title(title) #---------------------------------------------------------------------- # Plot images of the digits n_img_per_row = 20 img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row)) for i in range(n_img_per_row): ix = 10 * i + 1 for j in range(n_img_per_row): iy = 10 * j + 1 img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8)) plt.imshow(img, cmap=plt.cm.binary) plt.xticks([]) plt.yticks([]) plt.title('A selection from the 64-dimensional digits dataset') #---------------------------------------------------------------------- # Random 2D projection using a random unitary matrix print("Computing random projection") rp = random_projection.SparseRandomProjection(n_components=2, random_state=42) X_projected = rp.fit_transform(X) plot_embedding(X_projected, "Random Projection of the digits") #---------------------------------------------------------------------- # Projection on to the first 2 principal components print("Computing PCA projection") t0 = time() X_pca = decomposition.TruncatedSVD(n_components=2).fit_transform(X) plot_embedding(X_pca, "Principal Components projection of the digits (time %.2fs)" % (time() - t0)) 
#---------------------------------------------------------------------- # Projection on to the first 2 linear discriminant components print("Computing Linear Discriminant Analysis projection") X2 = X.copy() X2.flat[::X.shape[1] + 1] += 0.01 # Make X invertible t0 = time() X_lda = discriminant_analysis.LinearDiscriminantAnalysis(n_components=2).fit_transform(X2, y) plot_embedding(X_lda, "Linear Discriminant projection of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # Isomap projection of the digits dataset print("Computing Isomap embedding") t0 = time() X_iso = manifold.Isomap(n_neighbors, n_components=2).fit_transform(X) print("Done.") plot_embedding(X_iso, "Isomap projection of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # Locally linear embedding of the digits dataset print("Computing LLE embedding") clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2, method='standard') t0 = time() X_lle = clf.fit_transform(X) print("Done. Reconstruction error: %g" % clf.reconstruction_error_) plot_embedding(X_lle, "Locally Linear Embedding of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # Modified Locally linear embedding of the digits dataset print("Computing modified LLE embedding") clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2, method='modified') t0 = time() X_mlle = clf.fit_transform(X) print("Done. Reconstruction error: %g" % clf.reconstruction_error_) plot_embedding(X_mlle, "Modified Locally Linear Embedding of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # HLLE embedding of the digits dataset print("Computing Hessian LLE embedding") clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2, method='hessian') t0 = time() X_hlle = clf.fit_transform(X) print("Done. Reconstruction error: %g" % clf.reconstruction_error_) plot_embedding(X_hlle, "Hessian Locally Linear Embedding of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # LTSA embedding of the digits dataset print("Computing LTSA embedding") clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2, method='ltsa') t0 = time() X_ltsa = clf.fit_transform(X) print("Done. Reconstruction error: %g" % clf.reconstruction_error_) plot_embedding(X_ltsa, "Local Tangent Space Alignment of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # MDS embedding of the digits dataset print("Computing MDS embedding") clf = manifold.MDS(n_components=2, n_init=1, max_iter=100) t0 = time() X_mds = clf.fit_transform(X) print("Done. 
Stress: %f" % clf.stress_) plot_embedding(X_mds, "MDS embedding of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # Random Trees embedding of the digits dataset print("Computing Totally Random Trees embedding") hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0, max_depth=5) t0 = time() X_transformed = hasher.fit_transform(X) pca = decomposition.TruncatedSVD(n_components=2) X_reduced = pca.fit_transform(X_transformed) plot_embedding(X_reduced, "Random forest embedding of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # Spectral embedding of the digits dataset print("Computing Spectral embedding") embedder = manifold.SpectralEmbedding(n_components=2, random_state=0, eigen_solver="arpack") t0 = time() X_se = embedder.fit_transform(X) plot_embedding(X_se, "Spectral embedding of the digits (time %.2fs)" % (time() - t0)) #---------------------------------------------------------------------- # t-SNE embedding of the digits dataset print("Computing t-SNE embedding") tsne = manifold.TSNE(n_components=2, init='pca', random_state=0) t0 = time() X_tsne = tsne.fit_transform(X) plot_embedding(X_tsne, "t-SNE embedding of the digits (time %.2fs)" % (time() - t0)) plt.show()
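#----------------------------------------------------------------------
# Optional follow-up (a sketch, not part of the original example): one crude
# but quick way to compare embeddings quantitatively is to cross-validate a
# k-nearest-neighbors classifier on each 2D embedding. Higher accuracy
# suggests the embedding preserves class structure better. On newer
# scikit-learn releases, import cross_val_score from sklearn.model_selection
# instead of sklearn.cross_validation.
from sklearn.neighbors import KNeighborsClassifier
from sklearn.cross_validation import cross_val_score

for name, embedding in [("PCA", X_pca), ("Isomap", X_iso), ("t-SNE", X_tsne)]:
    knn = KNeighborsClassifier(n_neighbors=5)
    scores = cross_val_score(knn, embedding, y, cv=5)
    print("%-8s kNN accuracy on 2D embedding: %.3f +/- %.3f"
          % (name, scores.mean(), scores.std()))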
bsd-3-clause
yaojenkuo/stockflow
ctrls/CandleDrawer.py
2
3513
#!/bin/python
# -*- coding: utf-8 -*-

import numpy as np
from settings import *
from datetime import datetime
from ctrls.Reader import Reader
import matplotlib.pyplot as plt
from matplotlib.finance import candlestick_ohlc

class CandleDrawer():
    '''Draw the candlestick (K-line) chart for the last n days, with the
    MA20 Bollinger Band, the high/low channel, and volume.'''

    def _getBooleanBand(self, series):
        bool_next = []         # distribution of the last n days around the moving average
        bool_up_series = []    # upper bound of the Bollinger Band
        ma_series = []         # middle of the Bollinger Band (moving average)
        bool_down_series = []  # lower bound of the Bollinger Band

        for i in xrange(CANDLE_BOOL_NUM, len(series)):
            ma_series.append(np.mean(series[i - CANDLE_BOOL_NUM:i]))

            # Bollinger Band
            # distribution of the last n days around the moving average
            bool_next.append(series[i] - ma_series[-1])
            if len(bool_next) > CANDLE_BOOL_NUM:
                bool_next.pop(0)

            # band width (two standard deviations)
            bool_width = 2 * np.std(bool_next)
            bool_up_series.append(ma_series[-1] + bool_width)
            bool_down_series.append(ma_series[-1] - bool_width)

        return bool_up_series, ma_series, bool_down_series

    def _getFigTitle(self, number):
        t = datetime.now()
        return ('%s, Update: %s/%s/%s %s:%s:%s' % (number,
            str(t.year), str(t.month), str(t.day),
            str(t.hour), str(t.minute), str(t.second))
        )

    def draw(self, number, length = CANDLE_FIG_LENGTH):
        reader = Reader(number)
        series = [[] for x in xrange(7)]

        # Candle Stick
        candle_sticks = []

        idx = -1
        while True:
            idx += 1

            row = reader.getInput()
            if row == None: break

            for i in [1, 3, 4, 5, 6]:
                series[i].append(float(row[i]))

            # matplotlib's candlestick_ohlc draws the K-line chart from tuples
            # given, in this code, as [index, close, high, low, open]
            candle_sticks.append((
                idx,
                float(row[6]),
                float(row[4]),
                float(row[5]),
                float(row[3])
            ))

        bool_up_series, ma_series, bool_down_series = self._getBooleanBand(series[6])

        # Draw Figure
        line_width = CANDLE_FIG_LINE_WIDTH
        fig, axarr = plt.subplots(2, sharex=True)
        candlestick_ohlc(axarr[0], candle_sticks[-length:], width=CANDLE_STICK_WIDTH)

        x_axis = range(len(series[6]))

        # set zorder so the candlesticks are drawn on top
        axarr[0].plot(x_axis[-length:], ma_series[-length:], c='#00ff00', ls='-', lw=line_width, zorder=-5)
        axarr[0].plot(x_axis[-length:], bool_up_series[-length:], c='#ff0000', ls='-', lw=line_width, zorder=-4)
        axarr[0].plot(x_axis[-length:], bool_down_series[-length:], c='#0000ff', ls='-', lw=line_width, zorder=-3)
        axarr[0].plot(x_axis[-length:], series[4][-length:], c='#ff3399', ls='-', lw=line_width, zorder=-2)
        axarr[0].plot(x_axis[-length:], series[5][-length:], c='#0099ff', ls='-', lw=line_width, zorder=-1)
        axarr[0].set_title(self._getFigTitle(number))

        axarr[1].plot(x_axis[-length:], series[1][-length:], c='#000000', ls='-', lw=line_width)

        # set figure arguments
        fig.set_size_inches(FIGURE_WIDTH, FIGURE_HEIGHT)

        # output figure
        fig.savefig(CANDLE_FIG_PATH + '/' + number + '.png', dpi=FIGURE_DPI)

        plt.clf()
        plt.close('all')
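# ----------------------------------------------------------------------
# Illustrative usage sketch (assumes the stockflow settings module is
# configured and that ctrls.Reader can resolve the stock number from local
# data; '2330' below is a placeholder). draw() writes a PNG into
# CANDLE_FIG_PATH.
if __name__ == '__main__':
    drawer = CandleDrawer()
    drawer.draw('2330')                # default window length = CANDLE_FIG_LENGTH
    drawer.draw('2330', length=120)    # or an explicit 120-day window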
mit
PatrickChrist/scikit-learn
examples/svm/plot_svm_anova.py
250
2000
""" ================================================= SVM-Anova: SVM with univariate feature selection ================================================= This example shows how to perform univariate feature before running a SVC (support vector classifier) to improve the classification scores. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm, datasets, feature_selection, cross_validation from sklearn.pipeline import Pipeline ############################################################################### # Import some data to play with digits = datasets.load_digits() y = digits.target # Throw away data, to be in the curse of dimension settings y = y[:200] X = digits.data[:200] n_samples = len(y) X = X.reshape((n_samples, -1)) # add 200 non-informative features X = np.hstack((X, 2 * np.random.random((n_samples, 200)))) ############################################################################### # Create a feature-selection transform and an instance of SVM that we # combine together to have an full-blown estimator transform = feature_selection.SelectPercentile(feature_selection.f_classif) clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))]) ############################################################################### # Plot the cross-validation score as a function of percentile of features score_means = list() score_stds = list() percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100) for percentile in percentiles: clf.set_params(anova__percentile=percentile) # Compute cross-validation score using all CPUs this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1) score_means.append(this_scores.mean()) score_stds.append(this_scores.std()) plt.errorbar(percentiles, score_means, np.array(score_stds)) plt.title( 'Performance of the SVM-Anova varying the percentile of features selected') plt.xlabel('Percentile') plt.ylabel('Prediction rate') plt.axis('tight') plt.show()
bsd-3-clause
NEONScience/NEON-Data-Skills
tutorials/Python/Lidar/lidar-biomass/calc-biomass_py/calc-biomass_py.py
1
20510
#!/usr/bin/env python
# coding: utf-8

# ---
# syncID: e6ccf19a4b454ca594388eeaa88ebe12
# title: "Calculate Vegetation Biomass from LiDAR Data in Python"
# description: "Learn to calculate the biomass of standing vegetation using a canopy height model data product."
# dateCreated: 2017-06-21
# authors: Tristan Goulden
# contributors: Donal O'Leary
# estimatedTime: 1 hour
# packagesLibraries: numpy, gdal, matplotlib, matplotlib.pyplot, os
# topics: lidar, remote-sensing
# languagesTool: python
# dataProduct: DP1.10098.001, DP3.30015.001,
# code1: https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/tutorials/Python/Lidar/lidar-biomass/calc-biomass_py/calc-biomass_py.ipynb
# tutorialSeries: intro-lidar-py-series
# urlTitle: calc-biomass-py
# ---

# <div id="ds-objectives" markdown="1">
#
# In this tutorial, we will calculate the biomass for a section of the SJER site. We
# will be using the Canopy Height Model discrete LiDAR data product as well as NEON
# field data on vegetation. This tutorial will calculate biomass for individual
# trees in the forest.
#
# ### Objectives
# After completing this tutorial, you will be able to:
#
# * Apply a Gaussian smoothing kernel for high-frequency spatial filtering
# * Apply a watershed segmentation algorithm for delineating tree crowns
# * Calculate biomass predictor variables from a CHM
# * Set up training data for biomass predictions
# * Apply a Random Forest machine learning approach to calculate biomass
#
#
# ### Install Python Packages
#
# * **numpy**
# * **gdal**
# * **matplotlib**
# * **matplotlib.pyplot**
# * **os**
#
#
# ### Download Data
#
# If you have already downloaded the data set for the Data Institute, you have the
# data for this tutorial within the SJER directory. If you would like to just
# download the data for this tutorial use the following link.
#
# <a href="https://neondata.sharefile.com/d-s58db39240bf49ac8" class="link--button link--arrow">
# Download the Biomass Calculation teaching data subset</a>
#
# </div>

# In this tutorial, we will calculate the biomass for a section of the SJER site. We
# will be using the Canopy Height Model discrete LiDAR data product as well as NEON
# field data on vegetation. This tutorial will calculate biomass for individual
# trees in the forest.
#
# The calculation of biomass consists of four primary steps:
#
# 1. Delineating individual tree crowns
# 2. Calculating predictor variables for all individuals
# 3. Collecting training data
# 4. Applying a regression model to estimate biomass from predictors
#
# In this tutorial we will use a watershed segmentation algorithm for delineating
# tree crowns (step 1) and a Random Forest (RF) machine learning algorithm for
# relating the predictor variables to biomass (step 4). The predictor variables were
# selected following suggestions by Gleason et al. (2012) and biomass estimates were
# determined from DBH (diameter at breast height) measurements following relationships
# given in Jenkins et al. (2003).
#
# ## Get Started
#
# First, we need to specify the directory where we will find and save the data needed for this tutorial. You will need to change this line to suit your local machine. I have decided to save my data in the following directory:

# In[1]:

data_path = '/Users/olearyd/Git/data/'

# Next, we will import several of the typical libraries.
# In[2]:

import numpy as np
import os
import gdal, osr
import matplotlib.pyplot as plt
import sys
from scipy import ndimage as ndi
get_ipython().run_line_magic('matplotlib', 'inline')

# Next, we will add libraries from scikit-image and scikit-learn which will help with the watershed delineation, the determination of predictor variables, and the random forest algorithm

# In[3]:

#Import biomass specific libraries
from skimage.morphology import watershed
from skimage.feature import peak_local_max
from skimage.measure import regionprops
from sklearn.ensemble import RandomForestRegressor

# ## Define functions
#
# Now we will define a few functions that allow us to more easily work with the NEON data.
#
# * `plot_band_array`: function to plot NEON spatial data.

# In[4]:

#Define plot band array function
def plot_band_array(band_array, image_extent, title, cmap_title, colormap, colormap_limits):
    plt.imshow(band_array, extent=image_extent)
    cbar = plt.colorbar(); plt.set_cmap(colormap); plt.clim(colormap_limits)
    cbar.set_label(cmap_title, rotation=270, labelpad=20)
    plt.title(title); ax = plt.gca()
    ax.ticklabel_format(useOffset=False, style='plain')
    rotatexlabels = plt.setp(ax.get_xticklabels(), rotation=90)

# * `array2raster`: function to output geotiff files.

# In[5]:

def array2raster(newRasterfn, rasterOrigin, pixelWidth, pixelHeight, array, epsg):

    cols = array.shape[1]
    rows = array.shape[0]
    originX = rasterOrigin[0]
    originY = rasterOrigin[1]

    driver = gdal.GetDriverByName('GTiff')
    outRaster = driver.Create(newRasterfn, cols, rows, 1, gdal.GDT_Float32)
    outRaster.SetGeoTransform((originX, pixelWidth, 0, originY, 0, pixelHeight))
    outband = outRaster.GetRasterBand(1)
    outband.WriteArray(array)
    outRasterSRS = osr.SpatialReference()
    outRasterSRS.ImportFromEPSG(epsg)
    outRaster.SetProjection(outRasterSRS.ExportToWkt())
    outband.FlushCache()

# * `raster2array`: function to convert rasters to an array.

# In[6]:

def raster2array(geotif_file):
    metadata = {}
    dataset = gdal.Open(geotif_file)
    metadata['array_rows'] = dataset.RasterYSize
    metadata['array_cols'] = dataset.RasterXSize
    metadata['bands'] = dataset.RasterCount
    metadata['driver'] = dataset.GetDriver().LongName
    metadata['projection'] = dataset.GetProjection()
    metadata['geotransform'] = dataset.GetGeoTransform()

    mapinfo = dataset.GetGeoTransform()
    metadata['pixelWidth'] = mapinfo[1]
    metadata['pixelHeight'] = mapinfo[5]

    metadata['ext_dict'] = {}
    metadata['ext_dict']['xMin'] = mapinfo[0]
    metadata['ext_dict']['xMax'] = mapinfo[0] + dataset.RasterXSize / mapinfo[1]
    metadata['ext_dict']['yMin'] = mapinfo[3] + dataset.RasterYSize / mapinfo[5]
    metadata['ext_dict']['yMax'] = mapinfo[3]

    metadata['extent'] = (metadata['ext_dict']['xMin'], metadata['ext_dict']['xMax'],
                          metadata['ext_dict']['yMin'], metadata['ext_dict']['yMax'])

    if metadata['bands'] == 1:
        raster = dataset.GetRasterBand(1)
        metadata['noDataValue'] = raster.GetNoDataValue()
        metadata['scaleFactor'] = raster.GetScale()

        # band statistics
        metadata['bandstats'] = {}  # make a nested dictionary to store band stats in same
        stats = raster.GetStatistics(True, True)
        metadata['bandstats']['min'] = round(stats[0], 2)
        metadata['bandstats']['max'] = round(stats[1], 2)
        metadata['bandstats']['mean'] = round(stats[2], 2)
        metadata['bandstats']['stdev'] = round(stats[3], 2)

        array = dataset.GetRasterBand(1).ReadAsArray(0, 0,
                                                     metadata['array_cols'],
                                                     metadata['array_rows']).astype(np.float)
        array[array == int(metadata['noDataValue'])] = np.nan
        array = array / metadata['scaleFactor']
        return array, metadata

    elif metadata['bands'] > 1:
        print('More than one band ... need to modify function for the case of multiple bands')
# * `crown_geometric_volume_pth`: function to get the tree crown volume.

# In[7]:

def crown_geometric_volume_pth(tree_data, min_tree_height, pth):
    p = np.percentile(tree_data, pth)
    tree_data_pth = [v if v < p else p for v in tree_data]
    crown_geometric_volume_pth = np.sum(tree_data_pth - min_tree_height)
    return crown_geometric_volume_pth, p

# * `get_predictors`: function to get the trees from the biomass data.

# In[8]:

def get_predictors(tree, chm_array, labels):
    indexes_of_tree = np.asarray(np.where(labels == tree.label)).T
    tree_crown_heights = chm_array[indexes_of_tree[:, 0], indexes_of_tree[:, 1]]

    full_crown = np.sum(tree_crown_heights - np.min(tree_crown_heights))

    crown50, p50 = crown_geometric_volume_pth(tree_crown_heights, tree.min_intensity, 50)
    crown60, p60 = crown_geometric_volume_pth(tree_crown_heights, tree.min_intensity, 60)
    crown70, p70 = crown_geometric_volume_pth(tree_crown_heights, tree.min_intensity, 70)

    return [tree.label,
            np.float(tree.area),
            tree.major_axis_length,
            tree.max_intensity,
            tree.min_intensity,
            p50, p60, p70,
            full_crown,
            crown50, crown60, crown70]

# ## Canopy Height Data
#
# With everything set up, we can now start working with our data by defining the file path to our CHM file. Note that you will need to change this and subsequent filepaths according to your local machine.

# In[9]:

chm_file = data_path + 'NEON_D17_SJER_DP3_256000_4106000_CHM.tif'

# When we output the results, we will want to include the same file information as the input, so we will gather the file name information.

# In[10]:

#Get info from chm file for outputting results
just_chm_file = os.path.basename(chm_file)
just_chm_file_split = just_chm_file.split(sep="_")

# Now we will get the CHM data...

# In[11]:

chm_array, chm_array_metadata = raster2array(chm_file)

# ..., plot it, and save the figure.

# In[12]:

#Plot the original CHM
plt.figure(1)

#Plot the CHM figure
plot_band_array(chm_array, chm_array_metadata['extent'],
                'Canopy height Model',
                'Canopy height (m)',
                'Greens', [0, 9])
plt.savefig(data_path + just_chm_file[0:-4] + '_CHM.png', dpi=300, orientation='landscape',
            bbox_inches='tight',
            pad_inches=0.1)

# It looks like SJER primarily has low vegetation with scattered taller trees.
#
# ## Create Filtered CHM
#
# Now we will use a Gaussian smoothing kernel (convolution) across the data set to remove spurious high vegetation points. This will help ensure we are finding the treetops properly before running the watershed segmentation algorithm.
#
# For different forest types it may be necessary to change the input parameters. Information on the function can be found in the <a href="https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.filters.gaussian_filter.html" target="_blank">SciPy documentation</a>.
#
# Of most importance are the second and fifth inputs. The second input defines the standard deviation of the Gaussian smoothing kernel. Too large a value will apply too much smoothing; too small and some spurious high points may be left behind. The fifth, the truncate value, controls after how many standard deviations the Gaussian kernel will get cut off (since it theoretically goes to infinity).

# In[13]:

#Smooth the CHM using a gaussian filter to remove spurious points
chm_array_smooth = ndi.gaussian_filter(chm_array, 2,
                                       mode='constant', cval=0, truncate=2.0)
chm_array_smooth[chm_array == 0] = 0

# Now save a copy of the filtered CHM. We will later use this in our code, so we'll output it into our data directory.
# In[14]:

#Save the smoothed CHM
array2raster(data_path + 'chm_filter.tif',
             (chm_array_metadata['ext_dict']['xMin'], chm_array_metadata['ext_dict']['yMax']),
             1, -1,
             np.array(chm_array_smooth, dtype=float),
             32611)

# ## Determine local maximums
#
# Now we will run an algorithm to determine local maximums within the image. Setting indices to 'False' returns a raster of the maximum points, as opposed to a list of coordinates. The footprint parameter is an area where only a single peak can be found. This should be approximately the size of the smallest tree. Information on more sophisticated methods to define the window can be found in Chen (2006).

# In[15]:

#Calculate local maximum points in the smoothed CHM
local_maxi = peak_local_max(chm_array_smooth, indices=False, footprint=np.ones((5, 5)))

# Our new object `local_maxi` is an array of boolean values where each pixel is identified as either being the local maximum (`True`) or not being the local maximum (`False`).

# In[16]:

local_maxi

# This is very helpful, but it can be difficult to visualize boolean values using our typical numeric plotting procedures as defined in the `plot_band_array` function above. Therefore, we will need to convert this boolean array to a numeric format to use this function. Booleans convert easily to integers with values of `False=0` and `True=1` using the `.astype(int)` method.

# In[17]:

local_maxi.astype(int)

# Next, we can plot the raster of local maximums by coercing the boolean array into an array of integers inline. The following figure shows the difference in finding local maximums for a filtered vs. non-filtered CHM.
#
# We will save the graphics (.png) in an outputs folder sister to our working directory and data outputs (.tif) to our data directory.

# In[18]:

#Plot the local maximums
plt.figure(2)
plot_band_array(local_maxi.astype(int), chm_array_metadata['extent'],
                'Maximum',
                'Maxi',
                'Greys',
                [0, 1])

plt.savefig(data_path + just_chm_file[0:-4] + '_Maximums.png',
            dpi=300, orientation='landscape',
            bbox_inches='tight', pad_inches=0.1)

array2raster(data_path + 'maximum.tif',
             (chm_array_metadata['ext_dict']['xMin'], chm_array_metadata['ext_dict']['yMax']),
             1, -1, np.array(local_maxi, dtype=np.float32), 32611)

# If we were to look at the overlap between the tree crowns and the local maxima from each method, it would appear a bit like this raster.
#
# <figure>
#     <a href="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/raster-general/raster-classification-filter-vs-nonfilter.jpg">
#     <img src="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/raster-general/raster-classification-filter-vs-nonfilter.jpg"></a>
#     <figcaption> The difference in finding local maximums for a filtered vs.
#     non-filtered CHM.
#     Source: National Ecological Observatory Network (NEON)
#     </figcaption>
# </figure>
#
#
# Apply labels to all of the local maximum points

# In[19]:

#Identify all the maximum points
markers = ndi.label(local_maxi)[0]

# Next we will create a mask layer of all of the vegetation points so that the watershed segmentation will only occur on the trees and not extend into the surrounding ground points. Since 0 represents ground points in the CHM, setting the mask to 1 wherever the CHM is not zero will define the mask.

# In[20]:

#Create a CHM mask so the segmentation will only occur on the trees
chm_mask = chm_array_smooth
chm_mask[chm_array_smooth != 0] = 1

# ## Watershed segmentation
#
# As in a river system, a watershed is bounded by a ridge that divides areas.
# Here our watersheds are the individual tree canopies and the ridge is the delineation between each one.
#
# <figure>
#     <a href="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/raster-general/raster-classification-watershed-segments.png">
#     <img src="https://raw.githubusercontent.com/NEONScience/NEON-Data-Skills/main/graphics/raster-general/raster-classification-watershed-segments.png"></a>
#     <figcaption> A raster classified based on watershed segmentation.
#     Source: National Ecological Observatory Network (NEON)
#     </figcaption>
# </figure>
#
# Next, we will perform the watershed segmentation, which produces a raster of labels.

# In[21]:

#Perform watershed segmentation
labels = watershed(chm_array_smooth, markers, mask=chm_mask)
labels_for_plot = labels.copy()
labels_for_plot = np.array(labels_for_plot, dtype=np.float32)
labels_for_plot[labels_for_plot == 0] = np.nan
max_labels = np.max(labels)

# In[22]:

#Plot the segments
plot_band_array(labels_for_plot, chm_array_metadata['extent'],
                'Crown Segmentation', 'Tree Crown Number',
                'Spectral', [0, max_labels])

plt.savefig(data_path + just_chm_file[0:-4] + '_Segmentation.png',
            dpi=300, orientation='landscape',
            bbox_inches='tight', pad_inches=0.1)

array2raster(data_path + 'labels.tif',
             (chm_array_metadata['ext_dict']['xMin'],
              chm_array_metadata['ext_dict']['yMax']),
             1, -1, np.array(labels, dtype=float), 32611)

# Now we will get several properties of the individual trees that will be used as predictor variables.

# In[23]:

#Get the properties of each segment
tree_properties = regionprops(labels, chm_array)

# Now we will get the predictor variables to match the (soon to be loaded) training data using the function defined above. The first column will be segment IDs, the rest will be the predictor variables.

# In[24]:

predictors_chm = np.array([get_predictors(tree, chm_array, labels) for tree in tree_properties])
X = predictors_chm[:, 1:]
tree_ids = predictors_chm[:, 0]

# ## Training data
#
# We now bring in the training data file, which is a simple CSV file with no header. The first column is biomass, and the remaining columns are the same predictor variables defined above. The tree diameter and max height are defined in the NEON vegetation structure data along with the tree DBH. The field-validated values are used for training, while the others were determined from the CHM and camera images by manually delineating the tree crowns and pulling out the relevant information from the CHM.
#
# Biomass was calculated from DBH according to the formulas in Jenkins et al. (2003).
#
# If you didn't download this training dataset above, you can <a href="https://neondata.sharefile.com/share/view/cdc8242e24ad4517/fobd4959-4cf0-44ab-acc6-0695a04a1afc" target="_blank">download the training dataset CSV here</a>.

# In[25]:

#Define the file of training data
training_data_file = data_path + 'SJER_Biomass_Training.csv'

#Read in the training data from a CSV file
training_data = np.genfromtxt(training_data_file, delimiter=',')

#Grab the biomass (Y) from the first column
biomass = training_data[:, 0]

#Grab the biomass predictors from the remaining columns
biomass_predictors = training_data[:, 1:12]

# ## Random Forest classifiers
#
# We can then define the parameters of the Random Forest regressor and fit the predictor variables from the training data to the biomass estimates.
# ## Random Forest regressors
#
# We can then define the parameters of the Random Forest regressor and fit the predictor variables from the training data to the biomass estimates.

# In[26]:

#Define parameters for the Random Forest regressor
max_depth = 30

#Define regressor rules
regr_rf = RandomForestRegressor(max_depth=max_depth, random_state=2)

#Fit the biomass to the regressor variables
regr_rf.fit(biomass_predictors,biomass)

# We now apply the Random Forest model to the predictor variables to retrieve biomass estimates.

# In[27]:

#Apply the model to the predictors
estimated_biomass = regr_rf.predict(X)

# For outputting a raster, copy the labels raster to a biomass raster, then cycle through the segments and assign the biomass estimate to each individual tree segment.

# In[28]:

#Set an out raster with the same size as the labels
biomass_map = np.array(labels,dtype=float)

#Assign the appropriate biomass to the labels
biomass_map[biomass_map==0] = np.nan
for tree_id, biomass_of_tree_id in zip(tree_ids, estimated_biomass):
    biomass_map[biomass_map == tree_id] = biomass_of_tree_id

# ## Calculate biomass
# Collect some of the biomass statistics, then plot the results and save an output geotiff.

# In[29]:

#Get biomass stats for plotting
mean_biomass = np.mean(estimated_biomass)
std_biomass = np.std(estimated_biomass)
min_biomass = np.min(estimated_biomass)
sum_biomass = np.sum(estimated_biomass)

print('Sum of biomass is ',sum_biomass,' kg')

#Plot the biomass!
plt.figure(5)
plot_band_array(biomass_map,chm_array_metadata['extent'],
                'Biomass (kg)','Biomass (kg)',
                'winter',
                [min_biomass+std_biomass, mean_biomass+std_biomass*3])
plt.savefig(data_path+just_chm_file_split[0]+'_'+just_chm_file_split[1]+'_'+just_chm_file_split[2]+'_'+just_chm_file_split[3]+'_'+just_chm_file_split[4]+'_'+just_chm_file_split[5]+'_'+'Biomass.png',
            dpi=300,orientation='landscape',
            bbox_inches='tight',pad_inches=0.1)
array2raster(data_path+'biomass.tif',
             (chm_array_metadata['ext_dict']['xMin'],chm_array_metadata['ext_dict']['yMax']),
             1,-1,np.array(biomass_map,dtype=float),32611)

# In[ ]:
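# The lesson stops at prediction, but before trusting the biomass map it is worth checking how well the regressor generalizes. The cell below is a minimal sketch (not part of the original lesson) that holds out a quarter of the training data and reports the held-out R^2; it reuses `biomass_predictors`, `biomass`, and `max_depth` from above.

# In[ ]:

from sklearn.model_selection import train_test_split

#Hold out 25% of the training data for a rough accuracy estimate
X_train, X_test, y_train, y_test = train_test_split(
    biomass_predictors, biomass, test_size=0.25, random_state=2)
check_rf = RandomForestRegressor(max_depth=max_depth, random_state=2)
check_rf.fit(X_train, y_train)
print('Held-out R^2:', check_rf.score(X_test, y_test))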
agpl-3.0
antoinecarme/pyaf
setup.py
1
1126
from setuptools import setup
from setuptools import find_packages

with open("README.md", "r") as fh:
    pyaf_long_description = fh.read()

setup(name='pyaf',
      version='3.0-RC1',
      description='Python Automatic Forecasting',
      long_description=pyaf_long_description,
      long_description_content_type="text/markdown",
      author='Antoine CARME',
      author_email='antoine.carme@laposte.net',
      url='https://github.com/antoinecarme/pyaf',
      license='BSD 3-clause',
      packages=find_packages(include=['pyaf', 'pyaf.*']),
      python_requires='>=3',
      classifiers=['Development Status :: 5 - Production/Stable',
                   'Programming Language :: Python :: 3'],
      keywords='arx automatic-forecasting autoregressive benchmark cycle decomposition exogenous forecasting heroku hierarchical-forecasting horizon jupyter pandas python scikit-learn seasonal time-series transformation trend web-service',
      install_requires=[
          'scipy',
          'pandas',
          'scikit-learn',
          'matplotlib',
          'pydot',
          'dill',
          'sqlalchemy'
      ])
bsd-3-clause
saimn/astropy
astropy/visualization/wcsaxes/frame.py
8
10649
# Licensed under a 3-clause BSD style license - see LICENSE.rst import abc from collections import OrderedDict import numpy as np from matplotlib import rcParams from matplotlib.lines import Line2D, Path from matplotlib.patches import PathPatch __all__ = ['RectangularFrame1D', 'Spine', 'BaseFrame', 'RectangularFrame', 'EllipticalFrame'] class Spine: """ A single side of an axes. This does not need to be a straight line, but represents a 'side' when determining which part of the frame to put labels and ticks on. """ def __init__(self, parent_axes, transform): self.parent_axes = parent_axes self.transform = transform self.data = None self.pixel = None self.world = None @property def data(self): return self._data @data.setter def data(self, value): if value is None: self._data = None self._pixel = None self._world = None else: self._data = value self._pixel = self.parent_axes.transData.transform(self._data) with np.errstate(invalid='ignore'): self._world = self.transform.transform(self._data) self._update_normal() @property def pixel(self): return self._pixel @pixel.setter def pixel(self, value): if value is None: self._data = None self._pixel = None self._world = None else: self._data = self.parent_axes.transData.inverted().transform(self._data) self._pixel = value self._world = self.transform.transform(self._data) self._update_normal() @property def world(self): return self._world @world.setter def world(self, value): if value is None: self._data = None self._pixel = None self._world = None else: self._data = self.transform.transform(value) self._pixel = self.parent_axes.transData.transform(self._data) self._world = value self._update_normal() def _update_normal(self): # Find angle normal to border and inwards, in display coordinate dx = self.pixel[1:, 0] - self.pixel[:-1, 0] dy = self.pixel[1:, 1] - self.pixel[:-1, 1] self.normal_angle = np.degrees(np.arctan2(dx, -dy)) def _halfway_x_y_angle(self): """ Return the x, y, normal_angle values halfway along the spine """ x_disp, y_disp = self.pixel[:, 0], self.pixel[:, 1] # Get distance along the path d = np.hstack([0., np.cumsum(np.sqrt(np.diff(x_disp) ** 2 + np.diff(y_disp) ** 2))]) xcen = np.interp(d[-1] / 2., d, x_disp) ycen = np.interp(d[-1] / 2., d, y_disp) # Find segment along which the mid-point lies imin = np.searchsorted(d, d[-1] / 2.) - 1 # Find normal of the axis label facing outwards on that segment normal_angle = self.normal_angle[imin] + 180. return xcen, ycen, normal_angle class SpineXAligned(Spine): """ A single side of an axes, aligned with the X data axis. This does not need to be a straight line, but represents a 'side' when determining which part of the frame to put labels and ticks on. 
""" @property def data(self): return self._data @data.setter def data(self, value): if value is None: self._data = None self._pixel = None self._world = None else: self._data = value self._pixel = self.parent_axes.transData.transform(self._data) with np.errstate(invalid='ignore'): self._world = self.transform.transform(self._data[:,0:1]) self._update_normal() @property def pixel(self): return self._pixel @pixel.setter def pixel(self, value): if value is None: self._data = None self._pixel = None self._world = None else: self._data = self.parent_axes.transData.inverted().transform(self._data) self._pixel = value self._world = self.transform.transform(self._data[:,0:1]) self._update_normal() class BaseFrame(OrderedDict, metaclass=abc.ABCMeta): """ Base class for frames, which are collections of :class:`~astropy.visualization.wcsaxes.frame.Spine` instances. """ spine_class = Spine def __init__(self, parent_axes, transform, path=None): super().__init__() self.parent_axes = parent_axes self._transform = transform self._linewidth = rcParams['axes.linewidth'] self._color = rcParams['axes.edgecolor'] self._path = path for axis in self.spine_names: self[axis] = self.spine_class(parent_axes, transform) @property def origin(self): ymin, ymax = self.parent_axes.get_ylim() return 'lower' if ymin < ymax else 'upper' @property def transform(self): return self._transform @transform.setter def transform(self, value): self._transform = value for axis in self: self[axis].transform = value def _update_patch_path(self): self.update_spines() x, y = [], [] for axis in self: x.append(self[axis].data[:, 0]) y.append(self[axis].data[:, 1]) vertices = np.vstack([np.hstack(x), np.hstack(y)]).transpose() if self._path is None: self._path = Path(vertices) else: self._path.vertices = vertices @property def patch(self): self._update_patch_path() return PathPatch(self._path, transform=self.parent_axes.transData, facecolor=rcParams['axes.facecolor'], edgecolor='white') def draw(self, renderer): for axis in self: x, y = self[axis].pixel[:, 0], self[axis].pixel[:, 1] line = Line2D(x, y, linewidth=self._linewidth, color=self._color, zorder=1000) line.draw(renderer) def sample(self, n_samples): self.update_spines() spines = OrderedDict() for axis in self: data = self[axis].data p = np.linspace(0., 1., data.shape[0]) p_new = np.linspace(0., 1., n_samples) spines[axis] = self.spine_class(self.parent_axes, self.transform) spines[axis].data = np.array([np.interp(p_new, p, d) for d in data.T]).transpose() return spines def set_color(self, color): """ Sets the color of the frame. Parameters ---------- color : str The color of the frame. """ self._color = color def get_color(self): return self._color def set_linewidth(self, linewidth): """ Sets the linewidth of the frame. Parameters ---------- linewidth : float The linewidth of the frame in points. """ self._linewidth = linewidth def get_linewidth(self): return self._linewidth @abc.abstractmethod def update_spines(self): raise NotImplementedError("") class RectangularFrame1D(BaseFrame): """ A classic rectangular frame. 
""" spine_names = 'bt' spine_class = SpineXAligned def update_spines(self): xmin, xmax = self.parent_axes.get_xlim() ymin, ymax = self.parent_axes.get_ylim() self['b'].data = np.array(([xmin, ymin], [xmax, ymin])) self['t'].data = np.array(([xmax, ymax], [xmin, ymax])) def _update_patch_path(self): self.update_spines() xmin, xmax = self.parent_axes.get_xlim() ymin, ymax = self.parent_axes.get_ylim() x = [xmin, xmax, xmax, xmin, xmin] y = [ymin, ymin, ymax, ymax, ymin] vertices = np.vstack([np.hstack(x), np.hstack(y)]).transpose() if self._path is None: self._path = Path(vertices) else: self._path.vertices = vertices def draw(self, renderer): xmin, xmax = self.parent_axes.get_xlim() ymin, ymax = self.parent_axes.get_ylim() x = [xmin, xmax, xmax, xmin, xmin] y = [ymin, ymin, ymax, ymax, ymin] line = Line2D(x, y, linewidth=self._linewidth, color=self._color, zorder=1000, transform=self.parent_axes.transData) line.draw(renderer) class RectangularFrame(BaseFrame): """ A classic rectangular frame. """ spine_names = 'brtl' def update_spines(self): xmin, xmax = self.parent_axes.get_xlim() ymin, ymax = self.parent_axes.get_ylim() self['b'].data = np.array(([xmin, ymin], [xmax, ymin])) self['r'].data = np.array(([xmax, ymin], [xmax, ymax])) self['t'].data = np.array(([xmax, ymax], [xmin, ymax])) self['l'].data = np.array(([xmin, ymax], [xmin, ymin])) class EllipticalFrame(BaseFrame): """ An elliptical frame. """ spine_names = 'chv' def update_spines(self): xmin, xmax = self.parent_axes.get_xlim() ymin, ymax = self.parent_axes.get_ylim() xmid = 0.5 * (xmax + xmin) ymid = 0.5 * (ymax + ymin) dx = xmid - xmin dy = ymid - ymin theta = np.linspace(0., 2 * np.pi, 1000) self['c'].data = np.array([xmid + dx * np.cos(theta), ymid + dy * np.sin(theta)]).transpose() self['h'].data = np.array([np.linspace(xmin, xmax, 1000), np.repeat(ymid, 1000)]).transpose() self['v'].data = np.array([np.repeat(xmid, 1000), np.linspace(ymin, ymax, 1000)]).transpose() def _update_patch_path(self): """Override path patch to include only the outer ellipse, not the major and minor axes in the middle.""" self.update_spines() vertices = self['c'].data if self._path is None: self._path = Path(vertices) else: self._path.vertices = vertices def draw(self, renderer): """Override to draw only the outer ellipse, not the major and minor axes in the middle. FIXME: we may want to add a general method to give the user control over which spines are drawn.""" axis = 'c' x, y = self[axis].pixel[:, 0], self[axis].pixel[:, 1] line = Line2D(x, y, linewidth=self._linewidth, color=self._color, zorder=1000) line.draw(renderer)
bsd-3-clause
mne-tools/mne-tools.github.io
0.11/_downloads/plot_evoked_topomap.py
18
1498
""" ======================================== Plotting topographic maps of evoked data ======================================== Load evoked data and plot topomaps for selected time points. """ # Authors: Christian Brodbeck <christianbrodbeck@nyu.edu> # Tal Linzen <linzen@nyu.edu> # Denis A. Engeman <denis.engemann@gmail.com> # # License: BSD (3-clause) import numpy as np import matplotlib.pyplot as plt from mne.datasets import sample from mne import read_evokeds print(__doc__) path = sample.data_path() fname = path + '/MEG/sample/sample_audvis-ave.fif' # load evoked and subtract baseline condition = 'Left Auditory' evoked = read_evokeds(fname, condition=condition, baseline=(None, 0)) # set time instants in seconds (from 50 to 150ms in a step of 10ms) times = np.arange(0.05, 0.15, 0.01) # If times is set to None only 10 regularly spaced topographies will be shown # plot magnetometer data as topomaps evoked.plot_topomap(times, ch_type='mag') # compute a 50 ms bin to stabilize topographies evoked.plot_topomap(times, ch_type='mag', average=0.05) # plot gradiometer data (plots the RMS for each pair of gradiometers) evoked.plot_topomap(times, ch_type='grad') # plot magnetometer data as topomap at 1 time point : 100 ms # and add channel labels and title evoked.plot_topomap(0.1, ch_type='mag', show_names=True, colorbar=False, size=6, res=128, title='Auditory response') plt.subplots_adjust(left=0.01, right=0.99, bottom=0.01, top=0.88)
bsd-3-clause
nikitasingh981/scikit-learn
examples/semi_supervised/plot_label_propagation_versus_svm_iris.py
50
2378
""" ===================================================================== Decision boundary of label propagation versus SVM on the Iris dataset ===================================================================== Comparison for decision boundary generated on iris dataset between Label Propagation and SVM. This demonstrates Label Propagation learning a good boundary even with a small amount of labeled data. """ print(__doc__) # Authors: Clay Woolam <clay@woolam.org> # License: BSD import numpy as np import matplotlib.pyplot as plt from sklearn import datasets from sklearn import svm from sklearn.semi_supervised import label_propagation rng = np.random.RandomState(0) iris = datasets.load_iris() X = iris.data[:, :2] y = iris.target # step size in the mesh h = .02 y_30 = np.copy(y) y_30[rng.rand(len(y)) < 0.3] = -1 y_50 = np.copy(y) y_50[rng.rand(len(y)) < 0.5] = -1 # we create an instance of SVM and fit out data. We do not scale our # data since we want to plot the support vectors ls30 = (label_propagation.LabelSpreading().fit(X, y_30), y_30) ls50 = (label_propagation.LabelSpreading().fit(X, y_50), y_50) ls100 = (label_propagation.LabelSpreading().fit(X, y), y) rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y) # create a mesh to plot in x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # title for the plots titles = ['Label Spreading 30% data', 'Label Spreading 50% data', 'Label Spreading 100% data', 'SVC with rbf kernel'] color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)} for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)): # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, x_max]x[y_min, y_max]. plt.subplot(2, 2, i + 1) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.contourf(xx, yy, Z, cmap=plt.cm.Paired) plt.axis('off') # Plot also the training points colors = [color_map[y] for y in y_train] plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired) plt.title(titles[i]) plt.text(.90, 0, "Unlabeled points are colored white") plt.show()
bsd-3-clause
DongjunLee/kino-bot
kino/slack/plot.py
1
2684
from matplotlib import pyplot as plt
import matplotlib.dates as dt
import seaborn

seaborn.set()

import datetime


class Plot(object):
    def __init__(self):
        pass

    @staticmethod
    def make_bar(
        x,
        y,
        f_name,
        title=None,
        legend=None,
        x_label=None,
        y_label=None,
        x_ticks=None,
        y_ticks=None,
    ):
        fig = plt.figure()

        if title is not None:
            plt.title(title, fontsize=16)
        if x_label is not None:
            plt.xlabel(x_label)
        if y_label is not None:
            plt.ylabel(y_label)
        if x_ticks is not None:
            plt.xticks(x, x_ticks)
        if y_ticks is not None:
            plt.yticks(y_ticks)

        plt.bar(x, y, align="center")

        if legend is not None:
            plt.legend(legend)

        plt.savefig(f_name)
        plt.close(fig)

    @staticmethod
    def make_line(
        x,
        y,
        f_name,
        title=None,
        legend=None,
        x_label=None,
        y_label=None,
        x_ticks=None,
        y_ticks=None,
    ):
        fig = plt.figure()

        if title is not None:
            plt.title(title, fontsize=16)
        if x_label is not None:
            plt.xlabel(x_label)
        if y_label is not None:
            plt.ylabel(y_label)
        if x_ticks is not None:
            plt.xticks(x, x_ticks)
        if y_ticks is not None:
            plt.yticks(y_ticks)

        if isinstance(y[0], list):
            for data in y:
                plt.plot(x, data)
        else:
            plt.plot(x, y)

        if legend is not None:
            plt.legend(legend)

        plt.savefig(f_name)
        plt.close(fig)

    @staticmethod
    def make_efficiency_date(
        total_data,
        avg_data,
        f_name,
        title=None,
        x_label=None,
        y_label=None,
        x_ticks=None,
        y_ticks=None,
    ):
        fig = plt.figure()

        if title is not None:
            plt.title(title, fontsize=16)
        if x_label is not None:
            plt.xlabel(x_label)
        if y_label is not None:
            plt.ylabel(y_label)

        v_date = []
        v_val = []

        for data in total_data:
            dates = dt.date2num(datetime.datetime.strptime(data[0], "%H:%M"))
            to_int = round(float(data[1]))
            plt.plot_date(dates, data[1], color=plt.cm.brg(to_int))

        for data in avg_data:
            dates = dt.date2num(datetime.datetime.strptime(data[0], "%H:%M"))
            v_date.append(dates)
            v_val.append(data[1])

        plt.plot_date(v_date, v_val, "^y-", label="Average")
        plt.legend()
        plt.savefig(f_name)
        plt.close(fig)
mit
sylvchev/mdla
examples/example_benchmark_performance.py
1
6309
"""Benchmarking dictionary learning algorithms on random dataset""" from multiprocessing import cpu_count from time import time import matplotlib.pyplot as plt import numpy as np from numpy import array from numpy.linalg import norm from numpy.random import permutation, rand, randint, randn from mdla import MiniBatchMultivariateDictLearning, MultivariateDictLearning # TODO: # investigate perf break from pydico def benchmarking_plot(figname, pst, plot_sep, minibatchRange, mprocessRange): _ = plt.figure(figsize=(15, 10)) bar_width = 0.35 _ = plt.bar( np.array([0]), pst[0], bar_width, color="b", label="Online, no multiprocessing (baseline)", ) index = [0] for i in range(1, plot_sep[1]): if i == 1: _ = plt.bar( np.array([i + 1]), pst[i], bar_width, color="r", label="Online with minibatch", ) else: _ = plt.bar(np.array([i + 1]), pst[i], bar_width, color="r") index.append(i + 1) for _ in range(plot_sep[1], plot_sep[2]): if i == plot_sep[1]: _ = plt.bar( np.array([i + 2]), pst[i], bar_width, label="Batch with multiprocessing", color="magenta", ) else: _ = plt.bar(np.array([i + 2]), pst[i], bar_width, color="magenta") index.append(i + 2) plt.ylabel("Time per iteration (s)") plt.title("Processing time for online and batch processing") tick = [""] tick.extend(map(str, minibatchRange)) tick.extend(map(str, mprocessRange)) plt.xticks(index, tuple(tick)) plt.legend() plt.savefig(figname + ".png") def _generate_testbed( kernel_init_len, n_nonzero_coefs, n_kernels, n_samples=10, n_features=5, n_dims=3, snr=1000, ): """Generate a dataset from a random dictionary Generate a random dictionary and a dataset, where samples are combination of n_nonzero_coefs dictionary atoms. Noise is added, based on SNR value, with 1000 indicated that no noise should be added. Return the dictionary, the dataset and an array indicated how atoms are combined to obtain each sample """ print("Dictionary sampled from uniform distribution") dico = [rand(kernel_init_len, n_dims) for i in range(n_kernels)] for i in range(len(dico)): dico[i] /= norm(dico[i], "fro") signals = list() decomposition = list() for _ in range(n_samples): s = np.zeros(shape=(n_features, n_dims)) d = np.zeros(shape=(n_nonzero_coefs, 3)) rk = permutation(range(n_kernels)) for j in range(n_nonzero_coefs): k_idx = rk[j] k_amplitude = 3.0 * rand() + 1.0 k_offset = randint(n_features - kernel_init_len + 1) s[k_offset : k_offset + kernel_init_len, :] += k_amplitude * dico[k_idx] d[j, :] = array([k_amplitude, k_offset, k_idx]) decomposition.append(d) noise = randn(n_features, n_dims) if snr == 1000: alpha = 0 else: ps = norm(s, "fro") pn = norm(noise, "fro") alpha = ps / (pn * 10 ** (snr / 20.0)) signals.append(s + alpha * noise) signals = np.array(signals) return dico, signals, decomposition rng_global = np.random.RandomState(1) n_samples, n_dims = 1500, 1 n_features = kernel_init_len = 5 n_nonzero_coefs = 3 n_kernels, max_iter, learning_rate = 50, 10, 1.5 n_jobs, batch_size = -1, None iter_time, plot_separator, it_separator = list(), list(), 0 generating_dict, X, code = _generate_testbed( kernel_init_len, n_nonzero_coefs, n_kernels, n_samples, n_features, n_dims ) # Online without mini-batch print( "Processing ", max_iter, "iterations in online mode, " "without multiprocessing:", end="", ) batch_size, n_jobs = n_samples, 1 learned_dict = MiniBatchMultivariateDictLearning( n_kernels=n_kernels, batch_size=batch_size, n_iter=max_iter, n_nonzero_coefs=n_nonzero_coefs, n_jobs=n_jobs, learning_rate=learning_rate, kernel_init_len=kernel_init_len, verbose=1, 
    dict_init=None,
    random_state=rng_global,
)
ts = time()
learned_dict = learned_dict.fit(X)
iter_time.append((time() - ts) / max_iter)
it_separator += 1
plot_separator.append(it_separator)

# Online with mini-batch
minibatch_range = [cpu_count()]
minibatch_range.extend([cpu_count() * i for i in range(3, 10, 2)])
n_jobs = -1
for mb in minibatch_range:
    print(
        "\nProcessing ",
        max_iter,
        "iterations in online mode, with ",
        "minibatch size",
        mb,
        "and",
        cpu_count(),
        "processes:",
        end="",
    )
    batch_size = mb
    learned_dict = MiniBatchMultivariateDictLearning(
        n_kernels=n_kernels,
        batch_size=batch_size,
        n_iter=max_iter,
        n_nonzero_coefs=n_nonzero_coefs,
        n_jobs=n_jobs,
        learning_rate=learning_rate,
        kernel_init_len=kernel_init_len,
        verbose=1,
        dict_init=None,
        random_state=rng_global,
    )
    ts = time()
    learned_dict = learned_dict.fit(X)
    iter_time.append((time() - ts) / max_iter)
    it_separator += 1
    plot_separator.append(it_separator)

# Batch learning
mp_range = range(1, cpu_count() + 1)
for p in mp_range:
    print(
        "\nProcessing ",
        max_iter,
        "iterations in batch mode, with",
        p,
        "processes:",
        end="",
    )
    n_jobs = p
    learned_dict = MultivariateDictLearning(
        n_kernels=n_kernels,
        max_iter=max_iter,
        verbose=1,
        n_nonzero_coefs=n_nonzero_coefs,
        n_jobs=n_jobs,
        learning_rate=learning_rate,
        kernel_init_len=kernel_init_len,
        dict_init=None,
        random_state=rng_global,
    )
    ts = time()
    learned_dict = learned_dict.fit(X)
    iter_time.append((time() - ts) / max_iter)
    it_separator += 1
    plot_separator.append(it_separator)

print("Done benchmarking")
figname = "minibatch-performance"
print("Plotting results in", figname)
benchmarking_plot(figname, iter_time, plot_separator, minibatch_range, mp_range)

print("Exiting.")
gpl-3.0
david-ragazzi/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/axes.py
69
259904
from __future__ import division, generators import math, sys, warnings, datetime, new import numpy as np from numpy import ma import matplotlib rcParams = matplotlib.rcParams import matplotlib.artist as martist import matplotlib.axis as maxis import matplotlib.cbook as cbook import matplotlib.collections as mcoll import matplotlib.colors as mcolors import matplotlib.contour as mcontour import matplotlib.dates as mdates import matplotlib.font_manager as font_manager import matplotlib.image as mimage import matplotlib.legend as mlegend import matplotlib.lines as mlines import matplotlib.mlab as mlab import matplotlib.patches as mpatches import matplotlib.quiver as mquiver import matplotlib.scale as mscale import matplotlib.table as mtable import matplotlib.text as mtext import matplotlib.ticker as mticker import matplotlib.transforms as mtransforms iterable = cbook.iterable is_string_like = cbook.is_string_like def _process_plot_format(fmt): """ Process a matlab(TM) style color/line style format string. Return a (*linestyle*, *color*) tuple as a result of the processing. Default values are ('-', 'b'). Example format strings include: * 'ko': black circles * '.b': blue dots * 'r--': red dashed lines .. seealso:: :func:`~matplotlib.Line2D.lineStyles` and :func:`~matplotlib.pyplot.colors`: for all possible styles and color format string. """ linestyle = None marker = None color = None # Is fmt just a colorspec? try: color = mcolors.colorConverter.to_rgb(fmt) return linestyle, marker, color # Yes. except ValueError: pass # No, not just a color. # handle the multi char special cases and strip them from the # string if fmt.find('--')>=0: linestyle = '--' fmt = fmt.replace('--', '') if fmt.find('-.')>=0: linestyle = '-.' fmt = fmt.replace('-.', '') if fmt.find(' ')>=0: linestyle = 'None' fmt = fmt.replace(' ', '') chars = [c for c in fmt] for c in chars: if c in mlines.lineStyles: if linestyle is not None: raise ValueError( 'Illegal format string "%s"; two linestyle symbols' % fmt) linestyle = c elif c in mlines.lineMarkers: if marker is not None: raise ValueError( 'Illegal format string "%s"; two marker symbols' % fmt) marker = c elif c in mcolors.colorConverter.colors: if color is not None: raise ValueError( 'Illegal format string "%s"; two color symbols' % fmt) color = c else: raise ValueError( 'Unrecognized character %c in format string' % c) if linestyle is None and marker is None: linestyle = rcParams['lines.linestyle'] if linestyle is None: linestyle = 'None' if marker is None: marker = 'None' return linestyle, marker, color def set_default_color_cycle(clist): """ Change the default cycle of colors that will be used by the plot command. This must be called before creating the :class:`Axes` to which it will apply; it will apply to all future axes. 
*clist* is a sequence of mpl color specifiers """ _process_plot_var_args.defaultColors = clist[:] rcParams['lines.color'] = clist[0] class _process_plot_var_args: """ Process variable length arguments to the plot command, so that plot commands like the following are supported:: plot(t, s) plot(t1, s1, t2, s2) plot(t1, s1, 'ko', t2, s2) plot(t1, s1, 'ko', t2, s2, 'r--', t3, e3) an arbitrary number of *x*, *y*, *fmt* are allowed """ defaultColors = ['b','g','r','c','m','y','k'] def __init__(self, axes, command='plot'): self.axes = axes self.command = command self._clear_color_cycle() def _clear_color_cycle(self): self.colors = _process_plot_var_args.defaultColors[:] # if the default line color is a color format string, move it up # in the que try: ind = self.colors.index(rcParams['lines.color']) except ValueError: self.firstColor = rcParams['lines.color'] else: self.colors[0], self.colors[ind] = self.colors[ind], self.colors[0] self.firstColor = self.colors[0] self.Ncolors = len(self.colors) self.count = 0 def set_color_cycle(self, clist): self.colors = clist[:] self.firstColor = self.colors[0] self.Ncolors = len(self.colors) self.count = 0 def _get_next_cycle_color(self): if self.count==0: color = self.firstColor else: color = self.colors[int(self.count % self.Ncolors)] self.count += 1 return color def __call__(self, *args, **kwargs): if self.axes.xaxis is not None and self.axes.yaxis is not None: xunits = kwargs.pop( 'xunits', self.axes.xaxis.units) yunits = kwargs.pop( 'yunits', self.axes.yaxis.units) if xunits!=self.axes.xaxis.units: self.axes.xaxis.set_units(xunits) if yunits!=self.axes.yaxis.units: self.axes.yaxis.set_units(yunits) ret = self._grab_next_args(*args, **kwargs) return ret def set_lineprops(self, line, **kwargs): assert self.command == 'plot', 'set_lineprops only works with "plot"' for key, val in kwargs.items(): funcName = "set_%s"%key if not hasattr(line,funcName): raise TypeError, 'There is no line property "%s"'%key func = getattr(line,funcName) func(val) def set_patchprops(self, fill_poly, **kwargs): assert self.command == 'fill', 'set_patchprops only works with "fill"' for key, val in kwargs.items(): funcName = "set_%s"%key if not hasattr(fill_poly,funcName): raise TypeError, 'There is no patch property "%s"'%key func = getattr(fill_poly,funcName) func(val) def _xy_from_y(self, y): if self.axes.yaxis is not None: b = self.axes.yaxis.update_units(y) if b: return np.arange(len(y)), y, False if not ma.isMaskedArray(y): y = np.asarray(y) if len(y.shape) == 1: y = y[:,np.newaxis] nr, nc = y.shape x = np.arange(nr) if len(x.shape) == 1: x = x[:,np.newaxis] return x,y, True def _xy_from_xy(self, x, y): if self.axes.xaxis is not None and self.axes.yaxis is not None: bx = self.axes.xaxis.update_units(x) by = self.axes.yaxis.update_units(y) # right now multicol is not supported if either x or y are # unit enabled but this can be fixed.. 
if bx or by: return x, y, False x = ma.asarray(x) y = ma.asarray(y) if len(x.shape) == 1: x = x[:,np.newaxis] if len(y.shape) == 1: y = y[:,np.newaxis] nrx, ncx = x.shape nry, ncy = y.shape assert nrx == nry, 'Dimensions of x and y are incompatible' if ncx == ncy: return x, y, True if ncx == 1: x = np.repeat(x, ncy, axis=1) if ncy == 1: y = np.repeat(y, ncx, axis=1) assert x.shape == y.shape, 'Dimensions of x and y are incompatible' return x, y, True def _plot_1_arg(self, y, **kwargs): assert self.command == 'plot', 'fill needs at least 2 arguments' ret = [] x, y, multicol = self._xy_from_y(y) if multicol: for j in xrange(y.shape[1]): color = self._get_next_cycle_color() seg = mlines.Line2D(x, y[:,j], color = color, axes=self.axes, ) self.set_lineprops(seg, **kwargs) ret.append(seg) else: color = self._get_next_cycle_color() seg = mlines.Line2D(x, y, color = color, axes=self.axes, ) self.set_lineprops(seg, **kwargs) ret.append(seg) return ret def _plot_2_args(self, tup2, **kwargs): ret = [] if is_string_like(tup2[1]): assert self.command == 'plot', ('fill needs at least 2 non-string ' 'arguments') y, fmt = tup2 x, y, multicol = self._xy_from_y(y) linestyle, marker, color = _process_plot_format(fmt) def makeline(x, y): _color = color if _color is None: _color = self._get_next_cycle_color() seg = mlines.Line2D(x, y, color=_color, linestyle=linestyle, marker=marker, axes=self.axes, ) self.set_lineprops(seg, **kwargs) ret.append(seg) if multicol: for j in xrange(y.shape[1]): makeline(x[:,j], y[:,j]) else: makeline(x, y) return ret else: x, y = tup2 x, y, multicol = self._xy_from_xy(x, y) def makeline(x, y): color = self._get_next_cycle_color() seg = mlines.Line2D(x, y, color=color, axes=self.axes, ) self.set_lineprops(seg, **kwargs) ret.append(seg) def makefill(x, y): x = self.axes.convert_xunits(x) y = self.axes.convert_yunits(y) facecolor = self._get_next_cycle_color() seg = mpatches.Polygon(np.hstack( (x[:,np.newaxis],y[:,np.newaxis])), facecolor = facecolor, fill=True, closed=closed ) self.set_patchprops(seg, **kwargs) ret.append(seg) if self.command == 'plot': func = makeline else: closed = kwargs.get('closed', True) func = makefill if multicol: for j in xrange(y.shape[1]): func(x[:,j], y[:,j]) else: func(x, y) return ret def _plot_3_args(self, tup3, **kwargs): ret = [] x, y, fmt = tup3 x, y, multicol = self._xy_from_xy(x, y) linestyle, marker, color = _process_plot_format(fmt) def makeline(x, y): _color = color if _color is None: _color = self._get_next_cycle_color() seg = mlines.Line2D(x, y, color=_color, linestyle=linestyle, marker=marker, axes=self.axes, ) self.set_lineprops(seg, **kwargs) ret.append(seg) def makefill(x, y): facecolor = color x = self.axes.convert_xunits(x) y = self.axes.convert_yunits(y) seg = mpatches.Polygon(np.hstack( (x[:,np.newaxis],y[:,np.newaxis])), facecolor = facecolor, fill=True, closed=closed ) self.set_patchprops(seg, **kwargs) ret.append(seg) if self.command == 'plot': func = makeline else: closed = kwargs.get('closed', True) func = makefill if multicol: for j in xrange(y.shape[1]): func(x[:,j], y[:,j]) else: func(x, y) return ret def _grab_next_args(self, *args, **kwargs): remaining = args while 1: if len(remaining)==0: return if len(remaining)==1: for seg in self._plot_1_arg(remaining[0], **kwargs): yield seg remaining = [] continue if len(remaining)==2: for seg in self._plot_2_args(remaining, **kwargs): yield seg remaining = [] continue if len(remaining)==3: if not is_string_like(remaining[2]): raise ValueError, 'third arg must be a format string' for 
seg in self._plot_3_args(remaining, **kwargs): yield seg remaining=[] continue if is_string_like(remaining[2]): for seg in self._plot_3_args(remaining[:3], **kwargs): yield seg remaining=remaining[3:] else: for seg in self._plot_2_args(remaining[:2], **kwargs): yield seg remaining=remaining[2:] class Axes(martist.Artist): """ The :class:`Axes` contains most of the figure elements: :class:`~matplotlib.axis.Axis`, :class:`~matplotlib.axis.Tick`, :class:`~matplotlib.lines.Line2D`, :class:`~matplotlib.text.Text`, :class:`~matplotlib.patches.Polygon`, etc., and sets the coordinate system. The :class:`Axes` instance supports callbacks through a callbacks attribute which is a :class:`~matplotlib.cbook.CallbackRegistry` instance. The events you can connect to are 'xlim_changed' and 'ylim_changed' and the callback will be called with func(*ax*) where *ax* is the :class:`Axes` instance. """ name = "rectilinear" _shared_x_axes = cbook.Grouper() _shared_y_axes = cbook.Grouper() def __str__(self): return "Axes(%g,%g;%gx%g)" % tuple(self._position.bounds) def __init__(self, fig, rect, axisbg = None, # defaults to rc axes.facecolor frameon = True, sharex=None, # use Axes instance's xaxis info sharey=None, # use Axes instance's yaxis info label='', **kwargs ): """ Build an :class:`Axes` instance in :class:`~matplotlib.figure.Figure` *fig* with *rect=[left, bottom, width, height]* in :class:`~matplotlib.figure.Figure` coordinates Optional keyword arguments: ================ ========================================= Keyword Description ================ ========================================= *adjustable* [ 'box' | 'datalim' ] *alpha* float: the alpha transparency *anchor* [ 'C', 'SW', 'S', 'SE', 'E', 'NE', 'N', 'NW', 'W' ] *aspect* [ 'auto' | 'equal' | aspect_ratio ] *autoscale_on* [ *True* | *False* ] whether or not to autoscale the *viewlim* *axis_bgcolor* any matplotlib color, see :func:`~matplotlib.pyplot.colors` *axisbelow* draw the grids and ticks below the other artists *cursor_props* a (*float*, *color*) tuple *figure* a :class:`~matplotlib.figure.Figure` instance *frame_on* a boolean - draw the axes frame *label* the axes label *navigate* [ *True* | *False* ] *navigate_mode* [ 'PAN' | 'ZOOM' | None ] the navigation toolbar button status *position* [left, bottom, width, height] in class:`~matplotlib.figure.Figure` coords *sharex* an class:`~matplotlib.axes.Axes` instance to share the x-axis with *sharey* an class:`~matplotlib.axes.Axes` instance to share the y-axis with *title* the title string *visible* [ *True* | *False* ] whether the axes is visible *xlabel* the xlabel *xlim* (*xmin*, *xmax*) view limits *xscale* [%(scale)s] *xticklabels* sequence of strings *xticks* sequence of floats *ylabel* the ylabel strings *ylim* (*ymin*, *ymax*) view limits *yscale* [%(scale)s] *yticklabels* sequence of strings *yticks* sequence of floats ================ ========================================= """ % {'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()])} martist.Artist.__init__(self) if isinstance(rect, mtransforms.Bbox): self._position = rect else: self._position = mtransforms.Bbox.from_bounds(*rect) self._originalPosition = self._position.frozen() self.set_axes(self) self.set_aspect('auto') self._adjustable = 'box' self.set_anchor('C') self._sharex = sharex self._sharey = sharey if sharex is not None: self._shared_x_axes.join(self, sharex) if sharex._adjustable == 'box': sharex._adjustable = 'datalim' #warnings.warn( # 'shared axes: "adjustable" is being changed to "datalim"') 
self._adjustable = 'datalim' if sharey is not None: self._shared_y_axes.join(self, sharey) if sharey._adjustable == 'box': sharey._adjustable = 'datalim' #warnings.warn( # 'shared axes: "adjustable" is being changed to "datalim"') self._adjustable = 'datalim' self.set_label(label) self.set_figure(fig) # this call may differ for non-sep axes, eg polar self._init_axis() if axisbg is None: axisbg = rcParams['axes.facecolor'] self._axisbg = axisbg self._frameon = frameon self._axisbelow = rcParams['axes.axisbelow'] self._hold = rcParams['axes.hold'] self._connected = {} # a dict from events to (id, func) self.cla() # funcs used to format x and y - fall back on major formatters self.fmt_xdata = None self.fmt_ydata = None self.set_cursor_props((1,'k')) # set the cursor properties for axes self._cachedRenderer = None self.set_navigate(True) self.set_navigate_mode(None) if len(kwargs): martist.setp(self, **kwargs) if self.xaxis is not None: self._xcid = self.xaxis.callbacks.connect('units finalize', self.relim) if self.yaxis is not None: self._ycid = self.yaxis.callbacks.connect('units finalize', self.relim) def get_window_extent(self, *args, **kwargs): ''' get the axes bounding box in display space; *args* and *kwargs* are empty ''' return self.bbox def _init_axis(self): "move this out of __init__ because non-separable axes don't use it" self.xaxis = maxis.XAxis(self) self.yaxis = maxis.YAxis(self) self._update_transScale() def set_figure(self, fig): """ Set the class:`~matplotlib.axes.Axes` figure accepts a class:`~matplotlib.figure.Figure` instance """ martist.Artist.set_figure(self, fig) self.bbox = mtransforms.TransformedBbox(self._position, fig.transFigure) #these will be updated later as data is added self.dataLim = mtransforms.Bbox.unit() self.viewLim = mtransforms.Bbox.unit() self.transScale = mtransforms.TransformWrapper( mtransforms.IdentityTransform()) self._set_lim_and_transforms() def _set_lim_and_transforms(self): """ set the *dataLim* and *viewLim* :class:`~matplotlib.transforms.Bbox` attributes and the *transScale*, *transData*, *transLimits* and *transAxes* transformations. """ self.transAxes = mtransforms.BboxTransformTo(self.bbox) # Transforms the x and y axis separately by a scale factor # It is assumed that this part will have non-linear components self.transScale = mtransforms.TransformWrapper( mtransforms.IdentityTransform()) # An affine transformation on the data, generally to limit the # range of the axes self.transLimits = mtransforms.BboxTransformFrom( mtransforms.TransformedBbox(self.viewLim, self.transScale)) # The parentheses are important for efficiency here -- they # group the last two (which are usually affines) separately # from the first (which, with log-scaling can be non-affine). self.transData = self.transScale + (self.transLimits + self.transAxes) self._xaxis_transform = mtransforms.blended_transform_factory( self.axes.transData, self.axes.transAxes) self._yaxis_transform = mtransforms.blended_transform_factory( self.axes.transAxes, self.axes.transData) def get_xaxis_transform(self): """ Get the transformation used for drawing x-axis labels, ticks and gridlines. The x-direction is in data coordinates and the y-direction is in axis coordinates. .. note:: This transformation is primarily used by the :class:`~matplotlib.axis.Axis` class, and is meant to be overridden by new kinds of projections that may need to place axis elements in different locations. 
""" return self._xaxis_transform def get_xaxis_text1_transform(self, pad_points): """ Get the transformation used for drawing x-axis labels, which will add the given amount of padding (in points) between the axes and the label. The x-direction is in data coordinates and the y-direction is in axis coordinates. Returns a 3-tuple of the form:: (transform, valign, halign) where *valign* and *halign* are requested alignments for the text. .. note:: This transformation is primarily used by the :class:`~matplotlib.axis.Axis` class, and is meant to be overridden by new kinds of projections that may need to place axis elements in different locations. """ return (self._xaxis_transform + mtransforms.ScaledTranslation(0, -1 * pad_points / 72.0, self.figure.dpi_scale_trans), "top", "center") def get_xaxis_text2_transform(self, pad_points): """ Get the transformation used for drawing the secondary x-axis labels, which will add the given amount of padding (in points) between the axes and the label. The x-direction is in data coordinates and the y-direction is in axis coordinates. Returns a 3-tuple of the form:: (transform, valign, halign) where *valign* and *halign* are requested alignments for the text. .. note:: This transformation is primarily used by the :class:`~matplotlib.axis.Axis` class, and is meant to be overridden by new kinds of projections that may need to place axis elements in different locations. """ return (self._xaxis_transform + mtransforms.ScaledTranslation(0, pad_points / 72.0, self.figure.dpi_scale_trans), "bottom", "center") def get_yaxis_transform(self): """ Get the transformation used for drawing y-axis labels, ticks and gridlines. The x-direction is in axis coordinates and the y-direction is in data coordinates. .. note:: This transformation is primarily used by the :class:`~matplotlib.axis.Axis` class, and is meant to be overridden by new kinds of projections that may need to place axis elements in different locations. """ return self._yaxis_transform def get_yaxis_text1_transform(self, pad_points): """ Get the transformation used for drawing y-axis labels, which will add the given amount of padding (in points) between the axes and the label. The x-direction is in axis coordinates and the y-direction is in data coordinates. Returns a 3-tuple of the form:: (transform, valign, halign) where *valign* and *halign* are requested alignments for the text. .. note:: This transformation is primarily used by the :class:`~matplotlib.axis.Axis` class, and is meant to be overridden by new kinds of projections that may need to place axis elements in different locations. """ return (self._yaxis_transform + mtransforms.ScaledTranslation(-1 * pad_points / 72.0, 0, self.figure.dpi_scale_trans), "center", "right") def get_yaxis_text2_transform(self, pad_points): """ Get the transformation used for drawing the secondary y-axis labels, which will add the given amount of padding (in points) between the axes and the label. The x-direction is in axis coordinates and the y-direction is in data coordinates. Returns a 3-tuple of the form:: (transform, valign, halign) where *valign* and *halign* are requested alignments for the text. .. note:: This transformation is primarily used by the :class:`~matplotlib.axis.Axis` class, and is meant to be overridden by new kinds of projections that may need to place axis elements in different locations. 
""" return (self._yaxis_transform + mtransforms.ScaledTranslation(pad_points / 72.0, 0, self.figure.dpi_scale_trans), "center", "left") def _update_transScale(self): self.transScale.set( mtransforms.blended_transform_factory( self.xaxis.get_transform(), self.yaxis.get_transform())) if hasattr(self, "lines"): for line in self.lines: line._transformed_path.invalidate() def get_position(self, original=False): 'Return the a copy of the axes rectangle as a Bbox' if original: return self._originalPosition.frozen() else: return self._position.frozen() def set_position(self, pos, which='both'): """ Set the axes position with:: pos = [left, bottom, width, height] in relative 0,1 coords, or *pos* can be a :class:`~matplotlib.transforms.Bbox` There are two position variables: one which is ultimately used, but which may be modified by :meth:`apply_aspect`, and a second which is the starting point for :meth:`apply_aspect`. Optional keyword arguments: *which* ========== ==================== value description ========== ==================== 'active' to change the first 'original' to change the second 'both' to change both ========== ==================== """ if not isinstance(pos, mtransforms.BboxBase): pos = mtransforms.Bbox.from_bounds(*pos) if which in ('both', 'active'): self._position.set(pos) if which in ('both', 'original'): self._originalPosition.set(pos) def reset_position(self): 'Make the original position the active position' pos = self.get_position(original=True) self.set_position(pos, which='active') def _set_artist_props(self, a): 'set the boilerplate props for artists added to axes' a.set_figure(self.figure) if not a.is_transform_set(): a.set_transform(self.transData) a.set_axes(self) def _gen_axes_patch(self): """ Returns the patch used to draw the background of the axes. It is also used as the clipping path for any data elements on the axes. In the standard axes, this is a rectangle, but in other projections it may not be. .. note:: Intended to be overridden by new projection types. """ return mpatches.Rectangle((0.0, 0.0), 1.0, 1.0) def cla(self): 'Clear the current axes' # Note: this is called by Axes.__init__() self.xaxis.cla() self.yaxis.cla() self.ignore_existing_data_limits = True self.callbacks = cbook.CallbackRegistry(('xlim_changed', 'ylim_changed')) if self._sharex is not None: # major and minor are class instances with # locator and formatter attributes self.xaxis.major = self._sharex.xaxis.major self.xaxis.minor = self._sharex.xaxis.minor x0, x1 = self._sharex.get_xlim() self.set_xlim(x0, x1, emit=False) self.xaxis.set_scale(self._sharex.xaxis.get_scale()) else: self.xaxis.set_scale('linear') if self._sharey is not None: self.yaxis.major = self._sharey.yaxis.major self.yaxis.minor = self._sharey.yaxis.minor y0, y1 = self._sharey.get_ylim() self.set_ylim(y0, y1, emit=False) self.yaxis.set_scale(self._sharey.yaxis.get_scale()) else: self.yaxis.set_scale('linear') self._autoscaleon = True self._update_transScale() # needed? 
self._get_lines = _process_plot_var_args(self) self._get_patches_for_fill = _process_plot_var_args(self, 'fill') self._gridOn = rcParams['axes.grid'] self.lines = [] self.patches = [] self.texts = [] self.tables = [] self.artists = [] self.images = [] self.legend_ = None self.collections = [] # collection.Collection instances self.grid(self._gridOn) props = font_manager.FontProperties(size=rcParams['axes.titlesize']) self.titleOffsetTrans = mtransforms.ScaledTranslation( 0.0, 5.0 / 72.0, self.figure.dpi_scale_trans) self.title = mtext.Text( x=0.5, y=1.0, text='', fontproperties=props, verticalalignment='bottom', horizontalalignment='center', ) self.title.set_transform(self.transAxes + self.titleOffsetTrans) self.title.set_clip_box(None) self._set_artist_props(self.title) # the patch draws the background of the axes. we want this to # be below the other artists; the axesPatch name is # deprecated. We use the frame to draw the edges so we are # setting the edgecolor to None self.patch = self.axesPatch = self._gen_axes_patch() self.patch.set_figure(self.figure) self.patch.set_facecolor(self._axisbg) self.patch.set_edgecolor('None') self.patch.set_linewidth(0) self.patch.set_transform(self.transAxes) # the frame draws the border around the axes and we want this # above. this is a place holder for a more sophisticated # artist that might just draw a left, bottom frame, or a # centered frame, etc the axesFrame name is deprecated self.frame = self.axesFrame = self._gen_axes_patch() self.frame.set_figure(self.figure) self.frame.set_facecolor('none') self.frame.set_edgecolor(rcParams['axes.edgecolor']) self.frame.set_linewidth(rcParams['axes.linewidth']) self.frame.set_transform(self.transAxes) self.frame.set_zorder(2.5) self.axison = True self.xaxis.set_clip_path(self.patch) self.yaxis.set_clip_path(self.patch) self._shared_x_axes.clean() self._shared_y_axes.clean() def clear(self): 'clear the axes' self.cla() def set_color_cycle(self, clist): """ Set the color cycle for any future plot commands on this Axes. clist is a list of mpl color specifiers. """ self._get_lines.set_color_cycle(clist) def ishold(self): 'return the HOLD status of the axes' return self._hold def hold(self, b=None): """ call signature:: hold(b=None) Set the hold state. If *hold* is *None* (default), toggle the *hold* state. Else set the *hold* state to boolean value *b*. Examples: * toggle hold: >>> hold() * turn hold on: >>> hold(True) * turn hold off >>> hold(False) When hold is True, subsequent plot commands will be added to the current axes. When hold is False, the current axes and figure will be cleared on the next plot command """ if b is None: self._hold = not self._hold else: self._hold = b def get_aspect(self): return self._aspect def set_aspect(self, aspect, adjustable=None, anchor=None): """ *aspect* ======== ================================================ value description ======== ================================================ 'auto' automatic; fill position rectangle with data 'normal' same as 'auto'; deprecated 'equal' same scaling from data to plot units for x and y num a circle will be stretched such that the height is num times the width. aspect=1 is the same as aspect='equal'. 
======== ================================================ *adjustable* ========= ============================ value description ========= ============================ 'box' change physical size of axes 'datalim' change xlim or ylim ========= ============================ *anchor* ===== ===================== value description ===== ===================== 'C' centered 'SW' lower left corner 'S' middle of bottom edge 'SE' lower right corner etc. ===== ===================== """ if aspect in ('normal', 'auto'): self._aspect = 'auto' elif aspect == 'equal': self._aspect = 'equal' else: self._aspect = float(aspect) # raise ValueError if necessary if adjustable is not None: self.set_adjustable(adjustable) if anchor is not None: self.set_anchor(anchor) def get_adjustable(self): return self._adjustable def set_adjustable(self, adjustable): """ ACCEPTS: [ 'box' | 'datalim' ] """ if adjustable in ('box', 'datalim'): if self in self._shared_x_axes or self in self._shared_y_axes: if adjustable == 'box': raise ValueError( 'adjustable must be "datalim" for shared axes') self._adjustable = adjustable else: raise ValueError('argument must be "box", or "datalim"') def get_anchor(self): return self._anchor def set_anchor(self, anchor): """ *anchor* ===== ============ value description ===== ============ 'C' Center 'SW' bottom left 'S' bottom 'SE' bottom right 'E' right 'NE' top right 'N' top 'NW' top left 'W' left ===== ============ """ if anchor in mtransforms.Bbox.coefs.keys() or len(anchor) == 2: self._anchor = anchor else: raise ValueError('argument must be among %s' % ', '.join(mtransforms.BBox.coefs.keys())) def get_data_ratio(self): """ Returns the aspect ratio of the raw data. This method is intended to be overridden by new projection types. """ xmin,xmax = self.get_xbound() xsize = max(math.fabs(xmax-xmin), 1e-30) ymin,ymax = self.get_ybound() ysize = max(math.fabs(ymax-ymin), 1e-30) return ysize/xsize def apply_aspect(self, position=None): ''' Use :meth:`_aspect` and :meth:`_adjustable` to modify the axes box or the view limits. ''' if position is None: position = self.get_position(original=True) aspect = self.get_aspect() if aspect == 'auto': self.set_position( position , which='active') return if aspect == 'equal': A = 1 else: A = aspect #Ensure at drawing time that any Axes involved in axis-sharing # does not have its position changed. 
if self in self._shared_x_axes or self in self._shared_y_axes: if self._adjustable == 'box': self._adjustable = 'datalim' warnings.warn( 'shared axes: "adjustable" is being changed to "datalim"') figW,figH = self.get_figure().get_size_inches() fig_aspect = figH/figW if self._adjustable == 'box': box_aspect = A * self.get_data_ratio() pb = position.frozen() pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect) self.set_position(pb1.anchored(self.get_anchor(), pb), 'active') return # reset active to original in case it had been changed # by prior use of 'box' self.set_position(position, which='active') xmin,xmax = self.get_xbound() xsize = max(math.fabs(xmax-xmin), 1e-30) ymin,ymax = self.get_ybound() ysize = max(math.fabs(ymax-ymin), 1e-30) l,b,w,h = position.bounds box_aspect = fig_aspect * (h/w) data_ratio = box_aspect / A y_expander = (data_ratio*xsize/ysize - 1.0) #print 'y_expander', y_expander # If y_expander > 0, the dy/dx viewLim ratio needs to increase if abs(y_expander) < 0.005: #print 'good enough already' return dL = self.dataLim xr = 1.05 * dL.width yr = 1.05 * dL.height xmarg = xsize - xr ymarg = ysize - yr Ysize = data_ratio * xsize Xsize = ysize / data_ratio Xmarg = Xsize - xr Ymarg = Ysize - yr xm = 0 # Setting these targets to, e.g., 0.05*xr does not seem to help. ym = 0 #print 'xmin, xmax, ymin, ymax', xmin, xmax, ymin, ymax #print 'xsize, Xsize, ysize, Ysize', xsize, Xsize, ysize, Ysize changex = (self in self._shared_y_axes and self not in self._shared_x_axes) changey = (self in self._shared_x_axes and self not in self._shared_y_axes) if changex and changey: warnings.warn("adjustable='datalim' cannot work with shared " "x and y axes") return if changex: adjust_y = False else: #print 'xmarg, ymarg, Xmarg, Ymarg', xmarg, ymarg, Xmarg, Ymarg if xmarg > xm and ymarg > ym: adjy = ((Ymarg > 0 and y_expander < 0) or (Xmarg < 0 and y_expander > 0)) else: adjy = y_expander > 0 #print 'y_expander, adjy', y_expander, adjy adjust_y = changey or adjy #(Ymarg > xmarg) if adjust_y: yc = 0.5*(ymin+ymax) y0 = yc - Ysize/2.0 y1 = yc + Ysize/2.0 self.set_ybound((y0, y1)) #print 'New y0, y1:', y0, y1 #print 'New ysize, ysize/xsize', y1-y0, (y1-y0)/xsize else: xc = 0.5*(xmin+xmax) x0 = xc - Xsize/2.0 x1 = xc + Xsize/2.0 self.set_xbound((x0, x1)) #print 'New x0, x1:', x0, x1 #print 'New xsize, ysize/xsize', x1-x0, ysize/(x1-x0) def axis(self, *v, **kwargs): ''' Convenience method for manipulating the x and y view limits and the aspect ratio of the plot. *kwargs* are passed on to :meth:`set_xlim` and :meth:`set_ylim` ''' if len(v)==1 and is_string_like(v[0]): s = v[0].lower() if s=='on': self.set_axis_on() elif s=='off': self.set_axis_off() elif s in ('equal', 'tight', 'scaled', 'normal', 'auto', 'image'): self.set_autoscale_on(True) self.set_aspect('auto') self.autoscale_view() # self.apply_aspect() if s=='equal': self.set_aspect('equal', adjustable='datalim') elif s == 'scaled': self.set_aspect('equal', adjustable='box', anchor='C') self.set_autoscale_on(False) # Req. 
by Mark Bakker elif s=='tight': self.autoscale_view(tight=True) self.set_autoscale_on(False) elif s == 'image': self.autoscale_view(tight=True) self.set_autoscale_on(False) self.set_aspect('equal', adjustable='box', anchor='C') else: raise ValueError('Unrecognized string %s to axis; ' 'try on or off' % s) xmin, xmax = self.get_xlim() ymin, ymax = self.get_ylim() return xmin, xmax, ymin, ymax try: v[0] except IndexError: emit = kwargs.get('emit', True) xmin = kwargs.get('xmin', None) xmax = kwargs.get('xmax', None) xmin, xmax = self.set_xlim(xmin, xmax, emit) ymin = kwargs.get('ymin', None) ymax = kwargs.get('ymax', None) ymin, ymax = self.set_ylim(ymin, ymax, emit) return xmin, xmax, ymin, ymax v = v[0] if len(v) != 4: raise ValueError('v must contain [xmin xmax ymin ymax]') self.set_xlim([v[0], v[1]]) self.set_ylim([v[2], v[3]]) return v def get_child_artists(self): """ Return a list of artists the axes contains. .. deprecated:: 0.98 """ raise DeprecationWarning('Use get_children instead') def get_frame(self): 'Return the axes Rectangle frame' warnings.warn('use ax.patch instead', DeprecationWarning) return self.patch def get_legend(self): 'Return the legend.Legend instance, or None if no legend is defined' return self.legend_ def get_images(self): 'return a list of Axes images contained by the Axes' return cbook.silent_list('AxesImage', self.images) def get_lines(self): 'Return a list of lines contained by the Axes' return cbook.silent_list('Line2D', self.lines) def get_xaxis(self): 'Return the XAxis instance' return self.xaxis def get_xgridlines(self): 'Get the x grid lines as a list of Line2D instances' return cbook.silent_list('Line2D xgridline', self.xaxis.get_gridlines()) def get_xticklines(self): 'Get the xtick lines as a list of Line2D instances' return cbook.silent_list('Text xtickline', self.xaxis.get_ticklines()) def get_yaxis(self): 'Return the YAxis instance' return self.yaxis def get_ygridlines(self): 'Get the y grid lines as a list of Line2D instances' return cbook.silent_list('Line2D ygridline', self.yaxis.get_gridlines()) def get_yticklines(self): 'Get the ytick lines as a list of Line2D instances' return cbook.silent_list('Line2D ytickline', self.yaxis.get_ticklines()) #### Adding and tracking artists def has_data(self): '''Return *True* if any artists have been added to axes. This should not be used to determine whether the *dataLim* need to be updated, and may not actually be useful for anything. 
''' return ( len(self.collections) + len(self.images) + len(self.lines) + len(self.patches))>0 def add_artist(self, a): 'Add any :class:`~matplotlib.artist.Artist` to the axes' a.set_axes(self) self.artists.append(a) self._set_artist_props(a) a.set_clip_path(self.patch) a._remove_method = lambda h: self.artists.remove(h) def add_collection(self, collection, autolim=True): ''' add a :class:`~matplotlib.collections.Collection` instance to the axes ''' label = collection.get_label() if not label: collection.set_label('collection%d'%len(self.collections)) self.collections.append(collection) self._set_artist_props(collection) collection.set_clip_path(self.patch) if autolim: if collection._paths and len(collection._paths): self.update_datalim(collection.get_datalim(self.transData)) collection._remove_method = lambda h: self.collections.remove(h) def add_line(self, line): ''' Add a :class:`~matplotlib.lines.Line2D` to the list of plot lines ''' self._set_artist_props(line) line.set_clip_path(self.patch) self._update_line_limits(line) if not line.get_label(): line.set_label('_line%d'%len(self.lines)) self.lines.append(line) line._remove_method = lambda h: self.lines.remove(h) def _update_line_limits(self, line): p = line.get_path() if p.vertices.size > 0: self.dataLim.update_from_path(p, self.ignore_existing_data_limits, updatex=line.x_isdata, updatey=line.y_isdata) self.ignore_existing_data_limits = False def add_patch(self, p): """ Add a :class:`~matplotlib.patches.Patch` *p* to the list of axes patches; the clipbox will be set to the Axes clipping box. If the transform is not set, it will be set to :attr:`transData`. """ self._set_artist_props(p) p.set_clip_path(self.patch) self._update_patch_limits(p) self.patches.append(p) p._remove_method = lambda h: self.patches.remove(h) def _update_patch_limits(self, patch): 'update the data limits for patch *p*' # hist can add zero height Rectangles, which is useful to keep # the bins, counts and patches lined up, but it throws off log # scaling. We'll ignore rects with zero height or width in # the auto-scaling if (isinstance(patch, mpatches.Rectangle) and (patch.get_width()==0 or patch.get_height()==0)): return vertices = patch.get_path().vertices if vertices.size > 0: xys = patch.get_patch_transform().transform(vertices) if patch.get_data_transform() != self.transData: transform = (patch.get_data_transform() + self.transData.inverted()) xys = transform.transform(xys) self.update_datalim(xys, updatex=patch.x_isdata, updatey=patch.y_isdata) def add_table(self, tab): ''' Add a :class:`~matplotlib.tables.Table` instance to the list of axes tables ''' self._set_artist_props(tab) self.tables.append(tab) tab.set_clip_path(self.patch) tab._remove_method = lambda h: self.tables.remove(h) def relim(self): 'recompute the data limits based on current artists' # Collections are deliberately not supported (yet); see # the TODO note in artists.py. self.dataLim.ignore(True) self.ignore_existing_data_limits = True for line in self.lines: self._update_line_limits(line) for p in self.patches: self._update_patch_limits(p) def update_datalim(self, xys, updatex=True, updatey=True): 'Update the data lim bbox with seq of xy tups or equiv. 2-D array' # if no data is set currently, the bbox will ignore its # limits and set the bound to be the bounds of the xydata. 
# Otherwise, it will compute the bounds of it's current data # and the data in xydata if iterable(xys) and not len(xys): return if not ma.isMaskedArray(xys): xys = np.asarray(xys) self.dataLim.update_from_data_xy(xys, self.ignore_existing_data_limits, updatex=updatex, updatey=updatey) self.ignore_existing_data_limits = False def update_datalim_numerix(self, x, y): 'Update the data lim bbox with seq of xy tups' # if no data is set currently, the bbox will ignore it's # limits and set the bound to be the bounds of the xydata. # Otherwise, it will compute the bounds of it's current data # and the data in xydata if iterable(x) and not len(x): return self.dataLim.update_from_data(x, y, self.ignore_existing_data_limits) self.ignore_existing_data_limits = False def update_datalim_bounds(self, bounds): ''' Update the datalim to include the given :class:`~matplotlib.transforms.Bbox` *bounds* ''' self.dataLim.set(mtransforms.Bbox.union([self.dataLim, bounds])) def _process_unit_info(self, xdata=None, ydata=None, kwargs=None): 'look for unit *kwargs* and update the axis instances as necessary' if self.xaxis is None or self.yaxis is None: return #print 'processing', self.get_geometry() if xdata is not None: # we only need to update if there is nothing set yet. if not self.xaxis.have_units(): self.xaxis.update_units(xdata) #print '\tset from xdata', self.xaxis.units if ydata is not None: # we only need to update if there is nothing set yet. if not self.yaxis.have_units(): self.yaxis.update_units(ydata) #print '\tset from ydata', self.yaxis.units # process kwargs 2nd since these will override default units if kwargs is not None: xunits = kwargs.pop( 'xunits', self.xaxis.units) if xunits!=self.xaxis.units: #print '\tkw setting xunits', xunits self.xaxis.set_units(xunits) # If the units being set imply a different converter, # we need to update. if xdata is not None: self.xaxis.update_units(xdata) yunits = kwargs.pop('yunits', self.yaxis.units) if yunits!=self.yaxis.units: #print '\tkw setting yunits', yunits self.yaxis.set_units(yunits) # If the units being set imply a different converter, # we need to update. if ydata is not None: self.yaxis.update_units(ydata) def in_axes(self, mouseevent): ''' return *True* if the given *mouseevent* (in display coords) is in the Axes ''' return self.patch.contains(mouseevent)[0] def get_autoscale_on(self): """ Get whether autoscaling is applied on plot commands """ return self._autoscaleon def set_autoscale_on(self, b): """ Set whether autoscaling is applied on plot commands accepts: [ *True* | *False* ] """ self._autoscaleon = b def autoscale_view(self, tight=False, scalex=True, scaley=True): """ autoscale the view limits using the data limits. You can selectively autoscale only a single axis, eg, the xaxis by setting *scaley* to *False*. The autoscaling preserves any axis direction reversal that has already been done. 
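
        A minimal usage sketch (assumes an already-populated axes ``ax``;
        illustrative only)::

            ax.set_autoscale_on(True)
            ax.autoscale_view(scalex=True, scaley=False)  # rescale x only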
""" # if image data only just use the datalim if not self._autoscaleon: return if scalex: xshared = self._shared_x_axes.get_siblings(self) dl = [ax.dataLim for ax in xshared] bb = mtransforms.BboxBase.union(dl) x0, x1 = bb.intervalx if scaley: yshared = self._shared_y_axes.get_siblings(self) dl = [ax.dataLim for ax in yshared] bb = mtransforms.BboxBase.union(dl) y0, y1 = bb.intervaly if (tight or (len(self.images)>0 and len(self.lines)==0 and len(self.patches)==0)): if scalex: self.set_xbound(x0, x1) if scaley: self.set_ybound(y0, y1) return if scalex: XL = self.xaxis.get_major_locator().view_limits(x0, x1) self.set_xbound(XL) if scaley: YL = self.yaxis.get_major_locator().view_limits(y0, y1) self.set_ybound(YL) #### Drawing def draw(self, renderer=None, inframe=False): "Draw everything (plot lines, axes, labels)" if renderer is None: renderer = self._cachedRenderer if renderer is None: raise RuntimeError('No renderer defined') if not self.get_visible(): return renderer.open_group('axes') self.apply_aspect() # the patch draws the background rectangle -- the frame below # will draw the edges if self.axison and self._frameon: self.patch.draw(renderer) artists = [] if len(self.images)<=1 or renderer.option_image_nocomposite(): for im in self.images: im.draw(renderer) else: # make a composite image blending alpha # list of (mimage.Image, ox, oy) mag = renderer.get_image_magnification() ims = [(im.make_image(mag),0,0) for im in self.images if im.get_visible()] l, b, r, t = self.bbox.extents width = mag*((round(r) + 0.5) - (round(l) - 0.5)) height = mag*((round(t) + 0.5) - (round(b) - 0.5)) im = mimage.from_images(height, width, ims) im.is_grayscale = False l, b, w, h = self.bbox.bounds # composite images need special args so they will not # respect z-order for now renderer.draw_image( round(l), round(b), im, self.bbox, self.patch.get_path(), self.patch.get_transform()) artists.extend(self.collections) artists.extend(self.patches) artists.extend(self.lines) artists.extend(self.texts) artists.extend(self.artists) if self.axison and not inframe: if self._axisbelow: self.xaxis.set_zorder(0.5) self.yaxis.set_zorder(0.5) else: self.xaxis.set_zorder(2.5) self.yaxis.set_zorder(2.5) artists.extend([self.xaxis, self.yaxis]) if not inframe: artists.append(self.title) artists.extend(self.tables) if self.legend_ is not None: artists.append(self.legend_) # the frame draws the edges around the axes patch -- we # decouple these so the patch can be in the background and the # frame in the foreground. if self.axison and self._frameon: artists.append(self.frame) dsu = [ (a.zorder, i, a) for i, a in enumerate(artists) if not a.get_animated() ] dsu.sort() for zorder, i, a in dsu: a.draw(renderer) renderer.close_group('axes') self._cachedRenderer = renderer def draw_artist(self, a): """ This method can only be used after an initial draw which caches the renderer. It is used to efficiently update Axes data (axis ticks, labels, etc are not updated) """ assert self._cachedRenderer is not None a.draw(self._cachedRenderer) def redraw_in_frame(self): """ This method can only be used after an initial draw which caches the renderer. 
It is used to efficiently update Axes data (axis
        ticks, labels, etc are not updated)
        """
        assert self._cachedRenderer is not None
        self.draw(self._cachedRenderer, inframe=True)

    def get_renderer_cache(self):
        return self._cachedRenderer

    def __draw_animate(self):
        # ignore for now; broken
        if self._lastRenderer is None:
            raise RuntimeError('You must first call ax.draw()')
        dsu = [(a.zorder, a) for a in self.animated.keys()]
        dsu.sort()
        renderer = self._lastRenderer
        renderer.blit()
        for tmp, a in dsu:
            a.draw(renderer)

    #### Axes rectangle characteristics

    def get_frame_on(self):
        """
        Get whether the axes rectangle patch is drawn
        """
        return self._frameon

    def set_frame_on(self, b):
        """
        Set whether the axes rectangle patch is drawn

        ACCEPTS: [ *True* | *False* ]
        """
        self._frameon = b

    def get_axisbelow(self):
        """
        Get whether axis below is true or not
        """
        return self._axisbelow

    def set_axisbelow(self, b):
        """
        Set whether the axis ticks and gridlines are above or below
        most artists

        ACCEPTS: [ *True* | *False* ]
        """
        self._axisbelow = b

    def grid(self, b=None, **kwargs):
        """
        call signature::

          grid(self, b=None, **kwargs)

        Set the axes grids on or off; *b* is a boolean

        If *b* is *None* and ``len(kwargs)==0``, toggle the grid state.  If
        *kwargs* are supplied, it is assumed that you want a grid and *b*
        is thus set to *True*

        *kwargs* are used to set the grid line properties, eg::

          ax.grid(color='r', linestyle='-', linewidth=2)

        Valid :class:`~matplotlib.lines.Line2D` kwargs are

        %(Line2D)s
        """
        if len(kwargs): b = True
        self.xaxis.grid(b, **kwargs)
        self.yaxis.grid(b, **kwargs)
    grid.__doc__ = cbook.dedent(grid.__doc__) % martist.kwdocd

    def ticklabel_format(self, **kwargs):
        """
        Convenience method for manipulating the ScalarFormatter
        used by default for linear axes.

        Optional keyword arguments:

          ============   =====================================
          Keyword        Description
          ============   =====================================
          *style*        [ 'sci' (or 'scientific') | 'plain' ]
                         plain turns off scientific notation
          *scilimits*    (m, n), pair of integers; if *style*
                         is 'sci', scientific notation will
                         be used for numbers outside the range
                         10`-m`:sup: to 10`n`:sup:.
                         Use (0,0) to include all numbers.
          *axis*         [ 'x' | 'y' | 'both' ]
          ============   =====================================

        Only the major ticks are affected.
        If the method is called when the
        :class:`~matplotlib.ticker.ScalarFormatter` is not
        the :class:`~matplotlib.ticker.Formatter` being used, an
        :exc:`AttributeError` will be raised.
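
        A usage sketch (assumes a hypothetical axes ``ax``; illustrative
        only)::

            # scientific notation on the y-axis for all magnitudes
            ax.ticklabel_format(style='sci', scilimits=(0, 0), axis='y')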
""" style = kwargs.pop('style', '').lower() scilimits = kwargs.pop('scilimits', None) if scilimits is not None: try: m, n = scilimits m+n+1 # check that both are numbers except (ValueError, TypeError): raise ValueError("scilimits must be a sequence of 2 integers") axis = kwargs.pop('axis', 'both').lower() if style[:3] == 'sci': sb = True elif style in ['plain', 'comma']: sb = False if style == 'plain': cb = False else: cb = True raise NotImplementedError, "comma style remains to be added" elif style == '': sb = None else: raise ValueError, "%s is not a valid style value" try: if sb is not None: if axis == 'both' or axis == 'x': self.xaxis.major.formatter.set_scientific(sb) if axis == 'both' or axis == 'y': self.yaxis.major.formatter.set_scientific(sb) if scilimits is not None: if axis == 'both' or axis == 'x': self.xaxis.major.formatter.set_powerlimits(scilimits) if axis == 'both' or axis == 'y': self.yaxis.major.formatter.set_powerlimits(scilimits) except AttributeError: raise AttributeError( "This method only works with the ScalarFormatter.") def set_axis_off(self): """turn off the axis""" self.axison = False def set_axis_on(self): """turn on the axis""" self.axison = True def get_axis_bgcolor(self): 'Return the axis background color' return self._axisbg def set_axis_bgcolor(self, color): """ set the axes background color ACCEPTS: any matplotlib color - see :func:`~matplotlib.pyplot.colors` """ self._axisbg = color self.patch.set_facecolor(color) ### data limits, ticks, tick labels, and formatting def invert_xaxis(self): "Invert the x-axis." left, right = self.get_xlim() self.set_xlim(right, left) def xaxis_inverted(self): 'Returns True if the x-axis is inverted.' left, right = self.get_xlim() return right < left def get_xbound(self): """ Returns the x-axis numerical bounds where:: lowerBound < upperBound """ left, right = self.get_xlim() if left < right: return left, right else: return right, left def set_xbound(self, lower=None, upper=None): """ Set the lower and upper numerical bounds of the x-axis. This method will honor axes inversion regardless of parameter order. 
""" if upper is None and iterable(lower): lower,upper = lower old_lower,old_upper = self.get_xbound() if lower is None: lower = old_lower if upper is None: upper = old_upper if self.xaxis_inverted(): if lower < upper: self.set_xlim(upper, lower) else: self.set_xlim(lower, upper) else: if lower < upper: self.set_xlim(lower, upper) else: self.set_xlim(upper, lower) def get_xlim(self): """ Get the x-axis range [*xmin*, *xmax*] """ return tuple(self.viewLim.intervalx) def set_xlim(self, xmin=None, xmax=None, emit=True, **kwargs): """ call signature:: set_xlim(self, *args, **kwargs) Set the limits for the xaxis Returns the current xlimits as a length 2 tuple: [*xmin*, *xmax*] Examples:: set_xlim((valmin, valmax)) set_xlim(valmin, valmax) set_xlim(xmin=1) # xmax unchanged set_xlim(xmax=1) # xmin unchanged Keyword arguments: *ymin*: scalar the min of the ylim *ymax*: scalar the max of the ylim *emit*: [ True | False ] notify observers of lim change ACCEPTS: len(2) sequence of floats """ if xmax is None and iterable(xmin): xmin,xmax = xmin self._process_unit_info(xdata=(xmin, xmax)) if xmin is not None: xmin = self.convert_xunits(xmin) if xmax is not None: xmax = self.convert_xunits(xmax) old_xmin,old_xmax = self.get_xlim() if xmin is None: xmin = old_xmin if xmax is None: xmax = old_xmax xmin, xmax = mtransforms.nonsingular(xmin, xmax, increasing=False) xmin, xmax = self.xaxis.limit_range_for_scale(xmin, xmax) self.viewLim.intervalx = (xmin, xmax) if emit: self.callbacks.process('xlim_changed', self) # Call all of the other x-axes that are shared with this one for other in self._shared_x_axes.get_siblings(self): if other is not self: other.set_xlim(self.viewLim.intervalx, emit=False) if (other.figure != self.figure and other.figure.canvas is not None): other.figure.canvas.draw_idle() return xmin, xmax def get_xscale(self): 'return the xaxis scale string: %s' % ( ", ".join(mscale.get_scale_names())) return self.xaxis.get_scale() def set_xscale(self, value, **kwargs): """ call signature:: set_xscale(value) Set the scaling of the x-axis: %(scale)s ACCEPTS: [%(scale)s] Different kwargs are accepted, depending on the scale: %(scale_docs)s """ self.xaxis.set_scale(value, **kwargs) self.autoscale_view() self._update_transScale() set_xscale.__doc__ = cbook.dedent(set_xscale.__doc__) % { 'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]), 'scale_docs': mscale.get_scale_docs().strip()} def get_xticks(self, minor=False): 'Return the x ticks as a list of locations' return self.xaxis.get_ticklocs(minor=minor) def set_xticks(self, ticks, minor=False): """ Set the x ticks with list of *ticks* ACCEPTS: sequence of floats """ return self.xaxis.set_ticks(ticks, minor=minor) def get_xmajorticklabels(self): 'Get the xtick labels as a list of Text instances' return cbook.silent_list('Text xticklabel', self.xaxis.get_majorticklabels()) def get_xminorticklabels(self): 'Get the xtick labels as a list of Text instances' return cbook.silent_list('Text xticklabel', self.xaxis.get_minorticklabels()) def get_xticklabels(self, minor=False): 'Get the xtick labels as a list of Text instances' return cbook.silent_list('Text xticklabel', self.xaxis.get_ticklabels(minor=minor)) def set_xticklabels(self, labels, fontdict=None, minor=False, **kwargs): """ call signature:: set_xticklabels(labels, fontdict=None, minor=False, **kwargs) Set the xtick labels with list of strings *labels*. Return a list of axis text instances. *kwargs* set the :class:`~matplotlib.text.Text` properties. 
Valid properties are %(Text)s ACCEPTS: sequence of strings """ return self.xaxis.set_ticklabels(labels, fontdict, minor=minor, **kwargs) set_xticklabels.__doc__ = cbook.dedent( set_xticklabels.__doc__) % martist.kwdocd def invert_yaxis(self): "Invert the y-axis." left, right = self.get_ylim() self.set_ylim(right, left) def yaxis_inverted(self): 'Returns True if the y-axis is inverted.' left, right = self.get_ylim() return right < left def get_ybound(self): "Return y-axis numerical bounds in the form of lowerBound < upperBound" left, right = self.get_ylim() if left < right: return left, right else: return right, left def set_ybound(self, lower=None, upper=None): """Set the lower and upper numerical bounds of the y-axis. This method will honor axes inversion regardless of parameter order. """ if upper is None and iterable(lower): lower,upper = lower old_lower,old_upper = self.get_ybound() if lower is None: lower = old_lower if upper is None: upper = old_upper if self.yaxis_inverted(): if lower < upper: self.set_ylim(upper, lower) else: self.set_ylim(lower, upper) else: if lower < upper: self.set_ylim(lower, upper) else: self.set_ylim(upper, lower) def get_ylim(self): """ Get the y-axis range [*ymin*, *ymax*] """ return tuple(self.viewLim.intervaly) def set_ylim(self, ymin=None, ymax=None, emit=True, **kwargs): """ call signature:: set_ylim(self, *args, **kwargs): Set the limits for the yaxis; v = [ymin, ymax]:: set_ylim((valmin, valmax)) set_ylim(valmin, valmax) set_ylim(ymin=1) # ymax unchanged set_ylim(ymax=1) # ymin unchanged Keyword arguments: *ymin*: scalar the min of the ylim *ymax*: scalar the max of the ylim *emit*: [ True | False ] notify observers of lim change Returns the current ylimits as a length 2 tuple ACCEPTS: len(2) sequence of floats """ if ymax is None and iterable(ymin): ymin,ymax = ymin if ymin is not None: ymin = self.convert_yunits(ymin) if ymax is not None: ymax = self.convert_yunits(ymax) old_ymin,old_ymax = self.get_ylim() if ymin is None: ymin = old_ymin if ymax is None: ymax = old_ymax ymin, ymax = mtransforms.nonsingular(ymin, ymax, increasing=False) ymin, ymax = self.yaxis.limit_range_for_scale(ymin, ymax) self.viewLim.intervaly = (ymin, ymax) if emit: self.callbacks.process('ylim_changed', self) # Call all of the other y-axes that are shared with this one for other in self._shared_y_axes.get_siblings(self): if other is not self: other.set_ylim(self.viewLim.intervaly, emit=False) if (other.figure != self.figure and other.figure.canvas is not None): other.figure.canvas.draw_idle() return ymin, ymax def get_yscale(self): 'return the xaxis scale string: %s' % ( ", ".join(mscale.get_scale_names())) return self.yaxis.get_scale() def set_yscale(self, value, **kwargs): """ call signature:: set_yscale(value) Set the scaling of the y-axis: %(scale)s ACCEPTS: [%(scale)s] Different kwargs are accepted, depending on the scale: %(scale_docs)s """ self.yaxis.set_scale(value, **kwargs) self.autoscale_view() self._update_transScale() set_yscale.__doc__ = cbook.dedent(set_yscale.__doc__) % { 'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]), 'scale_docs': mscale.get_scale_docs().strip()} def get_yticks(self, minor=False): 'Return the y ticks as a list of locations' return self.yaxis.get_ticklocs(minor=minor) def set_yticks(self, ticks, minor=False): """ Set the y ticks with list of *ticks* ACCEPTS: sequence of floats Keyword arguments: *minor*: [ False | True ] Sets the minor ticks if True """ return self.yaxis.set_ticks(ticks, minor=minor) def 
get_ymajorticklabels(self):
        'Get the ytick labels as a list of Text instances'
        return cbook.silent_list('Text yticklabel',
                                 self.yaxis.get_majorticklabels())

    def get_yminorticklabels(self):
        'Get the ytick labels as a list of Text instances'
        return cbook.silent_list('Text yticklabel',
                                 self.yaxis.get_minorticklabels())

    def get_yticklabels(self, minor=False):
        'Get the ytick labels as a list of Text instances'
        return cbook.silent_list('Text yticklabel',
                                 self.yaxis.get_ticklabels(minor=minor))

    def set_yticklabels(self, labels, fontdict=None, minor=False, **kwargs):
        """
        call signature::

          set_yticklabels(labels, fontdict=None, minor=False, **kwargs)

        Set the ytick labels with list of strings *labels*.  Return a list of
        :class:`~matplotlib.text.Text` instances.

        *kwargs* set :class:`~matplotlib.text.Text` properties for
        the labels.  Valid properties are
        %(Text)s

        ACCEPTS: sequence of strings
        """
        return self.yaxis.set_ticklabels(labels, fontdict,
                                         minor=minor, **kwargs)
    set_yticklabels.__doc__ = cbook.dedent(
        set_yticklabels.__doc__) % martist.kwdocd

    def xaxis_date(self, tz=None):
        """Sets up x-axis ticks and labels that treat the x data as dates.

        *tz* is the time zone to use in labeling dates.  Defaults to rc value.
        """

        xmin, xmax = self.dataLim.intervalx
        if xmin==0.:
            # no data has been added - let's set the default datalim.
            # We should probably use a better proxy for the datalim
            # have been updated than the ignore setting
            dmax = today = datetime.date.today()
            dmin = today-datetime.timedelta(days=10)
            self._process_unit_info(xdata=(dmin, dmax))
            dmin, dmax = self.convert_xunits([dmin, dmax])

            self.viewLim.intervalx = dmin, dmax
            self.dataLim.intervalx = dmin, dmax

        locator = self.xaxis.get_major_locator()
        if not isinstance(locator, mdates.DateLocator):
            locator = mdates.AutoDateLocator(tz)
            self.xaxis.set_major_locator(locator)

        # the autolocator uses the viewlim to pick the right date
        # locator, but it may not have correct viewlim before an
        # autoscale.  If the viewlim is still zero..1, set it to the
        # datalim and the autoscaler will update it on request
        if self.viewLim.intervalx[0]==0.:
            self.viewLim.intervalx = tuple(self.dataLim.intervalx)
        locator.refresh()

        formatter = self.xaxis.get_major_formatter()
        if not isinstance(formatter, mdates.DateFormatter):
            formatter = mdates.AutoDateFormatter(locator, tz)
            self.xaxis.set_major_formatter(formatter)

    def yaxis_date(self, tz=None):
        """Sets up y-axis ticks and labels that treat the y data as dates.

        *tz* is the time zone to use in labeling dates.  Defaults to rc value.
        """

        ymin, ymax = self.dataLim.intervaly
        if ymin==0.:
            # no data has been added - let's set the default datalim.
            # We should probably use a better proxy for the datalim
            # have been updated than the ignore setting
            dmax = today = datetime.date.today()
            dmin = today-datetime.timedelta(days=10)
            self._process_unit_info(ydata=(dmin, dmax))
            dmin, dmax = self.convert_yunits([dmin, dmax])

            self.viewLim.intervaly = dmin, dmax
            self.dataLim.intervaly = dmin, dmax

        locator = self.yaxis.get_major_locator()
        if not isinstance(locator, mdates.DateLocator):
            locator = mdates.AutoDateLocator(tz)
            self.yaxis.set_major_locator(locator)

        # the autolocator uses the viewlim to pick the right date
        # locator, but it may not have correct viewlim before an
        # autoscale.  If the viewlim is still zero..1, set it to the
        # datalim and the autoscaler will update it on request
        if self.viewLim.intervaly[0]==0.:
            self.viewLim.intervaly = tuple(self.dataLim.intervaly)
        locator.refresh()

        formatter = self.yaxis.get_major_formatter()
        if not isinstance(formatter, mdates.DateFormatter):
            formatter = mdates.AutoDateFormatter(locator, tz)
            self.yaxis.set_major_formatter(formatter)

    def format_xdata(self, x):
        """
        Return *x* string formatted.  This function will use the attribute
        self.fmt_xdata if it is callable, else will fall back on the xaxis
        major formatter
        """
        try: return self.fmt_xdata(x)
        except TypeError:
            func = self.xaxis.get_major_formatter().format_data_short
            val = func(x)
            return val

    def format_ydata(self, y):
        """
        Return y string formatted.  This function will use the
        :attr:`fmt_ydata` attribute if it is callable, else will fall
        back on the yaxis major formatter
        """
        try: return self.fmt_ydata(y)
        except TypeError:
            func = self.yaxis.get_major_formatter().format_data_short
            val = func(y)
            return val

    def format_coord(self, x, y):
        'return a format string formatting the *x*, *y* coord'
        if x is None:
            x = '???'
        if y is None:
            y = '???'
        xs = self.format_xdata(x)
        ys = self.format_ydata(y)
        return 'x=%s, y=%s'%(xs,ys)

    #### Interactive manipulation

    def can_zoom(self):
        """
        Return *True* if this axes support the zoom box
        """
        return True

    def get_navigate(self):
        """
        Get whether the axes responds to navigation commands
        """
        return self._navigate

    def set_navigate(self, b):
        """
        Set whether the axes responds to navigation toolbar commands

        ACCEPTS: [ True | False ]
        """
        self._navigate = b

    def get_navigate_mode(self):
        """
        Get the navigation toolbar button status: 'PAN', 'ZOOM', or None
        """
        return self._navigate_mode

    def set_navigate_mode(self, b):
        """
        Set the navigation toolbar button status;

        .. warning::
            this is not a user-API function.
        """
        self._navigate_mode = b

    def start_pan(self, x, y, button):
        """
        Called when a pan operation has started.

        *x*, *y* are the mouse coordinates in display coords.
        button is the mouse button number:

        * 1: LEFT
        * 2: MIDDLE
        * 3: RIGHT

        .. note::
            Intended to be overridden by new projection types.
        """
        self._pan_start = cbook.Bunch(
            lim           = self.viewLim.frozen(),
            trans         = self.transData.frozen(),
            trans_inverse = self.transData.inverted().frozen(),
            bbox          = self.bbox.frozen(),
            x             = x,
            y             = y
            )

    def end_pan(self):
        """
        Called when a pan operation completes (when the mouse button
        is up.)

        .. note::
            Intended to be overridden by new projection types.
        """
        del self._pan_start

    def drag_pan(self, button, key, x, y):
        """
        Called when the mouse moves during a pan operation.

        *button* is the mouse button number:

        * 1: LEFT
        * 2: MIDDLE
        * 3: RIGHT

        *key* is a "shift" key

        *x*, *y* are the mouse coordinates in display coords.

        .. note::
            Intended to be overridden by new projection types.
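
        A rough sketch of the call sequence a navigation toolbar issues
        during a pan gesture (``event`` is a hypothetical mouse event;
        illustrative only)::

            ax.start_pan(event.x, event.y, event.button)            # press
            ax.drag_pan(event.button, event.key, event.x, event.y)  # motion
            ax.end_pan()                                            # release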
""" def format_deltas(key, dx, dy): if key=='control': if(abs(dx)>abs(dy)): dy = dx else: dx = dy elif key=='x': dy = 0 elif key=='y': dx = 0 elif key=='shift': if 2*abs(dx) < abs(dy): dx=0 elif 2*abs(dy) < abs(dx): dy=0 elif(abs(dx)>abs(dy)): dy=dy/abs(dy)*abs(dx) else: dx=dx/abs(dx)*abs(dy) return (dx,dy) p = self._pan_start dx = x - p.x dy = y - p.y if dx == 0 and dy == 0: return if button == 1: dx, dy = format_deltas(key, dx, dy) result = p.bbox.translated(-dx, -dy) \ .transformed(p.trans_inverse) elif button == 3: try: dx = -dx / float(self.bbox.width) dy = -dy / float(self.bbox.height) dx, dy = format_deltas(key, dx, dy) if self.get_aspect() != 'auto': dx = 0.5 * (dx + dy) dy = dx alpha = np.power(10.0, (dx, dy)) start = p.trans_inverse.transform_point((p.x, p.y)) lim_points = p.lim.get_points() result = start + alpha * (lim_points - start) result = mtransforms.Bbox(result) except OverflowError: warnings.warn('Overflow while panning') return self.set_xlim(*result.intervalx) self.set_ylim(*result.intervaly) def get_cursor_props(self): """ return the cursor propertiess as a (*linewidth*, *color*) tuple, where *linewidth* is a float and *color* is an RGBA tuple """ return self._cursorProps def set_cursor_props(self, *args): """ Set the cursor property as:: ax.set_cursor_props(linewidth, color) or:: ax.set_cursor_props((linewidth, color)) ACCEPTS: a (*float*, *color*) tuple """ if len(args)==1: lw, c = args[0] elif len(args)==2: lw, c = args else: raise ValueError('args must be a (linewidth, color) tuple') c =mcolors.colorConverter.to_rgba(c) self._cursorProps = lw, c def connect(self, s, func): """ Register observers to be notified when certain events occur. Register with callback functions with the following signatures. The function has the following signature:: func(ax) # where ax is the instance making the callback. The following events can be connected to: 'xlim_changed','ylim_changed' The connection id is is returned - you can use this with disconnect to disconnect from the axes event """ raise DeprecationWarning('use the callbacks CallbackRegistry instance ' 'instead') def disconnect(self, cid): 'disconnect from the Axes event.' raise DeprecationWarning('use the callbacks CallbackRegistry instance ' 'instead') def get_children(self): 'return a list of child artists' children = [] children.append(self.xaxis) children.append(self.yaxis) children.extend(self.lines) children.extend(self.patches) children.extend(self.texts) children.extend(self.tables) children.extend(self.artists) children.extend(self.images) if self.legend_ is not None: children.append(self.legend_) children.extend(self.collections) children.append(self.title) children.append(self.patch) children.append(self.frame) return children def contains(self,mouseevent): """Test whether the mouse event occured in the axes. Returns T/F, {} """ if callable(self._contains): return self._contains(self,mouseevent) return self.patch.contains(mouseevent) def pick(self, *args): """ call signature:: pick(mouseevent) each child artist will fire a pick event if mouseevent is over the artist and the artist has picker set """ if len(args)>1: raise DeprecationWarning('New pick API implemented -- ' 'see API_CHANGES in the src distribution') martist.Artist.pick(self,args[0]) def __pick(self, x, y, trans=None, among=None): """ Return the artist under point that is closest to the *x*, *y*. If *trans* is *None*, *x*, and *y* are in window coords, (0,0 = lower left). 
Otherwise, *trans* is a
        :class:`~matplotlib.transforms.Transform` that specifies the
        coordinate system of *x*, *y*.

        The selection of artists from amongst which the pick function
        finds an artist can be narrowed using the optional keyword
        argument *among*. If provided, this should be either a sequence
        of permitted artists or a function taking an artist as its
        argument and returning a true value if and only if that artist
        can be selected.

        Note this algorithm calculates distance to the vertices of the
        polygon, so if you want to pick a patch, click on the edge!
        """
        # MGDTODO: Needs updating
        if trans is not None:
            xywin = trans.transform_point((x,y))
        else:
            xywin = x,y

        def dist_points(p1, p2):
            'return the distance between two points'
            x1, y1 = p1
            x2, y2 = p2
            return math.sqrt((x1-x2)**2+(y1-y2)**2)

        def dist_x_y(p1, x, y):
            '*x* and *y* are arrays; return the distance to the closest point'
            x1, y1 = p1
            return min(np.sqrt((x-x1)**2+(y-y1)**2))

        def dist(a):
            if isinstance(a, Text):
                bbox = a.get_window_extent()
                l,b,w,h = bbox.bounds
                verts = (l,b), (l,b+h), (l+w,b+h), (l+w, b)
                xt, yt = zip(*verts)
            elif isinstance(a, Patch):
                path = a.get_path()
                tverts = a.get_transform().transform_path(path)
                xt, yt = zip(*tverts)
            elif isinstance(a, mlines.Line2D):
                xdata = a.get_xdata(orig=False)
                ydata = a.get_ydata(orig=False)
                xt, yt = a.get_transform().numerix_x_y(xdata, ydata)

            return dist_x_y(xywin, np.asarray(xt), np.asarray(yt))

        artists = self.lines + self.patches + self.texts
        if callable(among):
            artists = filter(among, artists)
        elif iterable(among):
            amongd = dict([(k,1) for k in among])
            artists = [a for a in artists if a in amongd]
        elif among is None:
            pass
        else:
            raise ValueError('among must be callable or iterable')
        if not len(artists):
            return None
        ds = [ (dist(a),a) for a in artists]
        ds.sort()
        return ds[0][1]

    #### Labelling

    def get_title(self):
        """
        Get the title text string.
        """
        return self.title.get_text()

    def set_title(self, label, fontdict=None, **kwargs):
        """
        call signature::

          set_title(label, fontdict=None, **kwargs):

        Set the title for the axes.

        kwargs are Text properties:
        %(Text)s

        ACCEPTS: str

        .. seealso::
            :meth:`text`:
                for information on how the optional args work
        """
        default = {
            'fontsize':rcParams['axes.titlesize'],
            'verticalalignment' : 'bottom',
            'horizontalalignment' : 'center'
            }

        self.title.set_text(label)
        self.title.update(default)
        if fontdict is not None: self.title.update(fontdict)
        self.title.update(kwargs)
        return self.title
    set_title.__doc__ = cbook.dedent(set_title.__doc__) % martist.kwdocd

    def get_xlabel(self):
        """
        Get the xlabel text string.
        """
        label = self.xaxis.get_label()
        return label.get_text()

    def set_xlabel(self, xlabel, fontdict=None, **kwargs):
        """
        call signature::

          set_xlabel(xlabel, fontdict=None, **kwargs)

        Set the label for the xaxis.

        Valid kwargs are Text properties:
        %(Text)s

        ACCEPTS: str

        .. seealso::
            :meth:`text`:
                for information on how the optional args work
        """
        label = self.xaxis.get_label()
        label.set_text(xlabel)
        if fontdict is not None: label.update(fontdict)
        label.update(kwargs)
        return label
    set_xlabel.__doc__ = cbook.dedent(set_xlabel.__doc__) % martist.kwdocd

    def get_ylabel(self):
        """
        Get the ylabel text string.
        """
        label = self.yaxis.get_label()
        return label.get_text()

    def set_ylabel(self, ylabel, fontdict=None, **kwargs):
        """
        call signature::

          set_ylabel(ylabel, fontdict=None, **kwargs)

        Set the label for the yaxis

        Valid kwargs are Text properties:
        %(Text)s

        ACCEPTS: str

        ..
seealso:: :meth:`text`: for information on how override and the optional args work """ label = self.yaxis.get_label() label.set_text(ylabel) if fontdict is not None: label.update(fontdict) label.update(kwargs) return label set_ylabel.__doc__ = cbook.dedent(set_ylabel.__doc__) % martist.kwdocd def text(self, x, y, s, fontdict=None, withdash=False, **kwargs): """ call signature:: text(x, y, s, fontdict=None, **kwargs) Add text in string *s* to axis at location *x*, *y*, data coordinates. Keyword arguments: *fontdict*: A dictionary to override the default text properties. If *fontdict* is *None*, the defaults are determined by your rc parameters. *withdash*: [ False | True ] Creates a :class:`~matplotlib.text.TextWithDash` instance instead of a :class:`~matplotlib.text.Text` instance. Individual keyword arguments can be used to override any given parameter:: text(x, y, s, fontsize=12) The default transform specifies that text is in data coords, alternatively, you can specify text in axis coords (0,0 is lower-left and 1,1 is upper-right). The example below places text in the center of the axes:: text(0.5, 0.5,'matplotlib', horizontalalignment='center', verticalalignment='center', transform = ax.transAxes) You can put a rectangular box around the text instance (eg. to set a background color) by using the keyword *bbox*. *bbox* is a dictionary of :class:`matplotlib.patches.Rectangle` properties. For example:: text(x, y, s, bbox=dict(facecolor='red', alpha=0.5)) Valid kwargs are :class:`matplotlib.text.Text` properties: %(Text)s """ default = { 'verticalalignment' : 'bottom', 'horizontalalignment' : 'left', #'verticalalignment' : 'top', 'transform' : self.transData, } # At some point if we feel confident that TextWithDash # is robust as a drop-in replacement for Text and that # the performance impact of the heavier-weight class # isn't too significant, it may make sense to eliminate # the withdash kwarg and simply delegate whether there's # a dash to TextWithDash and dashlength. if withdash: t = mtext.TextWithDash( x=x, y=y, text=s, ) else: t = mtext.Text( x=x, y=y, text=s, ) self._set_artist_props(t) t.update(default) if fontdict is not None: t.update(fontdict) t.update(kwargs) self.texts.append(t) t._remove_method = lambda h: self.texts.remove(h) #if t.get_clip_on(): t.set_clip_box(self.bbox) if 'clip_on' in kwargs: t.set_clip_box(self.bbox) return t text.__doc__ = cbook.dedent(text.__doc__) % martist.kwdocd def annotate(self, *args, **kwargs): """ call signature:: annotate(s, xy, xytext=None, xycoords='data', textcoords='data', arrowprops=None, **kwargs) Keyword arguments: %(Annotation)s .. plot:: mpl_examples/pylab_examples/annotation_demo2.py """ a = mtext.Annotation(*args, **kwargs) a.set_transform(mtransforms.IdentityTransform()) self._set_artist_props(a) if kwargs.has_key('clip_on'): a.set_clip_path(self.patch) self.texts.append(a) return a annotate.__doc__ = cbook.dedent(annotate.__doc__) % martist.kwdocd #### Lines and spans def axhline(self, y=0, xmin=0, xmax=1, **kwargs): """ call signature:: axhline(y=0, xmin=0, xmax=1, **kwargs) Axis Horizontal Line Draw a horizontal line at *y* from *xmin* to *xmax*. With the default values of *xmin* = 0 and *xmax* = 1, this line will always span the horizontal extent of the axes, regardless of the xlim settings, even if you change them, eg. with the :meth:`set_xlim` command. That is, the horizontal extent is in axes coords: 0=left, 0.5=middle, 1.0=right but the *y* location is in data coordinates. 
Return value is the :class:`~matplotlib.lines.Line2D`
        instance.  kwargs are the same as kwargs to plot, and can be
        used to control the line properties.  Eg.,

        * draw a thick red hline at *y* = 0 that spans the xrange

            >>> axhline(linewidth=4, color='r')

        * draw a default hline at *y* = 1 that spans the xrange

            >>> axhline(y=1)

        * draw a default hline at *y* = .5 that spans the middle half of
          the xrange

            >>> axhline(y=.5, xmin=0.25, xmax=0.75)

        Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:

        %(Line2D)s

        .. seealso::
            :meth:`axhspan`:
                for example plot and source code
        """
        ymin, ymax = self.get_ybound()

        # We need to strip away the units for comparison with
        # non-unitized bounds
        yy = self.convert_yunits( y )
        scaley = (yy<ymin) or (yy>ymax)

        trans = mtransforms.blended_transform_factory(
            self.transAxes, self.transData)
        l = mlines.Line2D([xmin,xmax], [y,y], transform=trans, **kwargs)
        l.x_isdata = False
        self.add_line(l)
        self.autoscale_view(scalex=False, scaley=scaley)
        return l
    axhline.__doc__ = cbook.dedent(axhline.__doc__) % martist.kwdocd

    def axvline(self, x=0, ymin=0, ymax=1, **kwargs):
        """
        call signature::

          axvline(x=0, ymin=0, ymax=1, **kwargs)

        Axis Vertical Line

        Draw a vertical line at *x* from *ymin* to *ymax*.  With the
        default values of *ymin* = 0 and *ymax* = 1, this line will
        always span the vertical extent of the axes, regardless of the
        ylim settings, even if you change them, eg. with the
        :meth:`set_ylim` command.  That is, the vertical extent is in
        axes coords: 0=bottom, 0.5=middle, 1.0=top but the *x* location
        is in data coordinates.

        Return value is the :class:`~matplotlib.lines.Line2D`
        instance.  kwargs are the same as kwargs to plot, and can be
        used to control the line properties.  Eg.,

        * draw a thick red vline at *x* = 0 that spans the yrange

            >>> axvline(linewidth=4, color='r')

        * draw a default vline at *x* = 1 that spans the yrange

            >>> axvline(x=1)

        * draw a default vline at *x* = .5 that spans the middle half of
          the yrange

            >>> axvline(x=.5, ymin=0.25, ymax=0.75)

        Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:

        %(Line2D)s

        .. seealso::
            :meth:`axhspan`:
                for example plot and source code
        """
        xmin, xmax = self.get_xbound()

        # We need to strip away the units for comparison with
        # non-unitized bounds
        xx = self.convert_xunits( x )
        scalex = (xx<xmin) or (xx>xmax)

        trans = mtransforms.blended_transform_factory(
            self.transData, self.transAxes)
        l = mlines.Line2D([x,x], [ymin,ymax] , transform=trans, **kwargs)
        l.y_isdata = False
        self.add_line(l)
        self.autoscale_view(scalex=scalex, scaley=False)
        return l
    axvline.__doc__ = cbook.dedent(axvline.__doc__) % martist.kwdocd

    def axhspan(self, ymin, ymax, xmin=0, xmax=1, **kwargs):
        """
        call signature::

          axhspan(ymin, ymax, xmin=0, xmax=1, **kwargs)

        Axis Horizontal Span.

        *y* coords are in data units and *x* coords are in axes (relative
        0-1) units.

        Draw a horizontal span (rectangle) from *ymin* to *ymax*.
        With the default values of *xmin* = 0 and *xmax* = 1, this
        always spans the xrange, regardless of the xlim settings, even
        if you change them, eg. with the :meth:`set_xlim` command.
        That is, the horizontal extent is in axes coords: 0=left,
        0.5=middle, 1.0=right but the *y* location is in data
        coordinates.

        Return value is a :class:`matplotlib.patches.Polygon`
        instance.

        Examples:

        * draw a gray rectangle from *y* = 0.25-0.75 that spans the
          horizontal extent of the axes

            >>> axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)

        Valid kwargs are :class:`~matplotlib.patches.Polygon` properties:

        %(Polygon)s

        **Example:**

        .. plot:: mpl_examples/pylab_examples/axhspan_demo.py
        """
        trans = mtransforms.blended_transform_factory(
            self.transAxes, self.transData)

        # process the unit information
        self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )

        # first we need to strip away the units
        xmin, xmax = self.convert_xunits( [xmin, xmax] )
        ymin, ymax = self.convert_yunits( [ymin, ymax] )

        verts = (xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)
        p = mpatches.Polygon(verts, **kwargs)
        p.set_transform(trans)
        p.x_isdata = False
        self.add_patch(p)
        return p
    axhspan.__doc__ = cbook.dedent(axhspan.__doc__) % martist.kwdocd

    def axvspan(self, xmin, xmax, ymin=0, ymax=1, **kwargs):
        """
        call signature::

          axvspan(xmin, xmax, ymin=0, ymax=1, **kwargs)

        Axis Vertical Span.

        *x* coords are in data units and *y* coords are in axes (relative
        0-1) units.

        Draw a vertical span (rectangle) from *xmin* to *xmax*.  With
        the default values of *ymin* = 0 and *ymax* = 1, this always
        spans the yrange, regardless of the ylim settings, even if you
        change them, eg. with the :meth:`set_ylim` command.  That is,
        the vertical extent is in axes coords: 0=bottom, 0.5=middle,
        1.0=top but the *x* location is in data coordinates.

        Return value is the :class:`matplotlib.patches.Polygon`
        instance.

        Examples:

        * draw a vertical green translucent rectangle from x=1.25 to 1.55 that
          spans the yrange of the axes

            >>> axvspan(1.25, 1.55, facecolor='g', alpha=0.5)

        Valid kwargs are :class:`~matplotlib.patches.Polygon`
        properties:

        %(Polygon)s

        .. seealso::
            :meth:`axhspan`:
                for example plot and source code
        """
        trans = mtransforms.blended_transform_factory(
            self.transData, self.transAxes)

        # process the unit information
        self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )

        # first we need to strip away the units
        xmin, xmax = self.convert_xunits( [xmin, xmax] )
        ymin, ymax = self.convert_yunits( [ymin, ymax] )

        verts = [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]
        p = mpatches.Polygon(verts, **kwargs)
        p.set_transform(trans)
        p.y_isdata = False
        self.add_patch(p)
        return p
    axvspan.__doc__ = cbook.dedent(axvspan.__doc__) % martist.kwdocd

    def hlines(self, y, xmin, xmax, colors='k', linestyles='solid',
               label='', **kwargs):
        """
        call signature::

          hlines(y, xmin, xmax, colors='k', linestyles='solid', **kwargs)

        Plot horizontal lines at each *y* from *xmin* to *xmax*.

        Returns the :class:`~matplotlib.collections.LineCollection`
        that was added.

        Required arguments:

          *y*:
            a 1-D numpy array or iterable.

          *xmin* and *xmax*:
            can be scalars or ``len(x)`` numpy arrays.  If they are
            scalars, then the respective values are constant, else the
            widths of the lines are determined by *xmin* and *xmax*.

        Optional keyword arguments:

          *colors*:
            a line collections color argument, either a single color
            or a ``len(y)`` list of colors

          *linestyles*:
            [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]

        **Example:**

        ..
plot:: mpl_examples/pylab_examples/hline_demo.py """ if kwargs.get('fmt') is not None: raise DeprecationWarning('hlines now uses a ' 'collections.LineCollection and not a ' 'list of Line2D to draw; see API_CHANGES') # We do the conversion first since not all unitized data is uniform y = self.convert_yunits( y ) xmin = self.convert_xunits( xmin ) xmax = self.convert_xunits( xmax ) if not iterable(y): y = [y] if not iterable(xmin): xmin = [xmin] if not iterable(xmax): xmax = [xmax] y = np.asarray(y) xmin = np.asarray(xmin) xmax = np.asarray(xmax) if len(xmin)==1: xmin = np.resize( xmin, y.shape ) if len(xmax)==1: xmax = np.resize( xmax, y.shape ) if len(xmin)!=len(y): raise ValueError, 'xmin and y are unequal sized sequences' if len(xmax)!=len(y): raise ValueError, 'xmax and y are unequal sized sequences' verts = [ ((thisxmin, thisy), (thisxmax, thisy)) for thisxmin, thisxmax, thisy in zip(xmin, xmax, y)] coll = mcoll.LineCollection(verts, colors=colors, linestyles=linestyles, label=label) self.add_collection(coll) coll.update(kwargs) minx = min(xmin.min(), xmax.min()) maxx = max(xmin.max(), xmax.max()) miny = y.min() maxy = y.max() corners = (minx, miny), (maxx, maxy) self.update_datalim(corners) self.autoscale_view() return coll hlines.__doc__ = cbook.dedent(hlines.__doc__) def vlines(self, x, ymin, ymax, colors='k', linestyles='solid', label='', **kwargs): """ call signature:: vlines(x, ymin, ymax, color='k', linestyles='solid') Plot vertical lines at each *x* from *ymin* to *ymax*. *ymin* or *ymax* can be scalars or len(*x*) numpy arrays. If they are scalars, then the respective values are constant, else the heights of the lines are determined by *ymin* and *ymax*. *colors* a line collections color args, either a single color or a len(*x*) list of colors *linestyles* one of [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ] Returns the :class:`matplotlib.collections.LineCollection` that was added. kwargs are :class:`~matplotlib.collections.LineCollection` properties: %(LineCollection)s """ if kwargs.get('fmt') is not None: raise DeprecationWarning('vlines now uses a ' 'collections.LineCollection and not a ' 'list of Line2D to draw; see API_CHANGES') self._process_unit_info(xdata=x, ydata=ymin, kwargs=kwargs) # We do the conversion first since not all unitized data is uniform x = self.convert_xunits( x ) ymin = self.convert_yunits( ymin ) ymax = self.convert_yunits( ymax ) if not iterable(x): x = [x] if not iterable(ymin): ymin = [ymin] if not iterable(ymax): ymax = [ymax] x = np.asarray(x) ymin = np.asarray(ymin) ymax = np.asarray(ymax) if len(ymin)==1: ymin = np.resize( ymin, x.shape ) if len(ymax)==1: ymax = np.resize( ymax, x.shape ) if len(ymin)!=len(x): raise ValueError, 'ymin and x are unequal sized sequences' if len(ymax)!=len(x): raise ValueError, 'ymax and x are unequal sized sequences' Y = np.array([ymin, ymax]).T verts = [ ((thisx, thisymin), (thisx, thisymax)) for thisx, (thisymin, thisymax) in zip(x,Y)] #print 'creating line collection' coll = mcoll.LineCollection(verts, colors=colors, linestyles=linestyles, label=label) self.add_collection(coll) coll.update(kwargs) minx = min( x ) maxx = max( x ) miny = min( min(ymin), min(ymax) ) maxy = max( max(ymin), max(ymax) ) corners = (minx, miny), (maxx, maxy) self.update_datalim(corners) self.autoscale_view() return coll vlines.__doc__ = cbook.dedent(vlines.__doc__) % martist.kwdocd #### Basic plotting def plot(self, *args, **kwargs): """ Plot lines and/or markers to the :class:`~matplotlib.axes.Axes`. 
*args* is a variable length argument, allowing
        for multiple *x*, *y* pairs with an optional format string.  For
        example, each of the following is legal::

            plot(x, y)         # plot x and y using default line style and color
            plot(x, y, 'bo')   # plot x and y using blue circle markers
            plot(y)            # plot y using x as index array 0..N-1
            plot(y, 'r+')      # ditto, but with red plusses

        If *x* and/or *y* is 2-dimensional, then the corresponding columns
        will be plotted.

        An arbitrary number of *x*, *y*, *fmt* groups can be
        specified, as in::

            a.plot(x1, y1, 'g^', x2, y2, 'g-')

        Return value is a list of lines that were added.

        The following format string characters are accepted to control
        the line style or marker:

        ================    ===============================
        character           description
        ================    ===============================
        '-'                 solid line style
        '--'                dashed line style
        '-.'                dash-dot line style
        ':'                 dotted line style
        '.'                 point marker
        ','                 pixel marker
        'o'                 circle marker
        'v'                 triangle_down marker
        '^'                 triangle_up marker
        '<'                 triangle_left marker
        '>'                 triangle_right marker
        '1'                 tri_down marker
        '2'                 tri_up marker
        '3'                 tri_left marker
        '4'                 tri_right marker
        's'                 square marker
        'p'                 pentagon marker
        '*'                 star marker
        'h'                 hexagon1 marker
        'H'                 hexagon2 marker
        '+'                 plus marker
        'x'                 x marker
        'D'                 diamond marker
        'd'                 thin_diamond marker
        '|'                 vline marker
        '_'                 hline marker
        ================    ===============================

        The following color abbreviations are supported:

        ==========  ========
        character   color
        ==========  ========
        'b'         blue
        'g'         green
        'r'         red
        'c'         cyan
        'm'         magenta
        'y'         yellow
        'k'         black
        'w'         white
        ==========  ========

        In addition, you can specify colors in many weird and
        wonderful ways, including full names (``'green'``), hex
        strings (``'#008000'``), RGB or RGBA tuples (``(0,1,0,1)``) or
        grayscale intensities as a string (``'0.8'``).  Of these, the
        string specifications can be used in place of a ``fmt`` group,
        but the tuple forms can be used only as ``kwargs``.

        Line styles and colors are combined in a single format string, as in
        ``'bo'`` for blue circles.

        The *kwargs* can be used to set line properties (any property that has
        a ``set_*`` method).  You can use this to set a line label (for auto
        legends), linewidth, antialiasing, marker face color, etc.  Here is an
        example::

            plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2)
            plot([1,2,3], [1,4,9], 'rs',  label='line 2')
            axis([0, 4, 0, 10])
            legend()

        If you make multiple lines with one plot command, the kwargs
        apply to all those lines, e.g.::

            plot(x1, y1, x2, y2, antialiased=False)

        Neither line will be antialiased.

        You do not need to use format strings, which are just
        abbreviations.  All of the line properties can be controlled
        by keyword arguments.  For example, you can set the color,
        marker, linestyle, and markercolor with::

            plot(x, y, color='green', linestyle='dashed', marker='o',
                 markerfacecolor='blue', markersize=12).

        See :class:`~matplotlib.lines.Line2D` for details.

        The kwargs are :class:`~matplotlib.lines.Line2D` properties:

        %(Line2D)s

        kwargs *scalex* and *scaley*, if defined, are passed on to
        :meth:`~matplotlib.axes.Axes.autoscale_view` to determine
        whether the *x* and *y* axes are autoscaled; the default is
        *True*.
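
        For example, a sketch of adding a line without rescaling the y-axis
        (``ax``, ``x`` and ``y2`` are hypothetical)::

            ax.plot(x, y2, scaley=False)  # y-limits stay fixed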
""" scalex = kwargs.pop( 'scalex', True) scaley = kwargs.pop( 'scaley', True) if not self._hold: self.cla() lines = [] for line in self._get_lines(*args, **kwargs): self.add_line(line) lines.append(line) self.autoscale_view(scalex=scalex, scaley=scaley) return lines plot.__doc__ = cbook.dedent(plot.__doc__) % martist.kwdocd def plot_date(self, x, y, fmt='bo', tz=None, xdate=True, ydate=False, **kwargs): """ call signature:: plot_date(x, y, fmt='bo', tz=None, xdate=True, ydate=False, **kwargs) Similar to the :func:`~matplotlib.pyplot.plot` command, except the *x* or *y* (or both) data is considered to be dates, and the axis is labeled accordingly. *x* and/or *y* can be a sequence of dates represented as float days since 0001-01-01 UTC. Keyword arguments: *fmt*: string The plot format string. *tz*: [ None | timezone string ] The time zone to use in labeling dates. If *None*, defaults to rc value. *xdate*: [ True | False ] If *True*, the *x*-axis will be labeled with dates. *ydate*: [ False | True ] If *True*, the *y*-axis will be labeled with dates. Note if you are using custom date tickers and formatters, it may be necessary to set the formatters/locators after the call to :meth:`plot_date` since :meth:`plot_date` will set the default tick locator to :class:`matplotlib.ticker.AutoDateLocator` (if the tick locator is not already set to a :class:`matplotlib.ticker.DateLocator` instance) and the default tick formatter to :class:`matplotlib.ticker.AutoDateFormatter` (if the tick formatter is not already set to a :class:`matplotlib.ticker.DateFormatter` instance). Valid kwargs are :class:`~matplotlib.lines.Line2D` properties: %(Line2D)s .. seealso:: :mod:`~matplotlib.dates`: for helper functions :func:`~matplotlib.dates.date2num`, :func:`~matplotlib.dates.num2date` and :func:`~matplotlib.dates.drange`: for help on creating the required floating point dates. """ if not self._hold: self.cla() ret = self.plot(x, y, fmt, **kwargs) if xdate: self.xaxis_date(tz) if ydate: self.yaxis_date(tz) self.autoscale_view() return ret plot_date.__doc__ = cbook.dedent(plot_date.__doc__) % martist.kwdocd def loglog(self, *args, **kwargs): """ call signature:: loglog(*args, **kwargs) Make a plot with log scaling on the *x* and *y* axis. :func:`~matplotlib.pyplot.loglog` supports all the keyword arguments of :func:`~matplotlib.pyplot.plot` and :meth:`matplotlib.axes.Axes.set_xscale` / :meth:`matplotlib.axes.Axes.set_yscale`. Notable keyword arguments: *basex*/*basey*: scalar > 1 base of the *x*/*y* logarithm *subsx*/*subsy*: [ None | sequence ] the location of the minor *x*/*y* ticks; *None* defaults to autosubs, which depend on the number of decades in the plot; see :meth:`matplotlib.axes.Axes.set_xscale` / :meth:`matplotlib.axes.Axes.set_yscale` for details The remaining valid kwargs are :class:`~matplotlib.lines.Line2D` properties: %(Line2D)s **Example:** .. plot:: mpl_examples/pylab_examples/log_demo.py """ if not self._hold: self.cla() dx = {'basex': kwargs.pop('basex', 10), 'subsx': kwargs.pop('subsx', None), } dy = {'basey': kwargs.pop('basey', 10), 'subsy': kwargs.pop('subsy', None), } self.set_xscale('log', **dx) self.set_yscale('log', **dy) b = self._hold self._hold = True # we've already processed the hold l = self.plot(*args, **kwargs) self._hold = b # restore the hold return l loglog.__doc__ = cbook.dedent(loglog.__doc__) % martist.kwdocd def semilogx(self, *args, **kwargs): """ call signature:: semilogx(*args, **kwargs) Make a plot with log scaling on the *x* axis. 
:func:`semilogx` supports all the keyword arguments of :func:`~matplotlib.pyplot.plot` and :meth:`matplotlib.axes.Axes.set_xscale`. Notable keyword arguments: *basex*: scalar > 1 base of the *x* logarithm *subsx*: [ None | sequence ] The location of the minor xticks; *None* defaults to autosubs, which depend on the number of decades in the plot; see :meth:`~matplotlib.axes.Axes.set_xscale` for details. The remaining valid kwargs are :class:`~matplotlib.lines.Line2D` properties: %(Line2D)s .. seealso:: :meth:`loglog`: For example code and figure """ if not self._hold: self.cla() d = {'basex': kwargs.pop( 'basex', 10), 'subsx': kwargs.pop( 'subsx', None), } self.set_xscale('log', **d) b = self._hold self._hold = True # we've already processed the hold l = self.plot(*args, **kwargs) self._hold = b # restore the hold return l semilogx.__doc__ = cbook.dedent(semilogx.__doc__) % martist.kwdocd def semilogy(self, *args, **kwargs): """ call signature:: semilogy(*args, **kwargs) Make a plot with log scaling on the *y* axis. :func:`semilogy` supports all the keyword arguments of :func:`~matplotlib.pylab.plot` and :meth:`matplotlib.axes.Axes.set_yscale`. Notable keyword arguments: *basey*: scalar > 1 Base of the *y* logarithm *subsy*: [ None | sequence ] The location of the minor yticks; *None* defaults to autosubs, which depend on the number of decades in the plot; see :meth:`~matplotlib.axes.Axes.set_yscale` for details. The remaining valid kwargs are :class:`~matplotlib.lines.Line2D` properties: %(Line2D)s .. seealso:: :meth:`loglog`: For example code and figure """ if not self._hold: self.cla() d = {'basey': kwargs.pop('basey', 10), 'subsy': kwargs.pop('subsy', None), } self.set_yscale('log', **d) b = self._hold self._hold = True # we've already processed the hold l = self.plot(*args, **kwargs) self._hold = b # restore the hold return l semilogy.__doc__ = cbook.dedent(semilogy.__doc__) % martist.kwdocd def acorr(self, x, **kwargs): """ call signature:: acorr(x, normed=False, detrend=mlab.detrend_none, usevlines=False, maxlags=None, **kwargs) Plot the autocorrelation of *x*. If *normed* = *True*, normalize the data by the autocorrelation at 0-th lag. *x* is detrended by the *detrend* callable (default no normalization). Data are plotted as ``plot(lags, c, **kwargs)`` Return value is a tuple (*lags*, *c*, *line*) where: - *lags* are a length 2*maxlags+1 lag vector - *c* is the 2*maxlags+1 auto correlation vector - *line* is a :class:`~matplotlib.lines.Line2D` instance returned by :meth:`plot` The default *linestyle* is None and the default *marker* is ``'o'``, though these can be overridden with keyword args. The cross correlation is performed with :func:`numpy.correlate` with *mode* = 2. If *usevlines* is *True*, :meth:`~matplotlib.axes.Axes.vlines` rather than :meth:`~matplotlib.axes.Axes.plot` is used to draw vertical lines from the origin to the acorr. Otherwise, the plot style is determined by the kwargs, which are :class:`~matplotlib.lines.Line2D` properties. *maxlags* is a positive integer detailing the number of lags to show. The default value of *None* will return all :math:`2 \mathrm{len}(x) - 1` lags. The return value is a tuple (*lags*, *c*, *linecol*, *b*) where - *linecol* is the :class:`~matplotlib.collections.LineCollection` - *b* is the *x*-axis. .. seealso:: :meth:`~matplotlib.axes.Axes.plot` or :meth:`~matplotlib.axes.Axes.vlines`: For documentation on valid kwargs. **Example:** :func:`~matplotlib.pyplot.xcorr` above, and :func:`~matplotlib.pyplot.acorr` below. **Example:** .. 
plot:: mpl_examples/pylab_examples/xcorr_demo.py """ return self.xcorr(x, x, **kwargs) acorr.__doc__ = cbook.dedent(acorr.__doc__) % martist.kwdocd def xcorr(self, x, y, normed=False, detrend=mlab.detrend_none, usevlines=False, maxlags=None, **kwargs): """ call signature:: xcorr(x, y, normed=False, detrend=mlab.detrend_none, usevlines=False, **kwargs): Plot the cross correlation between *x* and *y*. If *normed* = *True*, normalize the data by the cross correlation at 0-th lag. *x* and y are detrended by the *detrend* callable (default no normalization). *x* and *y* must be equal length. Data are plotted as ``plot(lags, c, **kwargs)`` Return value is a tuple (*lags*, *c*, *line*) where: - *lags* are a length ``2*maxlags+1`` lag vector - *c* is the ``2*maxlags+1`` auto correlation vector - *line* is a :class:`~matplotlib.lines.Line2D` instance returned by :func:`~matplotlib.pyplot.plot`. The default *linestyle* is *None* and the default *marker* is 'o', though these can be overridden with keyword args. The cross correlation is performed with :func:`numpy.correlate` with *mode* = 2. If *usevlines* is *True*: :func:`~matplotlib.pyplot.vlines` rather than :func:`~matplotlib.pyplot.plot` is used to draw vertical lines from the origin to the xcorr. Otherwise the plotstyle is determined by the kwargs, which are :class:`~matplotlib.lines.Line2D` properties. The return value is a tuple (*lags*, *c*, *linecol*, *b*) where *linecol* is the :class:`matplotlib.collections.LineCollection` instance and *b* is the *x*-axis. *maxlags* is a positive integer detailing the number of lags to show. The default value of *None* will return all ``(2*len(x)-1)`` lags. **Example:** :func:`~matplotlib.pyplot.xcorr` above, and :func:`~matplotlib.pyplot.acorr` below. **Example:** .. plot:: mpl_examples/pylab_examples/xcorr_demo.py """ Nx = len(x) if Nx!=len(y): raise ValueError('x and y must be equal length') x = detrend(np.asarray(x)) y = detrend(np.asarray(y)) c = np.correlate(x, y, mode=2) if normed: c/= np.sqrt(np.dot(x,x) * np.dot(y,y)) if maxlags is None: maxlags = Nx - 1 if maxlags >= Nx or maxlags < 1: raise ValueError('maglags must be None or strictly ' 'positive < %d'%Nx) lags = np.arange(-maxlags,maxlags+1) c = c[Nx-1-maxlags:Nx+maxlags] if usevlines: a = self.vlines(lags, [0], c, **kwargs) b = self.axhline(**kwargs) else: kwargs.setdefault('marker', 'o') kwargs.setdefault('linestyle', 'None') a, = self.plot(lags, c, **kwargs) b = None return lags, c, a, b xcorr.__doc__ = cbook.dedent(xcorr.__doc__) % martist.kwdocd def legend(self, *args, **kwargs): """ call signature:: legend(*args, **kwargs) Place a legend on the current axes at location *loc*. Labels are a sequence of strings and *loc* can be a string or an integer specifying the legend location. To make a legend with existing lines:: legend() :meth:`legend` by itself will try and build a legend using the label property of the lines/patches/collections. You can set the label of a line by doing:: plot(x, y, label='my data') or:: line.set_label('my data'). If label is set to '_nolegend_', the item will not be shown in legend. 
To automatically generate the legend from labels:: legend( ('label1', 'label2', 'label3') ) To make a legend for a list of lines and labels:: legend( (line1, line2, line3), ('label1', 'label2', 'label3') ) To make a legend at a given location, using a location argument:: legend( ('label1', 'label2', 'label3'), loc='upper left') or:: legend( (line1, line2, line3), ('label1', 'label2', 'label3'), loc=2) The location codes are =============== ============= Location String Location Code =============== ============= 'best' 0 'upper right' 1 'upper left' 2 'lower left' 3 'lower right' 4 'right' 5 'center left' 6 'center right' 7 'lower center' 8 'upper center' 9 'center' 10 =============== ============= If none of these locations are suitable, loc can be a 2-tuple giving x,y in axes coords, ie:: loc = 0, 1 # left top loc = 0.5, 0.5 # center Keyword arguments: *isaxes*: [ True | False ] Indicates that this is an axes legend *numpoints*: integer The number of points in the legend line, default is 4 *prop*: [ None | FontProperties ] A :class:`matplotlib.font_manager.FontProperties` instance, or *None* to use rc settings. *pad*: [ None | scalar ] The fractional whitespace inside the legend border, between 0 and 1. If *None*, use rc settings. *markerscale*: [ None | scalar ] The relative size of legend markers vs. original. If *None*, use rc settings. *shadow*: [ None | False | True ] If *True*, draw a shadow behind the legend. If *None*, use rc settings. *labelsep*: [ None | scalar ] The vertical space between the legend entries. If *None*, use rc settings. *handlelen*: [ None | scalar ] The length of the legend lines. If *None*, use rc settings. *handletextsep*: [ None | scalar ] The space between the legend line and legend text. If *None*, use rc settings. *axespad*: [ None | scalar ] The border between the axes and legend edge. If *None*, use rc settings. **Example:** .. plot:: mpl_examples/api/legend_demo.py """ def get_handles(): handles = self.lines[:] handles.extend(self.patches) handles.extend([c for c in self.collections if isinstance(c, mcoll.LineCollection)]) handles.extend([c for c in self.collections if isinstance(c, mcoll.RegularPolyCollection)]) return handles if len(args)==0: handles = [] labels = [] for handle in get_handles(): label = handle.get_label() if (label is not None and label != '' and not label.startswith('_')): handles.append(handle) labels.append(label) if len(handles) == 0: warnings.warn("No labeled objects found. " "Use label='...' kwarg on individual plots.") return None elif len(args)==1: # LABELS labels = args[0] handles = [h for h, label in zip(get_handles(), labels)] elif len(args)==2: if is_string_like(args[1]) or isinstance(args[1], int): # LABELS, LOC labels, loc = args handles = [h for h, label in zip(get_handles(), labels)] kwargs['loc'] = loc else: # LINES, LABELS handles, labels = args elif len(args)==3: # LINES, LABELS, LOC handles, labels, loc = args kwargs['loc'] = loc else: raise TypeError('Invalid arguments to legend') handles = cbook.flatten(handles) self.legend_ = mlegend.Legend(self, handles, labels, **kwargs) return self.legend_ #### Specialized plotting def step(self, x, y, *args, **kwargs): ''' call signature:: step(x, y, *args, **kwargs) Make a step plot. Additional keyword args to :func:`step` are the same as those for :func:`~matplotlib.pyplot.plot`. *x* and *y* must be 1-D sequences, and it is assumed, but not checked, that *x* is uniformly increasing.
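For example, a minimal usage sketch (the data are made up, the pylab namespace is assumed, and the *where* keyword is described below)::

      import numpy as np
      x = np.arange(8)
      y = x**2
      step(x, y, where='post')    # each level y[i] held until x[i+1]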
Keyword arguments: *where*: [ 'pre' | 'post' | 'mid' ] If 'pre', the interval from x[i] to x[i+1] has level y[i] If 'post', that interval has level y[i+1] If 'mid', the jumps in *y* occur half-way between the *x*-values. ''' where = kwargs.pop('where', 'pre') if where not in ('pre', 'post', 'mid'): raise ValueError("'where' argument to step must be " "'pre', 'post' or 'mid'") kwargs['linestyle'] = 'steps-' + where return self.plot(x, y, *args, **kwargs) def bar(self, left, height, width=0.8, bottom=None, color=None, edgecolor=None, linewidth=None, yerr=None, xerr=None, ecolor=None, capsize=3, align='edge', orientation='vertical', log=False, **kwargs ): """ call signature:: bar(left, height, width=0.8, bottom=0, color=None, edgecolor=None, linewidth=None, yerr=None, xerr=None, ecolor=None, capsize=3, align='edge', orientation='vertical', log=False) Make a bar plot with rectangles bounded by: *left*, *left* + *width*, *bottom*, *bottom* + *height* (left, right, bottom and top edges) *left*, *height*, *width*, and *bottom* can be either scalars or sequences Return value is a list of :class:`matplotlib.patches.Rectangle` instances. Required arguments: ======== =============================================== Argument Description ======== =============================================== *left* the x coordinates of the left sides of the bars *height* the heights of the bars ======== =============================================== Optional keyword arguments: =============== ========================================== Keyword Description =============== ========================================== *width* the widths of the bars *bottom* the y coordinates of the bottom edges of the bars *color* the colors of the bars *edgecolor* the colors of the bar edges *linewidth* width of bar edges; None means use default linewidth; 0 means don't draw edges. *xerr* if not None, will be used to generate errorbars on the bar chart *yerr* if not None, will be used to generate errorbars on the bar chart *ecolor* specifies the color of any errorbar *capsize* (default 3) determines the length in points of the error bar caps *align* 'edge' (default) | 'center' *orientation* 'vertical' | 'horizontal' *log* [False|True] False (default) leaves the orientation axis as-is; True sets it to log scale =============== ========================================== For vertical bars, *align* = 'edge' aligns bars by their left edges in left, while *align* = 'center' interprets these values as the *x* coordinates of the bar centers. For horizontal bars, *align* = 'edge' aligns bars by their bottom edges in bottom, while *align* = 'center' interprets these values as the *y* coordinates of the bar centers. The optional arguments *color*, *edgecolor*, *linewidth*, *xerr*, and *yerr* can be either scalars or sequences of length equal to the number of bars. This enables you to use bar as the basis for stacked bar charts, or candlestick plots. Other optional kwargs: %(Rectangle)s **Example:** A stacked bar chart. .. 
plot:: mpl_examples/pylab_examples/bar_stacked.py """ if not self._hold: self.cla() label = kwargs.pop('label', '') def make_iterable(x): if not iterable(x): return [x] else: return x # make them safe to take len() of _left = left left = make_iterable(left) height = make_iterable(height) width = make_iterable(width) _bottom = bottom bottom = make_iterable(bottom) linewidth = make_iterable(linewidth) adjust_ylim = False adjust_xlim = False if orientation == 'vertical': self._process_unit_info(xdata=left, ydata=height, kwargs=kwargs) if log: self.set_yscale('log') # size width and bottom according to length of left if _bottom is None: if self.get_yscale() == 'log': bottom = [1e-100] adjust_ylim = True else: bottom = [0] nbars = len(left) if len(width) == 1: width *= nbars if len(bottom) == 1: bottom *= nbars elif orientation == 'horizontal': self._process_unit_info(xdata=width, ydata=bottom, kwargs=kwargs) if log: self.set_xscale('log') # size left and height according to length of bottom if _left is None: if self.get_xscale() == 'log': left = [1e-100] adjust_xlim = True else: left = [0] nbars = len(bottom) if len(left) == 1: left *= nbars if len(height) == 1: height *= nbars else: raise ValueError, 'invalid orientation: %s' % orientation # do not convert to array here as unit info is lost #left = np.asarray(left) #height = np.asarray(height) #width = np.asarray(width) #bottom = np.asarray(bottom) if len(linewidth) < nbars: linewidth *= nbars if color is None: color = [None] * nbars else: color = list(mcolors.colorConverter.to_rgba_array(color)) if len(color) < nbars: color *= nbars if edgecolor is None: edgecolor = [None] * nbars else: edgecolor = list(mcolors.colorConverter.to_rgba_array(edgecolor)) if len(edgecolor) < nbars: edgecolor *= nbars if yerr is not None: if not iterable(yerr): yerr = [yerr]*nbars if xerr is not None: if not iterable(xerr): xerr = [xerr]*nbars # FIXME: convert the following to proper input validation # raising ValueError; don't use assert for this. assert len(left)==nbars, "argument 'left' must be %d or scalar" % nbars assert len(height)==nbars, ("argument 'height' must be %d or scalar" % nbars) assert len(width)==nbars, ("argument 'width' must be %d or scalar" % nbars) assert len(bottom)==nbars, ("argument 'bottom' must be %d or scalar" % nbars) if yerr is not None and len(yerr)!=nbars: raise ValueError( "bar() argument 'yerr' must be len(%s) or scalar" % nbars) if xerr is not None and len(xerr)!=nbars: raise ValueError( "bar() argument 'xerr' must be len(%s) or scalar" % nbars) patches = [] # lets do some conversions now since some types cannot be # subtracted uniformly if self.xaxis is not None: xconv = self.xaxis.converter if xconv is not None: units = self.xaxis.get_units() left = xconv.convert( left, units ) width = xconv.convert( width, units ) if self.yaxis is not None: yconv = self.yaxis.converter if yconv is not None : units = self.yaxis.get_units() bottom = yconv.convert( bottom, units ) height = yconv.convert( height, units ) if align == 'edge': pass elif align == 'center': if orientation == 'vertical': left = [left[i] - width[i]/2. for i in xrange(len(left))] elif orientation == 'horizontal': bottom = [bottom[i] - height[i]/2. 
for i in xrange(len(bottom))] else: raise ValueError, 'invalid alignment: %s' % align args = zip(left, bottom, width, height, color, edgecolor, linewidth) for l, b, w, h, c, e, lw in args: if h<0: b += h h = abs(h) if w<0: l += w w = abs(w) r = mpatches.Rectangle( xy=(l, b), width=w, height=h, facecolor=c, edgecolor=e, linewidth=lw, label=label ) label = '_nolegend_' r.update(kwargs) #print r.get_label(), label, 'label' in kwargs self.add_patch(r) patches.append(r) holdstate = self._hold self.hold(True) # ensure hold is on before plotting errorbars if xerr is not None or yerr is not None: if orientation == 'vertical': # using list comps rather than arrays to preserve unit info x = [l+0.5*w for l, w in zip(left, width)] y = [b+h for b,h in zip(bottom, height)] elif orientation == 'horizontal': # using list comps rather than arrays to preserve unit info x = [l+w for l,w in zip(left, width)] y = [b+0.5*h for b,h in zip(bottom, height)] self.errorbar( x, y, yerr=yerr, xerr=xerr, fmt=None, ecolor=ecolor, capsize=capsize) self.hold(holdstate) # restore previous hold state if adjust_xlim: xmin, xmax = self.dataLim.intervalx xmin = np.amin(width[width!=0]) # filter out the 0 width rects if xerr is not None: xmin = xmin - np.amax(xerr) xmin = max(xmin*0.9, 1e-100) self.dataLim.intervalx = (xmin, xmax) if adjust_ylim: ymin, ymax = self.dataLim.intervaly ymin = np.amin(height[height!=0]) # filter out the 0 height rects if yerr is not None: ymin = ymin - np.amax(yerr) ymin = max(ymin*0.9, 1e-100) self.dataLim.intervaly = (ymin, ymax) self.autoscale_view() return patches bar.__doc__ = cbook.dedent(bar.__doc__) % martist.kwdocd def barh(self, bottom, width, height=0.8, left=None, **kwargs): """ call signature:: barh(bottom, width, height=0.8, left=0, **kwargs) Make a horizontal bar plot with rectangles bounded by: *left*, *left* + *width*, *bottom*, *bottom* + *height* (left, right, bottom and top edges) *bottom*, *width*, *height*, and *left* can be either scalars or sequences Return value is a list of :class:`matplotlib.patches.Rectangle` instances. Required arguments: ======== ====================================================== Argument Description ======== ====================================================== *bottom* the vertical positions of the bottom edges of the bars *width* the lengths of the bars ======== ====================================================== Optional keyword arguments: =============== ========================================== Keyword Description =============== ========================================== *height* the heights (thicknesses) of the bars *left* the x coordinates of the left edges of the bars *color* the colors of the bars *edgecolor* the colors of the bar edges *linewidth* width of bar edges; None means use default linewidth; 0 means don't draw edges. *xerr* if not None, will be used to generate errorbars on the bar chart *yerr* if not None, will be used to generate errorbars on the bar chart *ecolor* specifies the color of any errorbar *capsize* (default 3) determines the length in points of the error bar caps *align* 'edge' (default) | 'center' *log* [False|True] False (default) leaves the horizontal axis as-is; True sets it to log scale =============== ========================================== Setting *align* = 'edge' aligns bars by their bottom edges in bottom, while *align* = 'center' interprets these values as the *y* coordinates of the bar centers. 
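For example, an illustrative sketch (made-up data, pylab namespace assumed) of a horizontal bar chart with error bars::

      import numpy as np
      y_pos = np.arange(3)                     # one slot per bar
      sizes = [10, 8, 6]                       # hypothetical bar lengths
      barh(y_pos, sizes, xerr=[1, 2, 1], align='center')
      yticks(y_pos, ('Tom', 'Dick', 'Harry'))  # label the bar centers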
The optional arguments *color*, *edgecolor*, *linewidth*, *xerr*, and *yerr* can be either scalars or sequences of length equal to the number of bars. This enables you to use barh as the basis for stacked bar charts, or candlestick plots. Other optional kwargs: %(Rectangle)s """ patches = self.bar(left=left, height=height, width=width, bottom=bottom, orientation='horizontal', **kwargs) return patches barh.__doc__ = cbook.dedent(barh.__doc__) % martist.kwdocd def broken_barh(self, xranges, yrange, **kwargs): """ call signature:: broken_barh(xranges, yrange, **kwargs) A collection of horizontal bars spanning *yrange* with a sequence of *xranges*. Required arguments: ========= ============================== Argument Description ========= ============================== *xranges* sequence of (*xmin*, *xwidth*) *yrange* sequence of (*ymin*, *ywidth*) ========= ============================== kwargs are :class:`matplotlib.collections.BrokenBarHCollection` properties: %(BrokenBarHCollection)s these can either be a single argument, ie:: facecolors = 'black' or a sequence of arguments for the various bars, ie:: facecolors = ('black', 'red', 'green') **Example:** .. plot:: mpl_examples/pylab_examples/broken_barh.py """ col = mcoll.BrokenBarHCollection(xranges, yrange, **kwargs) self.add_collection(col, autolim=True) self.autoscale_view() return col broken_barh.__doc__ = cbook.dedent(broken_barh.__doc__) % martist.kwdocd def stem(self, x, y, linefmt='b-', markerfmt='bo', basefmt='r-'): """ call signature:: stem(x, y, linefmt='b-', markerfmt='bo', basefmt='r-') A stem plot plots vertical lines (using *linefmt*) at each *x* location from the baseline to *y*, and places a marker there using *markerfmt*. A horizontal line at 0 is plotted using *basefmt*. Return value is a tuple (*markerline*, *stemlines*, *baseline*). .. seealso:: `this document`__ for details :file:`examples/pylab_examples/stem_plot.py`: for a demo __ http://www.mathworks.com/access/helpdesk/help/techdoc/ref/stem.html """ remember_hold = self._hold if not self._hold: self.cla() self.hold(True) markerline, = self.plot(x, y, markerfmt) stemlines = [] for thisx, thisy in zip(x, y): l, = self.plot([thisx,thisx], [0, thisy], linefmt) stemlines.append(l) baseline, = self.plot([np.amin(x), np.amax(x)], [0,0], basefmt) self.hold(remember_hold) return markerline, stemlines, baseline def pie(self, x, explode=None, labels=None, colors=None, autopct=None, pctdistance=0.6, shadow=False, labeldistance=1.1): r""" call signature:: pie(x, explode=None, labels=None, colors=('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'), autopct=None, pctdistance=0.6, labeldistance=1.1, shadow=False) Make a pie chart of array *x*. The fractional area of each wedge is given by x/sum(x). If sum(x) <= 1, then the values of x give the fractional area directly and the array will not be normalized. Keyword arguments: *explode*: [ None | len(x) sequence ] If not *None*, is a len(*x*) array which specifies the fraction of the radius with which to offset each wedge. *colors*: [ None | color sequence ] A sequence of matplotlib color args through which the pie chart will cycle. *labels*: [ None | len(x) sequence of strings ] A sequence of strings providing the labels for each wedge *autopct*: [ None | format string | format function ] If not *None*, is a string or function used to label the wedges with their numeric value. The label will be placed inside the wedge. If it is a format string, the label will be ``fmt%pct``. If it is a function, it will be called with the wedge's percentage of the whole pie as its argument.
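For example, ``autopct='%1.1f%%'`` labels each wedge with its percentage to one decimal place; an equivalent function form (an illustrative sketch) is::

      def fmt_pct(pct):
          # *pct* is this wedge's share of the pie, in percent
          return '%.1f%%' % pct

      pie(x, autopct=fmt_pct)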
*pctdistance*: scalar The ratio between the center of each pie slice and the start of the text generated by *autopct*. Ignored if *autopct* is *None*; default is 0.6. *labeldistance*: scalar The radial distance at which the pie labels are drawn *shadow*: [ False | True ] Draw a shadow beneath the pie. The pie chart will probably look best if the figure and axes are square. Eg.:: figure(figsize=(8,8)) ax = axes([0.1, 0.1, 0.8, 0.8]) Return value: If *autopct* is None, return the tuple (*patches*, *texts*): - *patches* is a sequence of :class:`matplotlib.patches.Wedge` instances - *texts* is a list of the label :class:`matplotlib.text.Text` instances. If *autopct* is not *None*, return the tuple (*patches*, *texts*, *autotexts*), where *patches* and *texts* are as above, and *autotexts* is a list of :class:`~matplotlib.text.Text` instances for the numeric labels. """ self.set_frame_on(False) x = np.asarray(x).astype(np.float32) sx = float(x.sum()) if sx>1: x = np.divide(x,sx) if labels is None: labels = ['']*len(x) if explode is None: explode = [0]*len(x) assert(len(x)==len(labels)) assert(len(x)==len(explode)) if colors is None: colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w') center = 0,0 radius = 1 theta1 = 0 i = 0 texts = [] slices = [] autotexts = [] for frac, label, expl in cbook.safezip(x,labels, explode): x, y = center theta2 = theta1 + frac thetam = 2*math.pi*0.5*(theta1+theta2) x += expl*math.cos(thetam) y += expl*math.sin(thetam) w = mpatches.Wedge((x,y), radius, 360.*theta1, 360.*theta2, facecolor=colors[i%len(colors)]) slices.append(w) self.add_patch(w) w.set_label(label) if shadow: # make sure to add a shadow after the call to # add_patch so the figure and transform props will be # set shad = mpatches.Shadow(w, -0.02, -0.02, #props={'facecolor':w.get_facecolor()} ) shad.set_zorder(0.9*w.get_zorder()) self.add_patch(shad) xt = x + labeldistance*radius*math.cos(thetam) yt = y + labeldistance*radius*math.sin(thetam) label_alignment = xt > 0 and 'left' or 'right' t = self.text(xt, yt, label, size=rcParams['xtick.labelsize'], horizontalalignment=label_alignment, verticalalignment='center') texts.append(t) if autopct is not None: xt = x + pctdistance*radius*math.cos(thetam) yt = y + pctdistance*radius*math.sin(thetam) if is_string_like(autopct): s = autopct%(100.*frac) elif callable(autopct): s = autopct(100.*frac) else: raise TypeError( 'autopct must be callable or a format string') t = self.text(xt, yt, s, horizontalalignment='center', verticalalignment='center') autotexts.append(t) theta1 = theta2 i += 1 self.set_xlim((-1.25, 1.25)) self.set_ylim((-1.25, 1.25)) self.set_xticks([]) self.set_yticks([]) if autopct is None: return slices, texts else: return slices, texts, autotexts def errorbar(self, x, y, yerr=None, xerr=None, fmt='-', ecolor=None, elinewidth=None, capsize=3, barsabove=False, lolims=False, uplims=False, xlolims=False, xuplims=False, **kwargs): """ call signature:: errorbar(x, y, yerr=None, xerr=None, fmt='-', ecolor=None, elinewidth=None, capsize=3, barsabove=False, lolims=False, uplims=False, xlolims=False, xuplims=False) Plot *x* versus *y* with error deltas in *yerr* and *xerr*. Vertical errorbars are plotted if *yerr* is not *None*. Horizontal errorbars are plotted if *xerr* is not *None*. *x*, *y*, *xerr*, and *yerr* can all be scalars, which plots a single error bar at *x*, *y*. Optional keyword arguments: *xerr*/*yerr*: [ scalar | N, Nx1, Nx2 array-like ] If a scalar number, len(N) array-like object, or an Nx1 array-like object, errorbars are drawn +/- value. 
If a 2xN array-like, the first row gives the lower errors and the second row the upper errors, i.e. errorbars are drawn at -row1 and +row2. *fmt*: '-' The plot format symbol for *y*. If *fmt* is *None*, just plot the errorbars with no line symbols. This can be useful for creating a bar plot with errorbars. *ecolor*: [ None | mpl color ] a matplotlib color arg which gives the color of the errorbar lines; if *None*, use the marker color. *elinewidth*: scalar the linewidth of the errorbar lines. If *None*, use the linewidth. *capsize*: scalar the size of the error bar caps in points *barsabove*: [ True | False ] if *True*, will plot the errorbars above the plot symbols. Default is below. *lolims*/*uplims*/*xlolims*/*xuplims*: [ False | True ] These arguments can be used to indicate that a value gives only upper/lower limits. In that case a caret symbol is used to indicate this. lims-arguments may be of the same type as *xerr* and *yerr*. All other keyword arguments are passed on to the plot command for the markers, so you can add additional key=value pairs to control the errorbar markers. For example, this code makes big red squares with thick green edges:: x,y,yerr = rand(3,10) errorbar(x, y, yerr, marker='s', mfc='red', mec='green', ms=20, mew=4) where *mfc*, *mec*, *ms* and *mew* are aliases for the longer property names, *markerfacecolor*, *markeredgecolor*, *markersize* and *markeredgewidth*. Valid kwargs for the marker properties are: %(Line2D)s Return value is a length 3 tuple. The first element is the :class:`~matplotlib.lines.Line2D` instance for the *y* symbol lines. The second element is a list of error bar cap lines, the third element is a list of :class:`~matplotlib.collections.LineCollection` instances for the horizontal and vertical error ranges. **Example:** .. plot:: mpl_examples/pylab_examples/errorbar_demo.py """ self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs) if not self._hold: self.cla() # make sure all the args are iterable; use lists not arrays to # preserve units if not iterable(x): x = [x] if not iterable(y): y = [y] if xerr is not None: if not iterable(xerr): xerr = [xerr]*len(x) if yerr is not None: if not iterable(yerr): yerr = [yerr]*len(y) l0 = None if barsabove and fmt is not None: l0, = self.plot(x,y,fmt,**kwargs) barcols = [] caplines = [] lines_kw = {'label':'_nolegend_'} if elinewidth: lines_kw['linewidth'] = elinewidth else: if 'linewidth' in kwargs: lines_kw['linewidth']=kwargs['linewidth'] if 'lw' in kwargs: lines_kw['lw']=kwargs['lw'] if 'transform' in kwargs: lines_kw['transform'] = kwargs['transform'] # arrays fine here, they are booleans and hence not units if not iterable(lolims): lolims = np.asarray([lolims]*len(x), bool) else: lolims = np.asarray(lolims, bool) if not iterable(uplims): uplims = np.array([uplims]*len(x), bool) else: uplims = np.asarray(uplims, bool) if not iterable(xlolims): xlolims = np.array([xlolims]*len(x), bool) else: xlolims = np.asarray(xlolims, bool) if not iterable(xuplims): xuplims = np.array([xuplims]*len(x), bool) else: xuplims = np.asarray(xuplims, bool) def xywhere(xs, ys, mask): """ return xs[mask], ys[mask] where mask is True but xs and ys are not arrays """ assert len(xs)==len(ys) assert len(xs)==len(mask) xs = [thisx for thisx, b in zip(xs, mask) if b] ys = [thisy for thisy, b in zip(ys, mask) if b] return xs, ys if capsize > 0: plot_kw = { 'ms':2*capsize, 'label':'_nolegend_'} if 'markeredgewidth' in kwargs: plot_kw['markeredgewidth']=kwargs['markeredgewidth'] if 'mew' in kwargs: plot_kw['mew']=kwargs['mew'] if 'transform' in kwargs: plot_kw['transform'] = 
kwargs['transform'] if xerr is not None: if (iterable(xerr) and len(xerr)==2 and iterable(xerr[0]) and iterable(xerr[1])): # using list comps rather than arrays to preserve units left = [thisx-thiserr for (thisx, thiserr) in cbook.safezip(x,xerr[0])] right = [thisx+thiserr for (thisx, thiserr) in cbook.safezip(x,xerr[1])] else: # using list comps rather than arrays to preserve units left = [thisx-thiserr for (thisx, thiserr) in cbook.safezip(x,xerr)] right = [thisx+thiserr for (thisx, thiserr) in cbook.safezip(x,xerr)] barcols.append( self.hlines(y, left, right, **lines_kw ) ) if capsize > 0: if xlolims.any(): # can't use numpy logical indexing since left and # y are lists leftlo, ylo = xywhere(left, y, xlolims) caplines.extend( self.plot(leftlo, ylo, ls='None', marker=mlines.CARETLEFT, **plot_kw) ) xlolims = ~xlolims leftlo, ylo = xywhere(left, y, xlolims) caplines.extend( self.plot(leftlo, ylo, 'k|', **plot_kw) ) else: caplines.extend( self.plot(left, y, 'k|', **plot_kw) ) if xuplims.any(): rightup, yup = xywhere(right, y, xuplims) caplines.extend( self.plot(rightup, yup, ls='None', marker=mlines.CARETRIGHT, **plot_kw) ) xuplims = ~xuplims rightup, yup = xywhere(right, y, xuplims) caplines.extend( self.plot(rightup, yup, 'k|', **plot_kw) ) else: caplines.extend( self.plot(right, y, 'k|', **plot_kw) ) if yerr is not None: if (iterable(yerr) and len(yerr)==2 and iterable(yerr[0]) and iterable(yerr[1])): # using list comps rather than arrays to preserve units lower = [thisy-thiserr for (thisy, thiserr) in cbook.safezip(y,yerr[0])] upper = [thisy+thiserr for (thisy, thiserr) in cbook.safezip(y,yerr[1])] else: # using list comps rather than arrays to preserve units lower = [thisy-thiserr for (thisy, thiserr) in cbook.safezip(y,yerr)] upper = [thisy+thiserr for (thisy, thiserr) in cbook.safezip(y,yerr)] barcols.append( self.vlines(x, lower, upper, **lines_kw) ) if capsize > 0: if lolims.any(): xlo, lowerlo = xywhere(x, lower, lolims) caplines.extend( self.plot(xlo, lowerlo, ls='None', marker=mlines.CARETDOWN, **plot_kw) ) lolims = ~lolims xlo, lowerlo = xywhere(x, lower, lolims) caplines.extend( self.plot(xlo, lowerlo, 'k_', **plot_kw) ) else: caplines.extend( self.plot(x, lower, 'k_', **plot_kw) ) if uplims.any(): xup, upperup = xywhere(x, upper, uplims) caplines.extend( self.plot(xup, upperup, ls='None', marker=mlines.CARETUP, **plot_kw) ) uplims = ~uplims xup, upperup = xywhere(x, upper, uplims) caplines.extend( self.plot(xup, upperup, 'k_', **plot_kw) ) else: caplines.extend( self.plot(x, upper, 'k_', **plot_kw) ) if not barsabove and fmt is not None: l0, = self.plot(x,y,fmt,**kwargs) if ecolor is None: if l0 is None: ecolor = self._get_lines._get_next_cycle_color() else: ecolor = l0.get_color() for l in barcols: l.set_color(ecolor) for l in caplines: l.set_color(ecolor) self.autoscale_view() return (l0, caplines, barcols) errorbar.__doc__ = cbook.dedent(errorbar.__doc__) % martist.kwdocd def boxplot(self, x, notch=0, sym='b+', vert=1, whis=1.5, positions=None, widths=None): """ call signature:: boxplot(x, notch=0, sym='+', vert=1, whis=1.5, positions=None, widths=None) Make a box and whisker plot for each column of *x* or each vector in sequence *x*. The box extends from the lower to upper quartile values of the data, with a line at the median. The whiskers extend from the box to show the range of the data. Flier points are those past the end of the whiskers. - *notch* = 0 (default) produces a rectangular box plot. 
- *notch* = 1 will produce a notched box plot *sym* (default 'b+') is the default symbol for flier points. Enter an empty string ('') if you don't want to show fliers. - *vert* = 1 (default) makes the boxes vertical. - *vert* = 0 makes horizontal boxes. This seems goofy, but that's how Matlab did it. *whis* (default 1.5) defines the length of the whiskers as a function of the inner quartile range. They extend to the most extreme data point within ( ``whis*(75%-25%)`` ) data range. *positions* (default 1,2,...,n) sets the horizontal positions of the boxes. The ticks and limits are automatically set to match the positions. *widths* is either a scalar or a vector and sets the width of each box. The default is 0.5, or ``0.15*(distance between extreme positions)`` if that is smaller. *x* is an array or a sequence of vectors. Returns a dictionary mapping each component of the boxplot to a list of the :class:`matplotlib.lines.Line2D` instances created. **Example:** .. plot:: pyplots/boxplot_demo.py """ if not self._hold: self.cla() holdStatus = self._hold whiskers, caps, boxes, medians, fliers = [], [], [], [], [] # convert x to a list of vectors if hasattr(x, 'shape'): if len(x.shape) == 1: if hasattr(x[0], 'shape'): x = list(x) else: x = [x,] elif len(x.shape) == 2: nr, nc = x.shape if nr == 1: x = [x] elif nc == 1: x = [x.ravel()] else: x = [x[:,i] for i in xrange(nc)] else: raise ValueError, "input x can have no more than 2 dimensions" if not hasattr(x[0], '__len__'): x = [x] col = len(x) # get some plot info if positions is None: positions = range(1, col + 1) if widths is None: distance = max(positions) - min(positions) widths = min(0.15*max(distance,1.0), 0.5) if isinstance(widths, float) or isinstance(widths, int): widths = np.ones((col,), float) * widths # loop through columns, adding each to plot self.hold(True) for i,pos in enumerate(positions): d = np.ravel(x[i]) row = len(d) # get median and quartiles q1, med, q3 = mlab.prctile(d,[25,50,75]) # get high extreme iq = q3 - q1 hi_val = q3 + whis*iq wisk_hi = np.compress( d <= hi_val , d ) if len(wisk_hi) == 0: wisk_hi = q3 else: wisk_hi = max(wisk_hi) # get low extreme lo_val = q1 - whis*iq wisk_lo = np.compress( d >= lo_val, d ) if len(wisk_lo) == 0: wisk_lo = q1 else: wisk_lo = min(wisk_lo) # get fliers - if we are showing them flier_hi = [] flier_lo = [] flier_hi_x = [] flier_lo_x = [] if len(sym) != 0: flier_hi = np.compress( d > wisk_hi, d ) flier_lo = np.compress( d < wisk_lo, d ) flier_hi_x = np.ones(flier_hi.shape[0]) * pos flier_lo_x = np.ones(flier_lo.shape[0]) * pos # get x locations for fliers, whisker, whisker cap and box sides box_x_min = pos - widths[i] * 0.5 box_x_max = pos + widths[i] * 0.5 wisk_x = np.ones(2) * pos cap_x_min = pos - widths[i] * 0.25 cap_x_max = pos + widths[i] * 0.25 cap_x = [cap_x_min, cap_x_max] # get y location for median med_y = [med, med] # calculate 'regular' plot if notch == 0: # make our box vectors box_x = [box_x_min, box_x_max, box_x_max, box_x_min, box_x_min ] box_y = [q1, q1, q3, q3, q1 ] # make our median line vectors med_x = [box_x_min, box_x_max] # calculate 'notch' plot else: notch_max = med + 1.57*iq/np.sqrt(row) notch_min = med - 1.57*iq/np.sqrt(row) if notch_max > q3: notch_max = q3 if notch_min < q1: notch_min = q1 # make our notched box vectors box_x = [box_x_min, box_x_max, box_x_max, cap_x_max, box_x_max, box_x_max, box_x_min, box_x_min, cap_x_min, box_x_min, box_x_min ] box_y = [q1, q1, notch_min, med, notch_max, q3, q3, notch_max, med, notch_min, q1] # make our median line 
vectors med_x = [cap_x_min, cap_x_max] med_y = [med, med] # vertical or horizontal plot? if vert: def doplot(*args): return self.plot(*args) else: def doplot(*args): shuffled = [] for i in xrange(0, len(args), 3): shuffled.extend([args[i+1], args[i], args[i+2]]) return self.plot(*shuffled) whiskers.extend(doplot(wisk_x, [q1, wisk_lo], 'b--', wisk_x, [q3, wisk_hi], 'b--')) caps.extend(doplot(cap_x, [wisk_hi, wisk_hi], 'k-', cap_x, [wisk_lo, wisk_lo], 'k-')) boxes.extend(doplot(box_x, box_y, 'b-')) medians.extend(doplot(med_x, med_y, 'r-')) fliers.extend(doplot(flier_hi_x, flier_hi, sym, flier_lo_x, flier_lo, sym)) # fix our axes/ticks up a little if 1 == vert: setticks, setlim = self.set_xticks, self.set_xlim else: setticks, setlim = self.set_yticks, self.set_ylim newlimits = min(positions)-0.5, max(positions)+0.5 setlim(newlimits) setticks(positions) # reset hold status self.hold(holdStatus) return dict(whiskers=whiskers, caps=caps, boxes=boxes, medians=medians, fliers=fliers) def scatter(self, x, y, s=20, c='b', marker='o', cmap=None, norm=None, vmin=None, vmax=None, alpha=1.0, linewidths=None, faceted=True, verts=None, **kwargs): """ call signatures:: scatter(x, y, s=20, c='b', marker='o', cmap=None, norm=None, vmin=None, vmax=None, alpha=1.0, linewidths=None, verts=None, **kwargs) Make a scatter plot of *x* versus *y*, where *x*, *y* are 1-D sequences of the same length, *N*. Keyword arguments: *s*: size in points^2. It is a scalar or an array of the same length as *x* and *y*. *c*: a color. *c* can be a single color format string, or a sequence of color specifications of length *N*, or a sequence of *N* numbers to be mapped to colors using the *cmap* and *norm* specified via kwargs (see below). Note that *c* should not be a single numeric RGB or RGBA sequence because that is indistinguishable from an array of values to be colormapped. *c* can be a 2-D array in which the rows are RGB or RGBA, however. *marker*: can be one of: ===== ============== Value Description ===== ============== 's' square 'o' circle '^' triangle up '>' triangle right 'v' triangle down '<' triangle left 'd' diamond 'p' pentagram 'h' hexagon '8' octagon '+' plus 'x' cross ===== ============== The marker can also be a tuple (*numsides*, *style*, *angle*), which will create a custom, regular symbol. *numsides*: the number of sides *style*: the style of the regular symbol: ===== ============================================= Value Description ===== ============================================= 0 a regular polygon 1 a star-like symbol 2 an asterisk 3 a circle (*numsides* and *angle* is ignored) ===== ============================================= *angle*: the angle of rotation of the symbol Finally, *marker* can be (*verts*, 0): *verts* is a sequence of (*x*, *y*) vertices for a custom scatter symbol. Alternatively, use the kwarg combination *marker* = *None*, *verts* = *verts*. Any or all of *x*, *y*, *s*, and *c* may be masked arrays, in which case all masks will be combined and only unmasked points will be plotted. Other keyword arguments: the color mapping and normalization arguments will be used only if *c* is an array of floats. *cmap*: [ None | Colormap ] A :class:`matplotlib.colors.Colormap` instance. If *None*, defaults to rc ``image.cmap``. *cmap* is only used if *c* is an array of floats. *norm*: [ None | Normalize ] A :class:`matplotlib.colors.Normalize` instance is used to scale luminance data to 0, 1. If *None*, use the default :func:`normalize`. *norm* is only used if *c* is an array of floats. 
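For example, an illustrative sketch (made-up arrays; the pylab namespace is assumed, where ``cm`` is :mod:`matplotlib.cm`) mapping a float array through a colormap::

      import numpy as np
      x = np.random.rand(50)
      y = np.random.rand(50)
      heat = np.random.rand(50)          # floats to be colormapped
      scatter(x, y, c=heat, cmap=cm.hot)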
*vmin*/*vmax*: *vmin* and *vmax* are used in conjunction with norm to normalize luminance data. If either are None, the min and max of the color array *C* is used. Note if you pass a *norm* instance, your settings for *vmin* and *vmax* will be ignored. *alpha*: 0 <= scalar <= 1 The alpha value for the patches *linewidths*: [ None | scalar | sequence ] If *None*, defaults to (lines.linewidth,). Note that this is a tuple, and if you set the linewidths argument you must set it as a sequence of floats, as required by :class:`~matplotlib.collections.RegularPolyCollection`. Optional kwargs control the :class:`~matplotlib.collections.Collection` properties; in particular: *edgecolors*: 'none' to plot faces with no outlines *facecolors*: 'none' to plot unfilled outlines Here are the standard descriptions of all the :class:`~matplotlib.collections.Collection` kwargs: %(Collection)s A :class:`~matplotlib.collections.Collection` instance is returned. """ if not self._hold: self.cla() syms = { # a dict from symbol to (numsides, angle) 's' : (4,math.pi/4.0,0), # square 'o' : (20,3,0), # circle '^' : (3,0,0), # triangle up '>' : (3,math.pi/2.0,0), # triangle right 'v' : (3,math.pi,0), # triangle down '<' : (3,3*math.pi/2.0,0), # triangle left 'd' : (4,0,0), # diamond 'p' : (5,0,0), # pentagram 'h' : (6,0,0), # hexagon '8' : (8,0,0), # octagon '+' : (4,0,2), # plus 'x' : (4,math.pi/4.0,2) # cross } self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs) x, y, s, c = cbook.delete_masked_points(x, y, s, c) if is_string_like(c) or cbook.is_sequence_of_strings(c): colors = mcolors.colorConverter.to_rgba_array(c, alpha) else: sh = np.shape(c) # The inherent ambiguity is resolved in favor of color # mapping, not interpretation as rgb or rgba: if len(sh) == 1 and sh[0] == len(x): colors = None # use cmap, norm after collection is created else: colors = mcolors.colorConverter.to_rgba_array(c, alpha) if not iterable(s): scales = (s,) else: scales = s if faceted: edgecolors = None else: edgecolors = 'none' warnings.warn( '''replace "faceted=False" with "edgecolors='none'"''', DeprecationWarning) #2008/04/18 sym = None symstyle = 0 # to be API compatible if marker is None and not (verts is None): marker = (verts, 0) verts = None if is_string_like(marker): # the standard way to define symbols using a string character sym = syms.get(marker) if sym is None and verts is None: raise ValueError('Unknown marker symbol to scatter') numsides, rotation, symstyle = syms[marker] elif iterable(marker): # accept marker to be: # (numsides, style, [angle]) # or # (verts[], style, [angle]) if len(marker)<2 or len(marker)>3: raise ValueError('Cannot create markersymbol from marker') if cbook.is_numlike(marker[0]): # (numsides, style, [angle]) if len(marker)==2: numsides, rotation = marker[0], 0. 
elif len(marker)==3: numsides, rotation = marker[0], marker[2] sym = True if marker[1] in (1,2): symstyle = marker[1] else: verts = np.asarray(marker[0]) if sym is not None: if symstyle==0: collection = mcoll.RegularPolyCollection( numsides, rotation, scales, facecolors = colors, edgecolors = edgecolors, linewidths = linewidths, offsets = zip(x,y), transOffset = self.transData, ) elif symstyle==1: collection = mcoll.StarPolygonCollection( numsides, rotation, scales, facecolors = colors, edgecolors = edgecolors, linewidths = linewidths, offsets = zip(x,y), transOffset = self.transData, ) elif symstyle==2: collection = mcoll.AsteriskPolygonCollection( numsides, rotation, scales, facecolors = colors, edgecolors = edgecolors, linewidths = linewidths, offsets = zip(x,y), transOffset = self.transData, ) elif symstyle==3: collection = mcoll.CircleCollection( scales, facecolors = colors, edgecolors = edgecolors, linewidths = linewidths, offsets = zip(x,y), transOffset = self.transData, ) else: rescale = np.sqrt(max(verts[:,0]**2+verts[:,1]**2)) verts /= rescale collection = mcoll.PolyCollection( (verts,), scales, facecolors = colors, edgecolors = edgecolors, linewidths = linewidths, offsets = zip(x,y), transOffset = self.transData, ) collection.set_transform(mtransforms.IdentityTransform()) collection.set_alpha(alpha) collection.update(kwargs) if colors is None: if norm is not None: assert(isinstance(norm, mcolors.Normalize)) if cmap is not None: assert(isinstance(cmap, mcolors.Colormap)) collection.set_array(np.asarray(c)) collection.set_cmap(cmap) collection.set_norm(norm) if vmin is not None or vmax is not None: collection.set_clim(vmin, vmax) else: collection.autoscale_None() temp_x = x temp_y = y minx = np.amin(temp_x) maxx = np.amax(temp_x) miny = np.amin(temp_y) maxy = np.amax(temp_y) w = maxx-minx h = maxy-miny # the pad is a little hack to deal with the fact that we don't # want to transform all the symbols whose scales are in points # to data coords to get the exact bounding box for efficiency # reasons. It can be done right if this is deemed important padx, pady = 0.05*w, 0.05*h corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady) self.update_datalim( corners) self.autoscale_view() # add the collection last self.add_collection(collection) return collection scatter.__doc__ = cbook.dedent(scatter.__doc__) % martist.kwdocd def hexbin(self, x, y, C = None, gridsize = 100, bins = None, xscale = 'linear', yscale = 'linear', cmap=None, norm=None, vmin=None, vmax=None, alpha=1.0, linewidths=None, edgecolors='none', reduce_C_function = np.mean, **kwargs): """ call signature:: hexbin(x, y, C = None, gridsize = 100, bins = None, xscale = 'linear', yscale = 'linear', cmap=None, norm=None, vmin=None, vmax=None, alpha=1.0, linewidths=None, edgecolors='none', reduce_C_function = np.mean, **kwargs) Make a hexagonal binning plot of *x* versus *y*, where *x*, *y* are 1-D sequences of the same length, *N*. If *C* is None (the default), this is a histogram of the number of occurrences of the observations at (x[i],y[i]). If *C* is specified, it specifies values at the coordinate (x[i],y[i]). These values are accumulated for each hexagonal bin and then reduced according to *reduce_C_function*, which defaults to numpy's mean function (np.mean). (If *C* is specified, it must also be a 1-D sequence of the same length as *x* and *y*.) *x*, *y* and/or *C* may be masked arrays, in which case only unmasked points will be plotted.
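For example, an illustrative sketch (made-up data, pylab namespace assumed) that reduces a third variable *z* with the median instead of the mean in each bin::

      import numpy as np
      n = 1000
      x = np.random.standard_normal(n)
      y = np.random.standard_normal(n)
      z = x * y                       # hypothetical third variable
      hexbin(x, y, C=z, gridsize=30, reduce_C_function=np.median)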
Optional keyword arguments: *gridsize*: [ 100 | integer ] The number of hexagons in the *x*-direction, default is 100. The corresponding number of hexagons in the *y*-direction is chosen such that the hexagons are approximately regular. Alternatively, gridsize can be a tuple with two elements specifying the number of hexagons in the *x*-direction and the *y*-direction. *bins*: [ None | 'log' | integer | sequence ] If *None*, no binning is applied; the color of each hexagon directly corresponds to its count value. If 'log', use a logarithmic scale for the color map. Internally, :math:`\log_{10}(i+1)` is used to determine the hexagon color. If an integer, divide the counts into the specified number of bins, and color the hexagons accordingly. If a sequence of values, the values of the lower bound of the bins to be used. *xscale*: [ 'linear' | 'log' ] Use a linear or log10 scale on the horizontal axis. *yscale*: [ 'linear' | 'log' ] Use a linear or log10 scale on the vertical axis. Other keyword arguments controlling color mapping and normalization arguments: *cmap*: [ None | Colormap ] a :class:`matplotlib.cm.Colormap` instance. If *None*, defaults to rc ``image.cmap``. *norm*: [ None | Normalize ] :class:`matplotlib.colors.Normalize` instance is used to scale luminance data to 0,1. *vmin*/*vmax*: scalar *vmin* and *vmax* are used in conjunction with *norm* to normalize luminance data. If either are *None*, the min and max of the color array *C* is used. Note if you pass a norm instance, your settings for *vmin* and *vmax* will be ignored. *alpha*: scalar the alpha value for the patches *linewidths*: [ None | scalar ] If *None*, defaults to rc lines.linewidth. Note that this is a tuple, and if you set the linewidths argument you must set it as a sequence of floats, as required by :class:`~matplotlib.collections.RegularPolyCollection`. Other keyword arguments controlling the Collection properties: *edgecolors*: [ None | mpl color | color sequence ] If 'none', draws the edges in the same color as the fill color. This is the default, as it avoids unsightly unpainted pixels between the hexagons. If *None*, draws the outlines in the default color. If a matplotlib color arg or sequence of rgba tuples, draws the outlines in the specified color. Here are the standard descriptions of all the :class:`~matplotlib.collections.Collection` kwargs: %(Collection)s The return value is a :class:`~matplotlib.collections.PolyCollection` instance; use :meth:`~matplotlib.collections.PolyCollection.get_array` on this :class:`~matplotlib.collections.PolyCollection` to get the counts in each hexagon. **Example:** .. plot:: mpl_examples/pylab_examples/hexbin_demo.py """ if not self._hold: self.cla() self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs) x, y, C = cbook.delete_masked_points(x, y, C) # Set the size of the hexagon grid if iterable(gridsize): nx, ny = gridsize else: nx = gridsize ny = int(nx/math.sqrt(3)) # Count the number of data in each hexagon x = np.array(x, float) y = np.array(y, float) if xscale=='log': x = np.log10(x) if yscale=='log': y = np.log10(y) xmin = np.amin(x) xmax = np.amax(x) ymin = np.amin(y) ymax = np.amax(y) # In the x-direction, the hexagons exactly cover the region from # xmin to xmax. Need some padding to avoid roundoff errors.
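# Note added for clarity (explanatory commentary, not part of the
# original matplotlib source): after the rescaling below, one hexagon
# spans one unit in x and y, and two overlapping rectangular lattices
# of candidate centers are used -- lattice1 on integer coordinates
# (nx1 x ny1 points) and lattice2 offset by (0.5, 0.5) (nx2 x ny2
# points). For each data point, d1 and d2 are squared distances to the
# nearest center in each lattice (the factor 3.0 on the y term
# accounts for the hexagon aspect ratio), and bdist selects the closer
# lattice. Binning into the two lattices jointly is therefore
# equivalent to binning on a single hexagonal grid.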
padding = 1.e-9 * (xmax - xmin) xmin -= padding xmax += padding sx = (xmax-xmin) / nx sy = (ymax-ymin) / ny x = (x-xmin)/sx y = (y-ymin)/sy ix1 = np.round(x).astype(int) iy1 = np.round(y).astype(int) ix2 = np.floor(x).astype(int) iy2 = np.floor(y).astype(int) nx1 = nx + 1 ny1 = ny + 1 nx2 = nx ny2 = ny n = nx1*ny1+nx2*ny2 d1 = (x-ix1)**2 + 3.0 * (y-iy1)**2 d2 = (x-ix2-0.5)**2 + 3.0 * (y-iy2-0.5)**2 bdist = (d1<d2) if C is None: accum = np.zeros(n) # Create appropriate views into "accum" array. lattice1 = accum[:nx1*ny1] lattice2 = accum[nx1*ny1:] lattice1.shape = (nx1,ny1) lattice2.shape = (nx2,ny2) for i in xrange(len(x)): if bdist[i]: lattice1[ix1[i], iy1[i]]+=1 else: lattice2[ix2[i], iy2[i]]+=1 else: # create accumulation arrays lattice1 = np.empty((nx1,ny1),dtype=object) for i in xrange(nx1): for j in xrange(ny1): lattice1[i,j] = [] lattice2 = np.empty((nx2,ny2),dtype=object) for i in xrange(nx2): for j in xrange(ny2): lattice2[i,j] = [] for i in xrange(len(x)): if bdist[i]: lattice1[ix1[i], iy1[i]].append( C[i] ) else: lattice2[ix2[i], iy2[i]].append( C[i] ) for i in xrange(nx1): for j in xrange(ny1): vals = lattice1[i,j] if len(vals): lattice1[i,j] = reduce_C_function( vals ) else: lattice1[i,j] = np.nan for i in xrange(nx2): for j in xrange(ny2): vals = lattice2[i,j] if len(vals): lattice2[i,j] = reduce_C_function( vals ) else: lattice2[i,j] = np.nan accum = np.hstack(( lattice1.astype(float).ravel(), lattice2.astype(float).ravel())) good_idxs = ~np.isnan(accum) px = xmin + sx * np.array([ 0.5, 0.5, 0.0, -0.5, -0.5, 0.0]) py = ymin + sy * np.array([-0.5, 0.5, 1.0, 0.5, -0.5, -1.0]) / 3.0 polygons = np.zeros((6, n, 2), float) polygons[:,:nx1*ny1,0] = np.repeat(np.arange(nx1), ny1) polygons[:,:nx1*ny1,1] = np.tile(np.arange(ny1), nx1) polygons[:,nx1*ny1:,0] = np.repeat(np.arange(nx2) + 0.5, ny2) polygons[:,nx1*ny1:,1] = np.tile(np.arange(ny2), nx2) + 0.5 if C is not None: # remove accumulation bins with no data polygons = polygons[:,good_idxs,:] accum = accum[good_idxs] polygons = np.transpose(polygons, axes=[1,0,2]) polygons[:,:,0] *= sx polygons[:,:,1] *= sy polygons[:,:,0] += px polygons[:,:,1] += py if xscale=='log': polygons[:,:,0] = 10**(polygons[:,:,0]) xmin = 10**xmin xmax = 10**xmax self.set_xscale('log') if yscale=='log': polygons[:,:,1] = 10**(polygons[:,:,1]) ymin = 10**ymin ymax = 10**ymax self.set_yscale('log') if edgecolors=='none': edgecolors = 'face' collection = mcoll.PolyCollection( polygons, edgecolors = edgecolors, linewidths = linewidths, transOffset = self.transData, ) # Transform accum if needed if bins=='log': accum = np.log10(accum+1) elif bins!=None: if not iterable(bins): minimum, maximum = min(accum), max(accum) bins-=1 # one less edge than bins bins = minimum + (maximum-minimum)*np.arange(bins)/bins bins = np.sort(bins) accum = bins.searchsorted(accum) if norm is not None: assert(isinstance(norm, mcolors.Normalize)) if cmap is not None: assert(isinstance(cmap, mcolors.Colormap)) collection.set_array(accum) collection.set_cmap(cmap) collection.set_norm(norm) collection.set_alpha(alpha) collection.update(kwargs) if vmin is not None or vmax is not None: collection.set_clim(vmin, vmax) else: collection.autoscale_None() corners = ((xmin, ymin), (xmax, ymax)) self.update_datalim( corners) self.autoscale_view() # add the collection last self.add_collection(collection) return collection hexbin.__doc__ = cbook.dedent(hexbin.__doc__) % martist.kwdocd def arrow(self, x, y, dx, dy, **kwargs): """ call signature:: arrow(x, y, dx, dy, **kwargs) Draws arrow on specified 
axis from (*x*, *y*) to (*x* + *dx*, *y* + *dy*). Optional kwargs control the arrow properties: %(FancyArrow)s **Example:** .. plot:: mpl_examples/pylab_examples/arrow_demo.py """ a = mpatches.FancyArrow(x, y, dx, dy, **kwargs) self.add_artist(a) return a arrow.__doc__ = cbook.dedent(arrow.__doc__) % martist.kwdocd def quiverkey(self, *args, **kw): qk = mquiver.QuiverKey(*args, **kw) self.add_artist(qk) return qk quiverkey.__doc__ = mquiver.QuiverKey.quiverkey_doc def quiver(self, *args, **kw): if not self._hold: self.cla() q = mquiver.Quiver(self, *args, **kw) self.add_collection(q, False) self.update_datalim(q.XY) self.autoscale_view() return q quiver.__doc__ = mquiver.Quiver.quiver_doc def barbs(self, *args, **kw): """ %(barbs_doc)s **Example:** .. plot:: mpl_examples/pylab_examples/barb_demo.py """ if not self._hold: self.cla() b = mquiver.Barbs(self, *args, **kw) self.add_collection(b) self.update_datalim(b.get_offsets()) self.autoscale_view() return b barbs.__doc__ = cbook.dedent(barbs.__doc__) % { 'barbs_doc': mquiver.Barbs.barbs_doc} def fill(self, *args, **kwargs): """ call signature:: fill(*args, **kwargs) Plot filled polygons. *args* is a variable length argument, allowing for multiple *x*, *y* pairs with an optional color format string; see :func:`~matplotlib.pyplot.plot` for details on the argument parsing. For example, to plot a polygon with vertices at *x*, *y* in blue:: ax.fill(x,y, 'b' ) An arbitrary number of *x*, *y*, *color* groups can be specified:: ax.fill(x1, y1, 'g', x2, y2, 'r') Return value is a list of :class:`~matplotlib.patches.Patch` instances that were added. The same color strings that :func:`~matplotlib.pyplot.plot` supports are supported by the fill format string. If you would like to fill below a curve, eg. shade a region between 0 and *y* along *x*, use :meth:`fill_between`. The *closed* kwarg will close the polygon when *True* (default). kwargs control the Polygon properties: %(Polygon)s **Example:** .. plot:: mpl_examples/pylab_examples/fill_demo.py """ if not self._hold: self.cla() patches = [] for poly in self._get_patches_for_fill(*args, **kwargs): self.add_patch( poly ) patches.append( poly ) self.autoscale_view() return patches fill.__doc__ = cbook.dedent(fill.__doc__) % martist.kwdocd def fill_between(self, x, y1, y2=0, where=None, **kwargs): """ call signature:: fill_between(x, y1, y2=0, where=None, **kwargs) Create a :class:`~matplotlib.collections.PolyCollection` filling the regions between *y1* and *y2* where ``where==True`` *x* an N length np array of the x data *y1* an N length scalar or np array of the y data *y2* an N length scalar or np array of the y data *where* if None, default to fill between everywhere. If not None, it is an N length numpy boolean array and the fill will only happen over the regions where ``where==True`` *kwargs* keyword args passed on to the :class:`PolyCollection` kwargs control the Polygon properties: %(PolyCollection)s .. 
plot:: mpl_examples/pylab_examples/fill_between.py """ # Handle united data, such as dates self._process_unit_info(xdata=x, ydata=y1, kwargs=kwargs) self._process_unit_info(ydata=y2) # Convert the arrays so we can work with them x = np.asarray(self.convert_xunits(x)) y1 = np.asarray(self.convert_yunits(y1)) y2 = np.asarray(self.convert_yunits(y2)) if not cbook.iterable(y1): y1 = np.ones_like(x)*y1 if not cbook.iterable(y2): y2 = np.ones_like(x)*y2 if where is None: where = np.ones(len(x), np.bool) where = np.asarray(where) assert( (len(x)==len(y1)) and (len(x)==len(y2)) and len(x)==len(where)) polys = [] for ind0, ind1 in mlab.contiguous_regions(where): theseverts = [] xslice = x[ind0:ind1] y1slice = y1[ind0:ind1] y2slice = y2[ind0:ind1] if not len(xslice): continue N = len(xslice) X = np.zeros((2*N+2, 2), np.float) # the purpose of the next two lines is for when y2 is a # scalar like 0 and we want the fill to go all the way # down to 0 even if none of the y1 sample points do X[0] = xslice[0], y2slice[0] X[N+1] = xslice[-1], y2slice[-1] X[1:N+1,0] = xslice X[1:N+1,1] = y1slice X[N+2:,0] = xslice[::-1] X[N+2:,1] = y2slice[::-1] polys.append(X) collection = mcoll.PolyCollection(polys, **kwargs) # now update the datalim and autoscale XY1 = np.array([x[where], y1[where]]).T XY2 = np.array([x[where], y2[where]]).T self.dataLim.update_from_data_xy(XY1, self.ignore_existing_data_limits, updatex=True, updatey=True) self.dataLim.update_from_data_xy(XY2, self.ignore_existing_data_limits, updatex=False, updatey=True) self.add_collection(collection) self.autoscale_view() return collection fill_between.__doc__ = cbook.dedent(fill_between.__doc__) % martist.kwdocd #### plotting z(x,y): imshow, pcolor and relatives, contour def imshow(self, X, cmap=None, norm=None, aspect=None, interpolation=None, alpha=1.0, vmin=None, vmax=None, origin=None, extent=None, shape=None, filternorm=1, filterrad=4.0, imlim=None, resample=None, url=None, **kwargs): """ call signature:: imshow(X, cmap=None, norm=None, aspect=None, interpolation=None, alpha=1.0, vmin=None, vmax=None, origin=None, extent=None, **kwargs) Display the image in *X* to current axes. *X* may be a float array, a uint8 array or a PIL image. If *X* is an array, *X* can have the following shapes: * MxN -- luminance (grayscale, float array only) * MxNx3 -- RGB (float or uint8 array) * MxNx4 -- RGBA (float or uint8 array) The value for each component of MxNx3 and MxNx4 float arrays should be in the range 0.0 to 1.0; MxN float arrays may be normalised. An :class:`matplotlib.image.AxesImage` instance is returned. Keyword arguments: *cmap*: [ None | Colormap ] A :class:`matplotlib.cm.Colormap` instance, eg. cm.jet. If *None*, default to rc ``image.cmap`` value. *cmap* is ignored when *X* has RGB(A) information *aspect*: [ None | 'auto' | 'equal' | scalar ] If 'auto', changes the image aspect ratio to match that of the axes If 'equal', and *extent* is *None*, changes the axes aspect ratio to match that of the image. If *extent* is not *None*, the axes aspect ratio is changed to match that of the extent. If *None*, default to rc ``image.aspect`` value. *interpolation*: Acceptable values are *None*, 'nearest', 'bilinear', 'bicubic', 'spline16', 'spline36', 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian', 'bessel', 'mitchell', 'sinc', 'lanczos', If *interpolation* is *None*, default to rc ``image.interpolation``. 
See also the *filternorm* and *filterrad* parameters *norm*: [ None | Normalize ] An :class:`matplotlib.colors.Normalize` instance; if *None*, default is ``Normalize()``. This scales luminance -> 0-1 *norm* is only used for an MxN float array. *vmin*/*vmax*: [ None | scalar ] Used to scale a luminance image to 0-1. If either is *None*, the min and max of the luminance values will be used. Note if *norm* is not *None*, the settings for *vmin* and *vmax* will be ignored. *alpha*: scalar The alpha blending value, between 0 (transparent) and 1 (opaque) *origin*: [ None | 'upper' | 'lower' ] Place the [0,0] index of the array in the upper left or lower left corner of the axes. If *None*, default to rc ``image.origin``. *extent*: [ None | scalars (left, right, bottom, top) ] Data values of the axes. The default assigns zero-based row, column indices to the *x*, *y* centers of the pixels. *shape*: [ None | scalars (columns, rows) ] For raw buffer images *filternorm*: A parameter for the antigrain image resize filter. From the antigrain documentation, if *filternorm* = 1, the filter normalizes integer values and corrects the rounding errors. It doesn't do anything with the source floating point values, it corrects only integers according to the rule of 1.0 which means that any sum of pixel weights must be equal to 1.0. So, the filter function must produce a graph of the proper shape. *filterrad*: The filter radius for filters that have a radius parameter, i.e. when interpolation is one of: 'sinc', 'lanczos' or 'blackman' Additional kwargs are :class:`~matplotlib.artist.Artist` properties: %(Artist)s **Example:** .. plot:: mpl_examples/pylab_examples/image_demo.py """ if not self._hold: self.cla() if norm is not None: assert(isinstance(norm, mcolors.Normalize)) if cmap is not None: assert(isinstance(cmap, mcolors.Colormap)) if aspect is None: aspect = rcParams['image.aspect'] self.set_aspect(aspect) im = mimage.AxesImage(self, cmap, norm, interpolation, origin, extent, filternorm=filternorm, filterrad=filterrad, resample=resample, **kwargs) im.set_data(X) im.set_alpha(alpha) self._set_artist_props(im) im.set_clip_path(self.patch) #if norm is None and shape is None: # im.set_clim(vmin, vmax) if vmin is not None or vmax is not None: im.set_clim(vmin, vmax) else: im.autoscale_None() im.set_url(url) xmin, xmax, ymin, ymax = im.get_extent() corners = (xmin, ymin), (xmax, ymax) self.update_datalim(corners) if self._autoscaleon: self.set_xlim((xmin, xmax)) self.set_ylim((ymin, ymax)) self.images.append(im) return im imshow.__doc__ = cbook.dedent(imshow.__doc__) % martist.kwdocd def _pcolorargs(self, funcname, *args): if len(args)==1: C = args[0] numRows, numCols = C.shape X, Y = np.meshgrid(np.arange(numCols+1), np.arange(numRows+1) ) elif len(args)==3: X, Y, C = args else: raise TypeError( 'Illegal arguments to %s; see help(%s)' % (funcname, funcname)) Nx = X.shape[-1] Ny = Y.shape[0] if len(X.shape) != 2 or X.shape[0] == 1: x = X.reshape(1,Nx) X = x.repeat(Ny, axis=0) if len(Y.shape) != 2 or Y.shape[1] == 1: y = Y.reshape(Ny, 1) Y = y.repeat(Nx, axis=1) if X.shape != Y.shape: raise TypeError( 'Incompatible X, Y inputs to %s; see help(%s)' % ( funcname, funcname)) return X, Y, C def pcolor(self, *args, **kwargs): """ call signatures:: pcolor(C, **kwargs) pcolor(X, Y, C, **kwargs) Create a pseudocolor plot of a 2-D array. *C* is the array of color values.
*X* and *Y*, if given, specify the (*x*, *y*) coordinates of the colored quadrilaterals; the quadrilateral for C[i,j] has corners at:: (X[i, j], Y[i, j]), (X[i, j+1], Y[i, j+1]), (X[i+1, j], Y[i+1, j]), (X[i+1, j+1], Y[i+1, j+1]). Ideally the dimensions of *X* and *Y* should be one greater than those of *C*; if the dimensions are the same, then the last row and column of *C* will be ignored. Note that the column index corresponds to the *x*-coordinate, and the row index corresponds to *y*; for details, see the :ref:`Grid Orientation <axes-pcolor-grid-orientation>` section below. If either or both of *X* and *Y* are 1-D arrays or column vectors, they will be expanded as needed into the appropriate 2-D arrays, making a rectangular grid. *X*, *Y* and *C* may be masked arrays. If either C[i, j], or one of the vertices surrounding C[i,j] (*X* or *Y* at [i, j], [i+1, j], [i, j+1],[i+1, j+1]) is masked, nothing is plotted. Keyword arguments: *cmap*: [ None | Colormap ] A :class:`matplotlib.cm.Colormap` instance. If *None*, use rc settings. *norm*: [ None | Normalize ] An :class:`matplotlib.colors.Normalize` instance is used to scale luminance data to 0,1. If *None*, defaults to :func:`normalize`. *vmin*/*vmax*: [ None | scalar ] *vmin* and *vmax* are used in conjunction with *norm* to normalize luminance data. If either are *None*, the min and max of the color array *C* is used. If you pass a *norm* instance, *vmin* and *vmax* will be ignored. *shading*: [ 'flat' | 'faceted' ] If 'faceted', a black grid is drawn around each rectangle; if 'flat', edges are not drawn. Default is 'flat', contrary to Matlab(TM). This kwarg is deprecated; please use 'edgecolors' instead: * shading='flat' -- edgecolors='None' * shading='faceted' -- edgecolors='k' *edgecolors*: [ None | 'None' | color | color sequence] If *None*, the rc setting is used by default. If 'None', edges will not be visible. An mpl color or sequence of colors will set the edge color *alpha*: 0 <= scalar <= 1 the alpha blending value Return value is a :class:`matplotlib.collection.Collection` instance. .. _axes-pcolor-grid-orientation: The grid orientation follows the Matlab(TM) convention: an array *C* with shape (*nrows*, *ncolumns*) is plotted with the column number as *X* and the row number as *Y*, increasing up; hence it is plotted the way the array would be printed, except that the *Y* axis is reversed. That is, *C* is taken as *C*(*y*, *x*). Similarly for :func:`~matplotlib.pyplot.meshgrid`:: x = np.arange(5) y = np.arange(3) X, Y = meshgrid(x,y) is equivalent to:: X = array([[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]]) Y = array([[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 2, 2, 2, 2]]) so if you have:: C = rand( len(x), len(y)) then you need:: pcolor(X, Y, C.T) or:: pcolor(C.T) Matlab :func:`pcolor` always discards the last row and column of *C*, but matplotlib displays the last row and column if *X* and *Y* are not specified, or if *X* and *Y* have one more row and column than *C*. kwargs can be used to control the :class:`~matplotlib.collection.PolyCollection` properties: %(PolyCollection)s """ if not self._hold: self.cla() alpha = kwargs.pop('alpha', 1.0) norm = kwargs.pop('norm', None) cmap = kwargs.pop('cmap', None) vmin = kwargs.pop('vmin', None) vmax = kwargs.pop('vmax', None) shading = kwargs.pop('shading', 'flat') X, Y, C = self._pcolorargs('pcolor', *args) Ny, Nx = X.shape # convert to MA, if necessary. 
C = ma.asarray(C) X = ma.asarray(X) Y = ma.asarray(Y) mask = ma.getmaskarray(X)+ma.getmaskarray(Y) xymask = mask[0:-1,0:-1]+mask[1:,1:]+mask[0:-1,1:]+mask[1:,0:-1] # don't plot if C or any of the surrounding vertices are masked. mask = ma.getmaskarray(C)[0:Ny-1,0:Nx-1]+xymask newaxis = np.newaxis compress = np.compress ravelmask = (mask==0).ravel() X1 = compress(ravelmask, ma.filled(X[0:-1,0:-1]).ravel()) Y1 = compress(ravelmask, ma.filled(Y[0:-1,0:-1]).ravel()) X2 = compress(ravelmask, ma.filled(X[1:,0:-1]).ravel()) Y2 = compress(ravelmask, ma.filled(Y[1:,0:-1]).ravel()) X3 = compress(ravelmask, ma.filled(X[1:,1:]).ravel()) Y3 = compress(ravelmask, ma.filled(Y[1:,1:]).ravel()) X4 = compress(ravelmask, ma.filled(X[0:-1,1:]).ravel()) Y4 = compress(ravelmask, ma.filled(Y[0:-1,1:]).ravel()) npoly = len(X1) xy = np.concatenate((X1[:,newaxis], Y1[:,newaxis], X2[:,newaxis], Y2[:,newaxis], X3[:,newaxis], Y3[:,newaxis], X4[:,newaxis], Y4[:,newaxis], X1[:,newaxis], Y1[:,newaxis]), axis=1) verts = xy.reshape((npoly, 5, 2)) #verts = zip(zip(X1,Y1),zip(X2,Y2),zip(X3,Y3),zip(X4,Y4)) C = compress(ravelmask, ma.filled(C[0:Ny-1,0:Nx-1]).ravel()) if shading == 'faceted': edgecolors = (0,0,0,1), linewidths = (0.25,) else: edgecolors = 'face' linewidths = (1.0,) kwargs.setdefault('edgecolors', edgecolors) kwargs.setdefault('antialiaseds', (0,)) kwargs.setdefault('linewidths', linewidths) collection = mcoll.PolyCollection(verts, **kwargs) collection.set_alpha(alpha) collection.set_array(C) if norm is not None: assert(isinstance(norm, mcolors.Normalize)) if cmap is not None: assert(isinstance(cmap, mcolors.Colormap)) collection.set_cmap(cmap) collection.set_norm(norm) if vmin is not None or vmax is not None: collection.set_clim(vmin, vmax) else: collection.autoscale_None() self.grid(False) x = X.compressed() y = Y.compressed() minx = np.amin(x) maxx = np.amax(x) miny = np.amin(y) maxy = np.amax(y) corners = (minx, miny), (maxx, maxy) self.update_datalim( corners) self.autoscale_view() self.add_collection(collection) return collection pcolor.__doc__ = cbook.dedent(pcolor.__doc__) % martist.kwdocd def pcolormesh(self, *args, **kwargs): """ call signatures:: pcolormesh(C) pcolormesh(X, Y, C) pcolormesh(C, **kwargs) *C* may be a masked array, but *X* and *Y* may not. Masked array support is implemented via *cmap* and *norm*; in contrast, :func:`~matplotlib.pyplot.pcolor` simply does not draw quadrilaterals with masked colors or vertices. Keyword arguments: *cmap*: [ None | Colormap ] A :class:`matplotlib.cm.Colormap` instance. If None, use rc settings. *norm*: [ None | Normalize ] A :class:`matplotlib.colors.Normalize` instance is used to scale luminance data to 0,1. If None, defaults to :func:`normalize`. *vmin*/*vmax*: [ None | scalar ] *vmin* and *vmax* are used in conjunction with *norm* to normalize luminance data. If either are *None*, the min and max of the color array *C* is used. If you pass a *norm* instance, *vmin* and *vmax* will be ignored. *shading*: [ 'flat' | 'faceted' ] If 'faceted', a black grid is drawn around each rectangle; if 'flat', edges are not drawn. Default is 'flat', contrary to Matlab(TM). This kwarg is deprecated; please use 'edgecolors' instead: * shading='flat' -- edgecolors='None' * shading='faceted' -- edgecolors='k' *edgecolors*: [ None | 'None' | color | color sequence] If None, the rc setting is used by default. If 'None', edges will not be visible.
An mpl color or sequence of colors will set the edge color *alpha*: 0 <= scalar <= 1 the alpha blending value Return value is a :class:`matplotlib.collection.QuadMesh` object. kwargs can be used to control the :class:`matplotlib.collections.QuadMesh` properties: %(QuadMesh)s .. seealso:: :func:`~matplotlib.pyplot.pcolor`: For an explanation of the grid orientation and the expansion of 1-D *X* and/or *Y* to 2-D arrays. """ if not self._hold: self.cla() alpha = kwargs.pop('alpha', 1.0) norm = kwargs.pop('norm', None) cmap = kwargs.pop('cmap', None) vmin = kwargs.pop('vmin', None) vmax = kwargs.pop('vmax', None) shading = kwargs.pop('shading', 'flat') edgecolors = kwargs.pop('edgecolors', 'None') antialiased = kwargs.pop('antialiased', False) X, Y, C = self._pcolorargs('pcolormesh', *args) Ny, Nx = X.shape # convert to one dimensional arrays C = ma.ravel(C[0:Ny-1, 0:Nx-1]) # data point in each cell is value at # lower left corner X = X.ravel() Y = Y.ravel() coords = np.zeros(((Nx * Ny), 2), dtype=float) coords[:, 0] = X coords[:, 1] = Y if shading == 'faceted' or edgecolors != 'None': showedges = 1 else: showedges = 0 collection = mcoll.QuadMesh( Nx - 1, Ny - 1, coords, showedges, antialiased=antialiased) # kwargs are not used collection.set_alpha(alpha) collection.set_array(C) if norm is not None: assert(isinstance(norm, mcolors.Normalize)) if cmap is not None: assert(isinstance(cmap, mcolors.Colormap)) collection.set_cmap(cmap) collection.set_norm(norm) if vmin is not None or vmax is not None: collection.set_clim(vmin, vmax) else: collection.autoscale_None() self.grid(False) minx = np.amin(X) maxx = np.amax(X) miny = np.amin(Y) maxy = np.amax(Y) corners = (minx, miny), (maxx, maxy) self.update_datalim( corners) self.autoscale_view() self.add_collection(collection) return collection pcolormesh.__doc__ = cbook.dedent(pcolormesh.__doc__) % martist.kwdocd def pcolorfast(self, *args, **kwargs): """ pseudocolor plot of a 2-D array Experimental; this is a version of pcolor that does not draw lines, that provides the fastest possible rendering with the Agg backend, and that can handle any quadrilateral grid. Call signatures:: pcolor(C, **kwargs) pcolor(xr, yr, C, **kwargs) pcolor(x, y, C, **kwargs) pcolor(X, Y, C, **kwargs) C is the 2D array of color values corresponding to quadrilateral cells. Let (nr, nc) be its shape. C may be a masked array. ``pcolor(C, **kwargs)`` is equivalent to ``pcolor([0,nc], [0,nr], C, **kwargs)`` *xr*, *yr* specify the ranges of *x* and *y* corresponding to the rectangular region bounding *C*. If:: xr = [x0, x1] and:: yr = [y0,y1] then *x* goes from *x0* to *x1* as the second index of *C* goes from 0 to *nc*, etc. (*x0*, *y0*) is the outermost corner of cell (0,0), and (*x1*, *y1*) is the outermost corner of cell (*nr*-1, *nc*-1). All cells are rectangles of the same size. This is the fastest version. *x*, *y* are 1D arrays of length *nc* +1 and *nr* +1, respectively, giving the x and y boundaries of the cells. Hence the cells are rectangular but the grid may be nonuniform. The speed is intermediate. (The grid is checked, and if found to be uniform the fast version is used.) *X* and *Y* are 2D arrays with shape (*nr* +1, *nc* +1) that specify the (x,y) coordinates of the corners of the colored quadrilaterals; the quadrilateral for C[i,j] has corners at (X[i,j],Y[i,j]), (X[i,j+1],Y[i,j+1]), (X[i+1,j],Y[i+1,j]), (X[i+1,j+1],Y[i+1,j+1]). The cells need not be rectangular. This is the most general, but the slowest to render. 
It may produce faster and more compact output using ps, pdf, and svg backends, however. Note that the column index corresponds to the x-coordinate, and the row index corresponds to y; for details, see the "Grid Orientation" section below. Optional keyword arguments: *cmap*: [ None | Colormap ] A cm Colormap instance from cm. If None, use rc settings. *norm*: [ None | Normalize ] An mcolors.Normalize instance is used to scale luminance data to 0,1. If None, defaults to normalize() *vmin*/*vmax*: [ None | scalar ] *vmin* and *vmax* are used in conjunction with norm to normalize luminance data. If either are *None*, the min and max of the color array *C* is used. If you pass a norm instance, *vmin* and *vmax* will be ignored. *alpha*: 0 <= scalar <= 1 the alpha blending value Return value is an image if a regular or rectangular grid is specified, and a QuadMesh collection in the general quadrilateral case. """ if not self._hold: self.cla() alpha = kwargs.pop('alpha', 1.0) norm = kwargs.pop('norm', None) cmap = kwargs.pop('cmap', None) vmin = kwargs.pop('vmin', None) vmax = kwargs.pop('vmax', None) if norm is not None: assert(isinstance(norm, mcolors.Normalize)) if cmap is not None: assert(isinstance(cmap, mcolors.Colormap)) C = args[-1] nr, nc = C.shape if len(args) == 1: style = "image" x = [0, nc] y = [0, nr] elif len(args) == 3: x, y = args[:2] x = np.asarray(x) y = np.asarray(y) if x.ndim == 1 and y.ndim == 1: if x.size == 2 and y.size == 2: style = "image" else: dx = np.diff(x) dy = np.diff(y) if (np.ptp(dx) < 0.01*np.abs(dx.mean()) and np.ptp(dy) < 0.01*np.abs(dy.mean())): style = "image" else: style = "pcolorimage" elif x.ndim == 2 and y.ndim == 2: style = "quadmesh" else: raise TypeError("arguments do not match valid signatures") else: raise TypeError("need 1 argument or 3 arguments") if style == "quadmesh": # convert to one dimensional arrays # This should also be moved to the QuadMesh class C = ma.ravel(C) # data point in each cell is value # at lower left corner X = x.ravel() Y = y.ravel() Nx = nc+1 Ny = nr+1 # The following needs to be cleaned up; the renderer # requires separate contiguous arrays for X and Y, # but the QuadMesh class requires the 2D array. coords = np.empty(((Nx * Ny), 2), np.float64) coords[:, 0] = X coords[:, 1] = Y # The QuadMesh class can also be changed to # handle relevant superclass kwargs; the initializer # should do much more than it does now.
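# (Added note, not in the original source:) at this point `coords` holds the
# flattened (x, y) corner coordinates and `C` the raveled per-cell values;
# the QuadMesh constructed below renders the fully general quadrilateral
# grid case, the slowest of the three styles described in the docstring.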
collection = mcoll.QuadMesh(nc, nr, coords, 0) collection.set_alpha(alpha) collection.set_array(C) collection.set_cmap(cmap) collection.set_norm(norm) self.add_collection(collection) xl, xr, yb, yt = X.min(), X.max(), Y.min(), Y.max() ret = collection else: # One of the image styles: xl, xr, yb, yt = x[0], x[-1], y[0], y[-1] if style == "image": im = mimage.AxesImage(self, cmap, norm, interpolation='nearest', origin='lower', extent=(xl, xr, yb, yt), **kwargs) im.set_data(C) im.set_alpha(alpha) self.images.append(im) ret = im if style == "pcolorimage": im = mimage.PcolorImage(self, x, y, C, cmap=cmap, norm=norm, alpha=alpha, **kwargs) self.images.append(im) ret = im self._set_artist_props(ret) if vmin is not None or vmax is not None: ret.set_clim(vmin, vmax) else: ret.autoscale_None() self.update_datalim(np.array([[xl, yb], [xr, yt]])) self.autoscale_view(tight=True) return ret def contour(self, *args, **kwargs): if not self._hold: self.cla() kwargs['filled'] = False return mcontour.ContourSet(self, *args, **kwargs) contour.__doc__ = mcontour.ContourSet.contour_doc def contourf(self, *args, **kwargs): if not self._hold: self.cla() kwargs['filled'] = True return mcontour.ContourSet(self, *args, **kwargs) contourf.__doc__ = mcontour.ContourSet.contour_doc def clabel(self, CS, *args, **kwargs): return CS.clabel(*args, **kwargs) clabel.__doc__ = mcontour.ContourSet.clabel.__doc__ def table(self, **kwargs): """ call signature:: table(cellText=None, cellColours=None, cellLoc='right', colWidths=None, rowLabels=None, rowColours=None, rowLoc='left', colLabels=None, colColours=None, colLoc='center', loc='bottom', bbox=None): Add a table to the current axes. Returns a :class:`matplotlib.table.Table` instance. For finer grained control over tables, use the :class:`~matplotlib.table.Table` class and add it to the axes with :meth:`~matplotlib.axes.Axes.add_table`. Thanks to John Gill for providing the class and table. kwargs control the :class:`~matplotlib.table.Table` properties: %(Table)s """ return mtable.table(self, **kwargs) table.__doc__ = cbook.dedent(table.__doc__) % martist.kwdocd def twinx(self): """ call signature:: ax = twinx() create a twin of Axes for generating a plot with a shared x-axis but independent y axis. The y-axis of self will have ticks on left and the returned axes will have ticks on the right """ ax2 = self.figure.add_axes(self.get_position(True), sharex=self, frameon=False) ax2.yaxis.tick_right() ax2.yaxis.set_label_position('right') self.yaxis.tick_left() return ax2 def twiny(self): """ call signature:: ax = twiny() create a twin of Axes for generating a plot with a shared y-axis but independent x axis. 
The x-axis of self will have ticks on bottom and the returned axes will have ticks on the top """ ax2 = self.figure.add_axes(self.get_position(True), sharey=self, frameon=False) ax2.xaxis.tick_top() ax2.xaxis.set_label_position('top') self.xaxis.tick_bottom() return ax2 def get_shared_x_axes(self): 'Return a copy of the shared axes Grouper object for x axes' return self._shared_x_axes def get_shared_y_axes(self): 'Return a copy of the shared axes Grouper object for y axes' return self._shared_y_axes #### Data analysis def hist(self, x, bins=10, range=None, normed=False, cumulative=False, bottom=None, histtype='bar', align='mid', orientation='vertical', rwidth=None, log=False, **kwargs): """ call signature:: hist(x, bins=10, range=None, normed=False, cumulative=False, bottom=None, histtype='bar', align='mid', orientation='vertical', rwidth=None, log=False, **kwargs) Compute and draw the histogram of *x*. The return value is a tuple (*n*, *bins*, *patches*) or ([*n0*, *n1*, ...], *bins*, [*patches0*, *patches1*,...]) if the input contains multiple data. Keyword arguments: *bins*: Either an integer number of bins or a sequence giving the bins. *x* are the data to be binned. *x* can be an array, a 2D array with multiple data in its columns, or a list of arrays with data of different length. Note, if *bins* is an integer input argument=numbins, *bins* + 1 bin edges will be returned, compatible with the semantics of :func:`numpy.histogram` with the *new* = True argument. Unequally spaced bins are supported if *bins* is a sequence. *range*: The lower and upper range of the bins. Lower and upper outliers are ignored. If not provided, *range* is (x.min(), x.max()). Range has no effect if *bins* is a sequence. If *bins* is a sequence or *range* is specified, autoscaling is set off (*autoscale_on* is set to *False*) and the xaxis limits are set to encompass the full specified bin range. *normed*: If *True*, the first element of the return tuple will be the counts normalized to form a probability density, i.e., ``n/(len(x)*dbin)``. In a probability density, the integral of the histogram should be 1; you can verify that with a trapezoidal integration of the probability density function:: pdf, bins, patches = ax.hist(...) print np.sum(pdf * np.diff(bins)) *cumulative*: If *True*, then a histogram is computed where each bin gives the counts in that bin plus all bins for smaller values. The last bin gives the total number of datapoints. If *normed* is also *True* then the histogram is normalized such that the last bin equals 1. If *cumulative* evaluates to less than 0 (e.g. -1), the direction of accumulation is reversed. In this case, if *normed* is also *True*, then the histogram is normalized such that the first bin equals 1. *histtype*: [ 'bar' | 'barstacked' | 'step' | 'stepfilled' ] The type of histogram to draw. - 'bar' is a traditional bar-type histogram. If multiple data are given the bars are arranged side by side. - 'barstacked' is a bar-type histogram where multiple data are stacked on top of each other. - 'step' generates a lineplot that is by default unfilled. - 'stepfilled' generates a lineplot that is by default filled. *align*: ['left' | 'mid' | 'right' ] Controls how the histogram is plotted. - 'left': bars are centered on the left bin edges. - 'mid': bars are centered between the bin edges. - 'right': bars are centered on the right bin edges. 
*orientation*: [ 'horizontal' | 'vertical' ] If 'horizontal', :func:`~matplotlib.pyplot.barh` will be used for bar-type histograms and the *bottom* kwarg will be the left edges. *rwidth*: The relative width of the bars as a fraction of the bin width. If *None*, automatically compute the width. Ignored if *histtype* = 'step' or 'stepfilled'. *log*: If *True*, the histogram axis will be set to a log scale. If *log* is *True* and *x* is a 1D array, empty bins will be filtered out and only the non-empty (*n*, *bins*, *patches*) will be returned. kwargs are used to update the properties of the hist :class:`~matplotlib.patches.Rectangle` instances: %(Rectangle)s You can use labels for your histogram, and only the first :class:`~matplotlib.patches.Rectangle` gets the label (the others get the magic string '_nolegend_'). This will make the histograms work in the intuitive way for bar charts:: ax.hist(10+2*np.random.randn(1000), label='men') ax.hist(12+3*np.random.randn(1000), label='women', alpha=0.5) ax.legend() **Example:** .. plot:: mpl_examples/pylab_examples/histogram_demo.py """ if not self._hold: self.cla() # NOTE: the range keyword overwrites the built-in func range !!! # needs to be fixed in numpy !!! if kwargs.get('width') is not None: raise DeprecationWarning( 'hist now uses the rwidth to give relative width ' 'and not absolute width') try: # make sure a copy is created: don't use asarray x = np.transpose(np.array(x)) if len(x.shape)==1: x.shape = (1,x.shape[0]) elif len(x.shape)==2 and x.shape[1]<x.shape[0]: warnings.warn('2D hist should be nsamples x nvariables; ' 'this looks transposed') except ValueError: # multiple hist with data of different length if iterable(x[0]) and not is_string_like(x[0]): tx = [] for i in xrange(len(x)): tx.append( np.array(x[i]) ) x = tx else: raise ValueError('Cannot use provided data to create a histogram') # Check whether bins or range are given explicitly. In that # case do not autoscale axes. 
binsgiven = (cbook.iterable(bins) or range is not None) # check the version of the numpy if np.__version__ < "1.3": # version 1.1 and 1.2 hist_kwargs = dict(range=range, normed=bool(normed), new=True) else: # version 1.3 and later, drop new=True hist_kwargs = dict(range=range, normed=bool(normed)) n = [] for i in xrange(len(x)): # this will automatically overwrite bins, # so that each histogram uses the same bins m, bins = np.histogram(x[i], bins, **hist_kwargs) n.append(m) if cumulative: slc = slice(None) if cbook.is_numlike(cumulative) and cumulative < 0: slc = slice(None,None,-1) if normed: n = [(m * np.diff(bins))[slc].cumsum()[slc] for m in n] else: n = [m[slc].cumsum()[slc] for m in n] patches = [] if histtype.startswith('bar'): totwidth = np.diff(bins) stacked = False if rwidth is not None: dr = min(1., max(0., rwidth)) elif len(n)>1: dr = 0.8 else: dr = 1.0 if histtype=='bar': width = dr*totwidth/len(n) dw = width if len(n)>1: boffset = -0.5*dr*totwidth*(1.-1./len(n)) else: boffset = 0.0 elif histtype=='barstacked': width = dr*totwidth boffset, dw = 0.0, 0.0 stacked = True else: raise ValueError('invalid histtype: %s' % histtype) if align == 'mid' or align == 'edge': boffset += 0.5*totwidth elif align == 'right': boffset += totwidth elif align != 'left' and align != 'center': raise ValueError('invalid align: %s' % align) if orientation == 'horizontal': for m in n: color = self._get_lines._get_next_cycle_color() patch = self.barh(bins[:-1]+boffset, m, height=width, left=bottom, align='center', log=log, color=color) patches.append(patch) if stacked: if bottom is None: bottom = 0.0 bottom += m boffset += dw elif orientation == 'vertical': for m in n: color = self._get_lines._get_next_cycle_color() patch = self.bar(bins[:-1]+boffset, m, width=width, bottom=bottom, align='center', log=log, color=color) patches.append(patch) if stacked: if bottom is None: bottom = 0.0 bottom += m boffset += dw else: raise ValueError('invalid orientation: %s' % orientation) elif histtype.startswith('step'): x = np.zeros( 2*len(bins), np.float ) y = np.zeros( 2*len(bins), np.float ) x[0::2], x[1::2] = bins, bins if align == 'left' or align == 'center': x -= 0.5*(bins[1]-bins[0]) elif align == 'right': x += 0.5*(bins[1]-bins[0]) elif align != 'mid' and align != 'edge': raise ValueError('invalid align: %s' % align) if log: y[0],y[-1] = 1e-100, 1e-100 if orientation == 'horizontal': self.set_xscale('log') elif orientation == 'vertical': self.set_yscale('log') fill = False if histtype == 'stepfilled': fill = True elif histtype != 'step': raise ValueError('invalid histtype: %s' % histtype) for m in n: y[1:-1:2], y[2::2] = m, m if orientation == 'horizontal': x,y = y,x elif orientation != 'vertical': raise ValueError('invalid orientation: %s' % orientation) color = self._get_lines._get_next_cycle_color() if fill: patches.append( self.fill(x, y, closed=False, facecolor=color) ) else: patches.append( self.fill(x, y, closed=False, edgecolor=color, fill=False) ) # adopted from adjust_x/ylim part of the bar method if orientation == 'horizontal': xmin, xmax = 0, self.dataLim.intervalx[1] for m in n: xmin = np.amin(m[m!=0]) # filter out the 0 height bins xmin = max(xmin*0.9, 1e-100) self.dataLim.intervalx = (xmin, xmax) elif orientation == 'vertical': ymin, ymax = 0, self.dataLim.intervaly[1] for m in n: ymin = np.amin(m[m!=0]) # filter out the 0 height bins ymin = max(ymin*0.9, 1e-100) self.dataLim.intervaly = (ymin, ymax) self.autoscale_view() else: raise ValueError('invalid histtype: %s' % histtype) label =
kwargs.pop('label', '') for patch in patches: for p in patch: p.update(kwargs) p.set_label(label) label = '_nolegend_' if binsgiven: self.set_autoscale_on(False) if orientation == 'vertical': self.autoscale_view(scalex=False, scaley=True) XL = self.xaxis.get_major_locator().view_limits(bins[0], bins[-1]) self.set_xbound(XL) else: self.autoscale_view(scalex=True, scaley=False) YL = self.yaxis.get_major_locator().view_limits(bins[0], bins[-1]) self.set_ybound(YL) if len(n)==1: return n[0], bins, cbook.silent_list('Patch', patches[0]) else: return n, bins, cbook.silent_list('Lists of Patches', patches) hist.__doc__ = cbook.dedent(hist.__doc__) % martist.kwdocd def psd(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none, window=mlab.window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None, **kwargs): """ call signature:: psd(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none, window=mlab.window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None, **kwargs) The power spectral density by Welch's average periodogram method. The vector *x* is divided into *NFFT* length segments. Each segment is detrended by function *detrend* and windowed by function *window*. *noverlap* gives the length of the overlap between segments. The :math:`|\mathrm{fft}(i)|^2` of each segment :math:`i` are averaged to compute *Pxx*, with a scaling to correct for power loss due to windowing. *Fs* is the sampling frequency. %(PSD)s *Fc*: integer The center frequency of *x* (defaults to 0), which offsets the x extents of the plot to reflect the frequency range used when a signal is acquired and then filtered and downsampled to baseband. Returns the tuple (*Pxx*, *freqs*). For plotting, the power is plotted as :math:`10\log_{10}(P_{xx})` for decibels, though *Pxx* itself is returned. References: Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John Wiley & Sons (1986) kwargs control the :class:`~matplotlib.lines.Line2D` properties: %(Line2D)s **Example:** .. plot:: mpl_examples/pylab_examples/psd_demo.py """ if not self._hold: self.cla() pxx, freqs = mlab.psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq) pxx.shape = len(freqs), freqs += Fc if scale_by_freq in (None, True): psd_units = 'dB/Hz' else: psd_units = 'dB' self.plot(freqs, 10*np.log10(pxx), **kwargs) self.set_xlabel('Frequency') self.set_ylabel('Power Spectral Density (%s)' % psd_units) self.grid(True) vmin, vmax = self.viewLim.intervaly intv = vmax-vmin logi = int(np.log10(intv)) if logi==0: logi=.1 step = 10*logi #print vmin, vmax, step, intv, math.floor(vmin), math.ceil(vmax)+1 ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step) self.set_yticks(ticks) return pxx, freqs psd_doc_dict = dict() psd_doc_dict.update(martist.kwdocd) psd_doc_dict.update(mlab.kwdocd) psd_doc_dict['PSD'] = cbook.dedent(psd_doc_dict['PSD']) psd.__doc__ = cbook.dedent(psd.__doc__) % psd_doc_dict def csd(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none, window=mlab.window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None, **kwargs): """ call signature:: csd(x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none, window=mlab.window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None, **kwargs) The cross spectral density :math:`P_{xy}` by Welch's average periodogram method. The vectors *x* and *y* are divided into *NFFT* length segments. Each segment is detrended by function *detrend* and windowed by function *window*. 
The product of the direct FFTs of *x* and *y* are averaged over each segment to compute :math:`P_{xy}`, with a scaling to correct for power loss due to windowing. Returns the tuple (*Pxy*, *freqs*). *P* is the cross spectrum (complex valued), and :math:`10\log_{10}|P_{xy}|` is plotted. %(PSD)s *Fc*: integer The center frequency of *x* (defaults to 0), which offsets the x extents of the plot to reflect the frequency range used when a signal is acquired and then filtered and downsampled to baseband. References: Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John Wiley & Sons (1986) kwargs control the Line2D properties: %(Line2D)s **Example:** .. plot:: mpl_examples/pylab_examples/csd_demo.py .. seealso: :meth:`psd` For a description of the optional parameters. """ if not self._hold: self.cla() pxy, freqs = mlab.csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq) pxy.shape = len(freqs), # pxy is complex freqs += Fc self.plot(freqs, 10*np.log10(np.absolute(pxy)), **kwargs) self.set_xlabel('Frequency') self.set_ylabel('Cross Spectrum Magnitude (dB)') self.grid(True) vmin, vmax = self.viewLim.intervaly intv = vmax-vmin step = 10*int(np.log10(intv)) ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step) self.set_yticks(ticks) return pxy, freqs csd.__doc__ = cbook.dedent(csd.__doc__) % psd_doc_dict def cohere(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none, window=mlab.window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None, **kwargs): """ call signature:: cohere(x, y, NFFT=256, Fs=2, Fc=0, detrend = mlab.detrend_none, window = mlab.window_hanning, noverlap=0, pad_to=None, sides='default', scale_by_freq=None, **kwargs) cohere the coherence between *x* and *y*. Coherence is the normalized cross spectral density: .. math:: C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}} %(PSD)s *Fc*: integer The center frequency of *x* (defaults to 0), which offsets the x extents of the plot to reflect the frequency range used when a signal is acquired and then filtered and downsampled to baseband. The return value is a tuple (*Cxy*, *f*), where *f* are the frequencies of the coherence vector. kwargs are applied to the lines. References: * Bendat & Piersol -- Random Data: Analysis and Measurement Procedures, John Wiley & Sons (1986) kwargs control the :class:`~matplotlib.lines.Line2D` properties of the coherence plot: %(Line2D)s **Example:** .. plot:: mpl_examples/pylab_examples/cohere_demo.py """ if not self._hold: self.cla() cxy, freqs = mlab.cohere(x, y, NFFT, Fs, detrend, window, noverlap, scale_by_freq) freqs += Fc self.plot(freqs, cxy, **kwargs) self.set_xlabel('Frequency') self.set_ylabel('Coherence') self.grid(True) return cxy, freqs cohere.__doc__ = cbook.dedent(cohere.__doc__) % psd_doc_dict def specgram(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none, window=mlab.window_hanning, noverlap=128, cmap=None, xextent=None, pad_to=None, sides='default', scale_by_freq=None): """ call signature:: specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none, window=mlab.window_hanning, noverlap=128, cmap=None, xextent=None, pad_to=None, sides='default', scale_by_freq=None) Compute a spectrogram of data in *x*. Data are split into *NFFT* length segments and the PSD of each section is computed. The windowing function *window* is applied to each segment, and the amount of overlap of each segment is specified with *noverlap*. 
%(PSD)s *Fc*: integer The center frequency of *x* (defaults to 0), which offsets the y extents of the plot to reflect the frequency range used when a signal is acquired and then filtered and downsampled to baseband. *cmap*: A :class:`matplotlib.cm.Colormap` instance; if *None* use default determined by rc *xextent*: The image extent along the x-axis. xextent = (xmin,xmax) The default is (0,max(bins)), where bins is the return value from :func:`mlab.specgram` Return value is (*Pxx*, *freqs*, *bins*, *im*): - *bins* are the time points the spectrogram is calculated over - *freqs* is an array of frequencies - *Pxx* is a len(times) x len(freqs) array of power - *im* is a :class:`matplotlib.image.AxesImage` instance Note: If *x* is real (i.e. non-complex), only the positive spectrum is shown. If *x* is complex, both positive and negative parts of the spectrum are shown. This can be overridden using the *sides* keyword argument. **Example:** .. plot:: mpl_examples/pylab_examples/specgram_demo.py """ if not self._hold: self.cla() Pxx, freqs, bins = mlab.specgram(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides, scale_by_freq) Z = 10. * np.log10(Pxx) Z = np.flipud(Z) if xextent is None: xextent = 0, np.amax(bins) xmin, xmax = xextent freqs += Fc extent = xmin, xmax, freqs[0], freqs[-1] im = self.imshow(Z, cmap, extent=extent) self.axis('auto') return Pxx, freqs, bins, im specgram.__doc__ = cbook.dedent(specgram.__doc__) % psd_doc_dict del psd_doc_dict #So that this does not become an Axes attribute def spy(self, Z, precision=0, marker=None, markersize=None, aspect='equal', **kwargs): """ call signature:: spy(Z, precision=0, marker=None, markersize=None, aspect='equal', **kwargs) ``spy(Z)`` plots the sparsity pattern of the 2-D array *Z*. If *precision* is 0, any non-zero value will be plotted; else, values of :math:`|Z| > precision` will be plotted. For :class:`scipy.sparse.spmatrix` instances, there is a special case: if *precision* is 'present', any value present in the array will be plotted, even if it is identically zero. The array will be plotted as it would be printed, with the first index (row) increasing down and the second index (column) increasing to the right. By default aspect is 'equal', so that each array element occupies a square space; set the aspect kwarg to 'auto' to allow the plot to fill the plot box, or to any scalar number to specify the aspect ratio of an array element directly. Two plotting styles are available: image or marker. Both are available for full arrays, but only the marker style works for :class:`scipy.sparse.spmatrix` instances. If *marker* and *markersize* are *None*, an image will be returned and any remaining kwargs are passed to :func:`~matplotlib.pyplot.imshow`; else, a :class:`~matplotlib.lines.Line2D` object will be returned with the value of marker determining the marker type, and any remaining kwargs passed to the :meth:`~matplotlib.axes.Axes.plot` method. If *marker* and *markersize* are *None*, useful kwargs include: * *cmap* * *alpha* .. seealso:: :func:`~matplotlib.pyplot.imshow` For controlling colors, e.g. cyan background and red marks, use:: cmap = mcolors.ListedColormap(['c','r']) If *marker* or *markersize* is not *None*, useful kwargs include: * *marker* * *markersize* * *color* Useful values for *marker* include: * 's' square (default) * 'o' circle * '.' point * ',' pixel .. 
seealso:: :func:`~matplotlib.pyplot.plot` """ if precision is None: precision = 0 warnings.warn("Use precision=0 instead of None", DeprecationWarning) # 2008/10/03 if marker is None and markersize is None and hasattr(Z, 'tocoo'): marker = 's' if marker is None and markersize is None: Z = np.asarray(Z) mask = np.absolute(Z)>precision if 'cmap' not in kwargs: kwargs['cmap'] = mcolors.ListedColormap(['w', 'k'], name='binary') nr, nc = Z.shape extent = [-0.5, nc-0.5, nr-0.5, -0.5] ret = self.imshow(mask, interpolation='nearest', aspect=aspect, extent=extent, origin='upper', **kwargs) else: if hasattr(Z, 'tocoo'): c = Z.tocoo() if precision == 'present': y = c.row x = c.col else: nonzero = np.absolute(c.data) > precision y = c.row[nonzero] x = c.col[nonzero] else: Z = np.asarray(Z) nonzero = np.absolute(Z)>precision y, x = np.nonzero(nonzero) if marker is None: marker = 's' if markersize is None: markersize = 10 marks = mlines.Line2D(x, y, linestyle='None', marker=marker, markersize=markersize, **kwargs) self.add_line(marks) nr, nc = Z.shape self.set_xlim(xmin=-0.5, xmax=nc-0.5) self.set_ylim(ymin=nr-0.5, ymax=-0.5) self.set_aspect(aspect) ret = marks self.title.set_y(1.05) self.xaxis.tick_top() self.xaxis.set_ticks_position('both') self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9, steps=[1, 2, 5, 10], integer=True)) self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9, steps=[1, 2, 5, 10], integer=True)) return ret def matshow(self, Z, **kwargs): ''' Plot a matrix or array as an image. The matrix will be shown the way it would be printed, with the first row at the top. Row and column numbering is zero-based. Argument: *Z* anything that can be interpreted as a 2-D array kwargs all are passed to :meth:`~matplotlib.axes.Axes.imshow`. :meth:`matshow` sets defaults for *extent*, *origin*, *interpolation*, and *aspect*; use care in overriding the *extent* and *origin* kwargs, because they interact. (Also, if you want to change them, you probably should be using imshow directly in your own version of matshow.) Returns: an :class:`matplotlib.image.AxesImage` instance. ''' Z = np.asarray(Z) nr, nc = Z.shape extent = [-0.5, nc-0.5, nr-0.5, -0.5] kw = {'extent': extent, 'origin': 'upper', 'interpolation': 'nearest', 'aspect': 'equal'} # (already the imshow default) kw.update(kwargs) im = self.imshow(Z, **kw) self.title.set_y(1.05) self.xaxis.tick_top() self.xaxis.set_ticks_position('both') self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9, steps=[1, 2, 5, 10], integer=True)) self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9, steps=[1, 2, 5, 10], integer=True)) return im class SubplotBase: """ Base class for subplots, which are :class:`Axes` instances with additional methods to facilitate generating and manipulating a set of :class:`Axes` within a figure. """ def __init__(self, fig, *args, **kwargs): """ *fig* is a :class:`matplotlib.figure.Figure` instance. *args* is the tuple (*numRows*, *numCols*, *plotNum*), where the array of subplots in the figure has dimensions *numRows*, *numCols*, and where *plotNum* is the number of the subplot being created. *plotNum* starts at 1 in the upper left corner and increases to the right. If *numRows* <= *numCols* <= *plotNum* < 10, *args* can be the decimal integer *numRows* * 100 + *numCols* * 10 + *plotNum*.
""" self.figure = fig if len(args)==1: s = str(args[0]) if len(s) != 3: raise ValueError('Argument to subplot must be a 3 digits long') rows, cols, num = map(int, s) elif len(args)==3: rows, cols, num = args else: raise ValueError( 'Illegal argument to subplot') total = rows*cols num -= 1 # convert from matlab to python indexing # ie num in range(0,total) if num >= total: raise ValueError( 'Subplot number exceeds total subplots') self._rows = rows self._cols = cols self._num = num self.update_params() # _axes_class is set in the subplot_class_factory self._axes_class.__init__(self, fig, self.figbox, **kwargs) def get_geometry(self): 'get the subplot geometry, eg 2,2,3' return self._rows, self._cols, self._num+1 # COVERAGE NOTE: Never used internally or from examples def change_geometry(self, numrows, numcols, num): 'change subplot geometry, eg. from 1,1,1 to 2,2,3' self._rows = numrows self._cols = numcols self._num = num-1 self.update_params() self.set_position(self.figbox) def update_params(self): 'update the subplot position from fig.subplotpars' rows = self._rows cols = self._cols num = self._num pars = self.figure.subplotpars left = pars.left right = pars.right bottom = pars.bottom top = pars.top wspace = pars.wspace hspace = pars.hspace totWidth = right-left totHeight = top-bottom figH = totHeight/(rows + hspace*(rows-1)) sepH = hspace*figH figW = totWidth/(cols + wspace*(cols-1)) sepW = wspace*figW rowNum, colNum = divmod(num, cols) figBottom = top - (rowNum+1)*figH - rowNum*sepH figLeft = left + colNum*(figW + sepW) self.figbox = mtransforms.Bbox.from_bounds(figLeft, figBottom, figW, figH) self.rowNum = rowNum self.colNum = colNum self.numRows = rows self.numCols = cols if 0: print 'rcn', rows, cols, num print 'lbrt', left, bottom, right, top print 'self.figBottom', self.figBottom print 'self.figLeft', self.figLeft print 'self.figW', self.figW print 'self.figH', self.figH print 'self.rowNum', self.rowNum print 'self.colNum', self.colNum print 'self.numRows', self.numRows print 'self.numCols', self.numCols def is_first_col(self): return self.colNum==0 def is_first_row(self): return self.rowNum==0 def is_last_row(self): return self.rowNum==self.numRows-1 def is_last_col(self): return self.colNum==self.numCols-1 # COVERAGE NOTE: Never used internally or from examples def label_outer(self): """ set the visible property on ticklabels so xticklabels are visible only if the subplot is in the last row and yticklabels are visible only if the subplot is in the first column """ lastrow = self.is_last_row() firstcol = self.is_first_col() for label in self.get_xticklabels(): label.set_visible(lastrow) for label in self.get_yticklabels(): label.set_visible(firstcol) _subplot_classes = {} def subplot_class_factory(axes_class=None): # This makes a new class that inherits from SubclassBase and the # given axes_class (which is assumed to be a subclass of Axes). # This is perhaps a little bit roundabout to make a new class on # the fly like this, but it means that a new Subplot class does # not have to be created for every type of Axes. 
if axes_class is None: axes_class = Axes new_class = _subplot_classes.get(axes_class) if new_class is None: new_class = new.classobj("%sSubplot" % (axes_class.__name__), (SubplotBase, axes_class), {'_axes_class': axes_class}) _subplot_classes[axes_class] = new_class return new_class # This is provided for backward compatibility Subplot = subplot_class_factory() martist.kwdocd['Axes'] = martist.kwdocd['Subplot'] = martist.kwdoc(Axes) """ # this is some discarded code I was using to find the minimum positive # data point for some log scaling fixes. I realized there was a # cleaner way to do it, but am keeping this around as an example for # how to get the data out of the axes. Might want to make something # like this a method one day, or better yet make get_verts an Artist # method minx, maxx = self.get_xlim() if minx<=0 or maxx<=0: # find the min pos value in the data xs = [] for line in self.lines: xs.extend(line.get_xdata(orig=False)) for patch in self.patches: xs.extend([x for x,y in patch.get_verts()]) for collection in self.collections: xs.extend([x for x,y in collection.get_verts()]) posx = [x for x in xs if x>0] if len(posx): minx = min(posx) maxx = max(posx) # warning, probably breaks inverted axis self.set_xlim((0.1*minx, maxx)) """
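# ---------------------------------------------------------------------------
# Minimal usage sketch (an illustrative addition, not part of the original
# module): the Axes methods defined above -- imshow, pcolormesh, hist and
# twinx -- are normally reached through the pyplot interface, as below.
# The sample data and figure layout are arbitrary assumptions.
if __name__ == '__main__':
    import numpy as np
    import matplotlib.pyplot as plt

    z = np.random.rand(8, 10)        # 2-D field for the image-style plots
    samples = np.random.randn(1000)  # 1-D data for the histogram

    fig = plt.figure()
    ax1 = fig.add_subplot(2, 2, 1)
    ax1.imshow(z, interpolation='nearest', origin='lower')  # Axes.imshow
    ax2 = fig.add_subplot(2, 2, 2)
    ax2.pcolormesh(z)                                       # Axes.pcolormesh
    ax3 = fig.add_subplot(2, 2, 3)
    ax3.hist(samples, bins=20)                              # Axes.hist
    ax4 = fig.add_subplot(2, 2, 4)
    ax4.plot(samples[:50])
    ax4.twinx().plot(np.abs(samples[:50]), 'r')             # Axes.twinx
    plt.show()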
gpl-3.0
fabioticconi/scikit-learn
benchmarks/bench_plot_lasso_path.py
301
4003
"""Benchmarks of Lasso regularization path computation using Lars and CD The input data is mostly low rank but is a fat infinite tail. """ from __future__ import print_function from collections import defaultdict import gc import sys from time import time import numpy as np from sklearn.linear_model import lars_path from sklearn.linear_model import lasso_path from sklearn.datasets.samples_generator import make_regression def compute_bench(samples_range, features_range): it = 0 results = defaultdict(lambda: []) max_it = len(samples_range) * len(features_range) for n_samples in samples_range: for n_features in features_range: it += 1 print('====================') print('Iteration %03d of %03d' % (it, max_it)) print('====================') dataset_kwargs = { 'n_samples': n_samples, 'n_features': n_features, 'n_informative': n_features / 10, 'effective_rank': min(n_samples, n_features) / 10, #'effective_rank': None, 'bias': 0.0, } print("n_samples: %d" % n_samples) print("n_features: %d" % n_features) X, y = make_regression(**dataset_kwargs) gc.collect() print("benchmarking lars_path (with Gram):", end='') sys.stdout.flush() tstart = time() G = np.dot(X.T, X) # precomputed Gram matrix Xy = np.dot(X.T, y) lars_path(X, y, Xy=Xy, Gram=G, method='lasso') delta = time() - tstart print("%0.3fs" % delta) results['lars_path (with Gram)'].append(delta) gc.collect() print("benchmarking lars_path (without Gram):", end='') sys.stdout.flush() tstart = time() lars_path(X, y, method='lasso') delta = time() - tstart print("%0.3fs" % delta) results['lars_path (without Gram)'].append(delta) gc.collect() print("benchmarking lasso_path (with Gram):", end='') sys.stdout.flush() tstart = time() lasso_path(X, y, precompute=True) delta = time() - tstart print("%0.3fs" % delta) results['lasso_path (with Gram)'].append(delta) gc.collect() print("benchmarking lasso_path (without Gram):", end='') sys.stdout.flush() tstart = time() lasso_path(X, y, precompute=False) delta = time() - tstart print("%0.3fs" % delta) results['lasso_path (without Gram)'].append(delta) return results if __name__ == '__main__': from mpl_toolkits.mplot3d import axes3d # register the 3d projection import matplotlib.pyplot as plt samples_range = np.linspace(10, 2000, 5).astype(np.int) features_range = np.linspace(10, 2000, 5).astype(np.int) results = compute_bench(samples_range, features_range) max_time = max(max(t) for t in results.values()) fig = plt.figure('scikit-learn Lasso path benchmark results') i = 1 for c, (label, timings) in zip('bcry', sorted(results.items())): ax = fig.add_subplot(2, 2, i, projection='3d') X, Y = np.meshgrid(samples_range, features_range) Z = np.asarray(timings).reshape(samples_range.shape[0], features_range.shape[0]) # plot the actual surface ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8) # dummy point plot to stick the legend to since surface plot do not # support legends (yet?) #ax.plot([1], [1], [1], color=c, label=label) ax.set_xlabel('n_samples') ax.set_ylabel('n_features') ax.set_zlabel('Time (s)') ax.set_zlim3d(0.0, max_time * 1.1) ax.set_title(label) #ax.legend() i += 1 plt.show()
bsd-3-clause
FluidityProject/multifluids
tests/sloshing_tank/plot_freesurface.py
5
2631
#!/usr/bin/env python import settings import ana_sol import sys import math import commands import matplotlib.pyplot as plt import getopt from scipy.special import erf from numpy import poly1d from matplotlib.pyplot import figure, show from numpy import pi, sin, linspace from matplotlib.mlab import stineman_interp from numpy import exp, cos from fluidity_tools import stat_parser as stat # Usage def usage(): print "plot_freesurface.py --file=detectorfile" print "All the other options are read from settings.py" ################# Main ########################### def main(argv=None): a_0 = settings.a0 # initial maximum perturbation g = settings.g # gravity eta= settings.eta # viscosity L= settings.L # wavelength timestep= settings.timestep # timestep filename='' global debug debug=False #debug=True try: opts, args = getopt.getopt(sys.argv[1:], "h:", ['file=']) except getopt.GetoptError: usage() sys.exit(2) for opt, arg in opts: if opt == '--file': filename=arg elif opt == '-h' or opt == '--help': usage() sys.exit(2) if filename=='': usage() sys.exit(2) print 'Using:\n\ta_0 =', a_0 # initial maximum perturbation print '\tg =', g # gravity print '\teta=', eta # viscosity print '\tL=', L # wavelength print '\ttimestep=', timestep # timestep ####################### Print time plot ########################### print 'Generating time plot' x_time= stat(filename)["ElapsedTime"]["value"] fs_simu= stat(filename)["water"]["FreeSurface"]["left"] # fs_simu= stat(filename)["water"]["FreeSurface"]["middle"] fs_ana = stat(filename)["water"]["FreeSurface_Analytical"]["left"] # fs_ana = stat(filename)["water"]["FreeSurface_Analytical"]["middle"] plt.ion() # switch on interactive mode fig = figure() ax = fig.add_subplot(111) ax.plot(x_time,fs_simu,'ro') ax.plot(x_time,fs_ana,'-') plt.title('Free Surface timeplot at x=0') plt.xlabel('Time [s]') plt.ylabel('Free surface [m]') plt.draw() raw_input("Please press Enter") #plt.cla() if __name__ == "__main__": main()
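# Added usage note (the detector file name below is a hypothetical
# placeholder; every other parameter is read from settings.py, as
# usage() states):
#
#   ./plot_freesurface.py --file=sloshing_tank.detectors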
lgpl-2.1
mupif/mupif
mupif/Field.py
1
42683
# # MuPIF: Multi-Physics Integration Framework # Copyright (C) 2010-2015 Borek Patzak # # Czech Technical University, Faculty of Civil Engineering, # Department of Structural Mechanics, 166 29 Prague, Czech Republic # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, # Boston, MA 02110-1301 USA # from builtins import range from builtins import object from . import Cell from . import FieldID from . import ValueType from . import BBox from . import APIError from . import MupifObject from . import Mesh from .Physics import PhysicalQuantities from .Physics.PhysicalQuantities import PhysicalQuantity from numpy import array, arange, random, zeros import numpy import copy import Pyro4 from enum import IntEnum import logging log = logging.getLogger() try: import cPickle as pickle # faster serialization if available except: import pickle # import logging - never use it here, it causes cPickle.PicklingError: Can't pickle <type 'thread.lock'>: attribute # lookup thread.lock failed # debug flag debug = 0 class FieldType(IntEnum): """ Represent the supported values of FieldType, i.e. FT_vertexBased or FT_cellBased. """ FT_vertexBased = 1 FT_cellBased = 2 @Pyro4.expose class Field(MupifObject.MupifObject, PhysicalQuantity): """ Representation of field. Field is a scalar, vector, or tensorial quantity defined on a spatial domain. The field, however is assumed to be fixed at certain time. The field can be evaluated in any spatial point belonging to underlying domain. Derived classes will implement fields defined on common discretizations, like fields defined on structured/unstructured FE meshes, FD grids, etc. .. automethod:: __init__ .. automethod:: _evaluate """ def __init__(self, mesh, fieldID, valueType, units, time, values=None, fieldType=FieldType.FT_vertexBased, objectID=0, metaData={}): """ Initializes the field instance. :param Mesh.Mesh mesh: Instance of a Mesh class representing the underlying discretization :param FieldID fieldID: Field type (displacement, strain, temperature ...) :param ValueType valueType: Type of field values (scalar, vector, tensor). Tensor is a tuple of 9 values. It is changed to 3x3 for VTK output automatically. 
:param Physics.PhysicalUnits units: Field value units :param Physics.PhysicalQuantity time: Time associated with field values :param values: Field values (format dependent on a particular field type, however each individual value should be stored as tuple, even scalar value) :type values: list of tuples representing individual values :param FieldType fieldType: Optional, determines field type (values specified as vertex or cell values), default is FT_vertexBased :param int objectID: Optional ID of problem object/subdomain to which field is related, default = 0 :param dict metaData: Optionally pass metadata for merging """ super(Field, self).__init__() self.mesh = mesh self.fieldID = fieldID self.valueType = valueType self.time = time self.uri = None # pyro uri; used in distributed setting # self.log = logging.getLogger() self.fieldType = fieldType self.objectID = objectID if values is None: if self.fieldType == FieldType.FT_vertexBased: ncomponents = mesh.getNumberOfVertices() else: ncomponents = mesh.getNumberOfCells() self.value = zeros((ncomponents, self.getRecordSize())) else: self.value = values if PhysicalQuantities.isPhysicalUnit(units): self.unit = units else: self.unit = PhysicalQuantities.findUnit(units) self.setMetadata('Units', self.unit.name()) self.setMetadata('Type', 'mupif.Field.Field') self.setMetadata('Type_ID', str(self.fieldID)) self.setMetadata('FieldType', str(fieldType)) self.setMetadata('ValueType', str(self.valueType)) self.updateMetadata(metaData) @classmethod def loadFromLocalFile(cls, fileName): """ Alternative constructor which loads instance directly from a Pickle module. :param str fileName: File name :return: Returns Field instance :rtype: Field """ return pickle.load(open(fileName, 'rb')) def getRecordSize(self): """ Return the number of scalars per value, depending on :obj:`valueType` passed when constructing the instance. :return: number of scalars (1,3,9 respectively for scalar, vector, tensor) :rtype: int """ if self.valueType == ValueType.Scalar: return 1 elif self.valueType == ValueType.Vector: return 3 elif self.valueType == ValueType.Tensor: return 9 else: raise ValueError("Invalid value of Field.valueType (%d)." % self.valueType) def getMesh(self): """ Obtain mesh. :return: Returns a mesh of underlying discretization :rtype: Mesh.Mesh """ return self.mesh def getValueType(self): """ Returns ValueType of the field, e.g. scalar, vector, tensor. :return: Returns value type of the receiver :rtype: ValueType """ return self.valueType def getFieldID(self): """ Returns FieldID, e.g. FID_Displacement, FID_Temperature. :return: Returns field ID :rtype: FieldID """ return self.fieldID def getFieldIDName(self): """ Returns name of the field. :return: Returns fieldID name :rtype: string """ return self.fieldID.name def getFieldType(self): """ Returns receiver field type (values specified as vertex or cell values) :return: Returns fieldType id :rtype: FieldType """ return self.fieldType def getTime(self): """ Get time of the field. :return: Time of field data :rtype: Physics.PhysicalQuantity """ return self.time def evaluate(self, positions, eps=0.0): """ Evaluates the receiver at given spatial position(s). 
:param positions: 1D/2D/3D position vectors :type positions: tuple, a list of tuples :param float eps: Optional tolerance for probing whether the point belongs to a cell (should really not be used) :return: field value(s) :rtype: Physics.PhysicalQuantity with given value or tuple of values """ # test if positions is a list of positions if isinstance(positions, list): ans = [] for pos in positions: ans.append(self._evaluate(pos, eps)) return PhysicalQuantity(ans, self.unit) else: # single position passed return PhysicalQuantity(self._evaluate(positions, eps), self.unit) def _evaluate(self, position, eps): """ Evaluates the receiver at a single spatial position. :param tuple position: 1D/2D/3D position vector :param float eps: Optional tolerance :return: field value :rtype: tuple of doubles .. note:: This method has some issues related to https://sourceforge.net/p/mupif/tickets/22/ . """ cells = self.mesh.giveCellLocalizer().giveItemsInBBox(BBox.BBox([c-eps for c in position], [c+eps for c in position])) # answer=None if len(cells): if self.fieldType == FieldType.FT_vertexBased: for icell in cells: try: if icell.containsPoint(position): if debug: log.debug(icell.getVertices()) try: answer = icell.interpolate(position, [self.value[i.number] for i in icell.getVertices()]) except IndexError: log.error('Field::evaluate failed, inconsistent data at cell %d' % icell.label) raise return answer except ZeroDivisionError: print('ZeroDivisionError?') log.debug(icell.number) log.debug(position) icell.debug = 1 log.debug(icell.containsPoint(position), icell.glob2loc(position)) log.error('Field::evaluate - no source cell found for position %s' % str(position)) for icell in cells: log.debug(icell.number) log.debug(icell.containsPoint(position)) log.debug(icell.glob2loc(position)) else: # if (self.fieldType == FieldType.FT_vertexBased): # in case of cell based fields do compute average of cell values containing point # this typically happens when point is on the shared edge or vertex count = 0 for icell in cells: if icell.containsPoint(position): if debug: log.debug(icell.getVertices()) try: tmp = self.value[icell.number] if count == 0: answer = list(tmp) else: for i in answer: answer = [x+y for x in answer for y in tmp] count += 1 except IndexError: log.error('Field::evaluate failed, inconsistent data at cell %d' % icell.label) log.error(icell.getVertices()) raise # end loop over icells if count == 0: log.error('Field::evaluate - no source cell found for position %s', str(position)) # for icell in cells: # log.debug(icell.number, icell.containsPoint(position), icell.glob2loc(position)) else: answer = [x/count for x in answer] return answer else: # no source cell found log.error('Field::evaluate - no source cell found for position ' + str(position)) raise ValueError('Field::evaluate - no source cell found for position ' + str(position)) def getVertexValue(self, vertexID): """ Returns the value associated with a given vertex. :param int vertexID: Vertex identifier :return: The value :rtype: Physics.PhysicalQuantity """ if self.fieldType == FieldType.FT_vertexBased: return PhysicalQuantity(self.value[vertexID], self.unit) else: raise TypeError('Attempt to acces vertex value of cell based field, use evaluate instead') def getCellValue(self, cellID): """ Returns the value associated with a given cell. 
:param int cellID: Cell identifier :return: The value :rtype: Physics.PhysicalQuantity """ if self.fieldType == FieldType.FT_cellBased: return PhysicalQuantity(self.value[cellID], self.unit) else: raise TypeError('Attempt to access cell value of vertex based field, use evaluate instead') def _giveValue(self, componentID): """ Returns the value associated with a given component (vertex or cell). Deprecated, use getVertexValue() or getCellValue() :param int componentID: An identifier of a component: vertexID or cellID :return: The value :rtype: Physics.PhysicalQuantity """ return PhysicalQuantity(self.value[componentID], self.unit) def giveValue(self, componentID): """ Returns the value associated with a given component (vertex or cell). :param int componentID: An identifier of a component: vertexID or cellID :return: The value :rtype: tuple """ return self.value[componentID] def setValue(self, componentID, value): """ Sets the value associated with a given component (vertex or cell). :param int componentID: An identifier of a component: vertexID or cellID :param tuple value: Value to be set for a given component, should have the same units as receiver .. Note:: If a mesh has mapping attached (a mesh view) then we have to remember value locally and record change. The source field values are updated after commit() method is invoked. """ self.value[componentID] = value def commit(self): """ Commits the recorded changes (via setValue method) to a primary field. """ def getObjectID(self): """ Returns field objectID. :return: Object's ID :rtype: int """ return self.objectID def getUnits(self): """ :return: Returns units of the receiver :rtype: Physics.PhysicalUnits """ return self.unit def merge(self, field): """ Merges the receiver with the given field. Both fields should be defined on different parts of the domain (they can also overlap), but should refer to the same underlying discretization, otherwise unpredictable results can occur. :param Field field: given field to merge with. """ # first merge meshes mesh = copy.deepcopy(self.mesh) mesh.merge(field.mesh) log.debug(mesh) # merge the field values # some type checking first if self.fieldType != field.fieldType: raise TypeError("Field::merge: fieldType of receiver and parameter is different") if self.fieldType == FieldType.FT_vertexBased: values = [0]*mesh.getNumberOfVertices() for v in range(self.mesh.getNumberOfVertices()): values[mesh.vertexLabel2Number(self.mesh.getVertex(v).label)] = self.value[v] for v in range(field.mesh.getNumberOfVertices()): values[mesh.vertexLabel2Number(field.mesh.getVertex(v).label)] = field.value[v] else: values = [0]*mesh.getNumberOfCells() for v in range(self.mesh.getNumberOfCells()): values[mesh.cellLabel2Number(self.mesh.giveCell(v).label)] = self.value[v] for v in range(field.mesh.getNumberOfCells()): values[mesh.cellLabel2Number(field.mesh.giveCell(v).label)] = field.value[v] self.mesh = mesh self.value = values def field2VTKData(self, name=None, lookupTable=None): """ Creates VTK representation of the receiver. Useful for visualization. Requires pyvtk module.
:param str name: human-readable name of the field :param pyvtk.LookupTable lookupTable: color lookup table :return: Instance of pyvtk :rtype: pyvtk.VtkData """ import pyvtk if name is None: name = self.getFieldIDName() if lookupTable and not isinstance(lookupTable, pyvtk.LookupTable): log.info('ignoring lookupTable which is not a pyvtk.LookupTable instance.') lookupTable = None if lookupTable is None: lookupTable=pyvtk.LookupTable([(0, .231, .298, 1.0), (.4, .865, .865, 1.0), (.8, .706, .016, 1.0)], name='coolwarm') # Scalars use different name than 'coolwarm'. Then Paraview uses its own color mapping instead of taking # 'coolwarm' from *.vtk file. This prevents setting Paraview's color mapping. scalarsKw = dict(name=name, lookup_table='default') else: scalarsKw = dict(name=name, lookup_table=lookupTable.name) # see http://cens.ioc.ee/cgi-bin/cvsweb/python/pyvtk/examples/example1.py?rev=1.3 for an example vectorsKw = dict(name=name) # vectors don't have a lookup_table if self.fieldType == FieldType.FT_vertexBased: if self.getValueType() == ValueType.Scalar: return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.PointData(pyvtk.Scalars([val[0] for val in self.value], **scalarsKw), lookupTable), 'Unstructured Grid Example') elif self.getValueType() == ValueType.Vector: return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.PointData(pyvtk.Vectors(self.value, **vectorsKw), lookupTable), 'Unstructured Grid Example') elif self.getValueType() == ValueType.Tensor: return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.PointData(pyvtk.Tensors(self.getMartixForTensor(self.value), **vectorsKw), lookupTable), 'Unstructured Grid Example') else: if self.getValueType() == ValueType.Scalar: return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.CellData(pyvtk.Scalars([val[0] for val in self.value], **scalarsKw), lookupTable), 'Unstructured Grid Example') elif self.getValueType() == ValueType.Vector: return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.CellData(pyvtk.Vectors(self.value, **vectorsKw),lookupTable), 'Unstructured Grid Example') elif self.getValueType() == ValueType.Tensor: return pyvtk.VtkData(self.mesh.getVTKRepresentation(), pyvtk.CellData(pyvtk.Tensors(self.getMartixForTensor(self.value), **vectorsKw), lookupTable), 'Unstructured Grid Example') def getMartixForTensor(self, values): """ Reshape values to a list with 3x3 arrays. Usable for VTK export. :param list values: List containing tuples of 9 values, e.g. [(1,2,3,4,5,6,7,8,9), (1,2,3,4,5,6,7,8,9), ...] :return: List containing 3x3 matrices for each tensor :rtype: list """ tensor = [] for i in values: tensor.append(numpy.reshape(i, (3, 3))) return tensor def dumpToLocalFile(self, fileName, protocol=pickle.HIGHEST_PROTOCOL): """ Dump Field to a file using a Pickle serialization module. :param str fileName: File name :param int protocol: Used protocol - 0=ASCII, 1=old binary, 2=new binary """ pickle.dump(self, open(fileName, 'wb'), protocol) def field2Image2D(self, plane='xy', elevation=(-1.e-6, 1.e-6), numX=10, numY=20, interp='linear', fieldComponent=0, vertex=True, colorBar='horizontal', colorBarLegend='', barRange=(None, None), barFormatNum='%.3g', title='', xlabel='', ylabel='', fileName='', show=True, figsize=(8, 4), matPlotFig=None): """ Plots and/or saves 2D image using a matplotlib library. Works for structured and unstructured 2D/3D fields. 2D/3D fields need to define plane. This method gives only basic viewing options, for aesthetic and more elaborated output use e.g. 
VTK field export with postprocessors such as ParaView or Mayavi. Idea from https://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html#id1 :param str plane: what plane to extract from field, valid values are 'xy', 'xz', 'yz' :param tuple elevation: range of the third coordinate. For example, for plane='xy' it grabs z coordinates in this range :param int numX: number of divisions on x graph axis :param int numY: number of divisions on y graph axis :param str interp: interpolation type when transferring to a grid. Valid values 'linear', 'nearest' or 'cubic' :param int fieldComponent: component of the field :param bool vertex: if vertices should be plotted as points :param str colorBar: color bar details. Valid values '' for no colorbar, 'vertical' or 'horizontal' :param str colorBarLegend: Legend for color bar. If '', current field name and units are printed. None prints nothing. :param tuple barRange: min and max bar range. If barRange=('NaN','NaN'), it is adjusted automatically :param str barFormatNum: format of color bar numbers :param str title: title :param str xlabel: x axis label :param str ylabel: y axis label :param str fileName: if nonempty, a file is written to the disk, usually png, pdf, ps, eps and svg are supported :param bool show: if the plot should be shown :param tuple figsize: size of canvas in inches. Affects only showing a figure. Saving an image to a file adjusts one side automatically. :param obj matPlotFig: an existing matplotlib figure to draw into; a new figure is created when None :return: handle to matPlotFig :rtype: matPlotFig """ try: import numpy as np import math from scipy.interpolate import griddata import matplotlib matplotlib.use('TkAgg')  # Qt4Agg gives an empty, black window import matplotlib.pyplot as plt except ImportError as e: log.error('Skipping field2Image2D due to missing modules: %s' % e) return None # raise if self.fieldType != FieldType.FT_vertexBased: raise APIError.APIError('Only FieldType.FT_vertexBased is now supported') mesh = self.getMesh() numVertices = mesh.getNumberOfVertices() indX = 0 indY = 0 elev = 0 if plane == 'xy': indX = 0 indY = 1 elev = 2 elif plane == 'xz': indX = 0 indY = 2 elev = 1 elif plane == 'yz': indX = 1 indY = 2 elev = 0 # find eligible vertex points and values vertexPoints = [] vertexValue = [] for i in range(0, numVertices): coords = mesh.getVertex(i).getCoordinates() # print(coords) value = self.giveValue(i)[fieldComponent] if elevation[1] > coords[elev] > elevation[0]: vertexPoints.append((coords[indX], coords[indY])) vertexValue.append(value) if len(vertexPoints) == 0: log.info('No valid vertex points found, putting zeros on domain 1 x 1') for i in range(5): vertexPoints.append((i % 2, i/4.)) vertexValue.append(0) # for i in range (0, len(vertexPoints)): # print (vertexPoints[i], vertexValue[i]) vertexPointsArr = np.array(vertexPoints) vertexValueArr = np.array(vertexValue) xMin = vertexPointsArr[:, 0].min() xMax = vertexPointsArr[:, 0].max() yMin = vertexPointsArr[:, 1].min() yMax = vertexPointsArr[:, 1].max() # print(xMin, xMax, yMin, yMax) grid_x, grid_y = np.mgrid[xMin:xMax:complex(0, numX), yMin:yMax:complex(0, numY)] grid_z1 = griddata(vertexPointsArr, vertexValueArr, (grid_x, grid_y), interp) # print (grid_z1.T) plt.ion()  # interactive mode if matPlotFig is None: matPlotFig = plt.figure(figsize=figsize) # plt.xlim(xMin, xMax) # plt.ylim(yMin, yMax) plt.clf() plt.axis((xMin, xMax, yMin, yMax)) image = plt.imshow(grid_z1.T, extent=(xMin, xMax, yMin, yMax), origin='lower', aspect='equal') #
plt.margins(tight=True) # plt.tight_layout() # plt.margins(x=-0.3, y=-0.3) if colorBar: cbar = plt.colorbar(orientation=colorBar, format=barFormatNum) if colorBarLegend is not None: if colorBarLegend == '': colorBarLegend = self.getFieldIDName() + '_' + str(fieldComponent) if self.unit is not None: colorBarLegend = colorBarLegend + ' (' + self.unit.name() + ')' cbar.set_label(colorBarLegend, rotation=0 if colorBar == 'horizontal' else 90) if title: plt.title(title) if xlabel: plt.xlabel(xlabel) if ylabel: plt.ylabel(ylabel) if vertex == 1: plt.scatter(vertexPointsArr[:, 0], vertexPointsArr[:, 1], marker='o', c='b', s=5, zorder=10) # plt.axis('equal') # plt.gca().set_aspect('equal', adjustable='box-forced') if isinstance(barRange[0], float) or isinstance(barRange[0], int): image.set_clim(vmin=barRange[0], vmax=barRange[1]) if fileName: plt.savefig(fileName, bbox_inches='tight') if show: matPlotFig.canvas.draw() # plt.ioff() # plt.show(block=True) return matPlotFig def field2Image2DBlock(self): """ Block an open window from matPlotLib. Waits until closed. """ import matplotlib.pyplot as plt plt.ioff() plt.show(block=True) def toHdf5(self, fileName, group='component1/part1'): """ Dump field to HDF5, in a simple format suitable for interoperability (TODO: document). :param str fileName: HDF5 file :param str group: HDF5 group the data will be saved under. The HDF hierarchy is like this:: group | +--- mesh_01 {hash=25aa0aa04457} | +--- [vertex_coords] | +--- [cell_types] | \--- [cell_vertices] +--- mesh_02 {hash=17809e2b86ea} | +--- [vertex_coords] | +--- [cell_types] | \--- [cell_vertices] +--- ... +--- field_01 | +--- -> mesh_01 | \--- [vertex_values] +--- field_02 | +--- -> mesh_01 | \--- [vertex_values] +--- field_03 | +--- -> mesh_02 | \--- [cell_values] \--- ... where ``plain`` names are HDF (sub)groups, ``[bracketed]`` names are datasets, ``{name=value}`` are HDF attributes, ``->`` prefix indicated HDF5 hardlink (transparent to the user); numerical suffixes (``_01``, ...) are auto-allocated. Mesh objects are hardlinked using HDF5 hardlinks if an identical mesh is already stored in the group, based on hexdigest of its full data. .. note:: This method has not been tested yet. The format is subject to future changes. """ import h5py hdf = h5py.File(fileName, 'a', libver='latest') if group not in hdf: gg = hdf.create_group(group) else: gg = hdf[group] # raise IOError('Path "%s" is already used in "%s".'%(path,fileName)) def lowestUnused(trsf, predicate, start=1): """ Find the lowest unused index, where *predicate* is used to test for existence, and *trsf* transforms integer (starting at *start* and incremented until unused value is found) to whatever predicate accepts as argument. Lowest transformed value is returned. 
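Illustrative example (added for clarity; the group names here are hypothetical)::

    >>> taken = set(['mesh_01', 'mesh_03'])
    >>> lowestUnused(trsf=lambda i: 'mesh_%02d' % i,
    ...              predicate=lambda t: t in taken)
    'mesh_02'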
""" import itertools for i in itertools.count(start=start): t = trsf(i) if not predicate(t): return t # save mesh (not saved if there already) newgrp = lowestUnused(trsf=lambda i: 'mesh_%02d' % i, predicate=lambda t: t in gg) mh5 = self.getMesh().asHdf5Object(parentgroup=gg, newgroup=newgrp) if self.value: fieldGrp = hdf.create_group(lowestUnused(trsf=lambda i, group=group: group+'/field_%02d' % i, predicate=lambda t: t in hdf)) fieldGrp['mesh'] = mh5 fieldGrp.attrs['fieldID'] = self.fieldID fieldGrp.attrs['valueType'] = self.valueType # string/bytes may not contain NULL when stored as string in HDF5 # see http://docs.h5py.org/en/2.3/strings.html # that's why we cast to opaque type "void" and uncast using tostring before unpickling fieldGrp.attrs['units'] = numpy.void(pickle.dumps(self.unit)) fieldGrp.attrs['time'] = numpy.void(pickle.dumps(self.time)) # fieldGrp.attrs['time']=self.time.getValue() if self.fieldType == FieldType.FT_vertexBased: val = numpy.empty(shape=(self.getMesh().getNumberOfVertices(), self.getRecordSize()), dtype=numpy.float) for vert in range(self.getMesh().getNumberOfVertices()): val[vert] = self.getVertexValue(vert).getValue() fieldGrp['vertex_values'] = val elif self.fieldType == FieldType.FT_cellBased: # raise NotImplementedError("Saving cell-based fields to HDF5 is not yet implemented.") val = numpy.empty(shape=(self.getMesh().getNumberOfCells(), self.getRecordSize()), dtype=numpy.float) for cell in range(self.getMesh().getNumberOfCells()): val[cell] = self.getCellValue(cell) fieldGrp['cell_values'] = val else: raise RuntimeError("Unknown fieldType %d." % self.fieldType) @staticmethod def makeFromHdf5(fileName, group='component1/part1'): """ Restore Fields from HDF5 file. :param str fileName: HDF5 file :param str group: HDF5 group the data will be read from (IOError is raised if the group does not exist). :return: list of new :obj:`Field` instances :rtype: [Field,Field,...] .. note:: This method has not been tested yet. """ import h5py hdf = h5py.File(fileName, 'r', libver='latest') grp = hdf[group] # load mesh and field data from HDF5 meshObjs = [obj for name, obj in grp.items() if name.startswith('mesh_')] fieldObjs = [obj for name, obj in grp.items() if name.startswith('field_')] # construct all meshes as mupif objects meshes = [Mesh.Mesh.makeFromHdf5Object(meshObj) for meshObj in meshObjs] # construct all fields as mupif objects ret = [] for f in fieldObjs: if 'vertex_values' in f: fieldType, values = FieldType.FT_vertexBased, f['vertex_values'] elif 'cell_values' in f: fieldType, values = FieldType.FT_cellBased, f['cell_values'] else: ValueError("HDF5/mupif format error: unable to determine field type.") fieldID, valueType, units, time = FieldID(f.attrs['fieldID']), f.attrs['valueType'], f.attrs['units'].tostring(), f.attrs['time'].tostring() if units == '': units = None # special case, handled at saving time else: units = pickle.loads(units) if time == '': time = None # special case, handled at saving time else: time = pickle.loads(time) meshIndex = meshObjs.index(f['mesh']) # find which mesh object this field refers to ret.append(Field(mesh=meshes[meshIndex], fieldID=fieldID, units=units, time=time, valueType=valueType, values=values, fieldType=fieldType)) return ret def toVTK2(self, fileName, format='ascii'): """ Save the instance as Unstructured Grid in VTK2 format (``.vtk``). 
:param str fileName: where to save :param str format: one of ``ascii`` or ``binary`` """ self.field2VTKData().tofile(filename=fileName, format=format) @staticmethod def makeFromVTK2(fileName, unit, time=0, skip=['coolwarm']): """ Return fields stored in *fileName* in the VTK2 (``.vtk``) format. :param str fileName: filename to load from :param PhysicalUnit unit: physical unit of field values :param float time: time value for created fields (time is not saved in VTK2, thus cannot be recovered) :param [string,] skip: field names to be skipped when reading the input file; the default value skips the default coolwarm colormap. :returns: list of fields stored in the file :rtype: [Field,Field,...] """ import pyvtk from .dataID import FieldID if not fileName.endswith('.vtk'): log.warning('Field.makeFromVTK2: fileName should end with .vtk, you may get in trouble (proceeding).') ret = [] try: data = pyvtk.VtkData(fileName) # this is where reading the file happens (inside pyvtk) except NotImplementedError: log.info('pyvtk fails to open (binary?) file "%s", trying through vtk.vtkGenericDataReader.' % fileName) return Field.makeFromVTK3(fileName, time=time, units=unit, forceVersion2=True) ugr = data.structure if not isinstance(ugr, pyvtk.UnstructuredGrid): raise NotImplementedError( "grid type %s is not handled by mupif (only UnstructuredGrid is)." % ugr.__class__.__name__) mesh = Mesh.UnstructuredMesh.makeFromPyvtkUnstructuredGrid(ugr) # get cell and point data pd, cd = data.point_data.data, data.cell_data.data for dd, fieldType in (pd, FieldType.FT_vertexBased), (cd, FieldType.FT_cellBased): for d in dd: # will raise KeyError if fieldID with that name is not defined if d.name in skip: continue fid = FieldID[d.name] # determine the number of components using the expected number of values from the mesh expectedNumVal = (mesh.getNumberOfVertices() if fieldType == FieldType.FT_vertexBased else mesh.getNumberOfCells()) nc = len(d.scalars)//expectedNumVal valueType = ValueType.fromNumberOfComponents(nc) values = [d.scalars[i*nc:i*nc+nc] for i in range(expectedNumVal)] ret.append(Field( mesh=mesh, fieldID=fid, units=unit, # not stored at all time=time, # not stored either, set by caller valueType=valueType, values=values, fieldType=fieldType )) return ret def toVTK3(self, fileName, **kw): """ Save the instance as Unstructured Grid in VTK3 format (``.vtu``). This is a simple proxy for calling :obj:`manyToVTK3` with the instance as the only field to be saved. If multiple fields with identical mesh are to be saved in VTK3, use :obj:`manyToVTK3` directly. :param fileName: output file name :param ``**kw``: passed to :obj:`manyToVTK3` """ return self.manyToVTK3([self], fileName, **kw) @staticmethod def manyToVTK3(fields, fileName, ascii=False, compress=True): """ Save all fields passed as argument into VTK3 Unstructured Grid file (``*.vtu``). All *fields* must be defined on the same mesh object; exception will be raised if this is not the case.
:param list of Field fields: fields to save :param fileName: output file name :param bool ascii: write numbers as ASCII in the XML-based VTU file (rather than base64-encoded binary in XML) :param bool compress: apply compression to the data """ import vtk if not fields: raise ValueError('At least one field must be passed.') # check if all fields are defined on the same mesh if len(set([f.mesh for f in fields])) != 1: raise RuntimeError( 'Not all fields are sharing the same Mesh object (and could not be saved to a single .vtu file)') # convert mesh to VTK UnstructuredGrid mesh = fields[0].getMesh() vtkgrid = mesh.asVtkUnstructuredGrid() # add fields as arrays for f in fields: arr = vtk.vtkDoubleArray() arr.SetNumberOfComponents(f.getRecordSize()) arr.SetName(f.getFieldIDName()) assert f.getFieldType() in (FieldType.FT_vertexBased, FieldType.FT_cellBased) # other future types not handled if f.getFieldType() == FieldType.FT_vertexBased: nn = mesh.getNumberOfVertices() else: nn = mesh.getNumberOfCells() arr.SetNumberOfValues(nn) for i in range(nn): arr.SetTuple(i, f.giveValue(i)) if f.getFieldType() == FieldType.FT_vertexBased: vtkgrid.GetPointData().AddArray(arr) else: vtkgrid.GetCellData().AddArray(arr) # write the unstructured grid to file writer = vtk.vtkXMLUnstructuredGridWriter() if compress: writer.SetCompressor(vtk.vtkZLibDataCompressor()) if ascii: writer.SetDataModeToAscii() writer.SetFileName(fileName) # SetInputData was introduced in VTK 6; older VTK uses SetInput if vtk.vtkVersion().GetVTKMajorVersion() >= 6: writer.SetInputData(vtkgrid) else: writer.SetInput(vtkgrid) writer.Write() # finito @staticmethod def makeFromVTK3(fileName, units, time=0, forceVersion2=False): """ Create fields from a VTK unstructured grid file (``.vtu``, format version 3, or ``.vtp`` with *forceVersion2*); the mesh is shared between fields. ``vtk.vtkXMLGenericDataObjectReader`` is used to open the file (unless *forceVersion2* is set), but it is checked that the contained dataset is a ``vtk.vtkUnstructuredGrid`` and an error is raised if not. .. note:: Units are not supported when loading from VTK, all fields will have ``None`` unit assigned. :param str fileName: VTK (``*.vtu``) file :param PhysicalUnit units: units of read values :param float time: time value for created fields (time is not saved in VTK3, thus cannot be recovered) :param bool forceVersion2: if ``True``, ``vtk.vtkGenericDataObjectReader`` (for VTK version 2) will be used to open the file, instead of ``vtk.vtkXMLGenericDataObjectReader``; this also supposes *fileName* ends with ``.vtk`` (not checked, but may cause an error). :return: list of new :obj:`Field` instances :rtype: [Field,Field,...]
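Illustrative usage (a hedged sketch; the file name is hypothetical, and ``units`` is passed as ``None`` since VTK files carry no unit information)::

    fields = Field.makeFromVTK3('result.vtu', units=None)
    for f in fields:
        print(f.getFieldIDName(), f.getValueType())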
""" import vtk from .dataID import FieldID # rr=vtk.vtkXMLUnstructuredGridReader() if forceVersion2 or fileName.endswith('.vtk'): rr = vtk.vtkGenericDataObjectReader() else: rr = vtk.vtkXMLGenericDataObjectReader() rr.SetFileName(fileName) rr.Update() ugrid = rr.GetOutput() if not isinstance(ugrid, vtk.vtkUnstructuredGrid): raise RuntimeError("vtkDataObject read from '%s' must be a vtkUnstructuredGrid (not a %s)" % ( fileName, ugrid.__class__.__name__)) # import sys # sys.stderr.write(str((ugrid,ugrid.__class__,vtk.vtkUnstructuredGrid))) # make mesh -- implemented separately mesh = Mesh.UnstructuredMesh.makeFromVtkUnstructuredGrid(ugrid) # fields which will be returned ret = [] # get cell and point data cd, pd = ugrid.GetCellData(), ugrid.GetPointData() for data, fieldType in (pd, FieldType.FT_vertexBased), (cd, FieldType.FT_cellBased): for idata in range(data.GetNumberOfArrays()): aname, arr = pd.GetArrayName(idata), pd.GetArray(idata) nt = arr.GetNumberOfTuples() if nt == 0: raise RuntimeError("Zero values in field '%s', unable to determine value type." % aname) t0 = arr.GetTuple(0) valueType = ValueType.fromNumberOfComponents(len(arr.GetTuple(0))) # this will raise KeyError if fieldID with that name not defined fid = FieldID[aname] # get actual values as tuples values = [arr.GetTuple(t) for t in range(nt)] ret.append(Field( mesh=mesh, fieldID=fid, units=units, # not stored at all time=time, # not stored either, set by caller valueType=valueType, values=values, fieldType=fieldType )) return ret def _sum(self, other, sign1, sign2): """ Should return a new instance. As deep copy is expensive, this operation should be avoided. Better to modify the field values. """ raise TypeError('Not supported') def inUnitsOf(self, *units): """ Should return a new instance. As deep copy is expensive, this operation should be avoided. Better to use convertToUnits method performing in place conversion. """ raise TypeError('Not supported') # def __deepcopy__(self, memo): # """ Deepcopy operatin modified not to include attributes starting with underscore. # These are supposed to be the ones valid only to s specific copy of the receiver. # An example of these attributes are _PyroURI (injected by Application), # where _PyroURI contains the URI of specific object, the copy should receive # its own URI # """ # cls = self.__class__ # dpcpy = cls.__new__(cls) # # memo[id(self)] = dpcpy # for attr in dir(self): # if not attr.startswith('_'): # value = getattr(self, attr) # setattr(dpcpy, attr, copy.deepcopy(value, memo)) # return dpcpy
lgpl-3.0
miloharper/neural-network-animation
matplotlib/tests/test_ticker.py
9
4261
from __future__ import (absolute_import, division, print_function, unicode_literals) import six import nose.tools from nose.tools import assert_raises from numpy.testing import assert_almost_equal import numpy as np import matplotlib import matplotlib.pyplot as plt import matplotlib.ticker as mticker from matplotlib.testing.decorators import cleanup def test_MaxNLocator(): loc = mticker.MaxNLocator(nbins=5) test_value = np.array([20., 40., 60., 80., 100.]) assert_almost_equal(loc.tick_values(20, 100), test_value) test_value = np.array([0., 0.0002, 0.0004, 0.0006, 0.0008, 0.001]) assert_almost_equal(loc.tick_values(0.001, 0.0001), test_value) test_value = np.array([-1.0e+15, -5.0e+14, 0e+00, 5e+14, 1.0e+15]) assert_almost_equal(loc.tick_values(-1e15, 1e15), test_value) def test_LinearLocator(): loc = mticker.LinearLocator(numticks=3) test_value = np.array([-0.8, -0.3, 0.2]) assert_almost_equal(loc.tick_values(-0.8, 0.2), test_value) def test_MultipleLocator(): loc = mticker.MultipleLocator(base=3.147) test_value = np.array([-9.441, -6.294, -3.147, 0., 3.147, 6.294, 9.441, 12.588]) assert_almost_equal(loc.tick_values(-7, 10), test_value) @cleanup def test_AutoMinorLocator(): fig, ax = plt.subplots() ax.set_xlim(0, 1.39) ax.minorticks_on() test_value = np.array([0.05, 0.1, 0.15, 0.25, 0.3, 0.35, 0.45, 0.5, 0.55, 0.65, 0.7, 0.75, 0.85, 0.9, 0.95, 1, 1.05, 1.1, 1.15, 1.25, 1.3, 1.35]) assert_almost_equal(ax.xaxis.get_ticklocs(minor=True), test_value) def test_LogLocator(): loc = mticker.LogLocator(numticks=5) assert_raises(ValueError, loc.tick_values, 0, 1000) test_value = np.array([1.00000000e-05, 1.00000000e-03, 1.00000000e-01, 1.00000000e+01, 1.00000000e+03, 1.00000000e+05, 1.00000000e+07, 1.000000000e+09]) assert_almost_equal(loc.tick_values(0.001, 1.1e5), test_value) loc = mticker.LogLocator(base=2) test_value = np.array([0.5, 1., 2., 4., 8., 16., 32., 64., 128., 256.]) assert_almost_equal(loc.tick_values(1, 100), test_value) def test_LogFormatterExponent(): class FakeAxis(object): """Allow Formatter to be called without having a "full" plot set up.""" def get_view_interval(self): return 1, 10 i = np.arange(-3, 4, dtype=float) expected_result = ['-3', '-2', '-1', '0', '1', '2', '3'] for base in [2, 5, 10, np.pi, np.e]: formatter = mticker.LogFormatterExponent(base=base) formatter.axis = FakeAxis() vals = base**i labels = [formatter(x, pos) for (x, pos) in zip(vals, i)] nose.tools.assert_equal(labels, expected_result) # Should be a blank string for non-integer powers if labelOnlyBase=True formatter = mticker.LogFormatterExponent(base=10, labelOnlyBase=True) formatter.axis = FakeAxis() nose.tools.assert_equal(formatter(10**0.1), '') # Otherwise, non-integer powers should be nicely formatted locs = np.array([0.1, 0.00001, np.pi, 0.2, -0.2, -0.00001]) i = range(len(locs)) expected_result = ['0.1', '1e-05', '3.14', '0.2', '-0.2', '-1e-05'] for base in [2, 5, 10, np.pi, np.e]: formatter = mticker.LogFormatterExponent(base, labelOnlyBase=False) formatter.axis = FakeAxis() vals = base**locs labels = [formatter(x, pos) for (x, pos) in zip(vals, i)] nose.tools.assert_equal(labels, expected_result) def test_use_offset(): for use_offset in [True, False]: with matplotlib.rc_context({'axes.formatter.useoffset': use_offset}): tmp_form = mticker.ScalarFormatter() nose.tools.assert_equal(use_offset, tmp_form.get_useOffset()) def test_formatstrformatter(): # test % style formatter tmp_form = mticker.FormatStrFormatter('%05d') nose.tools.assert_equal('00002', tmp_form(2)) # test str.format() style formatter 
tmp_form = mticker.StrMethodFormatter('{x:05d}') nose.tools.assert_equal('00002', tmp_form(2)) if __name__ == '__main__': import nose nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
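def _demo_formatter_equivalence():
    # Hedged illustration appended to the original tests (the leading
    # underscore keeps nose from collecting it as a test). It shows that the
    # %-style and str.format-style formatters exercised above agree on a
    # sample tick value.
    old_style = mticker.FormatStrFormatter('%.2f')
    new_style = mticker.StrMethodFormatter('{x:.2f}')
    nose.tools.assert_equal(old_style(3.14159), new_style(3.14159))
    nose.tools.assert_equal(old_style(3.14159), '3.14')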
mit
marionleborgne/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/contour.py
69
42063
""" These are classes to support contour plotting and labelling for the axes class """ from __future__ import division import warnings import matplotlib as mpl import numpy as np from numpy import ma import matplotlib._cntr as _cntr import matplotlib.path as path import matplotlib.ticker as ticker import matplotlib.cm as cm import matplotlib.colors as colors import matplotlib.collections as collections import matplotlib.font_manager as font_manager import matplotlib.text as text import matplotlib.cbook as cbook import matplotlib.mlab as mlab # Import needed for adding manual selection capability to clabel from matplotlib.blocking_input import BlockingContourLabeler # We can't use a single line collection for contour because a line # collection can have only a single line style, and we want to be able to have # dashed negative contours, for example, and solid positive contours. # We could use a single polygon collection for filled contours, but it # seems better to keep line and filled contours similar, with one collection # per level. class ContourLabeler: '''Mixin to provide labelling capability to ContourSet''' def clabel(self, *args, **kwargs): """ call signature:: clabel(cs, **kwargs) adds labels to line contours in *cs*, where *cs* is a :class:`~matplotlib.contour.ContourSet` object returned by contour. :: clabel(cs, v, **kwargs) only labels contours listed in *v*. Optional keyword arguments: *fontsize*: See http://matplotlib.sf.net/fonts.html *colors*: - if *None*, the color of each label matches the color of the corresponding contour - if one string color, e.g. *colors* = 'r' or *colors* = 'red', all labels will be plotted in this color - if a tuple of matplotlib color args (string, float, rgb, etc), different labels will be plotted in different colors in the order specified *inline*: controls whether the underlying contour is removed or not. Default is *True*. *inline_spacing*: space in pixels to leave on each side of label when placing inline. Defaults to 5. This spacing will be exact for labels at locations where the contour is straight, less so for labels on curved contours. *fmt*: a format string for the label. Default is '%1.3f' Alternatively, this can be a dictionary matching contour levels with arbitrary strings to use for each contour level (i.e., fmt[level]=string) *manual*: if *True*, contour labels will be placed manually using mouse clicks. Click the first button near a contour to add a label, click the second button (or potentially both mouse buttons at once) to finish adding labels. The third button can be used to remove the last label added, but only if labels are not inline. Alternatively, the keyboard can be used to select label locations (enter to end label placement, delete or backspace act like the third mouse button, and any other key will select a label location). .. plot:: mpl_examples/pylab_examples/contour_demo.py """ """ NOTES on how this all works: clabel basically takes the input arguments and uses them to add a list of "label specific" attributes to the ContourSet object. These attributes are all of the form label* and names should be fairly self explanatory. Once these attributes are set, clabel passes control to the labels method (case of automatic label placement) or BlockingContourLabeler (case of manual label placement). 
""" fontsize = kwargs.get('fontsize', None) inline = kwargs.get('inline', 1) inline_spacing = kwargs.get('inline_spacing', 5) self.labelFmt = kwargs.get('fmt', '%1.3f') _colors = kwargs.get('colors', None) # Detect if manual selection is desired and remove from argument list self.labelManual=kwargs.get('manual',False) if len(args) == 0: levels = self.levels indices = range(len(self.levels)) elif len(args) == 1: levlabs = list(args[0]) indices, levels = [], [] for i, lev in enumerate(self.levels): if lev in levlabs: indices.append(i) levels.append(lev) if len(levels) < len(levlabs): msg = "Specified levels " + str(levlabs) msg += "\n don't match available levels " msg += str(self.levels) raise ValueError(msg) else: raise TypeError("Illegal arguments to clabel, see help(clabel)") self.labelLevelList = levels self.labelIndiceList = indices self.labelFontProps = font_manager.FontProperties() if fontsize == None: font_size = int(self.labelFontProps.get_size_in_points()) else: if type(fontsize) not in [int, float, str]: raise TypeError("Font size must be an integer number.") # Can't it be floating point, as indicated in line above? else: if type(fontsize) == str: font_size = int(self.labelFontProps.get_size_in_points()) else: self.labelFontProps.set_size(fontsize) font_size = fontsize self.labelFontSizeList = [font_size] * len(levels) if _colors == None: self.labelMappable = self self.labelCValueList = np.take(self.cvalues, self.labelIndiceList) else: cmap = colors.ListedColormap(_colors, N=len(self.labelLevelList)) self.labelCValueList = range(len(self.labelLevelList)) self.labelMappable = cm.ScalarMappable(cmap = cmap, norm = colors.NoNorm()) #self.labelTexts = [] # Initialized in ContourSet.__init__ #self.labelCValues = [] # same self.labelXYs = [] if self.labelManual: print 'Select label locations manually using first mouse button.' print 'End manual selection with second mouse button.' if not inline: print 'Remove last label by clicking third mouse button.' blocking_contour_labeler = BlockingContourLabeler(self) blocking_contour_labeler(inline,inline_spacing) else: self.labels(inline,inline_spacing) # Hold on to some old attribute names. 
These are deprecated and will # be removed in the near future (sometime after 2008-08-01), but keeping # for now for backwards compatibility self.cl = self.labelTexts self.cl_xy = self.labelXYs self.cl_cvalues = self.labelCValues self.labelTextsList = cbook.silent_list('text.Text', self.labelTexts) return self.labelTextsList def print_label(self, linecontour,labelwidth): "if contours are too short, don't plot a label" lcsize = len(linecontour) if lcsize > 10 * labelwidth: return 1 xmax = np.amax(linecontour[:,0]) xmin = np.amin(linecontour[:,0]) ymax = np.amax(linecontour[:,1]) ymin = np.amin(linecontour[:,1]) lw = labelwidth if (xmax - xmin) > 1.2* lw or (ymax - ymin) > 1.2 * lw: return 1 else: return 0 def too_close(self, x,y, lw): "if there's a label already nearby, find a better place" if self.labelXYs != []: dist = [np.sqrt((x-loc[0]) ** 2 + (y-loc[1]) ** 2) for loc in self.labelXYs] for d in dist: if d < 1.2*lw: return 1 else: return 0 else: return 0 def get_label_coords(self, distances, XX, YY, ysize, lw): """ labels are plotted at a location with the smallest dispersion of the contour from a straight line unless there's another label nearby, in which case the second best place on the contour is picked up if there's no good place a label is plotted at the beginning of the contour """ hysize = int(ysize/2) adist = np.argsort(distances) for ind in adist: x, y = XX[ind][hysize], YY[ind][hysize] if self.too_close(x,y, lw): continue else: return x,y, ind ind = adist[0] x, y = XX[ind][hysize], YY[ind][hysize] return x,y, ind def get_label_width(self, lev, fmt, fsize): "get the width of the label in points" if cbook.is_string_like(lev): lw = (len(lev)) * fsize else: lw = (len(self.get_text(lev,fmt))) * fsize return lw def get_real_label_width( self, lev, fmt, fsize ): """ This computes actual onscreen label width. This uses some black magic to determine onscreen extent of non-drawn label. This magic may not be very robust. """ # Find middle of axes xx = np.mean( np.asarray(self.ax.axis()).reshape(2,2), axis=1 ) # Temporarily create text object t = text.Text( xx[0], xx[1] ) self.set_label_props( t, self.get_text(lev,fmt), 'k' ) # Some black magic to get onscreen extent # NOTE: This will only work for already drawn figures, as the canvas # does not have a renderer otherwise. This is the reason this function # can't be integrated into the rest of the code.
bbox = t.get_window_extent(renderer=self.ax.figure.canvas.renderer) # difference in pixel extent of image lw = np.diff(bbox.corners()[0::2,0])[0] return lw def set_label_props(self, label, text, color): "set the label properties - color, fontsize, text" label.set_text(text) label.set_color(color) label.set_fontproperties(self.labelFontProps) label.set_clip_box(self.ax.bbox) def get_text(self, lev, fmt): "get the text of the label" if cbook.is_string_like(lev): return lev else: if isinstance(fmt,dict): return fmt[lev] else: return fmt%lev def locate_label(self, linecontour, labelwidth): """find a good place to plot a label (relatively flat part of the contour) and the angle of rotation for the text object """ nsize= len(linecontour) if labelwidth > 1: xsize = int(np.ceil(nsize/labelwidth)) else: xsize = 1 if xsize == 1: ysize = nsize else: ysize = labelwidth XX = np.resize(linecontour[:,0],(xsize, ysize)) YY = np.resize(linecontour[:,1],(xsize, ysize)) #I might have fouled up the following: yfirst = YY[:,0].reshape(xsize, 1) ylast = YY[:,-1].reshape(xsize, 1) xfirst = XX[:,0].reshape(xsize, 1) xlast = XX[:,-1].reshape(xsize, 1) s = (yfirst-YY) * (xlast-xfirst) - (xfirst-XX) * (ylast-yfirst) L = np.sqrt((xlast-xfirst)**2+(ylast-yfirst)**2).ravel() dist = np.add.reduce(([(abs(s)[i]/L[i]) for i in range(xsize)]),-1) x,y,ind = self.get_label_coords(dist, XX, YY, ysize, labelwidth) #print 'ind, x, y', ind, x, y # There must be a more efficient way... lc = [tuple(l) for l in linecontour] dind = lc.index((x,y)) #print 'dind', dind #dind = list(linecontour).index((x,y)) return x, y, dind def calc_label_rot_and_inline( self, slc, ind, lw, lc=None, spacing=5 ): """ This function calculates the appropriate label rotation given the linecontour coordinates in screen units, the index of the label location and the label width. It will also break contour and calculate inlining if *lc* is not empty (lc defaults to the empty list if None). *spacing* is the space around the label in pixels to leave empty. Do both of these tasks at once to avoid calling mlab.path_length multiple times, which is relatively costly. The method used here involves calculating the path length along the contour in pixel coordinates and then looking approximately label width / 2 away from central point to determine rotation and then to break contour if desired. 
""" if lc is None: lc = [] # Half the label width hlw = lw/2.0 # Check if closed and, if so, rotate contour so label is at edge closed = mlab.is_closed_polygon(slc) if closed: slc = np.r_[ slc[ind:-1], slc[:ind+1] ] if len(lc): # Rotate lc also if not empty lc = np.r_[ lc[ind:-1], lc[:ind+1] ] ind = 0 # Path length in pixel space pl = mlab.path_length(slc) pl = pl-pl[ind] # Use linear interpolation to get points around label xi = np.array( [ -hlw, hlw ] ) if closed: # Look at end also for closed contours dp = np.array([pl[-1],0]) else: dp = np.zeros_like(xi) ll = mlab.less_simple_linear_interpolation( pl, slc, dp+xi, extrap=True ) # get vector in pixel space coordinates from one point to other dd = np.diff( ll, axis=0 ).ravel() # Get angle of vector - must be calculated in pixel space for # text rotation to work correctly if np.all(dd==0): # Must deal with case of zero length label rotation = 0.0 else: rotation = np.arctan2(dd[1], dd[0]) * 180.0 / np.pi # Fix angle so text is never upside-down if rotation > 90: rotation = rotation - 180.0 if rotation < -90: rotation = 180.0 + rotation # Break contour if desired nlc = [] if len(lc): # Expand range by spacing xi = dp + xi + np.array([-spacing,spacing]) # Get indices near points of interest I = mlab.less_simple_linear_interpolation( pl, np.arange(len(pl)), xi, extrap=False ) # If those indices aren't beyond contour edge, find x,y if (not np.isnan(I[0])) and int(I[0])<>I[0]: xy1 = mlab.less_simple_linear_interpolation( pl, lc, [ xi[0] ] ) if (not np.isnan(I[1])) and int(I[1])<>I[1]: xy2 = mlab.less_simple_linear_interpolation( pl, lc, [ xi[1] ] ) # Make integer I = [ np.floor(I[0]), np.ceil(I[1]) ] # Actually break contours if closed: # This will remove contour if shorter than label if np.all(~np.isnan(I)): nlc.append( np.r_[ xy2, lc[I[1]:I[0]+1], xy1 ] ) else: # These will remove pieces of contour if they have length zero if not np.isnan(I[0]): nlc.append( np.r_[ lc[:I[0]+1], xy1 ] ) if not np.isnan(I[1]): nlc.append( np.r_[ xy2, lc[I[1]:] ] ) # The current implementation removes contours completely # covered by labels. Uncomment line below to keep # original contour if this is the preferred behavoir. #if not len(nlc): nlc = [ lc ] return (rotation,nlc) def add_label(self,x,y,rotation,lev,cvalue): dx,dy = self.ax.transData.inverted().transform_point((x,y)) t = text.Text(dx, dy, rotation = rotation, horizontalalignment='center', verticalalignment='center') color = self.labelMappable.to_rgba(cvalue,alpha=self.alpha) _text = self.get_text(lev,self.labelFmt) self.set_label_props(t, _text, color) self.labelTexts.append(t) self.labelCValues.append(cvalue) self.labelXYs.append((x,y)) # Add label to plot here - useful for manual mode label selection self.ax.add_artist(t) def pop_label(self,index=-1): '''Defaults to removing last label, but any index can be supplied''' self.labelCValues.pop(index) t = self.labelTexts.pop(index) t.remove() def labels(self, inline, inline_spacing): trans = self.ax.transData # A bit of shorthand for icon, lev, fsize, cvalue in zip( self.labelIndiceList, self.labelLevelList, self.labelFontSizeList, self.labelCValueList ): con = self.collections[icon] lw = self.get_label_width(lev, self.labelFmt, fsize) additions = [] paths = con.get_paths() for segNum, linepath in enumerate(paths): lc = linepath.vertices # Line contour slc0 = trans.transform(lc) # Line contour in screen coords # For closed polygons, add extra point to avoid division by # zero in print_label and locate_label. 
Other than these # functions, this is not necessary and should probably be # eventually removed. if mlab.is_closed_polygon( lc ): slc = np.r_[ slc0, slc0[1:2,:] ] else: slc = slc0 if self.print_label(slc,lw): # Check if long enough for a label x,y,ind = self.locate_label(slc, lw) if inline: lcarg = lc else: lcarg = None rotation,new=self.calc_label_rot_and_inline( slc0, ind, lw, lcarg, inline_spacing ) # Actually add the label self.add_label(x,y,rotation,lev,cvalue) # If inline, add new contours if inline: for n in new: # Add path if not empty or single point if len(n)>1: additions.append( path.Path(n) ) else: # If not adding label, keep old path additions.append(linepath) # After looping over all segments on a contour, remove old # paths and add new ones if inlining if inline: del paths[:] paths.extend(additions) class ContourSet(cm.ScalarMappable, ContourLabeler): """ Create and store a set of contour lines or filled regions. User-callable method: clabel Useful attributes: ax: the axes object in which the contours are drawn collections: a silent_list of LineCollections or PolyCollections levels: contour levels layers: same as levels for line contours; half-way between levels for filled contours. See _process_colors method. """ def __init__(self, ax, *args, **kwargs): """ Draw contour lines or filled regions, depending on whether keyword arg 'filled' is False (default) or True. The first argument of the initializer must be an axes object. The remaining arguments and keyword arguments are described in ContourSet.contour_doc. """ self.ax = ax self.levels = kwargs.get('levels', None) self.filled = kwargs.get('filled', False) self.linewidths = kwargs.get('linewidths', None) self.linestyles = kwargs.get('linestyles', 'solid') self.alpha = kwargs.get('alpha', 1.0) self.origin = kwargs.get('origin', None) self.extent = kwargs.get('extent', None) cmap = kwargs.get('cmap', None) self.colors = kwargs.get('colors', None) norm = kwargs.get('norm', None) self.extend = kwargs.get('extend', 'neither') self.antialiased = kwargs.get('antialiased', True) self.nchunk = kwargs.get('nchunk', 0) self.locator = kwargs.get('locator', None) if (isinstance(norm, colors.LogNorm) or isinstance(self.locator, ticker.LogLocator)): self.logscale = True if norm is None: norm = colors.LogNorm() if self.extend != 'neither': raise ValueError('extend kwarg does not work yet with log scale') else: self.logscale = False if self.origin is not None: assert(self.origin in ['lower', 'upper', 'image']) if self.extent is not None: assert(len(self.extent) == 4) if cmap is not None: assert(isinstance(cmap, colors.Colormap)) if self.colors is not None and cmap is not None: raise ValueError('Either colors or cmap must be None') if self.origin == 'image': self.origin = mpl.rcParams['image.origin'] x, y, z = self._contour_args(*args) # also sets self.levels, # self.layers if self.colors is not None: cmap = colors.ListedColormap(self.colors, N=len(self.layers)) if self.filled: self.collections = cbook.silent_list('collections.PolyCollection') else: self.collections = cbook.silent_list('collections.LineCollection') # label lists must be initialized here self.labelTexts = [] self.labelCValues = [] kw = {'cmap': cmap} if norm is not None: kw['norm'] = norm cm.ScalarMappable.__init__(self, **kw) # sets self.cmap; self._process_colors() _mask = ma.getmask(z) if _mask is ma.nomask: _mask = None if self.filled: if self.linewidths is not None: warnings.warn('linewidths is ignored by contourf') C = _cntr.Cntr(x, y, z.filled(), _mask) lowers = 
self._levels[:-1] uppers = self._levels[1:] for level, level_upper in zip(lowers, uppers): nlist = C.trace(level, level_upper, points = 0, nchunk = self.nchunk) col = collections.PolyCollection(nlist, antialiaseds = (self.antialiased,), edgecolors= 'none', alpha=self.alpha) self.ax.add_collection(col) self.collections.append(col) else: tlinewidths = self._process_linewidths() self.tlinewidths = tlinewidths tlinestyles = self._process_linestyles() C = _cntr.Cntr(x, y, z.filled(), _mask) for level, width, lstyle in zip(self.levels, tlinewidths, tlinestyles): nlist = C.trace(level, points = 0) col = collections.LineCollection(nlist, linewidths = width, linestyle = lstyle, alpha=self.alpha) if level < 0.0 and self.monochrome: ls = mpl.rcParams['contour.negative_linestyle'] col.set_linestyle(ls) col.set_label('_nolegend_') self.ax.add_collection(col, False) self.collections.append(col) self.changed() # set the colors x0 = ma.minimum(x) x1 = ma.maximum(x) y0 = ma.minimum(y) y1 = ma.maximum(y) self.ax.update_datalim([(x0,y0), (x1,y1)]) self.ax.autoscale_view() def changed(self): tcolors = [ (tuple(rgba),) for rgba in self.to_rgba(self.cvalues, alpha=self.alpha)] self.tcolors = tcolors for color, collection in zip(tcolors, self.collections): collection.set_alpha(self.alpha) collection.set_color(color) for label, cv in zip(self.labelTexts, self.labelCValues): label.set_alpha(self.alpha) label.set_color(self.labelMappable.to_rgba(cv)) # add label colors cm.ScalarMappable.changed(self) def _autolev(self, z, N): ''' Select contour levels to span the data. We need two more levels for filled contours than for line contours, because for the latter we need to specify the lower and upper boundary of each range. For example, a single contour boundary, say at z = 0, requires only one contour line, but two filled regions, and therefore three levels to provide boundaries for both regions. ''' if self.locator is None: if self.logscale: self.locator = ticker.LogLocator() else: self.locator = ticker.MaxNLocator(N+1) self.locator.create_dummy_axis() zmax = self.zmax zmin = self.zmin self.locator.set_bounds(zmin, zmax) lev = self.locator() zmargin = (zmax - zmin) * 0.000001 # so z < (zmax + zmargin) if zmax >= lev[-1]: lev[-1] += zmargin if zmin <= lev[0]: if self.logscale: lev[0] = 0.99 * zmin else: lev[0] -= zmargin self._auto = True if self.filled: return lev return lev[1:-1] def _initialize_x_y(self, z): ''' Return X, Y arrays such that contour(Z) will match imshow(Z) if origin is not None. The center of pixel Z[i,j] depends on origin: if origin is None, x = j, y = i; if origin is 'lower', x = j + 0.5, y = i + 0.5; if origin is 'upper', x = j + 0.5, y = Nrows - i - 0.5 If extent is not None, x and y will be scaled to match, as in imshow. If origin is None and extent is not None, then extent will give the minimum and maximum values of x and y. ''' if z.ndim != 2: raise TypeError("Input must be a 2D array.") else: Ny, Nx = z.shape if self.origin is None: # Not for image-matching. 
if self.extent is None: return np.meshgrid(np.arange(Nx), np.arange(Ny)) else: x0,x1,y0,y1 = self.extent x = np.linspace(x0, x1, Nx) y = np.linspace(y0, y1, Ny) return np.meshgrid(x, y) # Match image behavior: if self.extent is None: x0,x1,y0,y1 = (0, Nx, 0, Ny) else: x0,x1,y0,y1 = self.extent dx = float(x1 - x0)/Nx dy = float(y1 - y0)/Ny x = x0 + (np.arange(Nx) + 0.5) * dx y = y0 + (np.arange(Ny) + 0.5) * dy if self.origin == 'upper': y = y[::-1] return np.meshgrid(x,y) def _check_xyz(self, args): ''' For functions like contour, check that the dimensions of the input arrays match; if x and y are 1D, convert them to 2D using meshgrid. Possible change: I think we should make and use an ArgumentError Exception class (here and elsewhere). ''' # We can strip away the x and y units x = self.ax.convert_xunits( args[0] ) y = self.ax.convert_yunits( args[1] ) x = np.asarray(x, dtype=np.float64) y = np.asarray(y, dtype=np.float64) z = ma.asarray(args[2], dtype=np.float64) if z.ndim != 2: raise TypeError("Input z must be a 2D array.") else: Ny, Nx = z.shape if x.shape == z.shape and y.shape == z.shape: return x,y,z if x.ndim != 1 or y.ndim != 1: raise TypeError("Inputs x and y must be 1D or 2D.") nx, = x.shape ny, = y.shape if nx != Nx or ny != Ny: raise TypeError("Length of x must be number of columns in z,\n" + "and length of y must be number of rows.") x,y = np.meshgrid(x,y) return x,y,z def _contour_args(self, *args): if self.filled: fn = 'contourf' else: fn = 'contour' Nargs = len(args) if Nargs <= 2: z = ma.asarray(args[0], dtype=np.float64) x, y = self._initialize_x_y(z) elif Nargs <=4: x,y,z = self._check_xyz(args[:3]) else: raise TypeError("Too many arguments to %s; see help(%s)" % (fn,fn)) self.zmax = ma.maximum(z) self.zmin = ma.minimum(z) if self.logscale and self.zmin <= 0: z = ma.masked_where(z <= 0, z) warnings.warn('Log scale: values of z <=0 have been masked') self.zmin = z.min() self._auto = False if self.levels is None: if Nargs == 1 or Nargs == 3: lev = self._autolev(z, 7) else: # 2 or 4 args level_arg = args[-1] try: if type(level_arg) == int: lev = self._autolev(z, level_arg) else: lev = np.asarray(level_arg).astype(np.float64) except: raise TypeError( "Last %s arg must give levels; see help(%s)" % (fn,fn)) if self.filled and len(lev) < 2: raise ValueError("Filled contours require at least 2 levels.") # Workaround for cntr.c bug wrt masked interior regions: #if filled: # z = ma.masked_array(z.filled(-1e38)) # It's not clear this is any better than the original bug. 
self.levels = lev #if self._auto and self.extend in ('both', 'min', 'max'): # raise TypeError("Auto level selection is inconsistent " # + "with use of 'extend' kwarg") self._levels = list(self.levels) if self.extend in ('both', 'min'): self._levels.insert(0, min(self.levels[0],self.zmin) - 1) if self.extend in ('both', 'max'): self._levels.append(max(self.levels[-1],self.zmax) + 1) self._levels = np.asarray(self._levels) self.vmin = np.amin(self.levels) # alternative would be self.layers self.vmax = np.amax(self.levels) if self.extend in ('both', 'min'): self.vmin = 2 * self.levels[0] - self.levels[1] if self.extend in ('both', 'max'): self.vmax = 2 * self.levels[-1] - self.levels[-2] self.layers = self._levels # contour: a line is a thin layer if self.filled: self.layers = 0.5 * (self._levels[:-1] + self._levels[1:]) if self.extend in ('both', 'min'): self.layers[0] = 0.5 * (self.vmin + self._levels[1]) if self.extend in ('both', 'max'): self.layers[-1] = 0.5 * (self.vmax + self._levels[-2]) return (x, y, z) def _process_colors(self): """ Color argument processing for contouring. Note that we base the color mapping on the contour levels, not on the actual range of the Z values. This means we don't have to worry about bad values in Z, and we always have the full dynamic range available for the selected levels. The color is based on the midpoint of the layer, except for an extended end layers. """ self.monochrome = self.cmap.monochrome if self.colors is not None: i0, i1 = 0, len(self.layers) if self.extend in ('both', 'min'): i0 = -1 if self.extend in ('both', 'max'): i1 = i1 + 1 self.cvalues = range(i0, i1) self.set_norm(colors.NoNorm()) else: self.cvalues = self.layers if not self.norm.scaled(): self.set_clim(self.vmin, self.vmax) if self.extend in ('both', 'max', 'min'): self.norm.clip = False self.set_array(self.layers) # self.tcolors are set by the "changed" method def _process_linewidths(self): linewidths = self.linewidths Nlev = len(self.levels) if linewidths is None: tlinewidths = [(mpl.rcParams['lines.linewidth'],)] *Nlev else: if cbook.iterable(linewidths) and len(linewidths) < Nlev: linewidths = list(linewidths) * int(np.ceil(Nlev/len(linewidths))) elif not cbook.iterable(linewidths) and type(linewidths) in [int, float]: linewidths = [linewidths] * Nlev tlinewidths = [(w,) for w in linewidths] return tlinewidths def _process_linestyles(self): linestyles = self.linestyles Nlev = len(self.levels) if linestyles is None: tlinestyles = ['solid'] * Nlev else: if cbook.is_string_like(linestyles): tlinestyles = [linestyles] * Nlev elif cbook.iterable(linestyles) and len(linestyles) <= Nlev: tlinestyles = list(linestyles) * int(np.ceil(Nlev/len(linestyles))) return tlinestyles def get_alpha(self): '''returns alpha to be applied to all ContourSet artists''' return self.alpha def set_alpha(self, alpha): '''sets alpha for all ContourSet artists''' self.alpha = alpha self.changed() contour_doc = """ :func:`~matplotlib.pyplot.contour` and :func:`~matplotlib.pyplot.contourf` draw contour lines and filled contours, respectively. Except as noted, function signatures and return values are the same for both versions. :func:`~matplotlib.pyplot.contourf` differs from the Matlab (TM) version in that it does not draw the polygon edges, because the contouring engine yields simply connected regions with branch cuts. To draw the edges, add line contours with calls to :func:`~matplotlib.pyplot.contour`. call signatures:: contour(Z) make a contour plot of an array *Z*. 
The level values are chosen automatically. :: contour(X,Y,Z) *X*, *Y* specify the (*x*, *y*) coordinates of the surface :: contour(Z,N) contour(X,Y,Z,N) contour *N* automatically-chosen levels. :: contour(Z,V) contour(X,Y,Z,V) draw contour lines at the values specified in sequence *V* :: contourf(..., V) fill the (len(*V*)-1) regions between the values in *V* :: contour(Z, **kwargs) Use keyword args to control colors, linewidth, origin, cmap ... see below for more details. *X*, *Y*, and *Z* must be arrays with the same dimensions. *Z* may be a masked array, but filled contouring may not handle internal masked regions correctly. ``C = contour(...)`` returns a :class:`~matplotlib.contour.ContourSet` object. Optional keyword arguments: *colors*: [ None | string | (mpl_colors) ] If *None*, the colormap specified by cmap will be used. If a string, like 'r' or 'red', all levels will be plotted in this color. If a tuple of matplotlib color args (string, float, rgb, etc), different levels will be plotted in different colors in the order specified. *alpha*: float The alpha blending value *cmap*: [ None | Colormap ] A cm :class:`~matplotlib.cm.Colormap` instance or *None*. If *cmap* is *None* and *colors* is *None*, a default Colormap is used. *norm*: [ None | Normalize ] A :class:`matplotlib.colors.Normalize` instance for scaling data values to colors. If *norm* is *None* and *colors* is *None*, the default linear scaling is used. *origin*: [ None | 'upper' | 'lower' | 'image' ] If *None*, the first value of *Z* will correspond to the lower left corner, location (0,0). If 'image', the rc value for ``image.origin`` will be used. This keyword is not active if *X* and *Y* are specified in the call to contour. *extent*: [ None | (x0,x1,y0,y1) ] If *origin* is not *None*, then *extent* is interpreted as in :func:`matplotlib.pyplot.imshow`: it gives the outer pixel boundaries. In this case, the position of Z[0,0] is the center of the pixel, not a corner. If *origin* is *None*, then (*x0*, *y0*) is the position of Z[0,0], and (*x1*, *y1*) is the position of Z[-1,-1]. This keyword is not active if *X* and *Y* are specified in the call to contour. *locator*: [ None | ticker.Locator subclass ] If *locator* is None, the default :class:`~matplotlib.ticker.MaxNLocator` is used. The locator is used to determine the contour levels if they are not given explicitly via the *V* argument. *extend*: [ 'neither' | 'both' | 'min' | 'max' ] Unless this is 'neither', contour levels are automatically added to one or both ends of the range so that all data are included. These added ranges are then mapped to the special colormap values which default to the ends of the colormap range, but can be set via :meth:`matplotlib.cm.Colormap.set_under` and :meth:`matplotlib.cm.Colormap.set_over` methods. contour-only keyword arguments: *linewidths*: [ None | number | tuple of numbers ] If *linewidths* is *None*, the default width in ``lines.linewidth`` in ``matplotlibrc`` is used. If a number, all levels will be plotted with this linewidth. If a tuple, different levels will be plotted with different linewidths in the order specified *linestyles*: [None | 'solid' | 'dashed' | 'dashdot' | 'dotted' ] If *linestyles* is *None*, the 'solid' is used. *linestyles* can also be an iterable of the above strings specifying a set of linestyles to be used. If this iterable is shorter than the number of contour levels it will be repeated as necessary. 
If contour is using a monochrome colormap and the contour level is less than 0, then the linestyle specified in ``contour.negative_linestyle`` in ``matplotlibrc`` will be used. contourf-only keyword arguments: *antialiased*: [ True | False ] enable antialiasing *nchunk*: [ 0 | integer ] If 0, no subdivision of the domain. Specify a positive integer to divide the domain into subdomains of roughly *nchunk* by *nchunk* points. This may never actually be advantageous, so this option may be removed. Chunking introduces artifacts at the chunk boundaries unless *antialiased* is *False*. **Example:** .. plot:: mpl_examples/pylab_examples/contour_demo.py """ def find_nearest_contour( self, x, y, indices=None, pixel=True ): """ Finds contour that is closest to a point. Defaults to measuring distance in pixels (screen space - useful for manual contour labeling), but this can be controlled via a keyword argument. Returns a tuple containing the contour, segment, index of segment, x & y of segment point and distance to minimum point. Call signature:: conmin,segmin,imin,xmin,ymin,dmin = find_nearest_contour( self, x, y, indices=None, pixel=True ) Optional keyword arguments:: *indices*: Indexes of contour levels to consider when looking for nearest point. Defaults to using all levels. *pixel*: If *True*, measure distance in pixel space, if not, measure distance in axes space. Defaults to *True*. """ # This function uses a method that is probably quite # inefficient based on converting each contour segment to # pixel coordinates and then comparing the given point to # those coordinates for each contour. This will probably be # quite slow for complex contours, but for normal use it works # sufficiently well that the time is not noticeable. # Nonetheless, improvements could probably be made. if indices==None: indices = range(len(self.levels)) dmin = 1e10 conmin = None segmin = None xmin = None ymin = None for icon in indices: con = self.collections[icon] paths = con.get_paths() for segNum, linepath in enumerate(paths): lc = linepath.vertices # transfer all data points to screen coordinates if desired if pixel: lc = self.ax.transData.transform(lc) ds = (lc[:,0]-x)**2 + (lc[:,1]-y)**2 d = min( ds ) if d < dmin: dmin = d conmin = icon segmin = segNum imin = mpl.mlab.find( ds == d )[0] xmin = lc[imin,0] ymin = lc[imin,1] return (conmin,segmin,imin,xmin,ymin,dmin)
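A minimal, self-contained sketch of the call signatures documented above. The surface, the level values, and the 'extend' choice are made up for illustration, and a stock matplotlib/numpy install is assumed rather than this particular source tree:

# Illustrative only: exercises contour(X, Y, Z, N) and
# contourf(X, Y, Z, V, extend=...) as described in contour_doc.
import numpy as np
import matplotlib.pyplot as plt

y, x = np.mgrid[-2:2:100j, -2:2:100j]
z = np.exp(-x**2 - y**2) - 0.5 * np.exp(-(x - 1)**2 - (y - 1)**2)

fig, (ax0, ax1) = plt.subplots(1, 2, figsize=(9, 4))

# contour(X, Y, Z, N): draw N automatically-chosen levels as lines
cs = ax0.contour(x, y, z, 6, linewidths=1.0)
ax0.clabel(cs, inline=True, fontsize=8)

# contourf(X, Y, Z, V, extend='both'): fill the len(V)-1 regions between
# the values in V, with extra layers for data beyond the given range
levels = [-0.4, -0.2, 0.0, 0.2, 0.4]
csf = ax1.contourf(x, y, z, levels, extend='both')
fig.colorbar(csf, ax=ax1)

plt.show()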
agpl-3.0
numenta/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/colorbar.py
69
27260
''' Colorbar toolkit with two classes and a function: :class:`ColorbarBase` the base class with full colorbar drawing functionality. It can be used as-is to make a colorbar for a given colormap; a mappable object (e.g., image) is not needed. :class:`Colorbar` the derived class for use with images or contour plots. :func:`make_axes` a function for resizing an axes and adding a second axes suitable for a colorbar The :meth:`~matplotlib.figure.Figure.colorbar` method uses :func:`make_axes` and :class:`Colorbar`; the :func:`~matplotlib.pyplot.colorbar` function is a thin wrapper over :meth:`~matplotlib.figure.Figure.colorbar`. ''' import numpy as np import matplotlib as mpl import matplotlib.colors as colors import matplotlib.cm as cm import matplotlib.ticker as ticker import matplotlib.cbook as cbook import matplotlib.lines as lines import matplotlib.patches as patches import matplotlib.collections as collections import matplotlib.contour as contour make_axes_kw_doc = ''' ========== ==================================================== Property Description ========== ==================================================== *fraction* 0.15; fraction of original axes to use for colorbar *pad* 0.05 if vertical, 0.15 if horizontal; fraction of original axes between colorbar and new image axes *shrink* 1.0; fraction by which to shrink the colorbar *aspect* 20; ratio of long to short dimensions ========== ==================================================== ''' colormap_kw_doc = ''' =========== ==================================================== Property Description =========== ==================================================== *extend* [ 'neither' | 'both' | 'min' | 'max' ] If not 'neither', make pointed end(s) for out-of- range values. These are set for a given colormap using the colormap set_under and set_over methods. *spacing* [ 'uniform' | 'proportional' ] Uniform spacing gives each discrete color the same space; proportional makes the space proportional to the data interval. *ticks* [ None | list of ticks | Locator object ] If None, ticks are determined automatically from the input. *format* [ None | format string | Formatter object ] If None, the :class:`~matplotlib.ticker.ScalarFormatter` is used. If a format string is given, e.g. '%.3f', that is used. An alternative :class:`~matplotlib.ticker.Formatter` object may be given instead. *drawedges* [ False | True ] If true, draw lines at color boundaries. =========== ==================================================== The following will probably be useful only in the context of indexed colors (that is, when the mappable has norm=NoNorm()), or other unusual circumstances. ============ =================================================== Property Description ============ =================================================== *boundaries* None or a sequence *values* None or a sequence which must be of length 1 less than the sequence of *boundaries*. For each region delimited by adjacent entries in *boundaries*, the color mapped to the corresponding value in values will be used. ============ =================================================== ''' colorbar_doc = ''' Add a colorbar to a plot. 
Function signatures for the :mod:`~matplotlib.pyplot` interface; all but the first are also method signatures for the :meth:`~matplotlib.figure.Figure.colorbar` method:: colorbar(**kwargs) colorbar(mappable, **kwargs) colorbar(mappable, cax=cax, **kwargs) colorbar(mappable, ax=ax, **kwargs) arguments: *mappable* the :class:`~matplotlib.image.Image`, :class:`~matplotlib.contour.ContourSet`, etc. to which the colorbar applies; this argument is mandatory for the :meth:`~matplotlib.figure.Figure.colorbar` method but optional for the :func:`~matplotlib.pyplot.colorbar` function, which sets the default to the current image. keyword arguments: *cax* None | axes object into which the colorbar will be drawn *ax* None | parent axes object from which space for a new colorbar axes will be stolen Additional keyword arguments are of two kinds: axes properties: %s colorbar properties: %s If *mappable* is a :class:`~matplotlib.contours.ContourSet`, its *extend* kwarg is included automatically. Note that the *shrink* kwarg provides a simple way to keep a vertical colorbar, for example, from being taller than the axes of the mappable to which the colorbar is attached; but it is a manual method requiring some trial and error. If the colorbar is too tall (or a horizontal colorbar is too wide) use a smaller value of *shrink*. For more precise control, you can manually specify the positions of the axes objects in which the mappable and the colorbar are drawn. In this case, do not use any of the axes properties kwargs. returns: :class:`~matplotlib.colorbar.Colorbar` instance; see also its base class, :class:`~matplotlib.colorbar.ColorbarBase`. Call the :meth:`~matplotlib.colorbar.ColorbarBase.set_label` method to label the colorbar. ''' % (make_axes_kw_doc, colormap_kw_doc) class ColorbarBase(cm.ScalarMappable): ''' Draw a colorbar in an existing axes. This is a base class for the :class:`Colorbar` class, which is the basis for the :func:`~matplotlib.pyplot.colorbar` method and pylab function. It is also useful by itself for showing a colormap. If the *cmap* kwarg is given but *boundaries* and *values* are left as None, then the colormap will be displayed on a 0-1 scale. To show the under- and over-value colors, specify the *norm* as:: colors.Normalize(clip=False) To show the colors versus index instead of on the 0-1 scale, use:: norm=colors.NoNorm. Useful attributes: :attr:`ax` the Axes instance in which the colorbar is drawn :attr:`lines` a LineCollection if lines were drawn, otherwise None :attr:`dividers` a LineCollection if *drawedges* is True, otherwise None Useful public methods are :meth:`set_label` and :meth:`add_lines`. 
''' _slice_dict = {'neither': slice(0,1000000), 'both': slice(1,-1), 'min': slice(1,1000000), 'max': slice(0,-1)} def __init__(self, ax, cmap=None, norm=None, alpha=1.0, values=None, boundaries=None, orientation='vertical', extend='neither', spacing='uniform', # uniform or proportional ticks=None, format=None, drawedges=False, filled=True, ): self.ax = ax if cmap is None: cmap = cm.get_cmap() if norm is None: norm = colors.Normalize() self.alpha = alpha cm.ScalarMappable.__init__(self, cmap=cmap, norm=norm) self.values = values self.boundaries = boundaries self.extend = extend self._inside = self._slice_dict[extend] self.spacing = spacing self.orientation = orientation self.drawedges = drawedges self.filled = filled self.solids = None self.lines = None self.dividers = None self.set_label('') if cbook.iterable(ticks): self.locator = ticker.FixedLocator(ticks, nbins=len(ticks)) else: self.locator = ticks # Handle default in _ticker() if format is None: if isinstance(self.norm, colors.LogNorm): self.formatter = ticker.LogFormatter() else: self.formatter = ticker.ScalarFormatter() elif cbook.is_string_like(format): self.formatter = ticker.FormatStrFormatter(format) else: self.formatter = format # Assume it is a Formatter # The rest is in a method so we can recalculate when clim changes. self.draw_all() def draw_all(self): ''' Calculate any free parameters based on the current cmap and norm, and do all the drawing. ''' self._process_values() self._find_range() X, Y = self._mesh() C = self._values[:,np.newaxis] self._config_axes(X, Y) if self.filled: self._add_solids(X, Y, C) self._set_label() def _config_axes(self, X, Y): ''' Make an axes patch and outline. ''' ax = self.ax ax.set_frame_on(False) ax.set_navigate(False) xy = self._outline(X, Y) ax.update_datalim(xy) ax.set_xlim(*ax.dataLim.intervalx) ax.set_ylim(*ax.dataLim.intervaly) self.outline = lines.Line2D(xy[:, 0], xy[:, 1], color=mpl.rcParams['axes.edgecolor'], linewidth=mpl.rcParams['axes.linewidth']) ax.add_artist(self.outline) self.outline.set_clip_box(None) self.outline.set_clip_path(None) c = mpl.rcParams['axes.facecolor'] self.patch = patches.Polygon(xy, edgecolor=c, facecolor=c, linewidth=0.01, zorder=-1) ax.add_artist(self.patch) ticks, ticklabels, offset_string = self._ticker() if self.orientation == 'vertical': ax.set_xticks([]) ax.yaxis.set_label_position('right') ax.yaxis.set_ticks_position('right') ax.set_yticks(ticks) ax.set_yticklabels(ticklabels) ax.yaxis.get_major_formatter().set_offset_string(offset_string) else: ax.set_yticks([]) ax.xaxis.set_label_position('bottom') ax.set_xticks(ticks) ax.set_xticklabels(ticklabels) ax.xaxis.get_major_formatter().set_offset_string(offset_string) def _set_label(self): if self.orientation == 'vertical': self.ax.set_ylabel(self._label, **self._labelkw) else: self.ax.set_xlabel(self._label, **self._labelkw) def set_label(self, label, **kw): ''' Label the long axis of the colorbar ''' self._label = label self._labelkw = kw self._set_label() def _outline(self, X, Y): ''' Return *x*, *y* arrays of colorbar bounding polygon, taking orientation into account. ''' N = X.shape[0] ii = [0, 1, N-2, N-1, 2*N-1, 2*N-2, N+1, N, 0] x = np.take(np.ravel(np.transpose(X)), ii) y = np.take(np.ravel(np.transpose(Y)), ii) x = x.reshape((len(x), 1)) y = y.reshape((len(y), 1)) if self.orientation == 'horizontal': return np.hstack((y, x)) return np.hstack((x, y)) def _edges(self, X, Y): ''' Return the separator line segments; helper for _add_solids. 
''' N = X.shape[0] # Using the non-array form of these line segments is much # simpler than making them into arrays. if self.orientation == 'vertical': return [zip(X[i], Y[i]) for i in range(1, N-1)] else: return [zip(Y[i], X[i]) for i in range(1, N-1)] def _add_solids(self, X, Y, C): ''' Draw the colors using :meth:`~matplotlib.axes.Axes.pcolor`; optionally add separators. ''' ## Change to pcolorfast after fixing bugs in some backends... if self.orientation == 'vertical': args = (X, Y, C) else: args = (np.transpose(Y), np.transpose(X), np.transpose(C)) kw = {'cmap':self.cmap, 'norm':self.norm, 'shading':'flat', 'alpha':self.alpha} # Save, set, and restore hold state to keep pcolor from # clearing the axes. Ordinarily this will not be needed, # since the axes object should already have hold set. _hold = self.ax.ishold() self.ax.hold(True) col = self.ax.pcolor(*args, **kw) self.ax.hold(_hold) #self.add_observer(col) # We should observe, not be observed... self.solids = col if self.drawedges: self.dividers = collections.LineCollection(self._edges(X,Y), colors=(mpl.rcParams['axes.edgecolor'],), linewidths=(0.5*mpl.rcParams['axes.linewidth'],) ) self.ax.add_collection(self.dividers) def add_lines(self, levels, colors, linewidths): ''' Draw lines on the colorbar. ''' N = len(levels) dummy, y = self._locate(levels) if len(y) <> N: raise ValueError("levels are outside colorbar range") x = np.array([0.0, 1.0]) X, Y = np.meshgrid(x,y) if self.orientation == 'vertical': xy = [zip(X[i], Y[i]) for i in range(N)] else: xy = [zip(Y[i], X[i]) for i in range(N)] col = collections.LineCollection(xy, linewidths=linewidths) self.lines = col col.set_color(colors) self.ax.add_collection(col) def _ticker(self): ''' Return two sequences: ticks (colorbar data locations) and ticklabels (strings). ''' locator = self.locator formatter = self.formatter if locator is None: if self.boundaries is None: if isinstance(self.norm, colors.NoNorm): nv = len(self._values) base = 1 + int(nv/10) locator = ticker.IndexLocator(base=base, offset=0) elif isinstance(self.norm, colors.BoundaryNorm): b = self.norm.boundaries locator = ticker.FixedLocator(b, nbins=10) elif isinstance(self.norm, colors.LogNorm): locator = ticker.LogLocator() else: locator = ticker.MaxNLocator() else: b = self._boundaries[self._inside] locator = ticker.FixedLocator(b, nbins=10) if isinstance(self.norm, colors.NoNorm): intv = self._values[0], self._values[-1] else: intv = self.vmin, self.vmax locator.create_dummy_axis() formatter.create_dummy_axis() locator.set_view_interval(*intv) locator.set_data_interval(*intv) formatter.set_view_interval(*intv) formatter.set_data_interval(*intv) b = np.array(locator()) b, ticks = self._locate(b) formatter.set_locs(b) ticklabels = [formatter(t, i) for i, t in enumerate(b)] offset_string = formatter.get_offset() return ticks, ticklabels, offset_string def _process_values(self, b=None): ''' Set the :attr:`_boundaries` and :attr:`_values` attributes based on the input boundaries and values. Input boundaries can be *self.boundaries* or the argument *b*. 
        '''
        if b is None:
            b = self.boundaries
        if b is not None:
            self._boundaries = np.asarray(b, dtype=float)
            if self.values is None:
                self._values = 0.5*(self._boundaries[:-1]
                                        + self._boundaries[1:])
                if isinstance(self.norm, colors.NoNorm):
                    self._values = (self._values + 0.00001).astype(np.int16)
                return
            self._values = np.array(self.values)
            return
        if self.values is not None:
            self._values = np.array(self.values)
            if self.boundaries is None:
                b = np.zeros(len(self.values)+1, 'd')
                # boundaries are the midpoints between adjacent values;
                # this line previously subtracted instead of adding, a
                # sign error
                b[1:-1] = 0.5*(self._values[:-1] + self._values[1:])
                b[0] = 2.0*b[1] - b[2]
                b[-1] = 2.0*b[-2] - b[-3]
                self._boundaries = b
                return
            self._boundaries = np.array(self.boundaries)
            return
        # Neither boundaries nor values are specified;
        # make reasonable ones based on cmap and norm.
        if isinstance(self.norm, colors.NoNorm):
            b = self._uniform_y(self.cmap.N+1) * self.cmap.N - 0.5
            v = np.zeros((len(b)-1,), dtype=np.int16)
            v[self._inside] = np.arange(self.cmap.N, dtype=np.int16)
            if self.extend in ('both', 'min'):
                v[0] = -1
            if self.extend in ('both', 'max'):
                v[-1] = self.cmap.N
            self._boundaries = b
            self._values = v
            return
        elif isinstance(self.norm, colors.BoundaryNorm):
            b = list(self.norm.boundaries)
            if self.extend in ('both', 'min'):
                b = [b[0]-1] + b
            if self.extend in ('both', 'max'):
                b = b + [b[-1] + 1]
            b = np.array(b)
            v = np.zeros((len(b)-1,), dtype=float)
            bi = self.norm.boundaries
            v[self._inside] = 0.5*(bi[:-1] + bi[1:])
            if self.extend in ('both', 'min'):
                v[0] = b[0] - 1
            if self.extend in ('both', 'max'):
                v[-1] = b[-1] + 1
            self._boundaries = b
            self._values = v
            return
        else:
            if not self.norm.scaled():
                self.norm.vmin = 0
                self.norm.vmax = 1
            b = self.norm.inverse(self._uniform_y(self.cmap.N+1))
            if self.extend in ('both', 'min'):
                b[0] = b[0] - 1
            if self.extend in ('both', 'max'):
                b[-1] = b[-1] + 1
            self._process_values(b)

    def _find_range(self):
        '''
        Set :attr:`vmin` and :attr:`vmax` attributes to the first and
        last boundary excluding extended end boundaries.
        '''
        b = self._boundaries[self._inside]
        self.vmin = b[0]
        self.vmax = b[-1]

    def _central_N(self):
        '''number of boundaries **before** extension of ends'''
        nb = len(self._boundaries)
        if self.extend == 'both':
            nb -= 2
        elif self.extend in ('min', 'max'):
            nb -= 1
        return nb

    def _extended_N(self):
        '''
        Based on the colormap and extend variable, return the
        number of boundaries.
        '''
        N = self.cmap.N + 1
        if self.extend == 'both':
            N += 2
        elif self.extend in ('min', 'max'):
            N += 1
        return N

    def _uniform_y(self, N):
        '''
        Return colorbar data coordinates for *N* uniformly
        spaced boundaries, plus ends if required.
        '''
        if self.extend == 'neither':
            y = np.linspace(0, 1, N)
        else:
            if self.extend == 'both':
                y = np.zeros(N + 2, 'd')
                y[0] = -0.05
                y[-1] = 1.05
            elif self.extend == 'min':
                y = np.zeros(N + 1, 'd')
                y[0] = -0.05
            else:
                y = np.zeros(N + 1, 'd')
                y[-1] = 1.05
            y[self._inside] = np.linspace(0, 1, N)
        return y

    def _proportional_y(self):
        '''
        Return colorbar data coordinates for the boundaries of
        a proportional colorbar.
        '''
        if isinstance(self.norm, colors.BoundaryNorm):
            b = self._boundaries[self._inside]
            y = (self._boundaries - self._boundaries[0])
            y = y / (self._boundaries[-1] - self._boundaries[0])
        else:
            y = self.norm(self._boundaries.copy())
        if self.extend in ('both', 'min'):
            y[0] = -0.05
        if self.extend in ('both', 'max'):
            y[-1] = 1.05
        yi = y[self._inside]
        norm = colors.Normalize(yi[0], yi[-1])
        y[self._inside] = norm(yi)
        return y

    def _mesh(self):
        '''
        Return X,Y, the coordinate arrays for the colorbar pcolormesh.
These are suitable for a vertical colorbar; swapping and transposition for a horizontal colorbar are done outside this function. ''' x = np.array([0.0, 1.0]) if self.spacing == 'uniform': y = self._uniform_y(self._central_N()) else: y = self._proportional_y() self._y = y X, Y = np.meshgrid(x,y) if self.extend in ('min', 'both'): X[0,:] = 0.5 if self.extend in ('max', 'both'): X[-1,:] = 0.5 return X, Y def _locate(self, x): ''' Given a possible set of color data values, return the ones within range, together with their corresponding colorbar data coordinates. ''' if isinstance(self.norm, (colors.NoNorm, colors.BoundaryNorm)): b = self._boundaries xn = x xout = x else: # Do calculations using normalized coordinates so # as to make the interpolation more accurate. b = self.norm(self._boundaries, clip=False).filled() # We do our own clipping so that we can allow a tiny # bit of slop in the end point ticks to allow for # floating point errors. xn = self.norm(x, clip=False).filled() in_cond = (xn > -0.001) & (xn < 1.001) xn = np.compress(in_cond, xn) xout = np.compress(in_cond, x) # The rest is linear interpolation with clipping. y = self._y N = len(b) ii = np.minimum(np.searchsorted(b, xn), N-1) i0 = np.maximum(ii - 1, 0) #db = b[ii] - b[i0] db = np.take(b, ii) - np.take(b, i0) db = np.where(i0==ii, 1.0, db) #dy = y[ii] - y[i0] dy = np.take(y, ii) - np.take(y, i0) z = np.take(y, i0) + (xn-np.take(b,i0))*dy/db return xout, z def set_alpha(self, alpha): self.alpha = alpha class Colorbar(ColorbarBase): def __init__(self, ax, mappable, **kw): mappable.autoscale_None() # Ensure mappable.norm.vmin, vmax # are set when colorbar is called, # even if mappable.draw has not yet # been called. This will not change # vmin, vmax if they are already set. self.mappable = mappable kw['cmap'] = mappable.cmap kw['norm'] = mappable.norm kw['alpha'] = mappable.get_alpha() if isinstance(mappable, contour.ContourSet): CS = mappable kw['boundaries'] = CS._levels kw['values'] = CS.cvalues kw['extend'] = CS.extend #kw['ticks'] = CS._levels kw.setdefault('ticks', ticker.FixedLocator(CS.levels, nbins=10)) kw['filled'] = CS.filled ColorbarBase.__init__(self, ax, **kw) if not CS.filled: self.add_lines(CS) else: ColorbarBase.__init__(self, ax, **kw) def add_lines(self, CS): ''' Add the lines from a non-filled :class:`~matplotlib.contour.ContourSet` to the colorbar. ''' if not isinstance(CS, contour.ContourSet) or CS.filled: raise ValueError('add_lines is only for a ContourSet of lines') tcolors = [c[0] for c in CS.tcolors] tlinewidths = [t[0] for t in CS.tlinewidths] # The following was an attempt to get the colorbar lines # to follow subsequent changes in the contour lines, # but more work is needed: specifically, a careful # look at event sequences, and at how # to make one object track another automatically. #tcolors = [col.get_colors()[0] for col in CS.collections] #tlinewidths = [col.get_linewidth()[0] for lw in CS.collections] #print 'tlinewidths:', tlinewidths ColorbarBase.add_lines(self, CS.levels, tcolors, tlinewidths) def update_bruteforce(self, mappable): ''' Manually change any contour line colors. This is called when the image or contour plot to which this colorbar belongs is changed. ''' # We are using an ugly brute-force method: clearing and # redrawing the whole thing. The problem is that if any # properties have been changed by methods other than the # colorbar methods, those changes will be lost. 
self.ax.cla() self.draw_all() #if self.vmin != self.norm.vmin or self.vmax != self.norm.vmax: # self.ax.cla() # self.draw_all() if isinstance(self.mappable, contour.ContourSet): CS = self.mappable if not CS.filled: self.add_lines(CS) #if self.lines is not None: # tcolors = [c[0] for c in CS.tcolors] # self.lines.set_color(tcolors) #Fixme? Recalculate boundaries, ticks if vmin, vmax have changed. #Fixme: Some refactoring may be needed; we should not # be recalculating everything if there was a simple alpha # change. def make_axes(parent, **kw): orientation = kw.setdefault('orientation', 'vertical') fraction = kw.pop('fraction', 0.15) shrink = kw.pop('shrink', 1.0) aspect = kw.pop('aspect', 20) #pb = transforms.PBox(parent.get_position()) pb = parent.get_position(original=True).frozen() if orientation == 'vertical': pad = kw.pop('pad', 0.05) x1 = 1.0-fraction pb1, pbx, pbcb = pb.splitx(x1-pad, x1) pbcb = pbcb.shrunk(1.0, shrink).anchored('C', pbcb) anchor = (0.0, 0.5) panchor = (1.0, 0.5) else: pad = kw.pop('pad', 0.15) pbcb, pbx, pb1 = pb.splity(fraction, fraction+pad) pbcb = pbcb.shrunk(shrink, 1.0).anchored('C', pbcb) aspect = 1.0/aspect anchor = (0.5, 1.0) panchor = (0.5, 0.0) parent.set_position(pb1) parent.set_anchor(panchor) fig = parent.get_figure() cax = fig.add_axes(pbcb) cax.set_aspect(aspect, anchor=anchor, adjustable='box') return cax, kw make_axes.__doc__ =''' Resize and reposition a parent axes, and return a child axes suitable for a colorbar:: cax, kw = make_axes(parent, **kw) Keyword arguments may include the following (with defaults): *orientation* 'vertical' or 'horizontal' %s All but the first of these are stripped from the input kw set. Returns (cax, kw), the child axes and the reduced kw dictionary. ''' % make_axes_kw_doc
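As the ColorbarBase docstring above notes, the class can be used by itself to display a colormap, with no mappable at all. A minimal sketch, written against a stock modern matplotlib rather than this 2.6-era copy (the colormap choice and data range are arbitrary):

# Standalone colorbar: no image or contour set is needed.
import matplotlib.pyplot as plt
import matplotlib.colorbar as mcolorbar
import matplotlib.colors as mcolors

fig, cax = plt.subplots(figsize=(1.5, 4))
norm = mcolors.Normalize(vmin=0.0, vmax=100.0)   # made-up data range
cb = mcolorbar.ColorbarBase(cax, cmap=plt.cm.jet, norm=norm,
                            extend='both',       # pointed out-of-range ends
                            orientation='vertical')
cb.set_label('arbitrary units')
plt.show()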
agpl-3.0
NDManh/numbbo
code-postprocessing/bbob_pproc/comp2/pptable2.py
3
20251
#! /usr/bin/env python # -*- coding: utf-8 -*- """Rank-sum tests table on "Final Data Points". That is, for example, using 1/#fevals(ftarget) if ftarget was reached and -f_final otherwise as input for the rank-sum test, where obviously the larger the better. One table per function and dimension. """ from __future__ import absolute_import import os, warnings import numpy import matplotlib.pyplot as plt from .. import genericsettings, bestalg, toolsstats, pproc from ..pptex import tableLaTeX, tableLaTeXStar, writeFEvals2, writeFEvalsMaxPrec, writeLabels from ..toolsstats import significancetest from pdb import set_trace targetsOfInterest = pproc.TargetValues((1e+1, 1e-1, 1e-3, 1e-5, 1e-7)) targetf = 1e-8 # value for determining the success ratio samplesize = genericsettings.simulated_runlength_bootstrap_sample_size table_caption_one = r"""% Expected running time (ERT in number of function evaluations) divided by the respective best ERT measured during BBOB-2009 in dimensions 5 (left) and 20 (right). The ERT and in braces, as dispersion measure, the half difference between 90 and 10\%-tile of bootstrapped run lengths appear for each algorithm and """ table_caption_two1 = r"""% target, the corresponding best ERT in the first row. The different target \Df-values are shown in the top row. \#succ is the number of trials that reached the (final) target $\fopt + 10^{-8}$. """ table_caption_two2 = r"""% run-length based target, the corresponding best ERT (preceded by the target \Df-value in \textit{italics}) in the first row. \#succ is the number of trials that reached the target value of the last column. """ table_caption_rest = r"""% The median number of conducted function evaluations is additionally given in \textit{italics}, if the target in the last column was never reached. 1:\algorithmAshort\ is \algorithmA\ and 2:\algorithmBshort\ is \algorithmB. Bold entries are statistically significantly better compared to the other algorithm, with $p=0.05$ or $p=10^{-k}$ where $k\in\{2,3,4,\dots\}$ is the number following the $\star$ symbol, with Bonferroni correction of #1. A $\downarrow$ indicates the same tested against the best algorithm of BBOB-2009. 
""" table_caption = table_caption_one + table_caption_two1 + table_caption_rest table_caption_expensive = table_caption_one + table_caption_two2 + table_caption_rest def main(dsList0, dsList1, dimsOfInterest, outputdir, info='', verbose=True): """One table per dimension, modified to fit in 1 page per table.""" #TODO: method is long, split if possible dictDim0 = dsList0.dictByDim() dictDim1 = dsList1.dictByDim() alg0 = set(i[0] for i in dsList0.dictByAlg().keys()).pop().replace(genericsettings.extraction_folder_prefix, '')[0:3] alg1 = set(i[0] for i in dsList1.dictByAlg().keys()).pop().replace(genericsettings.extraction_folder_prefix, '')[0:3] open(os.path.join(outputdir, 'bbob_pproc_commands.tex'), 'a' ).write(r'\providecommand{\algorithmAshort}{%s}' % writeLabels(alg0) + '\n' + r'\providecommand{\algorithmBshort}{%s}' % writeLabels(alg1) + '\n') if info: info = '_' + info dims = set.intersection(set(dictDim0.keys()), set(dictDim1.keys())) bestalgentries = bestalg.loadBestAlgorithm(dsList0.isBiobjective()) header = [] if isinstance(targetsOfInterest, pproc.RunlengthBasedTargetValues): header = [r'\#FEs/D'] headerHtml = ['<thead>\n<tr>\n<th>#FEs/D</th>\n'] for label in targetsOfInterest.labels(): header.append(r'\multicolumn{2}{@{}c@{}}{%s}' % label) headerHtml.append('<td>%s</td>\n' % label) else: header = [r'$\Delta f_\mathrm{opt}$'] headerHtml = ['<thead>\n<tr>\n<th>&#916; f</th>\n'] for label in targetsOfInterest.labels(): header.append(r'\multicolumn{2}{@{\,}c@{\,}}{%s}' % label) headerHtml.append('<td>%s</td>\n' % label) header.append(r'\multicolumn{2}{@{}l@{}}{\#succ}') headerHtml.append('<td>#succ</td>\n</tr>\n</thead>\n') for d in dimsOfInterest: # TODO set as input arguments table = [header] tableHtml = headerHtml extraeol = [r'\hline'] try: dictFunc0 = dictDim0[d].dictByFunc() dictFunc1 = dictDim1[d].dictByFunc() except KeyError: continue funcs = set.union(set(dictFunc0.keys()), set(dictFunc1.keys())) nbtests = len(funcs) * 2. 
#len(dimsOfInterest) tableHtml.append('<tbody>\n') for f in sorted(funcs): tableHtml.append('<tr>\n') targets = targetsOfInterest((f, d)) targetf = targets[-1] bestalgentry = bestalgentries[(d, f)] curline = [r'${\bf f_{%d}}$' % f] curlineHtml = ['<th><b>f<sub>%d</sub></b></th>\n' % f] bestalgdata = bestalgentry.detERT(targets) bestalgevals, bestalgalgs = bestalgentry.detEvals(targets) if isinstance(targetsOfInterest, pproc.RunlengthBasedTargetValues): # write ftarget:fevals for i in xrange(len(bestalgdata[:-1])): temp = "%.1e" % targetsOfInterest((f, d))[i] if temp[-2]=="0": temp = temp[:-2]+temp[-1] curline.append(r'\multicolumn{2}{@{}c@{}}{\textit{%s}:%s \quad}' % (temp,writeFEvalsMaxPrec(bestalgdata[i], 2))) curlineHtml.append('<td><i>%s</i>:%s</td>\n' % (temp, writeFEvalsMaxPrec(bestalgdata[i], 2))) temp = "%.1e" % targetsOfInterest((f, d))[-1] if temp[-2]=="0": temp = temp[:-2]+temp[-1] curline.append(r'\multicolumn{2}{@{}c@{}|}{\textit{%s}:%s }' % (temp,writeFEvalsMaxPrec(bestalgdata[-1], 2))) curlineHtml.append('<td><i>%s</i>:%s</td>\n' % (temp, writeFEvalsMaxPrec(bestalgdata[-1], 2))) else: # write #fevals of the reference alg for i in bestalgdata[:-1]: curline.append(r'\multicolumn{2}{@{}c@{}}{%s \quad}' % writeFEvalsMaxPrec(i, 2)) curlineHtml.append('<td>%s</td>\n' % writeFEvalsMaxPrec(i, 2)) curline.append(r'\multicolumn{2}{@{}c@{}|}{%s}' % writeFEvalsMaxPrec(bestalgdata[-1], 2)) curlineHtml.append('<td>%s</td>\n' % writeFEvalsMaxPrec(bestalgdata[-1], 2)) tmp = bestalgentry.detEvals([targetf])[0][0] tmp2 = numpy.sum(numpy.isnan(tmp) == False) curline.append('%d' % (tmp2)) if tmp2 > 0: curline.append('/%d' % len(tmp)) curlineHtml.append('<td>%d/%d</td>\n' % (tmp2, len(tmp))) else: curlineHtml.append('<td>%d</td>\n' % (tmp2)) table.append(curline[:]) tableHtml.extend(curlineHtml[:]) tableHtml.append('</tr>\n') extraeol.append('') rankdata0 = [] # never used # generate all data from ranksum test entries = [] ertdata = {} for nb, dsList in enumerate((dictFunc0, dictFunc1)): try: entry = dsList[f][0] # take the first DataSet, there should be only one? except KeyError: warnings.warn('data missing for data set ' + str(nb) + ' and function ' + str(f)) print('*** Warning: data missing for data set ' + str(nb) + ' and function ' + str(f) + '***') continue # TODO: problem here! 
ertdata[nb] = entry.detERT(targets) entries.append(entry) for _t in ertdata.values(): for _tt in _t: if _tt is None: raise ValueError if len(entries) < 2: # funcion not available for *both* algorithms continue # TODO: check which one is missing and make sure that what is there is displayed properly in the following testres0vs1 = significancetest(entries[0], entries[1], targets) testresbestvs1 = significancetest(bestalgentry, entries[1], targets) testresbestvs0 = significancetest(bestalgentry, entries[0], targets) for nb, entry in enumerate(entries): tableHtml.append('<tr>\n') if nb == 0: curline = [r'1:\:\algorithmAshort\hspace*{\fill}'] curlineHtml = ['<th>1: %s</th>\n' % alg0] else: curline = [r'2:\:\algorithmBshort\hspace*{\fill}'] curlineHtml = ['<th>2: %s</th>\n' % alg1] #data = entry.detERT(targetsOfInterest) dispersion = [] data = [] evals = entry.detEvals(targets) for i in evals: succ = (numpy.isnan(i) == False) tmp = i.copy() tmp[succ==False] = entry.maxevals[numpy.isnan(i)] #set_trace() data.append(toolsstats.sp(tmp, issuccessful=succ)[0]) #if not any(succ): #set_trace() if any(succ): tmp2 = toolsstats.drawSP(tmp[succ], tmp[succ==False], (10, 50, 90), samplesize)[0] dispersion.append((tmp2[-1]-tmp2[0])/2.) else: dispersion.append(None) if nb == 0: assert not isinstance(data, numpy.ndarray) data0 = data[:] # TODO: check if it is not an array, it's never used anyway? for i, dati in enumerate(data): z, p = testres0vs1[i] # TODO: there is something with the sign that I don't get # assign significance flag, which is the -log10(p) significance0vs1 = 0 if nb != 0: z = -z # the test is symmetric if nbtests * p < 0.05 and z > 0: significance0vs1 = -int(numpy.ceil(numpy.log10(min([1.0, nbtests * p])))) # this is the larger the more significant isBold = significance0vs1 > 0 alignment = 'c' if i == len(data) - 1: # last element alignment = 'c|' if numpy.isinf(bestalgdata[i]): # if the 2009 best did not solve the problem tmp = writeFEvalsMaxPrec(float(dati), 2) if not numpy.isinf(dati): tmpHtml = '<i>%s</i>' % (tmp) tmp = r'\textit{%s}' % (tmp) if isBold: tmp = r'\textbf{%s}' % tmp tmpHtml = '<b>%s</b>' % tmpHtml if dispersion[i] and numpy.isfinite(dispersion[i]): tmp += r'${\scriptscriptstyle (%s)}$' % writeFEvalsMaxPrec(dispersion[i], 1) tableentry = (r'\multicolumn{2}{@{}%s@{}}{%s}' % (alignment, tmp)) tableentryHtml = (' (%s)' % tmp) else: # Formatting tmp = float(dati)/bestalgdata[i] assert not numpy.isnan(tmp) isscientific = False if tmp >= 1000: isscientific = True tableentry = writeFEvals2(tmp, 2, isscientific=isscientific) tableentry = writeFEvalsMaxPrec(tmp, 2) tableentryHtml = writeFEvalsMaxPrec(tmp, 2) if numpy.isinf(tmp) and i == len(data)-1: tableentry = (tableentry + r'\textit{%s}' % writeFEvals2(numpy.median(entry.maxevals), 2)) tableentryHtml = (tableentryHtml + ' <i>%s</i>' % writeFEvals2(numpy.median(entry.maxevals), 2)) if isBold: tableentry = r'\textbf{%s}' % tableentry tableentryHtml = '<b>%s</b>' % tableentryHtml elif 11 < 3 and significance0vs1 < 0: # cave: negative significance has no meaning anymore tableentry = r'\textit{%s}' % tableentry tableentryHtml = '<i>%s</i>' % tableentryHtml if dispersion[i] and numpy.isfinite(dispersion[i]/bestalgdata[i]): tableentry += r'${\scriptscriptstyle (%s)}$' % writeFEvalsMaxPrec(dispersion[i]/bestalgdata[i], 1) tableentryHtml += ' (%s)' % writeFEvalsMaxPrec(dispersion[i]/bestalgdata[i], 1) tableentry = (r'\multicolumn{2}{@{}%s@{}}{%s}' % (alignment, tableentry)) elif tableentry.find('e') > -1 or (numpy.isinf(tmp) and i != 
len(data) - 1): if isBold: tableentry = r'\textbf{%s}' % tableentry tableentryHtml = '<b>%s</b>' % tableentryHtml elif 11 < 3 and significance0vs1 < 0: tableentry = r'\textit{%s}' % tableentry tableentryHtml = '<i>%s</i>' % tableentryHtml if dispersion[i] and numpy.isfinite(dispersion[i]/bestalgdata[i]): tableentry += r'${\scriptscriptstyle (%s)}$' % writeFEvalsMaxPrec(dispersion[i]/bestalgdata[i], 1) tableentryHtml += ' (%s)' % writeFEvalsMaxPrec(dispersion[i]/bestalgdata[i], 1) tableentry = (r'\multicolumn{2}{@{}%s@{}}{%s}' % (alignment, tableentry)) else: tmp = tableentry.split('.', 1) tmpHtml = tableentryHtml.split('.', 1) if isBold: tmp = list(r'\textbf{%s}' % i for i in tmp) tmpHtml = list('<b>%s</b>' % i for i in tmpHtml) elif 11 < 3 and significance0vs1 < 0: tmp = list(r'\textit{%s}' % i for i in tmp) tmpHtml = list('<i>%s</i>' % i for i in tmpHtml) tableentry = ' & .'.join(tmp) tableentryHtml = '.'.join(tmpHtml) if len(tmp) == 1: tableentry += '&' if dispersion[i] and numpy.isfinite(dispersion[i]/bestalgdata[i]): tableentry += r'${\scriptscriptstyle (%s)}$' % writeFEvalsMaxPrec(dispersion[i]/bestalgdata[i], 1) tableentryHtml += ' (%s)' % writeFEvalsMaxPrec(dispersion[i]/bestalgdata[i], 1) superscript = '' superscriptHtml = '' if nb == 0: z, p = testresbestvs0[i] else: z, p = testresbestvs1[i] #The conditions are now that ERT < ERT_best if ((nbtests * p) < 0.05 and dati - bestalgdata[i] < 0. and z < 0.): nbstars = -numpy.ceil(numpy.log10(nbtests * p)) #tmp = '\hspace{-.5ex}'.join(nbstars * [r'\star']) if z > 0: superscript = r'\uparrow' #* nbstars superscriptHtml = '&uarr;' else: superscript = r'\downarrow' #* nbstars superscriptHtml = '&darr;' # print z, linebest[i], line1 if nbstars > 1: superscript += str(int(nbstars)) superscriptHtml += str(int(nbstars)) if superscript or significance0vs1: s = '' shtml = '' if significance0vs1 > 0: s = '\star' shtml = '&#9733;' if significance0vs1 > 1: s += str(significance0vs1) shtml += str(significance0vs1) s = r'$^{' + s + superscript + r'}$' shtml = '<sup>' + shtml + superscriptHtml + '</sup>' if tableentry.endswith('}'): tableentry = tableentry[:-1] + s + r'}' else: tableentry += s tableentryHtml += shtml tableentryHtml = tableentryHtml.replace('$\infty$', '&infin;') curlineHtml.append('<td>%s</td>\n' % tableentryHtml) curline.append(tableentry) #curline.append(tableentry) #if dispersion[i] is None or numpy.isinf(bestalgdata[i]): #curline.append('') #else: #tmp = writeFEvalsMaxPrec(dispersion[i]/bestalgdata[i], 2) #curline.append('(%s)' % tmp) tmp = entry.evals[entry.evals[:, 0] <= targetf, 1:] try: tmp = tmp[0] curline.append('%d' % numpy.sum(numpy.isnan(tmp) == False)) curlineHtml.append('<td>%d' % numpy.sum(numpy.isnan(tmp) == False)) except IndexError: curline.append('%d' % 0) curlineHtml.append('<td>%d' % 0) curline.append('/%d' % entry.nbRuns()) curlineHtml.append('/%d</td>\n' % entry.nbRuns()) table.append(curline[:]) tableHtml.extend(curlineHtml[:]) tableHtml.append('</tr>\n') extraeol.append('') extraeol[-1] = r'\hline' extraeol[-1] = '' outputfile = os.path.join(outputdir, 'pptable2_%02dD%s.tex' % (d, info)) spec = r'@{}c@{}|' + '*{%d}{@{}r@{}@{}l@{}}' % len(targetsOfInterest) + '|@{}r@{}@{}l@{}' res = r'\providecommand{\algorithmAshort}{%s}' % writeLabels(alg0) + '\n' res += r'\providecommand{\algorithmBshort}{%s}' % writeLabels(alg1) + '\n' # open(os.path.join(outputdir, 'bbob_pproc_commands.tex'), 'a').write(res) #res += tableLaTeXStar(table, width=r'0.45\textwidth', spec=spec, #extraeol=extraeol) res += tableLaTeX(table, 
                           spec=spec,
                           extraeol=extraeol)
        f = open(outputfile, 'w')
        f.write(res)
        f.close()

        res = ''.join(str(item) for item in tableHtml)
        res = '<p><b>%d-D</b></p>\n<table>\n%s</table>\n' % (d, res)

        filename = os.path.join(outputdir,
                                genericsettings.two_algorithm_file_name + '.html')
        lines = []
        with open(filename) as infile:
            for line in infile:
                if '<!--pptable2Html-->' in line:
                    lines.append(res)
                lines.append(line)

        with open(filename, 'w') as outfile:
            for line in lines:
                outfile.write(line)

        if verbose:
            print("Table written in %s" % outputfile)
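The star annotations produced above follow a simple convention: a rank-sum test between the two algorithms' values, with the star count taken from the magnitude of the Bonferroni-corrected p-value. The following is a simplified, hypothetical sketch of just that convention (the function name significance_stars is made up, scipy.stats.ranksums stands in for toolsstats.significancetest, the direction-of-effect check is omitted, and the sample data are fabricated):

import numpy as np
from scipy import stats

def significance_stars(sample_a, sample_b, nbtests=1):
    """0 if not significant, else k for corrected p < 10^-k."""
    z, p = stats.ranksums(sample_a, sample_b)
    p_corrected = min(1.0, nbtests * p)
    if p_corrected >= 0.05:
        return 0
    # same arithmetic as significance0vs1 in main() above
    return -int(np.ceil(np.log10(p_corrected)))

rng = np.random.default_rng(1)
a = rng.normal(0.0, 1.0, size=30)  # e.g. log10 run lengths, algorithm A
b = rng.normal(1.0, 1.0, size=30)  # algorithm B, clearly worse here
print(significance_stars(a, b, nbtests=48))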
bsd-3-clause
mattpitkin/GraWIToNStatisticsLectures
figures/scripts/pvalue.py
1
1242
#!/usr/bin/env python

"""
Make plots showing how to calculate the p-value
"""

import matplotlib.pyplot as pl
from scipy.stats import norm
from scipy.special import erf
import numpy as np

mu = 0.     # the mean, mu
sigma = 1.  # standard deviation
x = np.linspace(-4, 4, 1000)  # points at which the pdf is evaluated

# set plot to render labels using latex
pl.rc('text', usetex=True)
pl.rc('font', family='serif')
pl.rc('font', size=14)

fig = pl.figure(figsize=(7,4), dpi=100)

# observed value of the test statistic for calculating the p-value
Z = 1.233

y = norm.pdf(x, mu, sigma)

# plot the standard normal pdf, with dashed lines at +/- Z and the two
# tail areas (whose total probability is the p-value) shaded
pl.plot(x, y, 'r')
pl.plot([-Z, -Z], [0., np.max(y)], 'k--')
pl.plot([Z, Z], [0., np.max(y)], 'k--')
pl.fill_between(x, np.zeros(len(x)), y, where=x<=-Z, facecolor='green',
                interpolate=True, alpha=0.6)
pl.fill_between(x, np.zeros(len(x)), y, where=x>=Z, facecolor='green',
                interpolate=True, alpha=0.6)

# two-sided p-value: probability that |Z| exceeds the observed value
pvalue = 1.-erf(Z/np.sqrt(2.))

ax = pl.gca()
ax.set_xlabel('$Z$', fontsize=14)
ax.set_ylabel('$p(Z)$', fontsize=14)
ax.set_xlim(-4, 4)
ax.grid(True)
ax.text(Z+0.1, 0.3, '$Z_{\\textrm{obs}} = 1.233$', fontsize=16)
ax.text(-3.6, 0.31, '$p$-value$= %.2f$' % pvalue, fontsize=18,
        bbox={'facecolor': 'none', 'pad':12, 'ec': 'r'})
fig.subplots_adjust(bottom=0.15)
pl.savefig('../pvalue.pdf')
pl.show()
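As a cross-check of the erf expression used above: for a standard normal the two-sided p-value can equally be written with the survival function (scipy is assumed available, as in the script itself):

from scipy.stats import norm

Z = 1.233
# both lines give the same value as 1 - erf(Z/sqrt(2)), about 0.218
print(2.0 * norm.sf(Z))
print(norm.cdf(-Z) + (1.0 - norm.cdf(Z)))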
mit
gwpy/gwpy.github.io
docs/0.8.0/plotter/colors-1.py
7
1123
from __future__ import division

import numpy
from matplotlib import (pyplot, rcParams)
from matplotlib.colors import to_hex
# importing gwpy.plotter.colors has the side effect of registering the
# 'gwpy:*' colour names with matplotlib, which is why the import is needed
# even though the name is not referenced below
from gwpy.plotter import colors

rcParams.update({
    'text.usetex': False,
    'font.size': 15
})

th = numpy.linspace(0, 2*numpy.pi, 512)
names = [
    'gwpy:geo600',
    'gwpy:kagra',
    'gwpy:ligo-hanford',
    'gwpy:ligo-india',
    'gwpy:ligo-livingston',
    'gwpy:virgo',
]

fig = pyplot.figure(figsize=(5, 2))
ax = fig.gca()
ax.axis('off')
for j, name in enumerate(sorted(names)):
    c = str(to_hex(name))
    v_offset = -(j / len(names))
    ax.plot(th, .1*numpy.sin(th) + v_offset, color=c)
    ax.annotate("{!r}".format(name), (0, v_offset), xytext=(-1.5, 0),
                ha='right', va='center', color=c,
                textcoords='offset points', family='monospace')
    ax.annotate("{!r}".format(c), (2*numpy.pi, v_offset), xytext=(1.5, 0),
                ha='left', va='center', color=c,
                textcoords='offset points', family='monospace')
fig.subplots_adjust(**{'bottom': 0.0, 'left': 0.54, 'right': 0.78, 'top': 1})
pyplot.show()
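Once that import has registered the names, any 'gwpy:*' string works wherever matplotlib accepts a colour. A small usage sketch, assuming the same gwpy 0.8-era install as the demo above:

from matplotlib import pyplot
from gwpy.plotter import colors  # side-effect import: registers 'gwpy:*'

fig = pyplot.figure()
ax = fig.gca()
ax.plot([0, 1], [0, 1], color='gwpy:ligo-hanford', label='LIGO-Hanford')
ax.plot([0, 1], [1, 0], color='gwpy:virgo', label='Virgo')
ax.legend()
pyplot.show()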
gpl-3.0
karpeev/libmesh
doc/statistics/libmesh_citations.py
1
2340
#!/usr/bin/env python import matplotlib.pyplot as plt import numpy as np # Number of "papers using libmesh" by year. # # Note 1: this does not count citations "only," the authors must have actually # used libmesh in part of their work. Therefore, these counts do not include # things like Wolfgang citing us in his papers to show how Deal.II is # superior... # # Note 2: I typically update this data after regenerating the web page, # since bibtex2html renumbers the references starting from "1" each year. # # Note 3: These citations include anything that is not a dissertation/thesis. # So, some are conference papers, some are journal articles, etc. # # Note 4: The libmesh paper came out in 2006, but there are some citations # prior to that date, obviously. These counts include citations of the # website libmesh.sf.net as well... # # Note 5: Preprints are listed as the "current year + 1" and are constantly # being moved to their respective years after being published. data = [ '2004', 5, '\'05', 2, '\'06', 13, '\'07', 8, '\'08', 23, '\'09', 30, '\'10', 24, '\'11', 37, '\'12', 50, '\'13', 78, '\'14', 62, '\'15', 24, 'P', 5, # Preprints 'T', 38 # Theses ] # Extract the x-axis labels from the data array xlabels = data[0::2] # Extract the publication counts from the data array n_papers = data[1::2] # The number of data points N = len(xlabels); # Get a reference to the figure fig = plt.figure() # 111 is equivalent to Matlab's subplot(1,1,1) command ax = fig.add_subplot(111) # Create an x-axis for plotting x = np.linspace(1, N, N) # Width of the bars width = 0.8 # Make the bar chart. Plot years in blue, preprints and theses in green. ax.bar(x[0:N-2], n_papers[0:N-2], width, color='b') ax.bar(x[N-2:N], n_papers[N-2:N], width, color='g') # Label the x-axis plt.xlabel('P=Preprints, T=Theses') # Set up the xtick locations and labels. Note that you have to offset # the position of the ticks by width/2, where width is the width of # the bars. ax.set_xticks(np.linspace(1,N,N) + width/2) ax.set_xticklabels(xlabels) # Create a title string title_string = 'LibMesh Citations, (' + str(sum(n_papers)) + ' Total)' fig.suptitle(title_string) # Save as PDF plt.savefig('libmesh_citations.pdf') # Local Variables: # python-indent: 2 # End:
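The width/2 tick offset in the script above is only needed because bar() anchors bars at their left edge by default in old matplotlib. A hedged sketch of the same chart on a newer matplotlib, where align='center' (the default there) lets the ticks sit directly on x; the data below are a truncated placeholder, not the real counts:

import numpy as np
import matplotlib.pyplot as plt

xlabels = ['2004', "'05", "'06"]   # placeholder subset of the labels above
n_papers = [5, 2, 13]
x = np.arange(len(xlabels))

fig, ax = plt.subplots()
ax.bar(x, n_papers, width=0.8, align='center', color='b')
ax.set_xticks(x)                   # ticks land on the bar centers directly
ax.set_xticklabels(xlabels)
plt.savefig('libmesh_citations_sketch.pdf')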
lgpl-2.1
numenta/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/mlab.py
69
104273
""" Numerical python functions written for compatability with matlab(TM) commands with the same names. Matlab(TM) compatible functions ------------------------------- :func:`cohere` Coherence (normalized cross spectral density) :func:`csd` Cross spectral density uing Welch's average periodogram :func:`detrend` Remove the mean or best fit line from an array :func:`find` Return the indices where some condition is true; numpy.nonzero is similar but more general. :func:`griddata` interpolate irregularly distributed data to a regular grid. :func:`prctile` find the percentiles of a sequence :func:`prepca` Principal Component Analysis :func:`psd` Power spectral density uing Welch's average periodogram :func:`rk4` A 4th order runge kutta integrator for 1D or ND systems :func:`specgram` Spectrogram (power spectral density over segments of time) Miscellaneous functions ------------------------- Functions that don't exist in matlab(TM), but are useful anyway: :meth:`cohere_pairs` Coherence over all pairs. This is not a matlab function, but we compute coherence a lot in my lab, and we compute it for a lot of pairs. This function is optimized to do this efficiently by caching the direct FFTs. :meth:`rk4` A 4th order Runge-Kutta ODE integrator in case you ever find yourself stranded without scipy (and the far superior scipy.integrate tools) record array helper functions ------------------------------- A collection of helper methods for numpyrecord arrays .. _htmlonly:: See :ref:`misc-examples-index` :meth:`rec2txt` pretty print a record array :meth:`rec2csv` store record array in CSV file :meth:`csv2rec` import record array from CSV file with type inspection :meth:`rec_append_fields` adds field(s)/array(s) to record array :meth:`rec_drop_fields` drop fields from record array :meth:`rec_join` join two record arrays on sequence of fields :meth:`rec_groupby` summarize data by groups (similar to SQL GROUP BY) :meth:`rec_summarize` helper code to filter rec array fields into new fields For the rec viewer functions(e rec2csv), there are a bunch of Format objects you can pass into the functions that will do things like color negative values red, set percent formatting and scaling, etc. 
Example usage::

    r = csv2rec('somefile.csv', checkrows=0)

    formatd = dict(
        weight = FormatFloat(2),
        change = FormatPercent(2),
        cost = FormatThousands(2),
        )


    rec2excel(r, 'test.xls', formatd=formatd)
    rec2csv(r, 'test.csv', formatd=formatd)
    scroll = rec2gtk(r, formatd=formatd)

    win = gtk.Window()
    win.set_size_request(600,800)
    win.add(scroll)
    win.show_all()
    gtk.main()


Deprecated functions
---------------------

The following are deprecated; please import directly from numpy (with
care--function signatures may differ):

:meth:`conv`
    convolution (numpy.convolve)

:meth:`corrcoef`
    The matrix of correlation coefficients

:meth:`hist`
    Histogram (numpy.histogram)

:meth:`linspace`
    Linear spaced array from min to max

:meth:`load`
    load ASCII file - use numpy.loadtxt

:meth:`meshgrid`
    Make a 2D grid from two 1-D arrays (numpy.meshgrid)

:meth:`polyfit`
    least squares best polynomial fit of x to y (numpy.polyfit)

:meth:`polyval`
    evaluate a vector for a vector of polynomial coeffs (numpy.polyval)

:meth:`save`
    save ASCII file - use numpy.savetxt

:meth:`trapz`
    trapezoidal integration (trapz(x,y) -> numpy.trapz(y,x))

:meth:`vander`
    the Vandermonde matrix (numpy.vander)

"""

from __future__ import division
import csv, warnings, copy, os

import numpy as np
ma = np.ma
from matplotlib import verbose

import matplotlib.nxutils as nxutils
import matplotlib.cbook as cbook

# set is a new builtin function in 2.4; delete the following when
# support for 2.3 is dropped.
try:
    set
except NameError:
    from sets import Set as set


def linspace(*args, **kw):
    warnings.warn("use numpy.linspace", DeprecationWarning)
    return np.linspace(*args, **kw)

def meshgrid(x,y):
    warnings.warn("use numpy.meshgrid", DeprecationWarning)
    return np.meshgrid(x,y)

def mean(x, dim=None):
    warnings.warn("Use numpy.mean(x) or x.mean()", DeprecationWarning)
    if len(x)==0: return None
    return np.mean(x, axis=dim)


def logspace(xmin,xmax,N):
    return np.exp(np.linspace(np.log(xmin), np.log(xmax), N))

def _norm(x):
    "return sqrt(x dot x)"
    return np.sqrt(np.dot(x,x))

def window_hanning(x):
    "return x times the hanning window of len(x)"
    return np.hanning(len(x))*x

def window_none(x):
    "No window function; simply return x"
    return x

#from numpy import convolve as conv
def conv(x, y, mode=2):
    'convolve x with y'
    warnings.warn("Use numpy.convolve(x, y, mode='full')", DeprecationWarning)
    return np.convolve(x,y,mode)

def detrend(x, key=None):
    if key is None or key=='constant':
        return detrend_mean(x)
    elif key=='linear':
        return detrend_linear(x)
    else:
        # previously fell through and silently returned None
        raise ValueError('detrend key must be None, "constant" or "linear"')

def demean(x, axis=0):
    "Return x minus its mean along the specified axis"
    x = np.asarray(x)
    if axis:
        ind = [slice(None)] * axis
        ind.append(np.newaxis)
        return x - x.mean(axis)[ind]
    return x - x.mean(axis)

def detrend_mean(x):
    "Return x minus the mean(x)"
    return x - x.mean()

def detrend_none(x):
    "Return x: no detrending"
    return x

def detrend_linear(y):
    "Return y minus best fit line; 'linear' detrending "
    # This is faster than an algorithm based on linalg.lstsq.
    x = np.arange(len(y), dtype=np.float_)
    C = np.cov(x, y, bias=1)
    b = C[0,1]/C[0,0]
    a = y.mean() - b*x.mean()
    return y - (b*x + a)

#This is a helper function that implements the commonality between the
#psd, csd, and spectrogram.  It is *NOT* meant to be used outside of mlab
def _spectral_helper(x, y, NFFT=256, Fs=2, detrend=detrend_none,
        window=window_hanning, noverlap=0, pad_to=None, sides='default',
        scale_by_freq=None):
    #The checks for if y is x are so that we can use the same function to
    #implement the core of psd(), csd(), and spectrogram() without doing
    #extra calculations.
    # We return the unaveraged Pxy, freqs, and t.
    same_data = y is x

    #Make sure we're dealing with a numpy array. If y and x were the same
    #object to start with, keep them that way
    x = np.asarray(x)
    if not same_data:
        y = np.asarray(y)

    # zero pad x and y up to NFFT if they are shorter than NFFT
    if len(x)<NFFT:
        n = len(x)
        x = np.resize(x, (NFFT,))
        x[n:] = 0

    if not same_data and len(y)<NFFT:
        n = len(y)
        y = np.resize(y, (NFFT,))
        y[n:] = 0

    if pad_to is None:
        pad_to = NFFT

    if scale_by_freq is None:
        warnings.warn("psd, csd, and specgram have changed to scale their "
            "densities by the sampling frequency for better MatLab "
            "compatibility. You can pass scale_by_freq=False to disable "
            "this behavior. Also, one-sided densities are scaled by a "
            "factor of 2.")
        scale_by_freq = True

    # For real x, ignore the negative frequencies unless told otherwise
    if (sides == 'default' and np.iscomplexobj(x)) or sides == 'twosided':
        numFreqs = pad_to
        scaling_factor = 1.
    elif sides in ('default', 'onesided'):
        numFreqs = pad_to//2 + 1
        scaling_factor = 2.
    else:
        raise ValueError("sides must be one of: 'default', 'onesided', or "
            "'twosided'")

    # Matlab divides by the sampling frequency so that density function
    # has units of dB/Hz and can be integrated by the plotted frequency
    # values. Perform the same scaling here.
    if scale_by_freq:
        scaling_factor /= Fs

    if cbook.iterable(window):
        assert(len(window) == NFFT)
        windowVals = window
    else:
        windowVals = window(np.ones((NFFT,), x.dtype))

    step = NFFT - noverlap
    ind = np.arange(0, len(x) - NFFT + 1, step)
    n = len(ind)
    Pxy = np.zeros((numFreqs,n), np.complex_)

    # do the ffts of the slices
    for i in range(n):
        thisX = x[ind[i]:ind[i]+NFFT]
        thisX = windowVals * detrend(thisX)
        fx = np.fft.fft(thisX, n=pad_to)

        if same_data:
            fy = fx
        else:
            thisY = y[ind[i]:ind[i]+NFFT]
            thisY = windowVals * detrend(thisY)
            fy = np.fft.fft(thisY, n=pad_to)
        Pxy[:,i] = np.conjugate(fx[:numFreqs]) * fy[:numFreqs]

    # Scale the spectrum by the norm of the window to compensate for
    # windowing loss; see Bendat & Piersol Sec 11.5.2.  Also include
    # scaling factors for one-sided densities and dividing by the sampling
    # frequency, if desired.
    Pxy *= scaling_factor / (np.abs(windowVals)**2).sum()
    t = 1./Fs * (ind + NFFT / 2.)
    freqs = float(Fs) / pad_to * np.arange(numFreqs)

    return Pxy, freqs, t

#Split out these keyword docs so that they can be used elsewhere
kwdocd = dict()
kwdocd['PSD'] = """
    Keyword arguments:

      *NFFT*: integer
          The number of data points used in each block for the FFT.
          Must be even; a power of 2 is most efficient.  The default
          value is 256.

      *Fs*: scalar
          The sampling frequency (samples per time unit).  It is used
          to calculate the Fourier frequencies, freqs, in cycles per
          time unit. The default value is 2.

      *detrend*: callable
          The function applied to each segment before fft-ing,
          designed to remove the mean or linear trend.  Unlike in
          matlab, where the *detrend* parameter is a vector, in
          matplotlib it is a function.  The :mod:`~matplotlib.pylab`
          module defines :func:`~matplotlib.pylab.detrend_none`,
          :func:`~matplotlib.pylab.detrend_mean`, and
          :func:`~matplotlib.pylab.detrend_linear`, but you can use
          a custom function as well.

      *window*: callable or ndarray
          A function or a vector of length *NFFT*. To create window
          vectors see :func:`window_hanning`, :func:`window_none`,
          :func:`numpy.blackman`, :func:`numpy.hamming`,
          :func:`numpy.bartlett`, :func:`scipy.signal`,
          :func:`scipy.signal.get_window`, etc. The default is
          :func:`window_hanning`.
          If a function is passed as the argument, it must take a data
          segment as an argument and return the windowed version of the
          segment.

      *noverlap*: integer
          The number of points of overlap between blocks.  The default
          value is 0 (no overlap).

      *pad_to*: integer
          The number of points to which the data segment is padded when
          performing the FFT.  This can be different from *NFFT*, which
          specifies the number of data points used.  While not increasing
          the actual resolution of the psd (the minimum distance between
          resolvable peaks), this can give more points in the plot,
          allowing for more detail. This corresponds to the *n* parameter
          in the call to fft(). The default is None, which sets *pad_to*
          equal to *NFFT*

      *sides*: [ 'default' | 'onesided' | 'twosided' ]
          Specifies which sides of the PSD to return.  Default gives the
          default behavior, which returns one-sided for real data and both
          for complex data.  'onesided' forces the return of a one-sided
          PSD, while 'twosided' forces two-sided.

      *scale_by_freq*: boolean
          Specifies whether the resulting density values should be scaled
          by the scaling frequency, which gives density in units of Hz^-1.
          This allows for integration over the returned frequency values.
          The default is True for MatLab compatibility.
"""

def psd(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
        noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
    """
    The power spectral density by Welch's average periodogram method.
    The vector *x* is divided into *NFFT* length blocks.  Each block
    is detrended by the function *detrend* and windowed by the function
    *window*.  *noverlap* gives the length of the overlap between blocks.
    The absolute(fft(block))**2 of each segment are averaged to compute
    *Pxx*, with a scaling to correct for power loss due to windowing.

    If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.

    *x*
        Array or sequence containing the data
    %(PSD)s
    Returns the tuple (*Pxx*, *freqs*).

    Refs:

        Bendat & Piersol -- Random Data: Analysis and Measurement
        Procedures, John Wiley & Sons (1986)
    """
    Pxx,freqs = csd(x, x, NFFT, Fs, detrend, window, noverlap, pad_to,
        sides, scale_by_freq)
    return Pxx.real,freqs

psd.__doc__ = psd.__doc__ % kwdocd

def csd(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
        noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
    """
    The cross power spectral density by Welch's average periodogram
    method.  The vectors *x* and *y* are divided into *NFFT* length
    blocks.  Each block is detrended by the function *detrend* and
    windowed by the function *window*.  *noverlap* gives the length
    of the overlap between blocks.  The product of the direct FFTs
    of *x* and *y* are averaged over each segment to compute *Pxy*,
    with a scaling to correct for power loss due to windowing.

    If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
    padded to *NFFT*.

    *x*, *y*
        Array or sequence containing the data
    %(PSD)s
    Returns the tuple (*Pxy*, *freqs*).

    Refs:
        Bendat & Piersol -- Random Data: Analysis and Measurement
        Procedures, John Wiley & Sons (1986)
    """
    Pxy, freqs, t = _spectral_helper(x, y, NFFT, Fs, detrend, window,
        noverlap, pad_to, sides, scale_by_freq)

    if len(Pxy.shape) == 2 and Pxy.shape[1]>1:
        Pxy = Pxy.mean(axis=1)
    return Pxy, freqs

csd.__doc__ = csd.__doc__ % kwdocd

def specgram(x, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
        noverlap=128, pad_to=None, sides='default', scale_by_freq=None):
    """
    Compute a spectrogram of data in *x*.  Data are split into *NFFT*
    length segments and the PSD of each section is computed.
    The windowing function *window* is applied to each segment, and
    the amount of overlap of each segment is specified with *noverlap*.

    If *x* is real (i.e. non-complex) only the spectrum of the positive
    frequencies is returned.  If *x* is complex then the complete
    spectrum is returned.
    %(PSD)s
    Returns a tuple (*Pxx*, *freqs*, *t*):

         - *Pxx*: 2-D array, columns are the periodograms of
           successive segments

         - *freqs*: 1-D array of frequencies corresponding to the rows
           in Pxx

         - *t*: 1-D array of times corresponding to midpoints of
           segments.

    .. seealso::
        :func:`psd`:
            :func:`psd` differs in the default overlap; in returning
            the mean of the segment periodograms; and in not returning
            times.
    """
    assert(NFFT > noverlap)

    Pxx, freqs, t = _spectral_helper(x, x, NFFT, Fs, detrend, window,
        noverlap, pad_to, sides, scale_by_freq)
    Pxx = Pxx.real #Needed since helper implements generically

    if (np.iscomplexobj(x) and sides == 'default') or sides == 'twosided':
        # center the frequency range at zero
        freqs = np.concatenate((freqs[NFFT/2:]-Fs,freqs[:NFFT/2]))
        Pxx = np.concatenate((Pxx[NFFT/2:,:],Pxx[:NFFT/2,:]),0)

    return Pxx, freqs, t

specgram.__doc__ = specgram.__doc__ % kwdocd

_coh_error = """Coherence is calculated by averaging over *NFFT*
length segments.  Your signal is too short for your choice of *NFFT*.
"""

def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none,
        window=window_hanning, noverlap=0, pad_to=None, sides='default',
        scale_by_freq=None):
    """
    The coherence between *x* and *y*.  Coherence is the normalized
    cross spectral density:

    .. math::

        C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}

    *x*, *y*
        Array or sequence containing the data
    %(PSD)s
    The return value is the tuple (*Cxy*, *f*), where *f* are the
    frequencies of the coherence vector. For cohere, scaling the
    individual densities by the sampling frequency has no effect,
    since the factors cancel out.

    .. seealso::
        :func:`psd` and :func:`csd`:
            For information about the methods used to compute
            :math:`P_{xy}`, :math:`P_{xx}` and :math:`P_{yy}`.
    """

    if len(x)<2*NFFT:
        raise ValueError(_coh_error)
    Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
        scale_by_freq)
    Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
        scale_by_freq)
    Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
        scale_by_freq)

    Cxy = np.divide(np.absolute(Pxy)**2, Pxx*Pyy)
    Cxy.shape = (len(f),)
    return Cxy, f

cohere.__doc__ = cohere.__doc__ % kwdocd

def corrcoef(*args):
    """
    corrcoef(*X*) where *X* is a matrix returns a matrix of correlation
    coefficients for the columns of *X*

    corrcoef(*x*, *y*) where *x* and *y* are vectors returns the matrix of
    correlation coefficients for *x* and *y*.

    Numpy arrays can be real or complex.

    The correlation matrix is defined from the covariance matrix *C*
    as

    .. math::

      r_{ij} = \\frac{C_{ij}}{\\sqrt{C_{ii}C_{jj}}}
    """
    warnings.warn("Use numpy.corrcoef", DeprecationWarning)
    kw = dict(rowvar=False)
    return np.corrcoef(*args, **kw)


def polyfit(*args, **kwargs):
    u"""
    polyfit(*x*, *y*, *N*)

    Do a best fit polynomial of order *N* of *y* to *x*.  Return value
    is a vector of polynomial coefficients [pk ... p1 p0].  Eg, for
    *N*=2::

      p2*x0^2 + p1*x0 + p0 = y0
      p2*x1^2 + p1*x1 + p0 = y1
      p2*x2^2 + p1*x2 + p0 = y2
      .....
      p2*xk^2 + p1*xk + p0 = yk

    Method: if *X* is the Vandermonde matrix computed from *x* (see
    `vandermonds
    <http://mathworld.wolfram.com/VandermondeMatrix.html>`_), then the
    polynomial least squares solution is given by the '*p*' in

      X*p = y

    where *X* is a (len(*x*) \N{MULTIPLICATION SIGN} *N* + 1) matrix,
    *p* is a *N*+1 length vector, and *y* is a (len(*x*)
    \N{MULTIPLICATION SIGN} 1) vector.

    This equation can be solved as

    .. math::

      p = (X_t X)^{-1} X_t y

    where :math:`X_t` is the transpose of *X* and the superscript -1
    denotes the matrix inverse.  Numerically, however, this is not a good
    method, so we use :func:`numpy.linalg.lstsq`.

    For more info, see `least squares fitting
    <http://mathworld.wolfram.com/LeastSquaresFittingPolynomial.html>`_,
    but note that the *k*'s and *n*'s in the superscripts and subscripts
    on that page follow a different convention than the one used here.
    The linear algebra is correct, however.

    .. seealso::
        :func:`polyval`
    """
    warnings.warn("use numpy.polyfit", DeprecationWarning)
    return np.polyfit(*args, **kwargs)

def polyval(*args, **kwargs):
    """
    *y* = polyval(*p*, *x*)

    *p* is a vector of polynomial coefficients and *y* is the polynomial
    evaluated at *x*.

    Example code to remove a polynomial (quadratic) trend from y::

      p = polyfit(x, y, 2)
      trend = polyval(p, x)
      resid = y - trend

    .. seealso::
        :func:`polyfit`
    """
    warnings.warn("use numpy.polyval", DeprecationWarning)
    return np.polyval(*args, **kwargs)

def vander(*args, **kwargs):
    """
    *X* = vander(*x*, *N* = *None*)

    The Vandermonde matrix of vector *x*.  The *i*-th column of *X* is
    the *i*-th power of *x*.  *N* is the maximum power to compute; if
    *N* is *None* it defaults to len(*x*).
    """
    warnings.warn("Use numpy.vander()", DeprecationWarning)
    return np.vander(*args, **kwargs)


def donothing_callback(*args):
    pass

def cohere_pairs( X, ij, NFFT=256, Fs=2, detrend=detrend_none,
                  window=window_hanning, noverlap=0,
                  preferSpeedOverMemory=True,
                  progressCallback=donothing_callback,
                  returnPxx=False):

    u"""
    Cxy, Phase, freqs = cohere_pairs(X, ij, ...)

    Compute the coherence for all pairs in *ij*.  *X* is a
    (*numSamples*, *numCols*) numpy array.  *ij* is a list of tuples
    (*i*, *j*).  Each tuple is a pair of indexes into the columns of *X*
    for which you want to compute coherence.  For example, if *X* has 64
    columns, and you want to compute all nonredundant pairs, define *ij*
    as::

      ij = []
      for i in range(64):
          for j in range(i+1,64):
              ij.append( (i, j) )

    The other function arguments, except for *preferSpeedOverMemory*
    (see below), are explained in the help string of :func:`psd`.

    Return value is a tuple (*Cxy*, *Phase*, *freqs*).

      - *Cxy*: a dictionary of (*i*, *j*) tuples -> coherence vector
        for that pair.  I.e., ``Cxy[(i,j)] = cohere(X[:,i], X[:,j])``.
        Number of dictionary keys is ``len(ij)``.

      - *Phase*: a dictionary of phases of the cross spectral density
        at each frequency for each pair.  The keys are ``(i,j)``.

      - *freqs*: a vector of frequencies, equal in length to either
        the coherence or phase vectors for any (*i*, *j*) key.

    Eg, to make a coherence Bode plot::

      subplot(211)
      plot( freqs, Cxy[(12,19)])
      subplot(212)
      plot( freqs, Phase[(12,19)])

    For a large number of pairs, :func:`cohere_pairs` can be much more
    efficient than just calling :func:`cohere` for each pair, because
    it caches most of the intensive computations.  If *N* is the
    number of pairs, this function is O(N) for most of the heavy
    lifting, whereas calling cohere for each pair is
    O(N\N{SUPERSCRIPT TWO}).  However, because of the caching, it is
    also more memory intensive, making 2 additional complex arrays
    with approximately the same number of elements as *X*.
    The parameter *preferSpeedOverMemory*, if *False*, limits the
    caching by only making one, rather than two, complex cache arrays.
    This is useful if memory becomes critical.  Even when
    *preferSpeedOverMemory* is *False*, :func:`cohere_pairs` will
    still give significant performance gains over calling
    :func:`cohere` for each pair, and will use substantially less
    memory than if *preferSpeedOverMemory* is *True*.  In my tests
    with a (43000, 64) array over all non-redundant pairs,
    *preferSpeedOverMemory* = *True* delivered a 33% performance boost
    on a 1.7GHz Athlon with 512MB RAM compared with
    *preferSpeedOverMemory* = *False*.  But both solutions were more
    than 10x faster than naively crunching all possible pairs through
    cohere.

    .. seealso::
        :file:`test/cohere_pairs_test.py` in the src tree:
            For an example script that shows that this
            :func:`cohere_pairs` and :func:`cohere` give the same
            results for a given pair.
    """
    numRows, numCols = X.shape

    # zero pad if X is too short
    if numRows < NFFT:
        tmp = X
        X = np.zeros( (NFFT, numCols), X.dtype)
        X[:numRows,:] = tmp
        del tmp

    numRows, numCols = X.shape
    # get all the columns of X that we are interested in by checking
    # the ij tuples
    seen = {}
    for i,j in ij:
        seen[i]=1; seen[j] = 1
    allColumns = seen.keys()
    Ncols = len(allColumns)
    del seen

    # for real X, ignore the negative frequencies
    if np.iscomplexobj(X): numFreqs = NFFT
    else: numFreqs = NFFT//2+1

    # cache the FFT of every windowed, detrended NFFT length segment
    # of every channel.  If preferSpeedOverMemory, cache the conjugate
    # as well
    if cbook.iterable(window):
        assert(len(window) == NFFT)
        windowVals = window
    else:
        windowVals = window(np.ones((NFFT,), X.dtype))
    ind = range(0, numRows-NFFT+1, NFFT-noverlap)
    numSlices = len(ind)
    FFTSlices = {}
    FFTConjSlices = {}
    Pxx = {}
    slices = range(numSlices)
    # np.linalg.norm: the module-level norm() below is a deprecated stub
    normVal = np.linalg.norm(windowVals)**2
    # colNum replaces a stale loop variable from an earlier loop
    for colNum, iCol in enumerate(allColumns):
        progressCallback(colNum/Ncols, 'Caching FFTs')
        Slices = np.zeros( (numSlices,numFreqs), dtype=np.complex_)
        for iSlice in slices:
            thisSlice = X[ind[iSlice]:ind[iSlice]+NFFT, iCol]
            thisSlice = windowVals*detrend(thisSlice)
            Slices[iSlice,:] = np.fft.fft(thisSlice)[:numFreqs]

        FFTSlices[iCol] = Slices
        if preferSpeedOverMemory:
            FFTConjSlices[iCol] = np.conjugate(Slices)
        # average over slices (axis 0) to get one estimate per frequency
        Pxx[iCol] = np.divide(np.mean(np.absolute(Slices)**2, axis=0), normVal)
    del Slices, ind, windowVals

    # compute the coherences and phases for all pairs using the
    # cached FFTs
    Cxy = {}
    Phase = {}
    count = 0
    N = len(ij)
    for i,j in ij:
        count +=1
        if count%10==0:
            progressCallback(count/N, 'Computing coherences')

        if preferSpeedOverMemory:
            Pxy = FFTSlices[i] * FFTConjSlices[j]
        else:
            Pxy = FFTSlices[i] * np.conjugate(FFTSlices[j])
        if numSlices>1: Pxy = np.mean(Pxy, axis=0)  # average over slices
        Pxy = np.divide(Pxy, normVal)
        Cxy[(i,j)] = np.divide(np.absolute(Pxy)**2, Pxx[i]*Pxx[j])
        Phase[(i,j)] = np.arctan2(Pxy.imag, Pxy.real)

    freqs = Fs/NFFT*np.arange(numFreqs)
    if returnPxx:
        return Cxy, Phase, freqs, Pxx
    else:
        return Cxy, Phase, freqs

def entropy(y, bins):
    r"""
    Return the entropy of the data in *y*.

    .. math::

      -\sum p_i \log(p_i)

    where :math:`p_i` is the probability of observing *y* in the
    :math:`i^{th}` bin of *bins*.  *bins* can be a number of bins or a
    range of bins; see :func:`numpy.histogram`.
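    A minimal usage sketch (illustrative only; not from the original
    docs, and using :func:`numpy.random.randn` for sample data)::

      y = np.random.randn(200000)
      S = entropy(y, 100)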
    Compare *S* with analytic calculation for a Gaussian::

      x = mu + sigma * randn(200000)
      Sanalytic = 0.5 * ( 1.0 + log(2*pi*sigma**2.0) )
    """
    n, bins = np.histogram(y, bins)
    n = n.astype(np.float_)

    n = np.take(n, np.nonzero(n)[0])         # keep the positive bins

    p = np.divide(n, len(y))

    delta = bins[1] - bins[0]
    S = -1.0*np.sum(p*np.log(p)) + np.log(delta)
    #S = -1.0*np.sum(p*np.log(p))
    return S

def hist(y, bins=10, normed=0):
    """
    Return the histogram of *y* with *bins* equally sized bins.  If
    bins is an array, use those bins.  Return value is (*n*, *x*)
    where *n* is the count for each bin in *x*.

    If *normed* is *False*, return the counts in the first element of
    the returned tuple.  If *normed* is *True*, return the probability
    density :math:`\\frac{n}{len(y) \\cdot \\mathrm{dbin}}`.

    If *y* has rank > 1, it will be raveled.  If *y* is masked, only the
    unmasked values will be used.

    Credits: the Numeric 22 documentation
    """
    warnings.warn("Use numpy.histogram()", DeprecationWarning)
    return np.histogram(y, bins=bins, range=None, normed=normed)

def normpdf(x, *args):
    "Return the normal pdf evaluated at *x*; args provides *mu*, *sigma*"
    mu, sigma = args
    return 1./(np.sqrt(2*np.pi)*sigma)*np.exp(-0.5 * (1./sigma*(x - mu))**2)


def levypdf(x, gamma, alpha):
    "Return the levy pdf evaluated at *x* for params *gamma*, *alpha*"

    N = len(x)

    if N % 2 != 0:
        raise ValueError, 'x must be an even length array; try\n' + \
              'x = np.linspace(minx, maxx, N), where N is even'

    dx = x[1] - x[0]

    # dtype must be passed by keyword; as a positional argument it
    # would be taken as the step
    f = 1/(N*dx)*np.arange(-N/2, N/2, dtype=np.float_)

    ind = np.concatenate([np.arange(N/2, N, dtype=int),
                          np.arange(0, N/2, dtype=int)])

    df = f[1] - f[0]

    cfl = np.exp(-gamma*np.absolute(2*np.pi*f)**alpha)

    px = np.fft.fft(np.take(cfl, ind)*df).astype(np.float_)

    return np.take(px, ind)


def find(condition):
    "Return the indices where ravel(condition) is true"
    res, = np.nonzero(np.ravel(condition))
    return res

def trapz(x, y):
    """
    Trapezoidal integral of *y*(*x*).
    """
    warnings.warn("Use numpy.trapz(y,x) instead of trapz(x,y)",
                  DeprecationWarning)
    return np.trapz(y, x)
    #if len(x)!=len(y):
    #    raise ValueError, 'x and y must have the same length'
    #if len(x)<2:
    #    raise ValueError, 'x and y must have > 1 element'
    #return np.sum(0.5*np.diff(x)*(y[1:]+y[:-1]))


def longest_contiguous_ones(x):
    """
    Return the indices of the longest stretch of contiguous ones in *x*,
    assuming *x* is a vector of zeros and ones.  If there are two
    equally long stretches, pick the first.
    """
    x = np.ravel(x)
    if len(x)==0:
        return np.array([])

    ind = (x==0).nonzero()[0]
    if len(ind)==0:
        return np.arange(len(x))
    if len(ind)==len(x):
        return np.array([])

    y = np.zeros( (len(x)+2,), x.dtype)
    y[1:-1] = x
    dif = np.diff(y)
    up = (dif ==  1).nonzero()[0]
    dn = (dif == -1).nonzero()[0]
    i = (dn-up == max(dn - up)).nonzero()[0][0]
    ind = np.arange(up[i], dn[i])

    return ind

def longest_ones(x):
    '''alias for longest_contiguous_ones'''
    return longest_contiguous_ones(x)

def prepca(P, frac=0):
    """
    Compute the principal components of *P*.  *P* is a (*numVars*,
    *numObs*) array.  *frac* is the minimum fraction of variance that a
    component must contain to be included.

    Return value is a tuple of the form (*Pcomponents*, *Trans*,
    *fracVar*) where:

      - *Pcomponents* : a (numVars, numObs) array

      - *Trans* : the weights matrix, ie, *Pcomponents* = *Trans* *
        *P*

      - *fracVar* : the fraction of the variance accounted for by each
        component returned

    A similar function of the same name was in the Matlab (TM) R13
    Neural Network Toolbox but is not found in later versions; its
    successor seems to be called "processpcs".
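    A minimal usage sketch (illustrative only, with made-up shapes)::

      P = np.random.randn(10, 1000)   # 10 variables, 1000 observations
      Pcomponents, Trans, fracVar = prepca(P, frac=0.01)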
""" U,s,v = np.linalg.svd(P) varEach = s**2/P.shape[1] totVar = varEach.sum() fracVar = varEach/totVar ind = slice((fracVar>=frac).sum()) # select the components that are greater Trans = U[:,ind].transpose() # The transformed data Pcomponents = np.dot(Trans,P) return Pcomponents, Trans, fracVar[ind] def prctile(x, p = (0.0, 25.0, 50.0, 75.0, 100.0)): """ Return the percentiles of *x*. *p* can either be a sequence of percentile values or a scalar. If *p* is a sequence, the ith element of the return sequence is the *p*(i)-th percentile of *x*. If *p* is a scalar, the largest value of *x* less than or equal to the *p* percentage point in the sequence is returned. """ x = np.array(x).ravel() # we need a copy x.sort() Nx = len(x) if not cbook.iterable(p): return x[int(p*Nx/100.0)] p = np.asarray(p)* Nx/100.0 ind = p.astype(int) ind = np.where(ind>=Nx, Nx-1, ind) return x.take(ind) def prctile_rank(x, p): """ Return the rank for each element in *x*, return the rank 0..len(*p*). Eg if *p* = (25, 50, 75), the return value will be a len(*x*) array with values in [0,1,2,3] where 0 indicates the value is less than the 25th percentile, 1 indicates the value is >= the 25th and < 50th percentile, ... and 3 indicates the value is above the 75th percentile cutoff. *p* is either an array of percentiles in [0..100] or a scalar which indicates how many quantiles of data you want ranked. """ if not cbook.iterable(p): p = np.arange(100.0/p, 100.0, 100.0/p) else: p = np.asarray(p) if p.max()<=1 or p.min()<0 or p.max()>100: raise ValueError('percentiles should be in range 0..100, not 0..1') ptiles = prctile(x, p) return np.searchsorted(ptiles, x) def center_matrix(M, dim=0): """ Return the matrix *M* with each row having zero mean and unit std. If *dim* = 1 operate on columns instead of rows. (*dim* is opposite to the numpy axis kwarg.) """ M = np.asarray(M, np.float_) if dim: M = (M - M.mean(axis=0)) / M.std(axis=0) else: M = (M - M.mean(axis=1)[:,np.newaxis]) M = M / M.std(axis=1)[:,np.newaxis] return M def rk4(derivs, y0, t): """ Integrate 1D or ND system of ODEs using 4-th order Runge-Kutta. This is a toy implementation which may be useful if you find yourself stranded on a system w/o scipy. Otherwise use :func:`scipy.integrate`. *y0* initial state vector *t* sample times *derivs* returns the derivative of the system and has the signature ``dy = derivs(yi, ti)`` Example 1 :: ## 2D system def derivs6(x,t): d1 = x[0] + 2*x[1] d2 = -3*x[0] + 4*x[1] return (d1, d2) dt = 0.0005 t = arange(0.0, 2.0, dt) y0 = (1,2) yout = rk4(derivs6, y0, t) Example 2:: ## 1D system alpha = 2 def derivs(x,t): return -alpha*x + exp(-t) y0 = 1 yout = rk4(derivs, y0, t) If you have access to scipy, you should probably be using the scipy.integrate tools rather than this function. """ try: Ny = len(y0) except TypeError: yout = np.zeros( (len(t),), np.float_) else: yout = np.zeros( (len(t), Ny), np.float_) yout[0] = y0 i = 0 for i in np.arange(len(t)-1): thist = t[i] dt = t[i+1] - thist dt2 = dt/2.0 y0 = yout[i] k1 = np.asarray(derivs(y0, thist)) k2 = np.asarray(derivs(y0 + dt2*k1, thist+dt2)) k3 = np.asarray(derivs(y0 + dt2*k2, thist+dt2)) k4 = np.asarray(derivs(y0 + dt*k3, thist+dt)) yout[i+1] = y0 + dt/6.0*(k1 + 2*k2 + 2*k3 + k4) return yout def bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0, mux=0.0, muy=0.0, sigmaxy=0.0): """ Bivariate Gaussian distribution for equal shape *X*, *Y*. See `bivariate normal <http://mathworld.wolfram.com/BivariateNormalDistribution.html>`_ at mathworld. 
""" Xmu = X-mux Ymu = Y-muy rho = sigmaxy/(sigmax*sigmay) z = Xmu**2/sigmax**2 + Ymu**2/sigmay**2 - 2*rho*Xmu*Ymu/(sigmax*sigmay) denom = 2*np.pi*sigmax*sigmay*np.sqrt(1-rho**2) return np.exp( -z/(2*(1-rho**2))) / denom def get_xyz_where(Z, Cond): """ *Z* and *Cond* are *M* x *N* matrices. *Z* are data and *Cond* is a boolean matrix where some condition is satisfied. Return value is (*x*, *y*, *z*) where *x* and *y* are the indices into *Z* and *z* are the values of *Z* at those indices. *x*, *y*, and *z* are 1D arrays. """ X,Y = np.indices(Z.shape) return X[Cond], Y[Cond], Z[Cond] def get_sparse_matrix(M,N,frac=0.1): """ Return a *M* x *N* sparse matrix with *frac* elements randomly filled. """ data = np.zeros((M,N))*0. for i in range(int(M*N*frac)): x = np.random.randint(0,M-1) y = np.random.randint(0,N-1) data[x,y] = np.random.rand() return data def dist(x,y): """ Return the distance between two points. """ d = x-y return np.sqrt(np.dot(d,d)) def dist_point_to_segment(p, s0, s1): """ Get the distance of a point to a segment. *p*, *s0*, *s1* are *xy* sequences This algorithm from http://softsurfer.com/Archive/algorithm_0102/algorithm_0102.htm#Distance%20to%20Ray%20or%20Segment """ p = np.asarray(p, np.float_) s0 = np.asarray(s0, np.float_) s1 = np.asarray(s1, np.float_) v = s1 - s0 w = p - s0 c1 = np.dot(w,v); if ( c1 <= 0 ): return dist(p, s0); c2 = np.dot(v,v) if ( c2 <= c1 ): return dist(p, s1); b = c1 / c2 pb = s0 + b * v; return dist(p, pb) def segments_intersect(s1, s2): """ Return *True* if *s1* and *s2* intersect. *s1* and *s2* are defined as:: s1: (x1, y1), (x2, y2) s2: (x3, y3), (x4, y4) """ (x1, y1), (x2, y2) = s1 (x3, y3), (x4, y4) = s2 den = ((y4-y3) * (x2-x1)) - ((x4-x3)*(y2-y1)) n1 = ((x4-x3) * (y1-y3)) - ((y4-y3)*(x1-x3)) n2 = ((x2-x1) * (y1-y3)) - ((y2-y1)*(x1-x3)) if den == 0: # lines parallel return False u1 = n1/den u2 = n2/den return 0.0 <= u1 <= 1.0 and 0.0 <= u2 <= 1.0 def fftsurr(x, detrend=detrend_none, window=window_none): """ Compute an FFT phase randomized surrogate of *x*. """ if cbook.iterable(window): x=window*detrend(x) else: x = window(detrend(x)) z = np.fft.fft(x) a = 2.*np.pi*1j phase = a * np.random.rand(len(x)) z = z*np.exp(phase) return np.fft.ifft(z).real def liaupunov(x, fprime): """ *x* is a very long trajectory from a map, and *fprime* returns the derivative of *x*. Returns : .. math:: \lambda = \\frac{1}{n}\\sum \\ln|f^'(x_i)| .. seealso:: Sec 10.5 Strogatz (1994) "Nonlinear Dynamics and Chaos". `Wikipedia article on Lyapunov Exponent <http://en.wikipedia.org/wiki/Lyapunov_exponent>`_. .. note:: What the function here calculates may not be what you really want; *caveat emptor*. It also seems that this function's name is badly misspelled. """ return np.mean(np.log(np.absolute(fprime(x)))) class FIFOBuffer: """ A FIFO queue to hold incoming *x*, *y* data in a rotating buffer using numpy arrays under the hood. It is assumed that you will call asarrays much less frequently than you add data to the queue -- otherwise another data structure will be faster. This can be used to support plots where data is added from a real time feed and the plot object wants to grab data from the buffer and plot it to screen less freqeuently than the incoming. If you set the *dataLim* attr to :class:`~matplotlib.transforms.BBox` (eg :attr:`matplotlib.Axes.dataLim`), the *dataLim* will be updated as new data come in. TODO: add a grow method that will extend nmax .. note:: mlab seems like the wrong place for this class. 
""" def __init__(self, nmax): """ Buffer up to *nmax* points. """ self._xa = np.zeros((nmax,), np.float_) self._ya = np.zeros((nmax,), np.float_) self._xs = np.zeros((nmax,), np.float_) self._ys = np.zeros((nmax,), np.float_) self._ind = 0 self._nmax = nmax self.dataLim = None self.callbackd = {} def register(self, func, N): """ Call *func* every time *N* events are passed; *func* signature is ``func(fifo)``. """ self.callbackd.setdefault(N, []).append(func) def add(self, x, y): """ Add scalar *x* and *y* to the queue. """ if self.dataLim is not None: xys = ((x,y),) self.dataLim.update(xys, -1) #-1 means use the default ignore setting ind = self._ind % self._nmax #print 'adding to fifo:', ind, x, y self._xs[ind] = x self._ys[ind] = y for N,funcs in self.callbackd.items(): if (self._ind%N)==0: for func in funcs: func(self) self._ind += 1 def last(self): """ Get the last *x*, *y* or *None*. *None* if no data set. """ if self._ind==0: return None, None ind = (self._ind-1) % self._nmax return self._xs[ind], self._ys[ind] def asarrays(self): """ Return *x* and *y* as arrays; their length will be the len of data added or *nmax*. """ if self._ind<self._nmax: return self._xs[:self._ind], self._ys[:self._ind] ind = self._ind % self._nmax self._xa[:self._nmax-ind] = self._xs[ind:] self._xa[self._nmax-ind:] = self._xs[:ind] self._ya[:self._nmax-ind] = self._ys[ind:] self._ya[self._nmax-ind:] = self._ys[:ind] return self._xa, self._ya def update_datalim_to_current(self): """ Update the *datalim* in the current data in the fifo. """ if self.dataLim is None: raise ValueError('You must first set the dataLim attr') x, y = self.asarrays() self.dataLim.update_numerix(x, y, True) def movavg(x,n): """ Compute the len(*n*) moving average of *x*. """ w = np.empty((n,), dtype=np.float_) w[:] = 1.0/n return np.convolve(x, w, mode='valid') def save(fname, X, fmt='%.18e',delimiter=' '): """ Save the data in *X* to file *fname* using *fmt* string to convert the data to strings. *fname* can be a filename or a file handle. If the filename ends in '.gz', the file is automatically saved in compressed gzip format. The :func:`load` function understands gzipped files transparently. Example usage:: save('test.out', X) # X is an array save('test1.out', (x,y,z)) # x,y,z equal sized 1D arrays save('test2.out', x) # x is 1D save('test3.out', x, fmt='%1.4e') # use exponential notation *delimiter* is used to separate the fields, eg. *delimiter* ',' for comma-separated values. """ if cbook.is_string_like(fname): if fname.endswith('.gz'): import gzip fh = gzip.open(fname,'wb') else: fh = file(fname,'w') elif hasattr(fname, 'seek'): fh = fname else: raise ValueError('fname must be a string or file handle') X = np.asarray(X) origShape = None if X.ndim == 1: origShape = X.shape X.shape = len(X), 1 for row in X: fh.write(delimiter.join([fmt%val for val in row]) + '\n') if origShape is not None: X.shape = origShape def load(fname,comments='#',delimiter=None, converters=None,skiprows=0, usecols=None, unpack=False, dtype=np.float_): """ Load ASCII data from *fname* into an array and return the array. The data must be regular, same number of values in every row *fname* can be a filename or a file handle. Support for gzipped files is automatic, if the filename ends in '.gz'. matfile data is not supported; for that, use :mod:`scipy.io.mio` module. 
    Example usage::

      X = load('test.dat')  # data in two columns
      t = X[:,0]
      y = X[:,1]

    Alternatively, you can do the same with "unpack"; see below::

      X = load('test.dat')    # a matrix of data
      x = load('test.dat')    # a single column of data

    - *comments*: the character used to indicate the start of a comment
      in the file

    - *delimiter* is a string-like character used to separate values in
      the file. If *delimiter* is unspecified or *None*, any
      whitespace string is a separator.

    - *converters*, if not *None*, is a dictionary mapping column number to
      a function that will convert that column to a float (or the optional
      *dtype* if specified).  Eg, if column 0 is a date string::

        converters = {0:datestr2num}

    - *skiprows* is the number of rows from the top to skip.

    - *usecols*, if not *None*, is a sequence of integer column indexes to
      extract where 0 is the first column, eg ``usecols=[1,4,5]`` to extract
      just the 2nd, 5th and 6th columns

    - *unpack*, if *True*, will transpose the matrix allowing you to unpack
      into named arguments on the left hand side::

        t,y = load('test.dat', unpack=True)  # for two column data
        x,y,z = load('somefile.dat', usecols=[3,5,7], unpack=True)

    - *dtype*: the array will have this dtype.  default: ``numpy.float_``

    .. seealso::
        See :file:`examples/pylab_examples/load_converter.py` in the source tree:
            Exercises many of these options.
    """

    if converters is None: converters = {}
    fh = cbook.to_filehandle(fname)
    X = []

    if delimiter == ' ':
        # space splitting is a special case since x.split() is what
        # you want, not x.split(' ')
        def splitfunc(x):
            return x.split()
    else:
        def splitfunc(x):
            return x.split(delimiter)

    converterseq = None
    for i,line in enumerate(fh):
        if i<skiprows: continue
        line = line.split(comments, 1)[0].strip()
        if not len(line): continue
        if converterseq is None:
            converterseq = [converters.get(j,float)
                            for j,val in enumerate(splitfunc(line))]
        if usecols is not None:
            vals = splitfunc(line)
            row = [converterseq[j](vals[j]) for j in usecols]
        else:
            row = [converterseq[j](val)
                   for j,val in enumerate(splitfunc(line))]
        thisLen = len(row)
        X.append(row)

    X = np.array(X, dtype)
    r,c = X.shape
    if r==1 or c==1:
        X.shape = max(r,c),
    if unpack: return X.transpose()
    else: return X

def slopes(x, y):
    """
    :func:`slopes` calculates the slope *y*'(*x*).

    Given data vectors *x* and *y*, :func:`slopes` calculates the
    slope of the curve *y*(*x*).  The slope is estimated using the
    slope obtained from that of a parabola through any three
    consecutive points.

    This method should be superior to that described in the appendix
    of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russel
    W. Stineman (Creative Computing July 1980) in at least one aspect:

      Circles for interpolation demand a known aspect ratio between
      *x*- and *y*-values.  For many functions, however, the abscissa
      are given in different dimensions, so an aspect ratio is
      completely arbitrary.

    The parabola method gives very similar results to the circle
    method for most regular cases but behaves much better in special
    cases.

    Norbert Nemec, Institute of Theoretical Physics, University of
    Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de

    (inspired by an original implementation by Halldor Bjornsson,
    Icelandic Meteorological Office, March 2006 halldor at vedur.is)
    """
    # Cast key variables as float.
    x = np.asarray(x, np.float_)
    y = np.asarray(y, np.float_)
    yp = np.zeros(y.shape, np.float_)

    dx = x[1:] - x[:-1]
    dy = y[1:] - y[:-1]
    dydx = dy/dx

    # slopes of the interior points, then one-sided estimates at the ends
    yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1])
    yp[0] = 2.0 * dy[0]/dx[0] - yp[1]
    yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2]
    return yp


def stineman_interp(xi, x, y, yp=None):
    """
    Well behaved data interpolation.  Given data vectors *x* and *y*,
    the slope vector *yp* and a new abscissa vector *xi*, the function
    :func:`stineman_interp` uses Stineman interpolation to calculate a
    vector *yi* corresponding to *xi*.

    Here's an example that generates a coarse sine curve, then
    interpolates over a finer abscissa::

      x = linspace(0,2*pi,20);  y = sin(x); yp = cos(x)
      xi = linspace(0,2*pi,40);
      yi = stineman_interp(xi,x,y,yp);
      plot(x,y,'o',xi,yi)

    The interpolation method is described in the article A
    CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
    W. Stineman.  The article appeared in the July 1980 issue of
    Creative Computing with a note from the editor stating that while
    they were not an academic journal, once in a while something
    serious and original came in, adding that this was "apparently a
    real solution" to a well known problem.

    For *yp* = *None*, the routine automatically determines the slopes
    using the :func:`slopes` routine.

    *x* is assumed to be sorted in increasing order.

    For values ``xi[j] < x[0]`` or ``xi[j] > x[-1]``, the routine
    tries an extrapolation.  The relevance of the data obtained from
    this is, of course, questionable...

    Original implementation by Halldor Bjornsson, Icelandic
    Meteorological Office, March 2006 halldor at vedur.is

    Completely reworked and optimized for Python by Norbert Nemec,
    Institute of Theoretical Physics, University of Regensburg, April
    2006 Norbert.Nemec at physik.uni-regensburg.de
    """
    # Cast key variables as float.
    x = np.asarray(x, np.float_)
    y = np.asarray(y, np.float_)
    assert x.shape == y.shape
    N = len(y)

    if yp is None:
        yp = slopes(x, y)
    else:
        yp = np.asarray(yp, np.float_)

    xi = np.asarray(xi, np.float_)
    yi = np.zeros(xi.shape, np.float_)

    # calculate linear slopes
    dx = x[1:] - x[:-1]
    dy = y[1:] - y[:-1]
    s = dy/dx  # note length of s is N-1 so last element is N-2

    # find the segment each xi is in
    # this line actually is the key to the efficiency of this implementation
    idx = np.searchsorted(x[1:-1], xi)

    # now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
    # except at the boundaries, where it may be that
    # xi[j] < x[0] or xi[j] > x[-1]

    # the y-values that would come out from a linear interpolation:
    sidx = s.take(idx)
    xidx = x.take(idx)
    yidx = y.take(idx)
    xidxp1 = x.take(idx+1)
    yo = yidx + sidx * (xi - xidx)

    # the difference that comes when using the slopes given in yp
    dy1 = (yp.take(idx)- sidx)   * (xi - xidx)   # using the yp slope of the left point
    dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1)  # using the yp slope of the right point

    dy1dy2 = dy1*dy2
    # The following is optimized for Python.
    # The solution actually does more calculations than necessary,
    # but exploiting the power of numpy, this is far more efficient
    # than coding a loop by hand in Python.
    yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
                                 ((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
                                  0.0,
                                  1/(dy1+dy2),))
    return yi

def inside_poly(points, verts):
    """
    *points* is a sequence of *x*, *y* points.
    *verts* is a sequence of *x*, *y* vertices of a polygon.

    Return value is a sequence of indices into points for the points
    that are inside the polygon.
    """
    res, = np.nonzero(nxutils.points_inside_poly(points, verts))
    return res

def poly_below(ymin, xs, ys):
    """
    Given arrays *xs* and *ys*, return the vertices of a polygon that
    has a scalar lower bound *ymin* and an upper bound at the *ys*.

    Intended for use with Axes.fill, eg::

      xv, yv = poly_below(0, x, y)
      ax.fill(xv, yv)
    """
    # note: the original code returned poly_between(xs, ys, xmin),
    # referencing the undefined name xmin with the arguments in the
    # wrong order; ymin is the lower bound
    return poly_between(xs, ymin, ys)

def poly_between(x, ylower, yupper):
    """
    Given a sequence of *x*, *ylower* and *yupper*, return the polygon
    that fills the regions between them.  *ylower* or *yupper* can be
    scalar or iterable.  If they are iterable, they must be equal in
    length to *x*.

    Return value is *x*, *y* arrays for use with Axes.fill.
    """
    Nx = len(x)
    if not cbook.iterable(ylower):
        ylower = ylower*np.ones(Nx)

    if not cbook.iterable(yupper):
        yupper = yupper*np.ones(Nx)

    x = np.concatenate( (x, x[::-1]) )
    y = np.concatenate( (yupper, ylower[::-1]) )
    return x,y

### the following code was written and submitted by Fernando Perez
### from the ipython numutils package under a BSD license
# begin fperez functions

"""
A set of convenient utilities for numerical work.

Most of this module requires numpy or is meant to be used with it.

Copyright (c) 2001-2004, Fernando Perez. <Fernando.Perez@colorado.edu>
All rights reserved.

This license was generated from the BSD license template as found in:
http://www.opensource.org/licenses/bsd-license.php

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

  * Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

  * Redistributions in binary form must reproduce the above copyright notice,
    this list of conditions and the following disclaimer in the documentation
    and/or other materials provided with the distribution.

  * Neither the name of the IPython project nor the names of its contributors
    may be used to endorse or promote products derived from this software
    without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" import operator import math #***************************************************************************** # Globals #**************************************************************************** # function definitions exp_safe_MIN = math.log(2.2250738585072014e-308) exp_safe_MAX = 1.7976931348623157e+308 def exp_safe(x): """ Compute exponentials which safely underflow to zero. Slow, but convenient to use. Note that numpy provides proper floating point exception handling with access to the underlying hardware. """ if type(x) is np.ndarray: return exp(np.clip(x,exp_safe_MIN,exp_safe_MAX)) else: return math.exp(x) def amap(fn,*args): """ amap(function, sequence[, sequence, ...]) -> array. Works like :func:`map`, but it returns an array. This is just a convenient shorthand for ``numpy.array(map(...))``. """ return np.array(map(fn,*args)) #from numpy import zeros_like def zeros_like(a): """ Return an array of zeros of the shape and typecode of *a*. """ warnings.warn("Use numpy.zeros_like(a)", DeprecationWarning) return np.zeros_like(a) #from numpy import sum as sum_flat def sum_flat(a): """ Return the sum of all the elements of *a*, flattened out. It uses ``a.flat``, and if *a* is not contiguous, a call to ``ravel(a)`` is made. """ warnings.warn("Use numpy.sum(a) or a.sum()", DeprecationWarning) return np.sum(a) #from numpy import mean as mean_flat def mean_flat(a): """ Return the mean of all the elements of *a*, flattened out. """ warnings.warn("Use numpy.mean(a) or a.mean()", DeprecationWarning) return np.mean(a) def rms_flat(a): """ Return the root mean square of all the elements of *a*, flattened out. """ return np.sqrt(np.mean(np.absolute(a)**2)) def l1norm(a): """ Return the *l1* norm of *a*, flattened out. Implemented as a separate function (not a call to :func:`norm` for speed). """ return np.sum(np.absolute(a)) def l2norm(a): """ Return the *l2* norm of *a*, flattened out. Implemented as a separate function (not a call to :func:`norm` for speed). """ return np.sqrt(np.sum(np.absolute(a)**2)) def norm_flat(a,p=2): """ norm(a,p=2) -> l-p norm of a.flat Return the l-p norm of *a*, considered as a flat array. This is NOT a true matrix norm, since arrays of arbitrary rank are always flattened. *p* can be a number or the string 'Infinity' to get the L-infinity norm. """ # This function was being masked by a more general norm later in # the file. We may want to simply delete it. if p=='Infinity': return np.amax(np.absolute(a)) else: return (np.sum(np.absolute(a)**p))**(1.0/p) def frange(xini,xfin=None,delta=None,**kw): """ frange([start,] stop[, step, keywords]) -> array of floats Return a numpy ndarray containing a progression of floats. Similar to :func:`numpy.arange`, but defaults to a closed interval. ``frange(x0, x1)`` returns ``[x0, x0+1, x0+2, ..., x1]``; *start* defaults to 0, and the endpoint *is included*. This behavior is different from that of :func:`range` and :func:`numpy.arange`. This is deliberate, since :func:`frange` will probably be more useful for generating lists of points for function evaluation, and endpoints are often desired in this use. The usual behavior of :func:`range` can be obtained by setting the keyword *closed* = 0, in this case, :func:`frange` basically becomes :func:numpy.arange`. When *step* is given, it specifies the increment (or decrement). All arguments can be floating point numbers. ``frange(x0,x1,d)`` returns ``[x0,x0+d,x0+2d,...,xfin]`` where *xfin* <= *x1*. :func:`frange` can also be called with the keyword *npts*. 
    This sets the number of points the list should contain (and
    overrides the value *step* might have been given).  :func:`numpy.arange`
    doesn't offer this option.

    Examples::

      >>> frange(3)
      array([ 0.,  1.,  2.,  3.])
      >>> frange(3,closed=0)
      array([ 0.,  1.,  2.])
      >>> frange(1,6,2)
      array([1, 3, 5])   or 1,3,5,7, depending on floating point vagaries
      >>> frange(1,6.5,npts=5)
      array([ 1.   ,  2.375,  3.75 ,  5.125,  6.5  ])
    """
    # defaults
    kw.setdefault('closed', 1)
    endpoint = kw['closed'] != 0

    # funny logic to allow the *first* argument to be optional (like range())
    # This was modified with a simpler version from a similar frange() found
    # at http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66472
    if xfin is None:
        xfin = xini + 0.0
        xini = 0.0

    if delta is None:
        delta = 1.0

    # compute # of points, spacing and return final list
    try:
        npts = kw['npts']
        delta = (xfin-xini) / float(npts-endpoint)
    except KeyError:
        npts = int(round((xfin-xini)/delta)) + endpoint
        #npts = int(floor((xfin-xini)/delta)*(1.0+1e-10)) + endpoint
        # round finds the nearest, so the endpoint can be up to
        # delta/2 larger than xfin.

    return np.arange(npts)*delta+xini
# end frange()

#import numpy.diag as diagonal_matrix
def diagonal_matrix(diag):
    """
    Return square diagonal matrix whose non-zero elements are given by
    the input array.
    """
    warnings.warn("Use numpy.diag(d)", DeprecationWarning)
    return np.diag(diag)

def identity(n, rank=2, dtype='l', typecode=None):
    """
    Returns the identity matrix of shape (*n*, *n*, ..., *n*) (rank *r*).

    For ranks higher than 2, this object is simply a multi-index
    Kronecker delta::

                            /  1  if i0=i1=...=iR,
        id[i0,i1,...,iR] = -|
                            \\  0  otherwise.

    Optionally a *dtype* (or typecode) may be given (it defaults to 'l').

    Since rank defaults to 2, this function behaves in the default case
    (when only *n* is given) like ``numpy.identity(n)`` -- but
    surprisingly, it is much faster.
    """
    if typecode is not None:
        warnings.warn("Use dtype kwarg instead of typecode",
                      DeprecationWarning)
        dtype = typecode
    iden = np.zeros((n,)*rank, dtype)
    for i in range(n):
        idx = (i,)*rank
        iden[idx] = 1
    return iden

def base_repr(number, base=2, padding=0):
    """
    Return the representation of a *number* in any given *base*.
    """
    chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if number < base:
        return (padding - 1) * chars[0] + chars[int(number)]
    max_exponent = int(math.log(number)/math.log(base))
    max_power = long(base) ** max_exponent
    lead_digit = int(number/max_power)
    return chars[lead_digit] + \
           base_repr(number - max_power * lead_digit, base,
                     max(padding - 1, max_exponent))

def binary_repr(number, max_length=1025):
    """
    Return the binary representation of the input *number* as a
    string.

    This is more efficient than using :func:`base_repr` with base 2.

    Increase the value of max_length for very large numbers.  Note that
    on 32-bit machines, 2**1023 is the largest integer power of 2
    which can be converted to a Python float.
    """
    #assert number < 2L << max_length
    shifts = map(operator.rshift, max_length * [number],
                 range(max_length - 1, -1, -1))
    digits = map(operator.mod, shifts, max_length * [2])
    if not digits.count(1):
        return 0
    digits = digits[digits.index(1):]
    return ''.join(map(repr, digits)).replace('L', '')

def log2(x, ln2=math.log(2.0)):
    """
    Return the log(*x*) in base 2.

    This is a _slow_ function but which is guaranteed to return the
    correct integer value if the input is an integer exact power of 2.
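    For example (illustrative only)::

      log2(1024)    # exactly 10, via the binary_repr fast path
      log2(1000.0)  # approximately 9.966, via math.log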
""" try: bin_n = binary_repr(x)[1:] except (AssertionError,TypeError): return math.log(x)/ln2 else: if '1' in bin_n: return math.log(x)/ln2 else: return len(bin_n) def ispower2(n): """ Returns the log base 2 of *n* if *n* is a power of 2, zero otherwise. Note the potential ambiguity if *n* == 1: 2**0 == 1, interpret accordingly. """ bin_n = binary_repr(n)[1:] if '1' in bin_n: return 0 else: return len(bin_n) def isvector(X): """ Like the Matlab (TM) function with the same name, returns *True* if the supplied numpy array or matrix *X* looks like a vector, meaning it has a one non-singleton axis (i.e., it can have multiple axes, but all must have length 1, except for one of them). If you just want to see if the array has 1 axis, use X.ndim == 1. """ return np.prod(X.shape)==np.max(X.shape) #from numpy import fromfunction as fromfunction_kw def fromfunction_kw(function, dimensions, **kwargs): """ Drop-in replacement for :func:`numpy.fromfunction`. Allows passing keyword arguments to the desired function. Call it as (keywords are optional):: fromfunction_kw(MyFunction, dimensions, keywords) The function ``MyFunction`` is responsible for handling the dictionary of keywords it will receive. """ warnings.warn("Use numpy.fromfunction()", DeprecationWarning) return np.fromfunction(function, dimensions, **kwargs) ### end fperez numutils code def rem(x,y): """ Deprecated - see :func:`numpy.remainder` """ raise NotImplementedError('Deprecated - see numpy.remainder') def norm(x,y=2): """ Deprecated - see :func:`numpy.linalg.norm` """ raise NotImplementedError('Deprecated - see numpy.linalg.norm') def orth(A): """ Deprecated - needs clean room implementation """ raise NotImplementedError('Deprecated - needs clean room implementation') def rank(x): """ Deprecated - see :func:`numpy.rank` """ raise NotImplementedError('Deprecated - see numpy.rank') def sqrtm(x): """ Deprecated - needs clean room implementation """ raise NotImplementedError('Deprecated - see scipy.linalg.sqrtm') def mfuncC(f, x): """ Deprecated """ raise NotImplementedError('Deprecated - needs clean room implementation') def approx_real(x): """ Deprecated - needs clean room implementation """ raise NotImplementedError('Deprecated - needs clean room implementation') #helpers for loading, saving, manipulating and viewing numpy record arrays def safe_isnan(x): ':func:`numpy.isnan` for arbitrary types' if cbook.is_string_like(x): return False try: b = np.isnan(x) except NotImplementedError: return False except TypeError: return False else: return b def safe_isinf(x): ':func:`numpy.isinf` for arbitrary types' if cbook.is_string_like(x): return False try: b = np.isinf(x) except NotImplementedError: return False except TypeError: return False else: return b def rec_view(rec): """ Return a view of an ndarray as a recarray .. seealso:: http://projects.scipy.org/pipermail/numpy-discussion/2008-August/036429.html """ return rec.view(np.recarray) #return rec.view(dtype=(np.record, rec.dtype), type=np.recarray) def rec_append_field(rec, name, arr, dtype=None): """ Return a new record array with field name populated with data from array *arr*. This function is Deprecated. Please use :func:`rec_append_fields`. """ warnings.warn("use rec_append_fields", DeprecationWarning) return rec_append_fields(rec, name, arr, dtype) def rec_append_fields(rec, names, arrs, dtypes=None): """ Return a new record array with field names populated with data from arrays in *arrs*. If appending a single field, then *names*, *arrs* and *dtypes* do not have to be lists. 
    They can just be the values themselves.
    """
    if (not cbook.is_string_like(names) and cbook.iterable(names) \
            and len(names) and cbook.is_string_like(names[0])):
        if len(names) != len(arrs):
            raise ValueError, "number of arrays does not match number of names"
    else:  # we have only 1 name and 1 array
        names = [names]
        arrs = [arrs]
    arrs = map(np.asarray, arrs)
    if dtypes is None:
        dtypes = [a.dtype for a in arrs]
    elif not cbook.iterable(dtypes):
        dtypes = [dtypes]
    if len(arrs) != len(dtypes):
        if len(dtypes) == 1:
            dtypes = dtypes * len(arrs)
        else:
            raise ValueError, "dtypes must be None, a single dtype or a list"

    newdtype = np.dtype(rec.dtype.descr + zip(names, dtypes))
    newrec = np.empty(rec.shape, dtype=newdtype)
    for field in rec.dtype.fields:
        newrec[field] = rec[field]
    for name, arr in zip(names, arrs):
        newrec[name] = arr
    return rec_view(newrec)

def rec_drop_fields(rec, names):
    """
    Return a new numpy record array with fields in *names* dropped.
    """

    names = set(names)
    Nr = len(rec)

    newdtype = np.dtype([(name, rec.dtype[name]) for name in rec.dtype.names
                         if name not in names])

    newrec = np.empty(Nr, dtype=newdtype)
    for field in newdtype.names:
        newrec[field] = rec[field]

    return rec_view(newrec)


def rec_groupby(r, groupby, stats):
    """
    *r* is a numpy record array

    *groupby* is a sequence of record array attribute names that
    together form the grouping key.  eg ('date', 'productcode')

    *stats* is a sequence of (*attr*, *func*, *outname*) tuples which
    will call ``x = func(attr)`` and assign *x* to the record array
    output with attribute *outname*.  For example::

      stats = ( ('sales', len, 'numsales'), ('sales', np.mean, 'avgsale') )

    Return record array has *dtype* names for each attribute name in
    the *groupby* argument, with the associated group values, and
    for each outname name in the *stats* argument, with the associated
    stat summary output.
    """
    # build a dictionary from groupby keys-> list of indices into r with
    # those keys
    rowd = dict()
    for i, row in enumerate(r):
        key = tuple([row[attr] for attr in groupby])
        rowd.setdefault(key, []).append(i)

    # sort the output by groupby keys
    keys = rowd.keys()
    keys.sort()

    rows = []
    for key in keys:
        row = list(key)
        # get the indices for this groupby key
        ind = rowd[key]
        thisr = r[ind]
        # call each stat function for this groupby slice
        row.extend([func(thisr[attr]) for attr, func, outname in stats])
        rows.append(row)

    # build the output record array with groupby and outname attributes
    attrs, funcs, outnames = zip(*stats)
    names = list(groupby)
    names.extend(outnames)
    return np.rec.fromrecords(rows, names=names)


def rec_summarize(r, summaryfuncs):
    """
    *r* is a numpy record array

    *summaryfuncs* is a list of (*attr*, *func*, *outname*) tuples
    which will apply *func* to the array *r*[attr] and assign the
    output to a new attribute name *outname*.  The returned record
    array is identical to *r*, with extra arrays for each element in
    *summaryfuncs*.
    """

    names = list(r.dtype.names)
    arrays = [r[name] for name in names]

    for attr, func, outname in summaryfuncs:
        names.append(outname)
        arrays.append(np.asarray(func(r[attr])))

    return np.rec.fromarrays(arrays, names=names)


def rec_join(key, r1, r2, jointype='inner', defaults=None,
             r1postfix='1', r2postfix='2'):
    """
    Join record arrays *r1* and *r2* on *key*; *key* is a tuple of
    field names -- if *key* is a string it is assumed to be a single
    attribute name.  If *r1* and *r2* have equal values on all the
    keys in the *key* tuple, then their fields will be merged into a
    new record array containing the intersection of the fields of *r1*
    and *r2*.
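    A minimal usage sketch (illustrative only, with made-up fields)::

      r1 = np.rec.fromrecords([(1, 10.0), (2, 20.0)], names='id,price')
      r2 = np.rec.fromrecords([(1, 0.5), (3, 0.7)], names='id,ret')
      joined = rec_join('id', r1, r2, jointype='inner')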
    *r1* (also *r2*) must not have any duplicate keys.

    The *jointype* keyword can be 'inner', 'outer', 'leftouter'.  To
    do a rightouter join just reverse *r1* and *r2*.

    The *defaults* keyword is a dictionary filled with
    ``{column_name:default_value}`` pairs.

    The keywords *r1postfix* and *r2postfix* are postfixed to column
    names (other than keys) that are both in *r1* and *r2*.
    """

    if cbook.is_string_like(key):
        key = (key, )

    for name in key:
        if name not in r1.dtype.names:
            raise ValueError('r1 does not have key field %s'%name)
        if name not in r2.dtype.names:
            raise ValueError('r2 does not have key field %s'%name)

    def makekey(row):
        return tuple([row[name] for name in key])

    r1d = dict([(makekey(row),i) for i,row in enumerate(r1)])
    r2d = dict([(makekey(row),i) for i,row in enumerate(r2)])

    r1keys = set(r1d.keys())
    r2keys = set(r2d.keys())

    common_keys = r1keys & r2keys

    r1ind = np.array([r1d[k] for k in common_keys])
    r2ind = np.array([r2d[k] for k in common_keys])

    common_len = len(common_keys)
    left_len = right_len = 0
    if jointype == "outer" or jointype == "leftouter":
        left_keys = r1keys.difference(r2keys)
        left_ind = np.array([r1d[k] for k in left_keys])
        left_len = len(left_ind)

    if jointype == "outer":
        right_keys = r2keys.difference(r1keys)
        right_ind = np.array([r2d[k] for k in right_keys])
        right_len = len(right_ind)

    def key_desc(name):
        'if name is a string key, use the larger size of r1 or r2 before merging'
        dt1 = r1.dtype[name]
        if dt1.type != np.string_:
            return (name, dt1.descr[0][1])
        # the original code read r1.dtype here, which made the size
        # comparison a no-op; compare against r2 and use itemsize,
        # which is what actually distinguishes string sizes
        dt2 = r2.dtype[name]
        assert dt2.type == dt1.type
        if dt1.itemsize > dt2.itemsize:
            return (name, dt1.descr[0][1])
        else:
            return (name, dt2.descr[0][1])

    keydesc = [key_desc(name) for name in key]

    def mapped_r1field(name):
        """
        The column name in *newrec* that corresponds to the column in *r1*.
        """
        if name in key or name not in r2.dtype.names:
            return name
        else:
            return name + r1postfix

    def mapped_r2field(name):
        """
        The column name in *newrec* that corresponds to the column in *r2*.
        """
        if name in key or name not in r1.dtype.names:
            return name
        else:
            return name + r2postfix

    r1desc = [(mapped_r1field(desc[0]), desc[1]) for desc in r1.dtype.descr
              if desc[0] not in key]
    r2desc = [(mapped_r2field(desc[0]), desc[1]) for desc in r2.dtype.descr
              if desc[0] not in key]
    newdtype = np.dtype(keydesc + r1desc + r2desc)

    newrec = np.empty(common_len + left_len + right_len, dtype=newdtype)

    if jointype != 'inner' and defaults is not None:
        # fill in the defaults enmasse
        newrec_fields = newrec.dtype.fields.keys()
        for k, v in defaults.items():
            if k in newrec_fields:
                newrec[k] = v

    for field in r1.dtype.names:
        newfield = mapped_r1field(field)
        if common_len:
            newrec[newfield][:common_len] = r1[field][r1ind]
        if (jointype == "outer" or jointype == "leftouter") and left_len:
            newrec[newfield][common_len:(common_len+left_len)] = r1[field][left_ind]

    for field in r2.dtype.names:
        newfield = mapped_r2field(field)
        if field not in key and common_len:
            newrec[newfield][:common_len] = r2[field][r2ind]
        if jointype == "outer" and right_len:
            newrec[newfield][-right_len:] = r2[field][right_ind]

    newrec.sort(order=key)

    return rec_view(newrec)


def csv2rec(fname, comments='#', skiprows=0, checkrows=0, delimiter=',',
            converterd=None, names=None, missing='', missingd=None,
            use_mrecords=True):
    """
    Load data from comma/space/tab delimited file in *fname* into a
    numpy record array and return the record array.

    If *names* is *None*, a header row is required to automatically
    assign the recarray names.
    The headers will be lower cased, spaces will be converted to
    underscores, and illegal attribute name characters removed.

    If *names* is not *None*, it is a sequence of names to use for the
    column names.  In this case, it is assumed there is no header row.


    - *fname*: can be a filename or a file handle.  Support for gzipped
      files is automatic, if the filename ends in '.gz'

    - *comments*: the character used to indicate the start of a comment
      in the file

    - *skiprows*: is the number of rows from the top to skip

    - *checkrows*: is the number of rows to check to validate the column
      data type.  When set to zero all rows are validated.

    - *converterd*: if not *None*, is a dictionary mapping column number
      or munged column name to a converter function.

    - *names*: if not None, is a list of header names.  In this case, no
      header will be read from the file

    - *missingd* is a dictionary mapping munged column names to field
      values which signify that the field does not contain actual data
      and should be masked, e.g. '0000-00-00' or 'unused'

    - *missing*: a string whose value signals a missing field regardless
      of the column it appears in

    - *use_mrecords*: if True, return an mrecords.fromrecords record
      array if any of the data are missing

    If no rows are found, *None* is returned -- see
    :file:`examples/loadrec.py`
    """

    if converterd is None:
        converterd = dict()

    if missingd is None:
        missingd = {}

    import dateutil.parser
    import datetime
    parsedate = dateutil.parser.parse


    fh = cbook.to_filehandle(fname)


    class FH:
        """
        For space-delimited files, we want different behavior than
        comma or tab.  Generally, we want multiple spaces to be
        treated as a single separator, whereas with comma and tab we
        want multiple commas to return multiple (empty) fields.  The
        join/strip trick below effects this.
        """
        def __init__(self, fh):
            self.fh = fh

        def close(self):
            self.fh.close()

        def seek(self, arg):
            self.fh.seek(arg)

        def fix(self, s):
            return ' '.join(s.split())

        def next(self):
            return self.fix(self.fh.next())

        def __iter__(self):
            for line in self.fh:
                yield self.fix(line)

    if delimiter == ' ':
        fh = FH(fh)

    reader = csv.reader(fh, delimiter=delimiter)
    def process_skiprows(reader):
        if skiprows:
            for i, row in enumerate(reader):
                if i >= (skiprows-1):
                    break

        return fh, reader

    process_skiprows(reader)

    def ismissing(name, val):
        "Should the value val in column name be masked?"
if val == missing or val == missingd.get(name) or val == '': return True else: return False def with_default_value(func, default): def newfunc(name, val): if ismissing(name, val): return default else: return func(val) return newfunc def mybool(x): if x=='True': return True elif x=='False': return False else: raise ValueError('invalid bool') dateparser = dateutil.parser.parse mydateparser = with_default_value(dateparser, datetime.date(1,1,1)) myfloat = with_default_value(float, np.nan) myint = with_default_value(int, -1) mystr = with_default_value(str, '') mybool = with_default_value(mybool, None) def mydate(x): # try and return a date object d = dateparser(x) if d.hour>0 or d.minute>0 or d.second>0: raise ValueError('not a date') return d.date() mydate = with_default_value(mydate, datetime.date(1,1,1)) def get_func(name, item, func): # promote functions in this order funcmap = {mybool:myint,myint:myfloat, myfloat:mydate, mydate:mydateparser, mydateparser:mystr} try: func(name, item) except: if func==mystr: raise ValueError('Could not find a working conversion function') else: return get_func(name, item, funcmap[func]) # recurse else: return func # map column names that clash with builtins -- TODO - extend this list itemd = { 'return' : 'return_', 'file' : 'file_', 'print' : 'print_', } def get_converters(reader): converters = None for i, row in enumerate(reader): if i==0: converters = [mybool]*len(row) if checkrows and i>checkrows: break #print i, len(names), len(row) #print 'converters', zip(converters, row) for j, (name, item) in enumerate(zip(names, row)): func = converterd.get(j) if func is None: func = converterd.get(name) if func is None: #if not item.strip(): continue func = converters[j] if len(item.strip()): func = get_func(name, item, func) else: # how should we handle custom converters and defaults? 
                    func = with_default_value(func, None)
                converters[j] = func
        return converters

    # Get header and remove invalid characters
    needheader = names is None

    if needheader:
        for row in reader:
            #print 'csv2rec', row
            if len(row) and row[0].startswith(comments):
                continue
            headers = row
            break

        # remove these chars
        delete = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
        delete.add('"')

        names = []
        seen = dict()
        for i, item in enumerate(headers):
            item = item.strip().lower().replace(' ', '_')
            item = ''.join([c for c in item if c not in delete])
            if not len(item):
                item = 'column%d'%i

            item = itemd.get(item, item)
            cnt = seen.get(item, 0)
            if cnt>0:
                names.append(item + '_%d'%cnt)
            else:
                names.append(item)
            seen[item] = cnt+1
    else:
        if cbook.is_string_like(names):
            names = [n.strip() for n in names.split(',')]

    # get the converter functions by inspecting checkrows
    converters = get_converters(reader)
    if converters is None:
        raise ValueError('Could not find any valid data in CSV file')

    # reset the reader and start over
    fh.seek(0)
    reader = csv.reader(fh, delimiter=delimiter)
    process_skiprows(reader)

    if needheader:
        skipheader = reader.next()

    # iterate over the remaining rows and convert the data to date
    # objects, ints, or floats as appropriate
    rows = []
    rowmasks = []
    for i, row in enumerate(reader):
        if not len(row): continue
        if row[0].startswith(comments): continue
        rows.append([func(name, val) for func, name, val
                     in zip(converters, names, row)])
        rowmasks.append([ismissing(name, val)
                         for name, val in zip(names, row)])
    fh.close()

    if not len(rows):
        return None

    if use_mrecords and np.any(rowmasks):
        try:
            from numpy.ma import mrecords
        except ImportError:
            raise RuntimeError('numpy 1.05 or later is required for masked array support')
        else:
            r = mrecords.fromrecords(rows, names=names, mask=rowmasks)
    else:
        r = np.rec.fromrecords(rows, names=names)
    return r


# a series of classes for describing the format intentions of various rec views

class FormatObj:
    def tostr(self, x):
        return self.toval(x)

    def toval(self, x):
        return str(x)

    def fromstr(self, s):
        return s

class FormatString(FormatObj):
    def tostr(self, x):
        val = repr(x)
        return val[1:-1]

#class FormatString(FormatObj):
#    def tostr(self, x):
#        return '"%r"'%self.toval(x)

class FormatFormatStr(FormatObj):
    def __init__(self, fmt):
        self.fmt = fmt

    def tostr(self, x):
        if x is None: return 'None'
        return self.fmt%self.toval(x)

class FormatFloat(FormatFormatStr):
    def __init__(self, precision=4, scale=1.):
        FormatFormatStr.__init__(self, '%%1.%df'%precision)
        self.precision = precision
        self.scale = scale

    def toval(self, x):
        if x is not None:
            x = x * self.scale
        return x

    def fromstr(self, s):
        return float(s)/self.scale

class FormatInt(FormatObj):
    def tostr(self, x):
        return '%d'%int(x)

    def toval(self, x):
        return int(x)

    def fromstr(self, s):
        return int(s)

class FormatBool(FormatObj):
    def toval(self, x):
        return str(x)

    def fromstr(self, s):
        return bool(s)

class FormatPercent(FormatFloat):
    def __init__(self, precision=4):
        FormatFloat.__init__(self, precision, scale=100.)
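# An illustrative sketch, not part of the original module: the Format*
# classes compose by subclassing.  FormatPercentSign (a hypothetical
# name) appends a literal '%' to FormatPercent's scale-by-100 output,
# e.g. FormatPercentSign().tostr(0.25) -> '25.0000%'.
class FormatPercentSign(FormatPercent):
    def tostr(self, x):
        if x is None:
            return 'None'
        return FormatPercent.tostr(self, x) + '%'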
class FormatThousands(FormatFloat): def __init__(self, precision=4): FormatFloat.__init__(self, precision, scale=1e-3) class FormatMillions(FormatFloat): def __init__(self, precision=4): FormatFloat.__init__(self, precision, scale=1e-6) class FormatDate(FormatObj): def __init__(self, fmt): self.fmt = fmt def toval(self, x): if x is None: return 'None' return x.strftime(self.fmt) def fromstr(self, x): import dateutil.parser return dateutil.parser.parse(x).date() class FormatDatetime(FormatDate): def __init__(self, fmt='%Y-%m-%d %H:%M:%S'): FormatDate.__init__(self, fmt) def fromstr(self, x): import dateutil.parser return dateutil.parser.parse(x) defaultformatd = { np.bool_ : FormatBool(), np.int16 : FormatInt(), np.int32 : FormatInt(), np.int64 : FormatInt(), np.float32 : FormatFloat(), np.float64 : FormatFloat(), np.object_ : FormatObj(), np.string_ : FormatString(), } def get_formatd(r, formatd=None): 'build a formatd guaranteed to have a key for every dtype name' if formatd is None: formatd = dict() for i, name in enumerate(r.dtype.names): dt = r.dtype[name] format = formatd.get(name) if format is None: format = defaultformatd.get(dt.type, FormatObj()) formatd[name] = format return formatd def csvformat_factory(format): format = copy.deepcopy(format) if isinstance(format, FormatFloat): format.scale = 1. # override scaling for storage format.fmt = '%r' return format def rec2txt(r, header=None, padding=3, precision=3): """ Returns a textual representation of a record array. *r*: numpy recarray *header*: list of column headers *padding*: space between each column *precision*: number of decimal places to use for floats. Set to an integer to apply to all floats. Set to a list of integers to apply precision individually. Precision for non-floats is simply ignored. Example:: precision=[0,2,3] Output:: ID Price Return ABC 12.54 0.234 XYZ 6.32 -0.076 """ if cbook.is_numlike(precision): precision = [precision]*len(r.dtype) def get_type(item,atype=int): tdict = {None:int, int:float, float:str} try: atype(str(item)) except: return get_type(item,tdict[atype]) return atype def get_justify(colname, column, precision): ntype = type(column[0]) if ntype==np.str or ntype==np.str_ or ntype==np.string0 or ntype==np.string_: length = max(len(colname),column.itemsize) return 0, length+padding, "%s" # left justify if ntype==np.int or ntype==np.int16 or ntype==np.int32 or ntype==np.int64 or ntype==np.int8 or ntype==np.int_: length = max(len(colname),np.max(map(len,map(str,column)))) return 1, length+padding, "%d" # right justify # JDH: my powerbook does not have np.float96 using np 1.3.0 """ In [2]: np.__version__ Out[2]: '1.3.0.dev5948' In [3]: !uname -a Darwin Macintosh-5.local 9.4.0 Darwin Kernel Version 9.4.0: Mon Jun 9 19:30:53 PDT 2008; root:xnu-1228.5.20~1/RELEASE_I386 i386 i386 In [4]: np.float96 --------------------------------------------------------------------------- AttributeError Traceback (most recent call la """ if ntype==np.float or ntype==np.float32 or ntype==np.float64 or (hasattr(np, 'float96') and (ntype==np.float96)) or ntype==np.float_: fmt = "%." 
+ str(precision) + "f" length = max(len(colname),np.max(map(len,map(lambda x:fmt%x,column)))) return 1, length+padding, fmt # right justify return 0, max(len(colname),np.max(map(len,map(str,column))))+padding, "%s" if header is None: header = r.dtype.names justify_pad_prec = [get_justify(header[i],r.__getitem__(colname),precision[i]) for i, colname in enumerate(r.dtype.names)] justify_pad_prec_spacer = [] for i in range(len(justify_pad_prec)): just,pad,prec = justify_pad_prec[i] if i == 0: justify_pad_prec_spacer.append((just,pad,prec,0)) else: pjust,ppad,pprec = justify_pad_prec[i-1] if pjust == 0 and just == 1: justify_pad_prec_spacer.append((just,pad-padding,prec,0)) elif pjust == 1 and just == 0: justify_pad_prec_spacer.append((just,pad,prec,padding)) else: justify_pad_prec_spacer.append((just,pad,prec,0)) def format(item, just_pad_prec_spacer): just, pad, prec, spacer = just_pad_prec_spacer if just == 0: return spacer*' ' + str(item).ljust(pad) else: if get_type(item) == float: item = (prec%float(item)) elif get_type(item) == int: item = (prec%int(item)) return item.rjust(pad) textl = [] textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(header)])) for i, row in enumerate(r): textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(row)])) if i==0: textl[0] = textl[0].rstrip() text = os.linesep.join(textl) return text def rec2csv(r, fname, delimiter=',', formatd=None, missing='', missingd=None): """ Save the data from numpy recarray *r* into a comma-/space-/tab-delimited file. The record array dtype names will be used for column headers. *fname*: can be a filename or a file handle. Support for gzipped files is automatic, if the filename ends in '.gz' .. seealso:: :func:`csv2rec`: For information about *missing* and *missingd*, which can be used to fill in masked values into your CSV file. """ if missingd is None: missingd = dict() def with_mask(func): def newfunc(val, mask, mval): if mask: return mval else: return func(val) return newfunc formatd = get_formatd(r, formatd) funcs = [] for i, name in enumerate(r.dtype.names): funcs.append(with_mask(csvformat_factory(formatd[name]).tostr)) fh, opened = cbook.to_filehandle(fname, 'w', return_opened=True) writer = csv.writer(fh, delimiter=delimiter) header = r.dtype.names writer.writerow(header) # Our list of specials for missing values mvals = [] for name in header: mvals.append(missingd.get(name, missing)) ismasked = False if len(r): row = r[0] ismasked = hasattr(row, '_fieldmask') for row in r: if ismasked: row, rowmask = row.item(), row._fieldmask.item() else: rowmask = [False] * len(row) writer.writerow([func(val, mask, mval) for func, val, mask, mval in zip(funcs, row, rowmask, mvals)]) if opened: fh.close() def griddata(x,y,z,xi,yi): """ ``zi = griddata(x,y,z,xi,yi)`` fits a surface of the form *z* = *f*(*x*, *y*) to the data in the (usually) nonuniformly spaced vectors (*x*, *y*, *z*). :func:`griddata` interpolates this surface at the points specified by (*xi*, *yi*) to produce *zi*. *xi* and *yi* must describe a regular grid, can be either 1D or 2D, but must be monotonically increasing. A masked array is returned if any grid points are outside convex hull defined by input data (no extrapolation is done). Uses natural neighbor interpolation based on Delaunay triangulation. By default, this algorithm is provided by the :mod:`matplotlib.delaunay` package, written by Robert Kern. 
    The triangulation algorithm in this package is known to fail on some
    nearly pathological cases. For this reason, a separate toolkit
    (:mod:`mpl_toolkits.natgrid`) has been created that provides a more
    robust algorithm for triangulation and interpolation. This toolkit is
    based on the NCAR natgrid library, which contains code that is not
    redistributable under a BSD-compatible license.  When installed, this
    function will use the :mod:`mpl_toolkits.natgrid` algorithm, otherwise
    it will use the built-in :mod:`matplotlib.delaunay` package.

    The natgrid matplotlib toolkit can be downloaded from
    http://sourceforge.net/project/showfiles.php?group_id=80706&package_id=142792
    """
    try:
        from mpl_toolkits.natgrid import _natgrid, __version__
        _use_natgrid = True
    except ImportError:
        import matplotlib.delaunay as delaunay
        from matplotlib.delaunay import __version__
        _use_natgrid = False
    if not griddata._reported:
        if _use_natgrid:
            verbose.report('using natgrid version %s' % __version__)
        else:
            verbose.report('using delaunay version %s' % __version__)
        griddata._reported = True
    if xi.ndim != yi.ndim:
        raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
    if xi.ndim != 1 and xi.ndim != 2:
        raise TypeError("inputs xi and yi must be 1D or 2D.")
    if not len(x) == len(y) == len(z):
        raise TypeError("inputs x,y,z must all be 1D arrays of the same length")
    # remove masked points.
    if hasattr(z, 'mask'):
        x = x.compress(z.mask == False)
        y = y.compress(z.mask == False)
        z = z.compressed()
    if _use_natgrid:  # use natgrid toolkit if available.
        if xi.ndim == 2:
            xi = xi[0, :]
            yi = yi[:, 0]
        # override default natgrid internal parameters.
        _natgrid.seti('ext', 0)
        _natgrid.setr('nul', np.nan)
        # cast input arrays to doubles (this makes a copy)
        x = x.astype(np.float)
        y = y.astype(np.float)
        z = z.astype(np.float)
        xo = xi.astype(np.float)
        yo = yi.astype(np.float)
        if min(xo[1:] - xo[0:-1]) < 0 or min(yo[1:] - yo[0:-1]) < 0:
            raise ValueError('output grid defined by xi,yi must be monotone increasing')
        # allocate array for output (buffer will be overwritten by natgridd)
        zo = np.empty((yo.shape[0], xo.shape[0]), np.float)
        _natgrid.natgridd(x, y, z, xo, yo, zo)
    else:  # use Robert Kern's delaunay package from scikits (default)
        if xi.ndim != yi.ndim:
            raise TypeError("inputs xi and yi must have same number of dimensions (1 or 2)")
        if xi.ndim != 1 and xi.ndim != 2:
            raise TypeError("inputs xi and yi must be 1D or 2D.")
        if xi.ndim == 1:
            xi, yi = np.meshgrid(xi, yi)
        # triangulate data
        tri = delaunay.Triangulation(x, y)
        # interpolate data
        interp = tri.nn_interpolator(z)
        zo = interp(xi, yi)
    # mask points on grid outside convex hull of input data.
    if np.any(np.isnan(zo)):
        zo = np.ma.masked_where(np.isnan(zo), zo)
    return zo
griddata._reported = False

##################################################
# Linear interpolation algorithms
##################################################

def less_simple_linear_interpolation(x, y, xi, extrap=False):
    """
    This function provides simple (but somewhat less so than
    :func:`cbook.simple_linear_interpolation`) linear interpolation.
    :func:`simple_linear_interpolation` will give a list of point
    between a start and an end, while this does true linear
    interpolation at an arbitrary set of points.

    This is very inefficient linear interpolation meant to be used only
    for a small number of points in relatively non-intensive use cases.
    For real linear interpolation, use scipy.
""" if cbook.is_scalar(xi): xi = [xi] x = np.asarray(x) y = np.asarray(y) xi = np.asarray(xi) s = list(y.shape) s[0] = len(xi) yi = np.tile( np.nan, s ) for ii,xx in enumerate(xi): bb = x == xx if np.any(bb): jj, = np.nonzero(bb) yi[ii] = y[jj[0]] elif xx<x[0]: if extrap: yi[ii] = y[0] elif xx>x[-1]: if extrap: yi[ii] = y[-1] else: jj, = np.nonzero(x<xx) jj = max(jj) yi[ii] = y[jj] + (xx-x[jj])/(x[jj+1]-x[jj]) * (y[jj+1]-y[jj]) return yi def slopes(x,y): """ :func:`slopes` calculates the slope *y*'(*x*) The slope is estimated using the slope obtained from that of a parabola through any three consecutive points. This method should be superior to that described in the appendix of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russel W. Stineman (Creative Computing July 1980) in at least one aspect: Circles for interpolation demand a known aspect ratio between *x*- and *y*-values. For many functions, however, the abscissa are given in different dimensions, so an aspect ratio is completely arbitrary. The parabola method gives very similar results to the circle method for most regular cases but behaves much better in special cases. Norbert Nemec, Institute of Theoretical Physics, University or Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de (inspired by a original implementation by Halldor Bjornsson, Icelandic Meteorological Office, March 2006 halldor at vedur.is) """ # Cast key variables as float. x=np.asarray(x, np.float_) y=np.asarray(y, np.float_) yp=np.zeros(y.shape, np.float_) dx=x[1:] - x[:-1] dy=y[1:] - y[:-1] dydx = dy/dx yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1]) yp[0] = 2.0 * dy[0]/dx[0] - yp[1] yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2] return yp def stineman_interp(xi,x,y,yp=None): """ Given data vectors *x* and *y*, the slope vector *yp* and a new abscissa vector *xi*, the function :func:`stineman_interp` uses Stineman interpolation to calculate a vector *yi* corresponding to *xi*. Here's an example that generates a coarse sine curve, then interpolates over a finer abscissa:: x = linspace(0,2*pi,20); y = sin(x); yp = cos(x) xi = linspace(0,2*pi,40); yi = stineman_interp(xi,x,y,yp); plot(x,y,'o',xi,yi) The interpolation method is described in the article A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell W. Stineman. The article appeared in the July 1980 issue of Creative Computing with a note from the editor stating that while they were: not an academic journal but once in a while something serious and original comes in adding that this was "apparently a real solution" to a well known problem. For *yp* = *None*, the routine automatically determines the slopes using the :func:`slopes` routine. *x* is assumed to be sorted in increasing order. For values ``xi[j] < x[0]`` or ``xi[j] > x[-1]``, the routine tries an extrapolation. The relevance of the data obtained from this, of course, is questionable... Original implementation by Halldor Bjornsson, Icelandic Meteorolocial Office, March 2006 halldor at vedur.is Completely reworked and optimized for Python by Norbert Nemec, Institute of Theoretical Physics, University or Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de """ # Cast key variables as float. 
x=np.asarray(x, np.float_) y=np.asarray(y, np.float_) assert x.shape == y.shape N=len(y) if yp is None: yp = slopes(x,y) else: yp=np.asarray(yp, np.float_) xi=np.asarray(xi, np.float_) yi=np.zeros(xi.shape, np.float_) # calculate linear slopes dx = x[1:] - x[:-1] dy = y[1:] - y[:-1] s = dy/dx #note length of s is N-1 so last element is #N-2 # find the segment each xi is in # this line actually is the key to the efficiency of this implementation idx = np.searchsorted(x[1:-1], xi) # now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1] # except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1] # the y-values that would come out from a linear interpolation: sidx = s.take(idx) xidx = x.take(idx) yidx = y.take(idx) xidxp1 = x.take(idx+1) yo = yidx + sidx * (xi - xidx) # the difference that comes when using the slopes given in yp dy1 = (yp.take(idx)- sidx) * (xi - xidx) # using the yp slope of the left point dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1) # using the yp slope of the right point dy1dy2 = dy1*dy2 # The following is optimized for Python. The solution actually # does more calculations than necessary but exploiting the power # of numpy, this is far more efficient than coding a loop by hand # in Python yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1, ((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)), 0.0, 1/(dy1+dy2),)) return yi ################################################## # Code related to things in and around polygons ################################################## def inside_poly(points, verts): """ *points* is a sequence of *x*, *y* points. *verts* is a sequence of *x*, *y* vertices of a polygon. Return value is a sequence of indices into points for the points that are inside the polygon. """ res, = np.nonzero(nxutils.points_inside_poly(points, verts)) return res def poly_below(xmin, xs, ys): """ Given a sequence of *xs* and *ys*, return the vertices of a polygon that has a horizontal base at *xmin* and an upper bound at the *ys*. *xmin* is a scalar. Intended for use with :meth:`matplotlib.axes.Axes.fill`, eg:: xv, yv = poly_below(0, x, y) ax.fill(xv, yv) """ if ma.isMaskedArray(xs) or ma.isMaskedArray(ys): nx = ma else: nx = np xs = nx.asarray(xs) ys = nx.asarray(ys) Nx = len(xs) Ny = len(ys) assert(Nx==Ny) x = xmin*nx.ones(2*Nx) y = nx.ones(2*Nx) x[:Nx] = xs y[:Nx] = ys y[Nx:] = ys[::-1] return x, y def poly_between(x, ylower, yupper): """ Given a sequence of *x*, *ylower* and *yupper*, return the polygon that fills the regions between them. *ylower* or *yupper* can be scalar or iterable. If they are iterable, they must be equal in length to *x*. Return value is *x*, *y* arrays for use with :meth:`matplotlib.axes.Axes.fill`. """ if ma.isMaskedArray(ylower) or ma.isMaskedArray(yupper) or ma.isMaskedArray(x): nx = ma else: nx = np Nx = len(x) if not cbook.iterable(ylower): ylower = ylower*nx.ones(Nx) if not cbook.iterable(yupper): yupper = yupper*nx.ones(Nx) x = nx.concatenate( (x, x[::-1]) ) y = nx.concatenate( (yupper, ylower[::-1]) ) return x,y def is_closed_polygon(X): """ Tests whether first and last object in a sequence are the same. These are presumably coordinates on a polygonal curve, in which case this function tests if that curve is closed. 
""" return np.all(X[0] == X[-1]) def contiguous_regions(mask): """ return a list of (ind0, ind1) such that mask[ind0:ind1].all() is True and we cover all such regions TODO: this is a pure python implementation which probably has a much faster numpy impl """ in_region = None boundaries = [] for i, val in enumerate(mask): if in_region is None and val: in_region = i elif in_region is not None and not val: boundaries.append((in_region, i)) in_region = None if in_region is not None: boundaries.append((in_region, i+1)) return boundaries ################################################## # Vector and path length geometry calculations ################################################## def vector_lengths( X, P=2., axis=None ): """ Finds the length of a set of vectors in *n* dimensions. This is like the :func:`numpy.norm` function for vectors, but has the ability to work over a particular axis of the supplied array or matrix. Computes ``(sum((x_i)^P))^(1/P)`` for each ``{x_i}`` being the elements of *X* along the given axis. If *axis* is *None*, compute over all elements of *X*. """ X = np.asarray(X) return (np.sum(X**(P),axis=axis))**(1./P) def distances_along_curve( X ): """ Computes the distance between a set of successive points in *N* dimensions. Where *X* is an *M* x *N* array or matrix. The distances between successive rows is computed. Distance is the standard Euclidean distance. """ X = np.diff( X, axis=0 ) return vector_lengths(X,axis=1) def path_length(X): """ Computes the distance travelled along a polygonal curve in *N* dimensions. Where *X* is an *M* x *N* array or matrix. Returns an array of length *M* consisting of the distance along the curve at each point (i.e., the rows of *X*). """ X = distances_along_curve(X) return np.concatenate( (np.zeros(1), np.cumsum(X)) ) def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y): """ Converts a quadratic Bezier curve to a cubic approximation. The inputs are the *x* and *y* coordinates of the three control points of a quadratic curve, and the output is a tuple of *x* and *y* coordinates of the four control points of the cubic curve. """ # c0x, c0y = q0x, q0y c1x, c1y = q0x + 2./3. * (q1x - q0x), q0y + 2./3. * (q1y - q0y) c2x, c2y = c1x + 1./3. * (q2x - q0x), c1y + 1./3. * (q2y - q0y) # c3x, c3y = q2x, q2y return q0x, q0y, c1x, c1y, c2x, c2y, q2x, q2y
agpl-3.0
RomainBrault/scikit-learn
examples/decomposition/plot_kernel_pca.py
353
2011
""" ========== Kernel PCA ========== This example shows that Kernel PCA is able to find a projection of the data that makes data linearly separable. """ print(__doc__) # Authors: Mathieu Blondel # Andreas Mueller # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.decomposition import PCA, KernelPCA from sklearn.datasets import make_circles np.random.seed(0) X, y = make_circles(n_samples=400, factor=.3, noise=.05) kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10) X_kpca = kpca.fit_transform(X) X_back = kpca.inverse_transform(X_kpca) pca = PCA() X_pca = pca.fit_transform(X) # Plot results plt.figure() plt.subplot(2, 2, 1, aspect='equal') plt.title("Original space") reds = y == 0 blues = y == 1 plt.plot(X[reds, 0], X[reds, 1], "ro") plt.plot(X[blues, 0], X[blues, 1], "bo") plt.xlabel("$x_1$") plt.ylabel("$x_2$") X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50)) X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T # projection on the first principal component (in the phi space) Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape) plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower') plt.subplot(2, 2, 2, aspect='equal') plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro") plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo") plt.title("Projection by PCA") plt.xlabel("1st principal component") plt.ylabel("2nd component") plt.subplot(2, 2, 3, aspect='equal') plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro") plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo") plt.title("Projection by KPCA") plt.xlabel("1st principal component in space induced by $\phi$") plt.ylabel("2nd component") plt.subplot(2, 2, 4, aspect='equal') plt.plot(X_back[reds, 0], X_back[reds, 1], "ro") plt.plot(X_back[blues, 0], X_back[blues, 1], "bo") plt.title("Original space after inverse transform") plt.xlabel("$x_1$") plt.ylabel("$x_2$") plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35) plt.show()
bsd-3-clause
notkarol/banjin
experiment/python_word_matching_speed.py
1
4650
#!/usr/bin/python

# Takes in a dictionary of words
# Verifies that all functions return the same answers
# Generates random hands from the probability of getting tiles from the bunch
# Then prints out how long each function takes to find all matching words
# Generates various hand sizes to see if there's any scaling

import matplotlib.pyplot as plt
import numpy as np
import pickle
import os
import sys
import timeit

# Naive list way of matching wordbank
def f0_list(hand, wordbank):
    results = []
    for w_i in range(len(wordbank)):
        match = True
        for i in range(26):
            if hand[i] < wordbank[w_i][i]:
                match = False
                break
        if match:
            results.append(w_i)
    return results

# A for loop and some numpy
def f1_list(hand, wordbank):
    results = []
    for w_i in range(len(wordbank)):
        if min(list(map(lambda x: x[1] - x[0], zip(wordbank[w_i], hand)))) >= 0:
            results.append(w_i)
    return results

# Naive way using numpy
def f0_np(hand, wordbank):
    results = []
    for w_i in range(len(wordbank)):
        match = True
        for i in range(26):
            if hand[i] < wordbank[w_i, i]:
                match = False
                break
        if match:
            results.append(w_i)
    return results

# A for loop and some numpy
def f1_np(hand, wordbank):
    results = []
    for w_i in range(len(wordbank)):
        if not np.any((hand - wordbank[w_i]) < 0):
            results.append(w_i)
    return results

# A for loop and some numpy
def f2_np(hand, wordbank):
    results = []
    for w_i in range(len(wordbank)):
        if np.min(hand - wordbank[w_i]) >= 0:
            results.append(w_i)
    return results

# Vectorized sum and difference
def f3_np(hand, wordbank):
    return np.where(np.sum((wordbank - hand) > 0, axis=1) == 0)[0]

# vectorized just using any
def f4_np(hand, wordbank):
    return np.where(np.any(wordbank > hand, axis=1) == 0)[0]

# Prepare a 2D list and a 2D np array of letter frequencies
with open(sys.argv[1]) as f:
    words = [x.split()[0] for x in f.readlines()]
wordbank_list = [[0] * 26 for _ in range(len(words))]
wordbank_np = np.zeros((len(words), 26))
for w_i in range(len(words)):
    for letter in sorted(words[w_i]):
        pos = ord(letter) - 65
        wordbank_list[w_i][pos] += 1
        wordbank_np[w_i][pos] += 1

# Arrays for keeping track of functions and data-specific wordbanks
hand_sizes = list(range(2, 9))
functions = {'list' : [f0_list, f1_list],
             'numpy': [f0_np, f1_np, f2_np, f3_np, f4_np]}
wordbanks = {'list' : wordbank_list,
             'numpy': wordbank_np}
n_iter = 10 if len(sys.argv) < 3 else int(sys.argv[2])
timings = {}
for datatype in functions:
    timings[datatype] = np.zeros((max(hand_sizes) + 1, n_iter,
                                  len(functions[datatype])))

# Verify that our functions give the same answers
for datatype in functions:
    for func in functions[datatype]:
        print(datatype, func(wordbanks[datatype][len(wordbank_list) // 2],
                             wordbanks[datatype]))

# Time each function over randomly drawn hands of each size
imports = 'from __main__ import functions, wordbanks'
for counter in range(n_iter):
    for hand_size in hand_sizes:

        # Get a specific hand size
        hand = [13,3,3,6,18,3,4,3,12,2,2,5,3,8,11,3,2,9,6,9,6,3,3,2,3,2]
        while sum(hand) > hand_size:
            pos = np.random.randint(sum(hand))
            for i in range(len(hand)):
                pos -= hand[i]
                if pos < 0:
                    hand[i] -= 1
                    break
        hand = str(hand)

        # For this hand go wild
        for datatype in functions:
            for f_i in range(len(functions[datatype])):
                cmd = 'functions["%s"][%i](%s, wordbanks["%s"])' % (datatype, f_i, hand, datatype)
                timings[datatype][hand_size, counter, f_i] += timeit.timeit(cmd, imports, number=8)
    print("\rCompleted %.1f%%" % (100 * (counter + 1) / n_iter), end='')
print()

# Save words and timings in case we're doing a long-lasting operation
filename = 'word_matching_timings_%s.pkl' % os.path.basename(sys.argv[1])
with open(filename, 'wb') as f:
    print("Saving", filename)
    pickle.dump((words, wordbanks, timings), f)

# Show Results
for datatype in functions:
    means = np.mean(timings[datatype], axis=1)
    for f_i in range(means.shape[1]):
        plt.semilogy(hand_sizes, means[:, f_i][min(hand_sizes):],
                     label='%s F%i' % (datatype, f_i))
plt.legend(loc='center left', bbox_to_anchor=(0.85, 0.5))
plt.xlabel("Hand Size")
plt.ylabel("Execution Time")
plt.title("Word Matching")
plt.show()
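
# Illustrative footnote (not in the original script): the fastest variant,
# f4_np, reduces matching to a single broadcast comparison --
# np.where(np.any(wordbank > hand, axis=1) == 0)[0] keeps exactly those
# rows whose 26 letter counts all fit inside the hand.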
mit
PatrickOReilly/scikit-learn
examples/model_selection/plot_validation_curve.py
141
1931
""" ========================== Plotting Validation Curves ========================== In this plot you can see the training scores and validation scores of an SVM for different values of the kernel parameter gamma. For very low values of gamma, you can see that both the training score and the validation score are low. This is called underfitting. Medium values of gamma will result in high values for both scores, i.e. the classifier is performing fairly well. If gamma is too high, the classifier will overfit, which means that the training score is good but the validation score is poor. """ print(__doc__) import matplotlib.pyplot as plt import numpy as np from sklearn.datasets import load_digits from sklearn.svm import SVC from sklearn.model_selection import validation_curve digits = load_digits() X, y = digits.data, digits.target param_range = np.logspace(-6, -1, 5) train_scores, test_scores = validation_curve( SVC(), X, y, param_name="gamma", param_range=param_range, cv=10, scoring="accuracy", n_jobs=1) train_scores_mean = np.mean(train_scores, axis=1) train_scores_std = np.std(train_scores, axis=1) test_scores_mean = np.mean(test_scores, axis=1) test_scores_std = np.std(test_scores, axis=1) plt.title("Validation Curve with SVM") plt.xlabel("$\gamma$") plt.ylabel("Score") plt.ylim(0.0, 1.1) lw = 2 plt.semilogx(param_range, train_scores_mean, label="Training score", color="darkorange", lw=lw) plt.fill_between(param_range, train_scores_mean - train_scores_std, train_scores_mean + train_scores_std, alpha=0.2, color="darkorange", lw=lw) plt.semilogx(param_range, test_scores_mean, label="Cross-validation score", color="navy", lw=lw) plt.fill_between(param_range, test_scores_mean - test_scores_std, test_scores_mean + test_scores_std, alpha=0.2, color="navy", lw=lw) plt.legend(loc="best") plt.show()
bsd-3-clause
mortonjt/scipy
scipy/signal/wavelets.py
23
10483
from __future__ import division, print_function, absolute_import import numpy as np from numpy.dual import eig from scipy.special import comb from scipy import linspace, pi, exp from scipy.signal import convolve __all__ = ['daub', 'qmf', 'cascade', 'morlet', 'ricker', 'cwt'] def daub(p): """ The coefficients for the FIR low-pass filter producing Daubechies wavelets. p>=1 gives the order of the zero at f=1/2. There are 2p filter coefficients. Parameters ---------- p : int Order of the zero at f=1/2, can have values from 1 to 34. Returns ------- daub : ndarray Return """ sqrt = np.sqrt if p < 1: raise ValueError("p must be at least 1.") if p == 1: c = 1 / sqrt(2) return np.array([c, c]) elif p == 2: f = sqrt(2) / 8 c = sqrt(3) return f * np.array([1 + c, 3 + c, 3 - c, 1 - c]) elif p == 3: tmp = 12 * sqrt(10) z1 = 1.5 + sqrt(15 + tmp) / 6 - 1j * (sqrt(15) + sqrt(tmp - 15)) / 6 z1c = np.conj(z1) f = sqrt(2) / 8 d0 = np.real((1 - z1) * (1 - z1c)) a0 = np.real(z1 * z1c) a1 = 2 * np.real(z1) return f / d0 * np.array([a0, 3 * a0 - a1, 3 * a0 - 3 * a1 + 1, a0 - 3 * a1 + 3, 3 - a1, 1]) elif p < 35: # construct polynomial and factor it if p < 35: P = [comb(p - 1 + k, k, exact=1) for k in range(p)][::-1] yj = np.roots(P) else: # try different polynomial --- needs work P = [comb(p - 1 + k, k, exact=1) / 4.0**k for k in range(p)][::-1] yj = np.roots(P) / 4 # for each root, compute two z roots, select the one with |z|>1 # Build up final polynomial c = np.poly1d([1, 1])**p q = np.poly1d([1]) for k in range(p - 1): yval = yj[k] part = 2 * sqrt(yval * (yval - 1)) const = 1 - 2 * yval z1 = const + part if (abs(z1)) < 1: z1 = const - part q = q * [1, -z1] q = c * np.real(q) # Normalize result q = q / np.sum(q) * sqrt(2) return q.c[::-1] else: raise ValueError("Polynomial factorization does not work " "well for p too large.") def qmf(hk): """ Return high-pass qmf filter from low-pass Parameters ---------- hk : array_like Coefficients of high-pass filter. """ N = len(hk) - 1 asgn = [{0: 1, 1: -1}[k % 2] for k in range(N + 1)] return hk[::-1] * np.array(asgn) def cascade(hk, J=7): """ Return (x, phi, psi) at dyadic points ``K/2**J`` from filter coefficients. Parameters ---------- hk : array_like Coefficients of low-pass filter. J : int, optional Values will be computed at grid points ``K/2**J``. Default is 7. Returns ------- x : ndarray The dyadic points ``K/2**J`` for ``K=0...N * (2**J)-1`` where ``len(hk) = len(gk) = N+1``. phi : ndarray The scaling function ``phi(x)`` at `x`: ``phi(x) = sum(hk * phi(2x-k))``, where k is from 0 to N. psi : ndarray, optional The wavelet function ``psi(x)`` at `x`: ``phi(x) = sum(gk * phi(2x-k))``, where k is from 0 to N. `psi` is only returned if `gk` is not None. Notes ----- The algorithm uses the vector cascade algorithm described by Strang and Nguyen in "Wavelets and Filter Banks". It builds a dictionary of values and slices for quick reuse. Then inserts vectors into final vector at the end. 
""" N = len(hk) - 1 if (J > 30 - np.log2(N + 1)): raise ValueError("Too many levels.") if (J < 1): raise ValueError("Too few levels.") # construct matrices needed nn, kk = np.ogrid[:N, :N] s2 = np.sqrt(2) # append a zero so that take works thk = np.r_[hk, 0] gk = qmf(hk) tgk = np.r_[gk, 0] indx1 = np.clip(2 * nn - kk, -1, N + 1) indx2 = np.clip(2 * nn - kk + 1, -1, N + 1) m = np.zeros((2, 2, N, N), 'd') m[0, 0] = np.take(thk, indx1, 0) m[0, 1] = np.take(thk, indx2, 0) m[1, 0] = np.take(tgk, indx1, 0) m[1, 1] = np.take(tgk, indx2, 0) m *= s2 # construct the grid of points x = np.arange(0, N * (1 << J), dtype=np.float) / (1 << J) phi = 0 * x psi = 0 * x # find phi0, and phi1 lam, v = eig(m[0, 0]) ind = np.argmin(np.absolute(lam - 1)) # a dictionary with a binary representation of the # evaluation points x < 1 -- i.e. position is 0.xxxx v = np.real(v[:, ind]) # need scaling function to integrate to 1 so find # eigenvector normalized to sum(v,axis=0)=1 sm = np.sum(v) if sm < 0: # need scaling function to integrate to 1 v = -v sm = -sm bitdic = {} bitdic['0'] = v / sm bitdic['1'] = np.dot(m[0, 1], bitdic['0']) step = 1 << J phi[::step] = bitdic['0'] phi[(1 << (J - 1))::step] = bitdic['1'] psi[::step] = np.dot(m[1, 0], bitdic['0']) psi[(1 << (J - 1))::step] = np.dot(m[1, 1], bitdic['0']) # descend down the levels inserting more and more values # into bitdic -- store the values in the correct location once we # have computed them -- stored in the dictionary # for quicker use later. prevkeys = ['1'] for level in range(2, J + 1): newkeys = ['%d%s' % (xx, yy) for xx in [0, 1] for yy in prevkeys] fac = 1 << (J - level) for key in newkeys: # convert key to number num = 0 for pos in range(level): if key[pos] == '1': num += (1 << (level - 1 - pos)) pastphi = bitdic[key[1:]] ii = int(key[0]) temp = np.dot(m[0, ii], pastphi) bitdic[key] = temp phi[num * fac::step] = temp psi[num * fac::step] = np.dot(m[1, ii], pastphi) prevkeys = newkeys return x, phi, psi def morlet(M, w=5.0, s=1.0, complete=True): """ Complex Morlet wavelet. Parameters ---------- M : int Length of the wavelet. w : float, optional Omega0. Default is 5 s : float, optional Scaling factor, windowed from ``-s*2*pi`` to ``+s*2*pi``. Default is 1. complete : bool, optional Whether to use the complete or the standard version. Returns ------- morlet : (M,) ndarray See Also -------- scipy.signal.gausspulse Notes ----- The standard version:: pi**-0.25 * exp(1j*w*x) * exp(-0.5*(x**2)) This commonly used wavelet is often referred to simply as the Morlet wavelet. Note that this simplified version can cause admissibility problems at low values of w. The complete version:: pi**-0.25 * (exp(1j*w*x) - exp(-0.5*(w**2))) * exp(-0.5*(x**2)) The complete version of the Morlet wavelet, with a correction term to improve admissibility. For w greater than 5, the correction term is negligible. Note that the energy of the return wavelet is not normalised according to s. The fundamental frequency of this wavelet in Hz is given by ``f = 2*s*w*r / M`` where r is the sampling rate. """ x = linspace(-s * 2 * pi, s * 2 * pi, M) output = exp(1j * w * x) if complete: output -= exp(-0.5 * (w**2)) output *= exp(-0.5 * (x**2)) * pi**(-0.25) return output def ricker(points, a): """ Return a Ricker wavelet, also known as the "Mexican hat wavelet". It models the function: ``A (1 - x^2/a^2) exp(-x^2/2 a^2)``, where ``A = 2/sqrt(3a)pi^1/4``. Parameters ---------- points : int Number of points in `vector`. Will be centered around 0. a : scalar Width parameter of the wavelet. 
Returns ------- vector : (N,) ndarray Array of length `points` in shape of ricker curve. Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> points = 100 >>> a = 4.0 >>> vec2 = signal.ricker(points, a) >>> print(len(vec2)) 100 >>> plt.plot(vec2) >>> plt.show() """ A = 2 / (np.sqrt(3 * a) * (np.pi**0.25)) wsq = a**2 vec = np.arange(0, points) - (points - 1.0) / 2 xsq = vec**2 mod = (1 - xsq / wsq) gauss = np.exp(-xsq / (2 * wsq)) total = A * mod * gauss return total def cwt(data, wavelet, widths): """ Continuous wavelet transform. Performs a continuous wavelet transform on `data`, using the `wavelet` function. A CWT performs a convolution with `data` using the `wavelet` function, which is characterized by a width parameter and length parameter. Parameters ---------- data : (N,) ndarray data on which to perform the transform. wavelet : function Wavelet function, which should take 2 arguments. The first argument is the number of points that the returned vector will have (len(wavelet(width,length)) == length). The second is a width parameter, defining the size of the wavelet (e.g. standard deviation of a gaussian). See `ricker`, which satisfies these requirements. widths : (M,) sequence Widths to use for transform. Returns ------- cwt: (M, N) ndarray Will have shape of (len(widths), len(data)). Notes ----- >>> length = min(10 * width[ii], len(data)) >>> cwt[ii,:] = scipy.signal.convolve(data, wavelet(length, ... width[ii]), mode='same') Examples -------- >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> t = np.linspace(-1, 1, 200, endpoint=False) >>> sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2) >>> widths = np.arange(1, 31) >>> cwtmatr = signal.cwt(sig, signal.ricker, widths) >>> plt.imshow(cwtmatr, extent=[-1, 1, 1, 31], cmap='PRGn', aspect='auto', ... vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max()) >>> plt.show() """ output = np.zeros([len(widths), len(data)]) for ind, width in enumerate(widths): wavelet_data = wavelet(min(10 * width, len(data)), width) output[ind, :] = convolve(data, wavelet_data, mode='same') return output
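
# A minimal, hedged self-check (illustrative only; not part of the scipy
# module).  It exercises ricker() and cwt() on a short cosine signal and
# runs only when the module is executed directly.
if __name__ == '__main__':
    _t = np.linspace(-1, 1, 200, endpoint=False)
    _sig = np.cos(2 * np.pi * 7 * _t)
    _widths = np.arange(1, 31)
    _cwtmatr = cwt(_sig, ricker, _widths)
    print('cwt output shape: %s' % (_cwtmatr.shape,))  # expected: (30, 200)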
bsd-3-clause
broadinstitute/cms
cms/power/power_func.py
1
8625
## functions for analyzing empirical/simulated CMS output ## last updated 09.14.2017 vitti@broadinstitute.org import matplotlib as mp mp.use('agg') import matplotlib.pyplot as plt import numpy as np import math from scipy.stats import percentileofscore ################### ## DEFINE SCORES ## ################### def write_master_likesfile(writefilename, model, selpop, freq,basedir, miss = "neut",): '''adapted from run_likes_func.py''' writefile = open(writefilename, 'w') for score in ['ihs', 'nsl', 'delihh']: hitlikesfilename = basedir + model + "/" + score + "/likes_sel" + str(selpop) + "_" + str(freq) + "_causal.txt"#_smoothed.txt" misslikesfilename = basedir + model + "/" + score + "/likes_sel" + str(selpop) + "_" + str(freq) + "_" + miss + ".txt"#"_smoothed.txt" #assert(os.path.isfile(hitlikesfilename) and os.path.isfile(misslikesfilename)) writefile.write(hitlikesfilename + "\n" + misslikesfilename + "\n") for score in ['xpehh', 'fst', 'deldaf']: hitlikesfilename = basedir + model + "/" + score + "/likes_sel" + str(selpop) + "_choose_" + str(freq) + "_causal.txt"#_smoothed.txt" misslikesfilename = basedir + model + "/" + score + "/likes_sel" + str(selpop) + "_choose_" + str(freq) + "_" + miss + ".txt"#"_smoothed.txt" #assert(os.path.isfile(hitlikesfilename) and os.path.isfile(misslikesfilename)) writefile.write(hitlikesfilename + "\n" + misslikesfilename + "\n") writefile.close() print("wrote to: " + writefilename) return ############### ## REGION ID ## ############### def get_window(istart, physpos, scores, windowlen = 100000): window_scores = [scores[istart]] startpos = physpos[istart] pos = startpos iscore = istart while pos < (startpos + windowlen): iscore += 1 if iscore >= len(scores): break window_scores.append(scores[iscore]) pos = physpos[iscore] #print(str(pos) + " " + str(startpos)) return window_scores def check_outliers(scorelist, cutoff = 3): numscores = len(scorelist) outliers = [item for item in scorelist if item > cutoff] numoutliers = len(outliers) percentage = (float(numoutliers) / float(numscores)) * 100. return percentage def check_rep_windows(physpos, scores, windowlen = 100000, cutoff = 3, totalchrlen=1000000): ''' previous implementation: !!!! 
    this is going to result in false positives whenever I have a small
    uptick right near the edge of the replicate
    '''
    #check window defined by each snp as starting point
    rep_percentages = []
    numSnps = len(physpos)
    numWindows = 0
    #get exhaustive windows and stop at chrom edge
    for isnp in range(numSnps):
        if physpos[isnp] + windowlen < totalchrlen:
            numWindows += 1
        else:
            #print(str(physpos[isnp]) + "\t")
            break
    for iPos in range(numWindows):
        window_scores = get_window(iPos, physpos, scores, windowlen)
        percentage = check_outliers(window_scores, cutoff)
        rep_percentages.append(percentage)
    return rep_percentages

def merge_windows(chrom_signif, windowlen, maxGap=100000):
    print('should implement this using bedtools')
    starts, ends = [], []
    contig = False
    this_windowlen = 0
    starting_pos = 0
    if len(chrom_signif) > 0:
        for i_start in range(len(chrom_signif) - 1):
            if not contig:
                starts.append(chrom_signif[i_start])
                this_windowlen = windowlen  #unmerged, default
                starting_pos = chrom_signif[i_start]
            if ((chrom_signif[i_start] + this_windowlen) > chrom_signif[i_start + 1]):  #contiguous
                contig = True
                this_windowlen = chrom_signif[i_start + 1] + windowlen - starting_pos
            #or, could also be contiguous in the situation where the next snp is not
            #within this window because there doesn't exist such a snp
            elif chrom_signif[i_start + 1] >= (chrom_signif[i_start] + this_windowlen) and chrom_signif[i_start + 1] < (chrom_signif[i_start] + maxGap):
                contig = True
                this_windowlen = chrom_signif[i_start + 1] + windowlen - starting_pos
            else:
                contig = False
            if not contig:
                windowend = chrom_signif[i_start] + windowlen
                ends.append(windowend)
        if contig:  #last region is overlapped by its predecessor
            ends.append(chrom_signif[-1] + windowlen)
        else:
            starts.append(chrom_signif[-1])
            ends.append(chrom_signif[-1] + windowlen)
    assert len(starts) == len(ends)
    return starts, ends

##########################
## POWER & SIGNIFICANCE ##
##########################
def calc_pr(all_percentages, threshhold):
    numNeutReps_exceedThresh = 0
    totalnumNeutReps = len(all_percentages)
    for irep in range(totalnumNeutReps):
        if len(all_percentages[irep]) != 0:
            if max(all_percentages[irep]) > threshhold:
                numNeutReps_exceedThresh += 1
    numNeutReps_exceedThresh, totalnumNeutReps = float(numNeutReps_exceedThresh), float(totalnumNeutReps)
    if totalnumNeutReps != 0:
        pr = numNeutReps_exceedThresh / totalnumNeutReps
    else:
        pr = 0
        print('ERROR: empty set')
    return pr

def get_causal_rank(values, causal_val):
    if np.isnan(causal_val):
        return float('nan')
    assert(causal_val in values)
    cleanvals = []
    for item in values:
        if not np.isnan(item) and not np.isinf(item):
            cleanvals.append(item)
    values = cleanvals
    values.sort()
    values.reverse()
    causal_rank = values.index(causal_val)
    return causal_rank

def get_cdf_from_causal_ranks(causal_ranks):
    numbins = max(causal_ranks)  #? heuristic
    counts, bins = np.histogram(causal_ranks, bins=numbins, normed=True)  #doublecheck
    cdf = np.cumsum(counts)
    return bins, cdf

def get_pval(all_simscores, thisScore):
    r = np.searchsorted(all_simscores, thisScore)
    n = len(all_simscores)
    pval = 1. - ((r + 1.) / (n + 1.))
    if pval > 0:
        #pval *= nSnps #Bonferroni
        return pval
    else:
        #print("r: " + str(r) + " , n: " + str(n))
        pval = 1. - (r / (n + 1))
        #pval *= nSnps #Bonferroni
        return pval

###############
## VISUALIZE ##
###############
def quick_plot(ax, pos, val, ylabel, causal_index=-1):
    ax.scatter(pos, val, s=.8)
    if causal_index != -1:
        ax.scatter(pos[causal_index], val[causal_index], color='r', s=4)
    for tick in ax.yaxis.get_major_ticks():
        tick.label.set_fontsize('6')
    ax.set_ylabel(ylabel, fontsize='6')
    #ax.set_xlim([0, 1500000]) #make flexible?
    ax.yaxis.set_label_position('right')
    #ax.set_ylim([min(val), max(val)])
    return ax

def plot_dist(allvals, savefilename="/web/personal/vitti/test.png", numBins=1000):
    #get rid of nans and infs
    allvals = np.array(allvals)
    allvals = allvals[~np.isnan(allvals)]
    allvals = allvals[~np.isinf(allvals)]
    print("percentile for score = 10: " + str(percentileofscore(allvals, 10)))
    print("percentile for score = 15: " + str(percentileofscore(allvals, 15)))
    if len(allvals) > 0:
        f, ax = plt.subplots(1)
        ax.hist(allvals, bins=numBins)
        plt.savefig(savefilename)
        print('plotted to ' + savefilename)
    return

def plotManhattan(ax, neut_rep_scores, emp_scores, chrom_pos, nSnps, maxSkipVal=0, zscores=True):
    #norm.sf() gives the one-sided p-value for a z-score; only
    #percentileofscore is imported at module level, so import norm here.
    from scipy.stats import norm
    lastpos = 0
    for chrom in range(1, 23):
        ichrom = chrom - 1
        if ichrom % 2 == 0:
            plotcolor = "darkblue"
        else:
            plotcolor = "lightblue"
        if zscores == True:
            #Z SCORE cf SG email 103116
            #http://stackoverflow.com/questions/3496656/convert-z-score-z-value-standard-score-to-p-value-for-normal-distribution-in?rq=1
            pvalues = []
            for item in emp_scores[ichrom]:
                if item < maxSkipVal:  #speed up this process by ignoring anything obviously insignificant
                    pval = 1
                else:
                    pval = norm.sf(abs(item))
                pvalues.append(pval)
            print("calculated pvalues for chrom " + str(chrom))
            chrom_pos = range(lastpos, lastpos + len(pvalues))
            logtenpvals = [(-1. * math.log10(pval)) for pval in pvalues]
            ax.scatter(chrom_pos, logtenpvals, color=plotcolor, s=.5)
            lastpos = chrom_pos[-1]
        else:
            chrom_pos = range(lastpos, lastpos + len(emp_scores[ichrom]))
            ax.scatter(chrom_pos, emp_scores[ichrom], color=plotcolor, s=.5)
            lastpos = chrom_pos[-1]
    return ax

def plotManhattan_extended(ax, emp_scores, chrom_pos, chrom):
    '''makes a figure more like in Karlsson 2013 instead of Grossman 2013'''
    ax.plot(chrom_pos, emp_scores, linestyle='None', marker=".", markersize=.3, color="black")
    ax.set_ylabel('chr' + str(chrom), fontsize=6, rotation='horizontal')
    labels = ax.get_yticklabels()
    ax.set_yticklabels(labels, fontsize=6)
    ax.set_axis_bgcolor('LightGray')
    return ax
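
# Hedged worked example (comment only; not part of the original module):
# merge_windows() collapses significant positions that fall within
# windowlen (or maxGap) of each other into [start, end) regions, e.g.
#   merge_windows([100, 5000, 300000], windowlen=100000)
#   -> starts = [100, 300000], ends = [105000, 400000]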
bsd-2-clause
vshtanko/scikit-learn
examples/applications/plot_prediction_latency.py
234
11277
""" ================== Prediction Latency ================== This is an example showing the prediction latency of various scikit-learn estimators. The goal is to measure the latency one can expect when doing predictions either in bulk or atomic (i.e. one by one) mode. The plots represent the distribution of the prediction latency as a boxplot. """ # Authors: Eustache Diemert <eustache@diemert.fr> # License: BSD 3 clause from __future__ import print_function from collections import defaultdict import time import gc import numpy as np import matplotlib.pyplot as plt from scipy.stats import scoreatpercentile from sklearn.datasets.samples_generator import make_regression from sklearn.ensemble.forest import RandomForestRegressor from sklearn.linear_model.ridge import Ridge from sklearn.linear_model.stochastic_gradient import SGDRegressor from sklearn.svm.classes import SVR def _not_in_sphinx(): # Hack to detect whether we are running by the sphinx builder return '__file__' in globals() def atomic_benchmark_estimator(estimator, X_test, verbose=False): """Measure runtime prediction of each instance.""" n_instances = X_test.shape[0] runtimes = np.zeros(n_instances, dtype=np.float) for i in range(n_instances): instance = X_test[i, :] start = time.time() estimator.predict(instance) runtimes[i] = time.time() - start if verbose: print("atomic_benchmark runtimes:", min(runtimes), scoreatpercentile( runtimes, 50), max(runtimes)) return runtimes def bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose): """Measure runtime prediction of the whole input.""" n_instances = X_test.shape[0] runtimes = np.zeros(n_bulk_repeats, dtype=np.float) for i in range(n_bulk_repeats): start = time.time() estimator.predict(X_test) runtimes[i] = time.time() - start runtimes = np.array(list(map(lambda x: x / float(n_instances), runtimes))) if verbose: print("bulk_benchmark runtimes:", min(runtimes), scoreatpercentile( runtimes, 50), max(runtimes)) return runtimes def benchmark_estimator(estimator, X_test, n_bulk_repeats=30, verbose=False): """ Measure runtimes of prediction in both atomic and bulk mode. Parameters ---------- estimator : already trained estimator supporting `predict()` X_test : test input n_bulk_repeats : how many times to repeat when evaluating bulk mode Returns ------- atomic_runtimes, bulk_runtimes : a pair of `np.array` which contain the runtimes in seconds. """ atomic_runtimes = atomic_benchmark_estimator(estimator, X_test, verbose) bulk_runtimes = bulk_benchmark_estimator(estimator, X_test, n_bulk_repeats, verbose) return atomic_runtimes, bulk_runtimes def generate_dataset(n_train, n_test, n_features, noise=0.1, verbose=False): """Generate a regression dataset with the given parameters.""" if verbose: print("generating dataset...") X, y, coef = make_regression(n_samples=n_train + n_test, n_features=n_features, noise=noise, coef=True) X_train = X[:n_train] y_train = y[:n_train] X_test = X[n_train:] y_test = y[n_train:] idx = np.arange(n_train) np.random.seed(13) np.random.shuffle(idx) X_train = X_train[idx] y_train = y_train[idx] std = X_train.std(axis=0) mean = X_train.mean(axis=0) X_train = (X_train - mean) / std X_test = (X_test - mean) / std std = y_train.std(axis=0) mean = y_train.mean(axis=0) y_train = (y_train - mean) / std y_test = (y_test - mean) / std gc.collect() if verbose: print("ok") return X_train, y_train, X_test, y_test def boxplot_runtimes(runtimes, pred_type, configuration): """ Plot a new `Figure` with boxplots of prediction runtimes. 
Parameters ---------- runtimes : list of `np.array` of latencies in micro-seconds cls_names : list of estimator class names that generated the runtimes pred_type : 'bulk' or 'atomic' """ fig, ax1 = plt.subplots(figsize=(10, 6)) bp = plt.boxplot(runtimes, ) cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'], estimator_conf['complexity_computer']( estimator_conf['instance']), estimator_conf['complexity_label']) for estimator_conf in configuration['estimators']] plt.setp(ax1, xticklabels=cls_infos) plt.setp(bp['boxes'], color='black') plt.setp(bp['whiskers'], color='black') plt.setp(bp['fliers'], color='red', marker='+') ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax1.set_axisbelow(True) ax1.set_title('Prediction Time per Instance - %s, %d feats.' % ( pred_type.capitalize(), configuration['n_features'])) ax1.set_ylabel('Prediction Time (us)') plt.show() def benchmark(configuration): """Run the whole benchmark.""" X_train, y_train, X_test, y_test = generate_dataset( configuration['n_train'], configuration['n_test'], configuration['n_features']) stats = {} for estimator_conf in configuration['estimators']: print("Benchmarking", estimator_conf['instance']) estimator_conf['instance'].fit(X_train, y_train) gc.collect() a, b = benchmark_estimator(estimator_conf['instance'], X_test) stats[estimator_conf['name']] = {'atomic': a, 'bulk': b} cls_names = [estimator_conf['name'] for estimator_conf in configuration[ 'estimators']] runtimes = [1e6 * stats[clf_name]['atomic'] for clf_name in cls_names] boxplot_runtimes(runtimes, 'atomic', configuration) runtimes = [1e6 * stats[clf_name]['bulk'] for clf_name in cls_names] boxplot_runtimes(runtimes, 'bulk (%d)' % configuration['n_test'], configuration) def n_feature_influence(estimators, n_train, n_test, n_features, percentile): """ Estimate influence of the number of features on prediction time. 
Parameters ---------- estimators : dict of (name (str), estimator) to benchmark n_train : nber of training instances (int) n_test : nber of testing instances (int) n_features : list of feature-space dimensionality to test (int) percentile : percentile at which to measure the speed (int [0-100]) Returns: -------- percentiles : dict(estimator_name, dict(n_features, percentile_perf_in_us)) """ percentiles = defaultdict(defaultdict) for n in n_features: print("benchmarking with %d features" % n) X_train, y_train, X_test, y_test = generate_dataset(n_train, n_test, n) for cls_name, estimator in estimators.items(): estimator.fit(X_train, y_train) gc.collect() runtimes = bulk_benchmark_estimator(estimator, X_test, 30, False) percentiles[cls_name][n] = 1e6 * scoreatpercentile(runtimes, percentile) return percentiles def plot_n_features_influence(percentiles, percentile): fig, ax1 = plt.subplots(figsize=(10, 6)) colors = ['r', 'g', 'b'] for i, cls_name in enumerate(percentiles.keys()): x = np.array(sorted([n for n in percentiles[cls_name].keys()])) y = np.array([percentiles[cls_name][n] for n in x]) plt.plot(x, y, color=colors[i], ) ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5) ax1.set_axisbelow(True) ax1.set_title('Evolution of Prediction Time with #Features') ax1.set_xlabel('#Features') ax1.set_ylabel('Prediction Time at %d%%-ile (us)' % percentile) plt.show() def benchmark_throughputs(configuration, duration_secs=0.1): """benchmark throughput for different estimators.""" X_train, y_train, X_test, y_test = generate_dataset( configuration['n_train'], configuration['n_test'], configuration['n_features']) throughputs = dict() for estimator_config in configuration['estimators']: estimator_config['instance'].fit(X_train, y_train) start_time = time.time() n_predictions = 0 while (time.time() - start_time) < duration_secs: estimator_config['instance'].predict(X_test[0]) n_predictions += 1 throughputs[estimator_config['name']] = n_predictions / duration_secs return throughputs def plot_benchmark_throughput(throughputs, configuration): fig, ax = plt.subplots(figsize=(10, 6)) colors = ['r', 'g', 'b'] cls_infos = ['%s\n(%d %s)' % (estimator_conf['name'], estimator_conf['complexity_computer']( estimator_conf['instance']), estimator_conf['complexity_label']) for estimator_conf in configuration['estimators']] cls_values = [throughputs[estimator_conf['name']] for estimator_conf in configuration['estimators']] plt.bar(range(len(throughputs)), cls_values, width=0.5, color=colors) ax.set_xticks(np.linspace(0.25, len(throughputs) - 0.75, len(throughputs))) ax.set_xticklabels(cls_infos, fontsize=10) ymax = max(cls_values) * 1.2 ax.set_ylim((0, ymax)) ax.set_ylabel('Throughput (predictions/sec)') ax.set_title('Prediction Throughput for different estimators (%d ' 'features)' % configuration['n_features']) plt.show() ############################################################################### # main code start_time = time.time() # benchmark bulk/atomic prediction speed for various regressors configuration = { 'n_train': int(1e3), 'n_test': int(1e2), 'n_features': int(1e2), 'estimators': [ {'name': 'Linear Model', 'instance': SGDRegressor(penalty='elasticnet', alpha=0.01, l1_ratio=0.25, fit_intercept=True), 'complexity_label': 'non-zero coefficients', 'complexity_computer': lambda clf: np.count_nonzero(clf.coef_)}, {'name': 'RandomForest', 'instance': RandomForestRegressor(), 'complexity_label': 'estimators', 'complexity_computer': lambda clf: clf.n_estimators}, {'name': 'SVR', 
'instance': SVR(kernel='rbf'), 'complexity_label': 'support vectors', 'complexity_computer': lambda clf: len(clf.support_vectors_)}, ] } benchmark(configuration) # benchmark n_features influence on prediction speed percentile = 90 percentiles = n_feature_influence({'ridge': Ridge()}, configuration['n_train'], configuration['n_test'], [100, 250, 500], percentile) plot_n_features_influence(percentiles, percentile) # benchmark throughput throughputs = benchmark_throughputs(configuration) plot_benchmark_throughput(throughputs, configuration) stop_time = time.time() print("example run in %.2fs" % (stop_time - start_time))
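
# Hedged portability note (not in the original example):
# atomic_benchmark_estimator() passes a single 1-D row to predict(); newer
# scikit-learn releases require 2-D input, i.e.
# estimator.predict(instance.reshape(1, -1)).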
bsd-3-clause