Dataset columns: repo_name (string, 7-92 chars), path (string, 5-129 chars), copies (201 distinct values), size (string, 4-6 chars), content (string, 1.03k-375k chars), license (15 distinct values).
billy-inn/scikit-learn
examples/linear_model/plot_ransac.py
250
1673
""" =========================================== Robust linear model estimation using RANSAC =========================================== In this example we see how to robustly fit a linear model to faulty data using the RANSAC algorithm. """ import numpy as np from matplotlib import pyplot as plt from sklearn import linear_model, datasets n_samples = 1000 n_outliers = 50 X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1, n_informative=1, noise=10, coef=True, random_state=0) # Add outlier data np.random.seed(0) X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1)) y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers) # Fit line using all data model = linear_model.LinearRegression() model.fit(X, y) # Robustly fit linear model with RANSAC algorithm model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression()) model_ransac.fit(X, y) inlier_mask = model_ransac.inlier_mask_ outlier_mask = np.logical_not(inlier_mask) # Predict data of estimated models line_X = np.arange(-5, 5) line_y = model.predict(line_X[:, np.newaxis]) line_y_ransac = model_ransac.predict(line_X[:, np.newaxis]) # Compare estimated coefficients print("Estimated coefficients (true, normal, RANSAC):") print(coef, model.coef_, model_ransac.estimator_.coef_) plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers') plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers') plt.plot(line_X, line_y, '-k', label='Linear regressor') plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor') plt.legend(loc='lower right') plt.show()
bsd-3-clause
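A minimal sketch of the same robust fit against a current scikit-learn, assuming a version where RANSACRegressor can be constructed without an explicit base estimator (the default is already a linear model); the synthetic data and the injected outlier block are hypothetical stand-ins for the example above.

import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import RANSACRegressor

X, y = make_regression(n_samples=200, n_features=1, noise=10, random_state=0)
y[:20] += 100  # inject a block of outliers

ransac = RANSACRegressor(random_state=0).fit(X, y)
print("inliers kept:", ransac.inlier_mask_.sum(), "of", len(y))
print("robust slope:", ransac.estimator_.coef_)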
alexsavio/scikit-learn
examples/gaussian_process/plot_gpc_iris.py
81
2231
""" ===================================================== Gaussian process classification (GPC) on iris dataset ===================================================== This example illustrates the predicted probability of GPC for an isotropic and anisotropic RBF kernel on a two-dimensional version for the iris-dataset. The anisotropic RBF kernel obtains slightly higher log-marginal-likelihood by assigning different length-scales to the two feature dimensions. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import datasets from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.gaussian_process.kernels import RBF # import some data to play with iris = datasets.load_iris() X = iris.data[:, :2] # we only take the first two features. y = np.array(iris.target, dtype=int) h = .02 # step size in the mesh kernel = 1.0 * RBF([1.0]) gpc_rbf_isotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y) kernel = 1.0 * RBF([1.0, 1.0]) gpc_rbf_anisotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y) # create a mesh to plot in x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) titles = ["Isotropic RBF", "Anisotropic RBF"] plt.figure(figsize=(10, 5)) for i, clf in enumerate((gpc_rbf_isotropic, gpc_rbf_anisotropic)): # Plot the predicted probabilities. For that, we will assign a color to # each point in the mesh [x_min, m_max]x[y_min, y_max]. plt.subplot(1, 2, i + 1) Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape((xx.shape[0], xx.shape[1], 3)) plt.imshow(Z, extent=(x_min, x_max, y_min, y_max), origin="lower") # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=np.array(["r", "g", "b"])[y]) plt.xlabel('Sepal length') plt.ylabel('Sepal width') plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.xticks(()) plt.yticks(()) plt.title("%s, LML: %.3f" % (titles[i], clf.log_marginal_likelihood(clf.kernel_.theta))) plt.tight_layout() plt.show()
bsd-3-clause
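The core comparison in the example above, as a minimal sketch: the anisotropic RBF (one length-scale per feature) usually reaches a higher log-marginal-likelihood than the isotropic kernel on the two iris features.

from sklearn import datasets
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

iris = datasets.load_iris()
X, y = iris.data[:, :2], iris.target

for name, kernel in [("isotropic", 1.0 * RBF([1.0])),
                     ("anisotropic", 1.0 * RBF([1.0, 1.0]))]:
    clf = GaussianProcessClassifier(kernel=kernel).fit(X, y)
    print(name, "LML: %.3f" % clf.log_marginal_likelihood(clf.kernel_.theta))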
bouhlelma/smt
smt/sampling_methods/tests/test_sampling_method_examples.py
3
1403
import unittest

import matplotlib

matplotlib.use("Agg")


class Test(unittest.TestCase):
    def test_random(self):
        import numpy as np
        import matplotlib.pyplot as plt

        from smt.sampling_methods import Random

        xlimits = np.array([[0.0, 4.0], [0.0, 3.0]])
        sampling = Random(xlimits=xlimits)

        num = 50
        x = sampling(num)
        print(x.shape)

        plt.plot(x[:, 0], x[:, 1], "o")
        plt.xlabel("x")
        plt.ylabel("y")
        plt.show()

    def test_lhs(self):
        import numpy as np
        import matplotlib.pyplot as plt

        from smt.sampling_methods import LHS

        xlimits = np.array([[0.0, 4.0], [0.0, 3.0]])
        sampling = LHS(xlimits=xlimits)

        num = 50
        x = sampling(num)
        print(x.shape)

        plt.plot(x[:, 0], x[:, 1], "o")
        plt.xlabel("x")
        plt.ylabel("y")
        plt.show()

    def test_full_factorial(self):
        import numpy as np
        import matplotlib.pyplot as plt

        from smt.sampling_methods import FullFactorial

        xlimits = np.array([[0.0, 4.0], [0.0, 3.0]])
        sampling = FullFactorial(xlimits=xlimits)

        num = 50
        x = sampling(num)
        print(x.shape)

        plt.plot(x[:, 0], x[:, 1], "o")
        plt.xlabel("x")
        plt.ylabel("y")
        plt.show()


if __name__ == "__main__":
    unittest.main()
bsd-3-clause
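A minimal sketch of the sampling API these tests exercise, assuming the smt package is installed: a sampler is built from per-dimension limits and called with the number of points, returning an array of shape (num, n_dims).

import numpy as np
from smt.sampling_methods import LHS

xlimits = np.array([[0.0, 4.0], [0.0, 3.0]])
sampling = LHS(xlimits=xlimits)

x = sampling(50)
assert x.shape == (50, 2)             # one row per sample, one column per dimension
print(x.min(axis=0), x.max(axis=0))   # all points fall inside xlimits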
vortex-ape/scikit-learn
examples/datasets/plot_random_multilabel_dataset.py
278
3402
""" ============================================== Plot randomly generated multilabel dataset ============================================== This illustrates the `datasets.make_multilabel_classification` dataset generator. Each sample consists of counts of two features (up to 50 in total), which are differently distributed in each of two classes. Points are labeled as follows, where Y means the class is present: ===== ===== ===== ====== 1 2 3 Color ===== ===== ===== ====== Y N N Red N Y N Blue N N Y Yellow Y Y N Purple Y N Y Orange Y Y N Green Y Y Y Brown ===== ===== ===== ====== A star marks the expected sample for each class; its size reflects the probability of selecting that class label. The left and right examples highlight the ``n_labels`` parameter: more of the samples in the right plot have 2 or 3 labels. Note that this two-dimensional example is very degenerate: generally the number of features would be much greater than the "document length", while here we have much larger documents than vocabulary. Similarly, with ``n_classes > n_features``, it is much less likely that a feature distinguishes a particular class. """ from __future__ import print_function import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import make_multilabel_classification as make_ml_clf print(__doc__) COLORS = np.array(['!', '#FF3333', # red '#0198E1', # blue '#BF5FFF', # purple '#FCD116', # yellow '#FF7216', # orange '#4DBD33', # green '#87421F' # brown ]) # Use same random seed for multiple calls to make_multilabel_classification to # ensure same distributions RANDOM_SEED = np.random.randint(2 ** 10) def plot_2d(ax, n_labels=1, n_classes=3, length=50): X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2, n_classes=n_classes, n_labels=n_labels, length=length, allow_unlabeled=False, return_distributions=True, random_state=RANDOM_SEED) ax.scatter(X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4] ).sum(axis=1)), marker='.') ax.scatter(p_w_c[0] * length, p_w_c[1] * length, marker='*', linewidth=.5, edgecolor='black', s=20 + 1500 * p_c ** 2, color=COLORS.take([1, 2, 4])) ax.set_xlabel('Feature 0 count') return p_c, p_w_c _, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4)) plt.subplots_adjust(bottom=.15) p_c, p_w_c = plot_2d(ax1, n_labels=1) ax1.set_title('n_labels=1, length=50') ax1.set_ylabel('Feature 1 count') plot_2d(ax2, n_labels=3) ax2.set_title('n_labels=3, length=50') ax2.set_xlim(left=0, auto=True) ax2.set_ylim(bottom=0, auto=True) plt.show() print('The data was generated from (random_state=%d):' % RANDOM_SEED) print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t') for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T): print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
bsd-3-clause
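A hedged sketch of the generator the plot illustrates: with return_distributions=True it also returns the class priors p_c and the per-class feature distributions p_w_c that the stars in the figure are drawn from.

from sklearn.datasets import make_multilabel_classification

X, Y, p_c, p_w_c = make_multilabel_classification(
    n_samples=150, n_features=2, n_classes=3, n_labels=1, length=50,
    allow_unlabeled=False, return_distributions=True, random_state=0)

print("X:", X.shape, "Y:", Y.shape)   # feature counts and label indicator matrix
print("class priors:", p_c)           # P(C), one entry per class
print("P(w|C) shape:", p_w_c.shape)   # (n_features, n_classes)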
jefffohl/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/backends/__init__.py
72
2225
import matplotlib
import inspect
import warnings

# ipython relies on interactive_bk being defined here
from matplotlib.rcsetup import interactive_bk

__all__ = ['backend', 'show', 'draw_if_interactive',
           'new_figure_manager', 'backend_version']

backend = matplotlib.get_backend()  # validates, to match all_backends


def pylab_setup():
    'return new_figure_manager, draw_if_interactive and show for pylab'
    # Import the requested backend into a generic module object
    if backend.startswith('module://'):
        backend_name = backend[9:]
    else:
        backend_name = 'backend_' + backend
        backend_name = backend_name.lower()  # until we banish mixed case
        backend_name = 'matplotlib.backends.%s' % backend_name.lower()
    backend_mod = __import__(backend_name,
                             globals(), locals(), [backend_name])

    # Things we pull in from all backends
    new_figure_manager = backend_mod.new_figure_manager

    # image backends like pdf, agg or svg do not need to do anything
    # for "show" or "draw_if_interactive", so if they are not defined
    # by the backend, just do nothing
    def do_nothing_show(*args, **kwargs):
        frame = inspect.currentframe()
        fname = frame.f_back.f_code.co_filename
        if fname in ('<stdin>', '<ipython console>'):
            warnings.warn("""
Your currently selected backend, '%s' does not support show().
Please select a GUI backend in your matplotlibrc file ('%s')
or with matplotlib.use()""" %
                          (backend, matplotlib.matplotlib_fname()))

    def do_nothing(*args, **kwargs):
        pass

    backend_version = getattr(backend_mod, 'backend_version', 'unknown')
    show = getattr(backend_mod, 'show', do_nothing_show)
    draw_if_interactive = getattr(backend_mod, 'draw_if_interactive',
                                  do_nothing)

    # Additional imports which only happen for certain backends. This section
    # should probably disappear once all backends are uniform.
    if backend.lower() in ['wx', 'wxagg']:
        Toolbar = backend_mod.Toolbar
        __all__.append('Toolbar')

    matplotlib.verbose.report('backend %s version %s' %
                              (backend, backend_version))

    return new_figure_manager, draw_if_interactive, show
gpl-3.0
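A sketch of the user-facing side of this machinery, under the assumption that a non-interactive backend is wanted: the backend is chosen before pyplot is imported, and pylab_setup() later resolves that name into the concrete backend module.

import matplotlib
matplotlib.use("Agg")             # file-only backend; no GUI toolkit required
import matplotlib.pyplot as plt

print(matplotlib.get_backend())   # "agg" (older versions report "Agg")
fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])
fig.savefig("line.png")           # Agg still renders figures to files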
sohyongsheng/kaggle-carvana
plot_learning_curves.py
1
2337
import numpy
import matplotlib.pyplot
import pylab
import sys

def plot_learning_curves(experiment, epochs, train_losses, cross_validation_losses, dice_scores, x_limits = None, y_limits = None):
    axes = matplotlib.pyplot.figure().gca()
    x_axis = axes.get_xaxis()
    x_axis.set_major_locator(pylab.MaxNLocator(integer = True))
    matplotlib.pyplot.plot(epochs, train_losses)
    matplotlib.pyplot.plot(epochs, cross_validation_losses)
    matplotlib.pyplot.plot(epochs, dice_scores)
    matplotlib.pyplot.legend(['Training loss', 'Cross validation loss', 'Dice scores'])
    matplotlib.pyplot.xlabel('Epochs')
    matplotlib.pyplot.ylabel('Loss or Dice score')
    matplotlib.pyplot.title(experiment)
    if x_limits is not None:
        matplotlib.pyplot.xlim(x_limits)
    if y_limits is not None:
        matplotlib.pyplot.ylim(y_limits)
    output_directory = './results/' + experiment + '/learningCurves/'
    image_file = output_directory + 'learning_curves.png'
    matplotlib.pyplot.tight_layout()
    matplotlib.pyplot.savefig(image_file)

def process_results(experiment, x_limits, y_limits):
    output_directory = './results/' + experiment + '/learningCurves/'
    train_losses = numpy.load(output_directory + 'train_losses.npy')
    cross_validation_losses = numpy.load(output_directory + 'cross_validation_losses.npy')
    dice_scores = numpy.load(output_directory + 'dice_scores.npy')
    epochs = numpy.arange(1, len(train_losses) + 1)
    plot_learning_curves(experiment, epochs, train_losses, cross_validation_losses, dice_scores, x_limits, y_limits)
    training_curves = numpy.column_stack((epochs, train_losses, cross_validation_losses, dice_scores))
    numpy.savetxt(
        output_directory + 'training_curves.csv',
        training_curves,
        fmt = '%d, %.5f, %.5f, %.5f',
        header = 'Epochs, Train loss, Cross validation loss, Dice scores'
    )

if __name__ == '__main__':
    dice_score_limits = [0.995, 0.997]
    loss_limits = [0.02, 0.08]
    x_limits = [1, 150]
    # Assign either dice_score_limits or loss_limits depending on what you want to focus on.
    y_limits = loss_limits
    # experiments = ['experiment' + str(i) for i in [53, 60, 61]]
    experiments = ['my_solution']
    for experiment in experiments:
        process_results(experiment, x_limits, y_limits)
gpl-3.0
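A hedged usage sketch for the script above: it assumes ./results/<experiment>/learningCurves/ already holds the three .npy arrays written during training ('my_experiment' and the random arrays below are placeholders, not part of the original project).

import os
import numpy

experiment = 'my_experiment'                       # placeholder name
output_directory = './results/' + experiment + '/learningCurves/'
os.makedirs(output_directory, exist_ok=True)
for name in ['train_losses', 'cross_validation_losses', 'dice_scores']:
    numpy.save(output_directory + name + '.npy', numpy.random.rand(10))

# With plot_learning_curves.py importable, a single call produces the plot and CSV:
# from plot_learning_curves import process_results
# process_results(experiment, x_limits=[1, 10], y_limits=None)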
JesseLivezey/plankton
pylearn2/packaged_dependencies/theano_linear/unshared_conv/localdot.py
5
4839
""" WRITEME """ import logging from ..linear import LinearTransform from .unshared_conv import FilterActs, ImgActs from theano.compat.six.moves import xrange from theano.sandbox import cuda if cuda.cuda_available: import gpu_unshared_conv # register optimizations import numpy as np try: import matplotlib.pyplot as plt except ImportError: pass logger = logging.getLogger(__name__) class LocalDot(LinearTransform): """ LocalDot is an linear operation computationally similar to convolution in the spatial domain, except that whereas convolution applying a single filter or set of filters across an image, the LocalDot has different filterbanks for different points in the image. Mathematically, this is a general linear transform except for a restriction that filters are 0 outside of a spatially localized patch within the image. Image shape is 5-tuple: color_groups colors_per_group rows cols images Filterbank shape is 7-tuple (!) 0 row_positions 1 col_positions 2 colors_per_group 3 height 4 width 5 color_groups 6 filters_per_group The result of left-multiplication a 5-tuple with shape: filter_groups filters_per_group row_positions col_positions images Parameters ---------- filters : WRITEME irows : WRITEME Image rows icols : WRITEME Image columns subsample : WRITEME padding_start : WRITEME filters_shape : WRITEME message : WRITEME """ def __init__(self, filters, irows, icols=None, subsample=(1, 1), padding_start=None, filters_shape=None, message=""): LinearTransform.__init__(self, [filters]) self._filters = filters if filters_shape is None: self._filters_shape = tuple(filters.get_value(borrow=True).shape) else: self._filters_shape = tuple(filters_shape) self._irows = irows if icols is None: self._icols = irows else: self._icols = icols if self._icols != self._irows: raise NotImplementedError('GPU code at least needs square imgs') self._subsample = tuple(subsample) self._padding_start = padding_start if len(self._filters_shape) != 7: raise TypeError('need 7-tuple filter shape', self._filters_shape) if self._subsample[0] != self._subsample[1]: raise ValueError('subsampling must be same in rows and cols') self._filter_acts = FilterActs(self._subsample[0]) self._img_acts = ImgActs(module_stride=self._subsample[0]) if message: self._message = message else: self._message = filters.name def rmul(self, x): """ .. todo:: WRITEME """ assert x.ndim == 5 return self._filter_acts(x, self._filters) def rmul_T(self, x): """ .. todo:: WRITEME """ return self._img_acts(self._filters, x, self._irows, self._icols) def col_shape(self): """ .. todo:: WRITEME """ ishape = self.row_shape() + (-99,) fshape = self._filters_shape hshape, = self._filter_acts.infer_shape(None, (ishape, fshape)) assert hshape[-1] == -99 return hshape[:-1] def row_shape(self): """ .. todo:: WRITEME """ fshape = self._filters_shape fmodulesR, fmodulesC, fcolors, frows, fcols = fshape[:-2] fgroups, filters_per_group = fshape[-2:] return fgroups, fcolors, self._irows, self._icols def print_status(self): """ .. todo:: WRITEME """ raise NotImplementedError("TODO: fix dependence on non-existent " "ndarray_status function") """print ndarray_status( self._filters.get_value(borrow=True), msg='%s{%s}'% (self.__class__.__name__, self._message)) """ def imshow_gray(self): """ .. 
todo:: WRITEME """ filters = self._filters.get_value() modR, modC, colors, rows, cols, grps, fs_per_grp = filters.shape logger.info(filters.shape) rval = np.zeros(( modR * (rows + 1) - 1, modC * (cols + 1) - 1, )) for rr, modr in enumerate(xrange(0, rval.shape[0], rows + 1)): for cc, modc in enumerate(xrange(0, rval.shape[1], cols + 1)): rval[modr:modr + rows, modc:modc + cols] = filters[rr, cc, 0, :, :, 0, 0] plt.imshow(rval, cmap='gray') return rval
bsd-3-clause
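The shape conventions from the LocalDot docstring, illustrated with plain NumPy arrays so the 5-tuple image layout and 7-tuple filterbank layout are concrete; the sizes are illustrative assumptions, and no Theano is needed just to see the layout.

import numpy as np

# images: (color_groups, colors_per_group, rows, cols, images)
color_groups, colors_per_group, rows, cols, images = 1, 3, 32, 32, 8
imgs = np.zeros((color_groups, colors_per_group, rows, cols, images))

# filterbank: (row_positions, col_positions, colors_per_group,
#              height, width, color_groups, filters_per_group)
row_pos, col_pos, height, width, filters_per_group = 4, 4, 8, 8, 16
filters = np.zeros((row_pos, col_pos, colors_per_group,
                    height, width, color_groups, filters_per_group))

print(imgs.shape, filters.shape)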
snario/geopandas
geopandas/plotting.py
2
9645
from __future__ import print_function import numpy as np from six import next from six.moves import xrange def plot_polygon(ax, poly, facecolor='red', edgecolor='black', alpha=0.5): """ Plot a single Polygon geometry """ from descartes.patch import PolygonPatch a = np.asarray(poly.exterior) # without Descartes, we could make a Patch of exterior ax.add_patch(PolygonPatch(poly, facecolor=facecolor, alpha=alpha)) ax.plot(a[:, 0], a[:, 1], color=edgecolor) for p in poly.interiors: x, y = zip(*p.coords) ax.plot(x, y, color=edgecolor) def plot_multipolygon(ax, geom, facecolor='red', edgecolor='black', alpha=0.5): """ Can safely call with either Polygon or Multipolygon geometry """ if geom.type == 'Polygon': plot_polygon(ax, geom, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha) elif geom.type == 'MultiPolygon': for poly in geom.geoms: plot_polygon(ax, poly, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha) def plot_linestring(ax, geom, color='black', linewidth=1): """ Plot a single LineString geometry """ a = np.array(geom) ax.plot(a[:, 0], a[:, 1], color=color, linewidth=linewidth) def plot_multilinestring(ax, geom, color='red', linewidth=1): """ Can safely call with either LineString or MultiLineString geometry """ if geom.type == 'LineString': plot_linestring(ax, geom, color=color, linewidth=linewidth) elif geom.type == 'MultiLineString': for line in geom.geoms: plot_linestring(ax, line, color=color, linewidth=linewidth) def plot_point(ax, pt, marker='o', markersize=2): """ Plot a single Point geometry """ ax.plot(pt.x, pt.y, marker=marker, markersize=markersize, linewidth=0) def gencolor(N, colormap='Set1'): """ Color generator intended to work with one of the ColorBrewer qualitative color scales. Suggested values of colormap are the following: Accent, Dark2, Paired, Pastel1, Pastel2, Set1, Set2, Set3 (although any matplotlib colormap will work). """ from matplotlib import cm # don't use more than 9 discrete colors n_colors = min(N, 9) cmap = cm.get_cmap(colormap, n_colors) colors = cmap(range(n_colors)) for i in xrange(N): yield colors[i % n_colors] def plot_series(s, colormap='Set1', axes=None, **color_kwds): """ Plot a GeoSeries Generate a plot of a GeoSeries geometry with matplotlib. Parameters ---------- Series The GeoSeries to be plotted. Currently Polygon, MultiPolygon, LineString, MultiLineString and Point geometries can be plotted. colormap : str (default 'Set1') The name of a colormap recognized by matplotlib. Any colormap will work, but categorical colormaps are generally recommended. Examples of useful discrete colormaps include: Accent, Dark2, Paired, Pastel1, Pastel2, Set1, Set2, Set3 axes : matplotlib.pyplot.Artist (default None) axes on which to draw the plot **color_kwds : dict Color options to be passed on to plot_polygon Returns ------- matplotlib axes instance """ import matplotlib.pyplot as plt if axes is None: fig = plt.gcf() fig.add_subplot(111, aspect='equal') ax = plt.gca() else: ax = axes color = gencolor(len(s), colormap=colormap) for geom in s: if geom.type == 'Polygon' or geom.type == 'MultiPolygon': plot_multipolygon(ax, geom, facecolor=next(color), **color_kwds) elif geom.type == 'LineString' or geom.type == 'MultiLineString': plot_multilinestring(ax, geom, color=next(color)) elif geom.type == 'Point': plot_point(ax, geom) plt.draw() return ax def plot_dataframe(s, column=None, colormap=None, categorical=False, legend=False, axes=None, scheme=None, k=5, **color_kwds ): """ Plot a GeoDataFrame Generate a plot of a GeoDataFrame with matplotlib. 
If a column is specified, the plot coloring will be based on values in that column. Otherwise, a categorical plot of the geometries in the `geometry` column will be generated. Parameters ---------- GeoDataFrame The GeoDataFrame to be plotted. Currently Polygon, MultiPolygon, LineString, MultiLineString and Point geometries can be plotted. column : str (default None) The name of the column to be plotted. categorical : bool (default False) If False, colormap will reflect numerical values of the column being plotted. For non-numerical columns (or if column=None), this will be set to True. colormap : str (default 'Set1') The name of a colormap recognized by matplotlib. legend : bool (default False) Plot a legend (Experimental; currently for categorical plots only) axes : matplotlib.pyplot.Artist (default None) axes on which to draw the plot scheme : pysal.esda.mapclassify.Map_Classifier Choropleth classification schemes k : int (default 5) Number of classes (ignored if scheme is None) **color_kwds : dict Color options to be passed on to plot_polygon Returns ------- matplotlib axes instance """ import matplotlib.pyplot as plt from matplotlib.lines import Line2D from matplotlib.colors import Normalize from matplotlib import cm if column is None: return plot_series(s.geometry, colormap=colormap, axes=axes, **color_kwds) else: if s[column].dtype is np.dtype('O'): categorical = True if categorical: if colormap is None: colormap = 'Set1' categories = list(set(s[column].values)) categories.sort() valuemap = dict([(k, v) for (v, k) in enumerate(categories)]) values = [valuemap[k] for k in s[column]] else: values = s[column] if scheme is not None: values = __pysal_choro(values, scheme, k=k) cmap = norm_cmap(values, colormap, Normalize, cm) if axes is None: fig = plt.gcf() fig.add_subplot(111, aspect='equal') ax = plt.gca() else: ax = axes for geom, value in zip(s.geometry, values): if geom.type == 'Polygon' or geom.type == 'MultiPolygon': plot_multipolygon(ax, geom, facecolor=cmap.to_rgba(value), **color_kwds) elif geom.type == 'LineString' or geom.type == 'MultiLineString': plot_multilinestring(ax, geom, color=cmap.to_rgba(value)) # TODO: color point geometries elif geom.type == 'Point': plot_point(ax, geom) if legend: if categorical: patches = [] for value, cat in enumerate(categories): patches.append(Line2D([0], [0], linestyle="none", marker="o", alpha=color_kwds.get('alpha', 0.5), markersize=10, markerfacecolor=cmap.to_rgba(value))) ax.legend(patches, categories, numpoints=1, loc='best') else: # TODO: show a colorbar raise NotImplementedError plt.draw() return ax def __pysal_choro(values, scheme, k=5): """ Wrapper for choropleth schemes from PySAL for use with plot_dataframe Parameters ---------- values Series to be plotted scheme pysal.esda.mapclassify classificatin scheme ['Equal_interval'|'Quantiles'|'Fisher_Jenks'] k number of classes (2 <= k <=9) Returns ------- values Series with values replaced with class identifier if PySAL is available, otherwise the original values are used """ try: from pysal.esda.mapclassify import Quantiles, Equal_Interval, Fisher_Jenks schemes = {} schemes['equal_interval'] = Equal_Interval schemes['quantiles'] = Quantiles schemes['fisher_jenks'] = Fisher_Jenks s0 = scheme scheme = scheme.lower() if scheme not in schemes: scheme = 'quantiles' print('Unrecognized scheme: ', s0) print('Using Quantiles instead') if k < 2 or k > 9: print('Invalid k: ', k) print('2<=k<=9, setting k=5 (default)') k = 5 binning = schemes[scheme](values, k) values = binning.yb except 
ImportError: print('PySAL not installed, setting map to default') return values def norm_cmap(values, cmap, normalize, cm): """ Normalize and set colormap Parameters ---------- values Series or array to be normalized cmap matplotlib Colormap normalize matplotlib.colors.Normalize cm matplotlib.cm Returns ------- n_cmap mapping of normalized values to colormap (cmap) """ mn, mx = min(values), max(values) norm = normalize(vmin=mn, vmax=mx) n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap) return n_cmap
bsd-3-clause
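A hedged usage sketch for these helpers, assuming the old geopandas API this module belongs to (GeoSeries.plot dispatched to plot_series and accepted a colormap keyword; newer releases use cmap instead).

import geopandas as gpd
from shapely.geometry import Polygon

squares = gpd.GeoSeries([
    Polygon([(0, 0), (1, 0), (1, 1), (0, 1)]),
    Polygon([(2, 0), (3, 0), (3, 1), (2, 1)]),
])
ax = squares.plot(colormap='Set1')   # old API; dispatches to plot_series above
ax.figure.savefig('squares.png')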
shirtsgroup/pygo
analysis/MBAR_foldingcurve_umbrella.py
1
6397
#!/usr/bin/python2.4 import sys import numpy import pymbar # for MBAR analysis import timeseries # for timeseries analysis import os import os.path import pdb # for debugging from optparse import OptionParser import MBAR_pmfQz import wham import MBAR_pmfQ import cPickle def parse_args(): parser=OptionParser() #parser.add_option("-t", "--temprange", nargs=2, default=[300.0,450.0], type="float", dest="temprange", help="temperature range of replicas") parser.add_option("-r", "--replicas", default=24, type="int",dest="replicas", help="number of replicas (default: 24)") parser.add_option("-n", "--N_max", default=100000, type="int",dest="N_max", help="number of data points to read in (default: 100k)") parser.add_option("-s", "--skip", default=1, type="int",dest="skip", help="skip every n data points") parser.add_option("--direc", dest="direc", help="Qtraj_singleprot.txt file location") parser.add_option("--tfile", dest="tfile", default="/home/edz3fz/proteinmontecarlo/T.txt", help="file of temperatures (default: T.txt)") parser.add_option('--cpt', action="store_true", default=False, help="use checkpoint files, if they exist") (options,args) = parser.parse_args() return options def get_ukln(args,N_max,K,Z,beta_k,spring_constant,U_kn,z_kn,N_k): print 'Computing reduced potential energies...' u_kln = numpy.zeros([K,K,N_max], numpy.float32) for k in range(K): for l in range(K): #z_index = l/(len(T)) # z is outer dimension #T_index = l%(len(T)) # T is inner dimension dz = z_kn[k,0:N_k[k]] - Z[l] u_kln[k,l,0:N_k[k]] = beta_k[l] * (U_kn[k,0:N_k[k]] + spring_constant[l]*(dz)**2) return u_kln def get_mbar(args, beta_k, Z, U_kn, N_k, u_kln): if args.cpt: if os.path.exists('%s/f_k_foldingcurve.npy' % args.direc): print 'Reading in free energies from %s/f_k.npy' % args.direc f_k = numpy.load('%s/f_k.npy' % args.direc) mbar = pymbar.MBAR(u_kln,N_k,initial_f_k = f_k, maximum_iterations=0,verbose=True,use_optimized=1) return mbar print 'Using WHAM to generate historgram-based initial guess of dimensionless free energies f_k...' #beta_k = numpy.array(beta_k.tolist()*len(Z)) #f_k = wham.histogram_wham(beta_k, U_kn, N_k) print 'Initializing MBAR...' 
mbar = pymbar.MBAR(u_kln, N_k, #initial_f_k = f_k, use_optimized='', verbose=True) mbar_file = '%s/f_k_foldingcurve.npy' % args.direc print 'Saving free energies to %s' % mbar_file saving = True if saving: numpy.save(mbar_file, mbar.f_k) return mbar def main(): options = parse_args() kB = 0.00831447/4.184 #Boltzmann constant T = numpy.loadtxt(options.tfile) Z = numpy.arange(9,31.5,1.5) print 'Initial temperature states are', T print 'Distance states are', Z K = len(T)*len(Z) spring_constant = numpy.ones(K) # read in data U_kn, Q_kn, z_kn, N_max = MBAR_pmfQz.read_data(options, K, Z, T, spring_constant[0]) # subsample the data U_kn, Q_kn, z_kn, N_k = MBAR_pmfQz.subsample(U_kn,Q_kn,z_kn,K,N_max) # insert unweighted states T_new = numpy.arange(200,410,10) T_new = numpy.array([200,210,220,230,235,240,245,250,255,260,265,270,275,280,285,290,295,300,305,310,315,320,325,330,335,340,345,350,375,400]) Z_new = numpy.zeros(len(T_new)) K_new = len(T_new) print 'inserting unweighted temperature states', T_new # update states print 'Inserting blank states' Z = Z.tolist() Z = [x for x in Z for _ in range(len(T))] Z = numpy.concatenate((numpy.array(Z),Z_new)) T = numpy.array(T.tolist()*(K/len(T))) T = numpy.concatenate((T,T_new)) K += K_new spring_constant = numpy.concatenate((spring_constant,numpy.zeros(K_new))) print 'all temperature states are ', T print 'all surface states are ', Z print 'there are a total of %i states' % K N_k = numpy.concatenate((N_k,numpy.zeros(K_new))) U_kn = numpy.concatenate((U_kn,numpy.zeros([K_new,N_max]))) Q_kn = numpy.concatenate((Q_kn,numpy.zeros([K_new,N_max]))) z_kn = numpy.concatenate((z_kn,numpy.zeros([K_new,N_max]))) beta_k = 1/(kB*T) u_kln = get_ukln(options, N_max, K, Z, beta_k, spring_constant, U_kn, z_kn, N_k) print "Initializing MBAR..." # Use Adaptive Method (Both Newton-Raphson and Self-Consistent, testing which is better) mbar = get_mbar(options,beta_k,Z,U_kn,N_k,u_kln) print "Computing Expectations for E..." (E_expect, dE_expect) = mbar.computeExpectations(u_kln)*(beta_k)**(-1) print "Computing Expectations for E^2..." (E2_expect,dE2_expect) = mbar.computeExpectations(u_kln*u_kln)*(beta_k)**(-2) print "Computing Expectations for Q..." (Q,dQ) = mbar.computeExpectations(Q_kn) print "Computing Heat Capacity as ( <E^2> - <E>^2 ) / ( R*T^2 )..." 
Cv = numpy.zeros([K], numpy.float64) dCv = numpy.zeros([K], numpy.float64) for i in range(K): Cv[i] = (E2_expect[i] - (E_expect[i]*E_expect[i])) / ( kB * T[i] * T[i]) dCv[i] = 2*dE_expect[i]**2 / (kB *T[i]*T[i]) # from propagation of error numpy.save(options.direc+'/foldingcurve_umbrella',numpy.array([T, Q, dQ])) numpy.save(options.direc+'/heatcap_umbrella',numpy.array([T, Cv, dCv])) # pdb.set_trace() # # print 'Computing PMF(Q) at 325 K' # nbins = 25 # target_temperature = 325 # target_beta = 1.0/(kB*target_temperature) # nbins, bin_centers, bin_counts, bin_kn = get_bins(nbins,K,N_max,Q_kn) # u_kn = target_beta*U_kn # f_i, d2f_i = mbar.computePMF_states(u_kn, bin_kn, nbins) # pmf_file = '%s/pmfQ_umbrella_%i.pkl' % (options.direc, target_temperature) # f = file(pmf_file, 'wb') # print 'Saving target temperature, bin centers, f_i, df_i to %s' % pmf_file # cPickle.dump(target_temperature,f) # cPickle.dump(bin_centers,f) # cPickle.dump(f_i,f) # cPickle.dump(d2f_i,f) # f.close() # # try: # import matplotlib.pyplot as plt # plt.figure(1) # plt.plot(T,Q,'k') # plt.errorbar(T, Q, yerr=dQ) # plt.xlabel('Temperature (K)') # plt.ylabel('Q fraction native contacts') # plt.savefig(options.direc+'/foldingcurve_umbrella.png') # plt.show() # except: # pass # if __name__ == '__main__': main()
gpl-2.0
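A minimal sketch of the pymbar calls this script builds on, using the old-style API seen above (MBAR(u_kln, N_k) and computeExpectations returning a tuple); the two harmonic-oscillator states are a toy stand-in for the replica data.

import numpy
import pymbar

K, N = 2, 1000                                   # two states, N samples each
beta = numpy.array([1.0, 2.0])                   # inverse temperatures
x_kn = numpy.random.normal(0.0, 1.0 / numpy.sqrt(beta)[:, None], (K, N))
U_kn = 0.5 * x_kn ** 2                           # harmonic potential energy

# reduced potential of every sample from state k evaluated in every state l
u_kln = numpy.zeros((K, K, N))
for k in range(K):
    for l in range(K):
        u_kln[k, l, :] = beta[l] * U_kn[k, :]

mbar = pymbar.MBAR(u_kln, numpy.array([N, N]))
E_expect, dE_expect = mbar.computeExpectations(U_kn)
print(E_expect, dE_expect)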
rahul-c1/scikit-learn
benchmarks/bench_multilabel_metrics.py
11
7258
#!/usr/bin/env python """ A comparison of multilabel target formats and metrics over them """ from __future__ import division from __future__ import print_function from timeit import timeit from functools import partial import itertools import argparse import sys import matplotlib.pyplot as plt import scipy.sparse as sp import numpy as np from sklearn.datasets import make_multilabel_classification from sklearn.metrics import (f1_score, accuracy_score, hamming_loss, jaccard_similarity_score) from sklearn.utils.testing import ignore_warnings METRICS = { 'f1': f1_score, 'f1-by-sample': partial(f1_score, average='samples'), 'accuracy': accuracy_score, 'hamming': hamming_loss, 'jaccard': jaccard_similarity_score, } FORMATS = { 'sequences': lambda y: [list(np.flatnonzero(s)) for s in y], 'dense': lambda y: y, 'csr': lambda y: sp.csr_matrix(y), 'csc': lambda y: sp.csc_matrix(y), } @ignore_warnings def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())), formats=tuple(v for k, v in sorted(FORMATS.items())), samples=1000, classes=4, density=.2, n_times=5): """Times metric calculations for a number of inputs Parameters ---------- metrics : array-like of callables (1d or 0d) The metric functions to time. formats : array-like of callables (1d or 0d) These may transform a dense indicator matrix into multilabel representation. samples : array-like of ints (1d or 0d) The number of samples to generate as input. classes : array-like of ints (1d or 0d) The number of classes in the input. density : array-like of ints (1d or 0d) The density of positive labels in the input. n_times : int Time calling the metric n_times times. Returns ------- array of floats shaped like (metrics, formats, samples, classes, density) Time in seconds. """ metrics = np.atleast_1d(metrics) samples = np.atleast_1d(samples) classes = np.atleast_1d(classes) density = np.atleast_1d(density) formats = np.atleast_1d(formats) out = np.zeros((len(metrics), len(formats), len(samples), len(classes), len(density)), dtype=float) it = itertools.product(samples, classes, density) for i, (s, c, d) in enumerate(it): _, y_true = make_multilabel_classification(n_samples=s, n_features=1, n_classes=c, n_labels=d * c, return_indicator=True, random_state=42) _, y_pred = make_multilabel_classification(n_samples=s, n_features=1, n_classes=c, n_labels=d * c, return_indicator=True, random_state=84) for j, f in enumerate(formats): f_true = f(y_true) f_pred = f(y_pred) for k, metric in enumerate(metrics): t = timeit(partial(metric, f_true, f_pred), number=n_times) out[k, j].flat[i] = t return out def _tabulate(results, metrics, formats): """Prints results by metric and format Uses the last ([-1]) value of other fields """ column_width = max(max(len(k) for k in formats) + 1, 8) first_width = max(len(k) for k in metrics) head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats)) row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats)) print(head_fmt.format('Metric', *formats, cw=column_width, fw=first_width)) for metric, row in zip(metrics, results[:, :, -1, -1, -1]): print(row_fmt.format(metric, *row, cw=column_width, fw=first_width)) def _plot(results, metrics, formats, title, x_ticks, x_label, format_markers=('x', '|', 'o', '+'), metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')): """ Plot the results by metric, format and some other variable given by x_label """ fig = plt.figure('scikit-learn multilabel metrics benchmarks') plt.title(title) ax = fig.add_subplot(111) for i, metric in enumerate(metrics): for j, format in enumerate(formats): 
ax.plot(x_ticks, results[i, j].flat, label='{}, {}'.format(metric, format), marker=format_markers[j], color=metric_colors[i % len(metric_colors)]) ax.set_xlabel(x_label) ax.set_ylabel('Time (s)') ax.legend() plt.show() if __name__ == "__main__": ap = argparse.ArgumentParser() ap.add_argument('metrics', nargs='*', default=sorted(METRICS), help='Specifies metrics to benchmark, defaults to all. ' 'Choices are: '.format(sorted(METRICS))) ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS), help='Specifies multilabel formats to benchmark ' '(defaults to all).') ap.add_argument('--samples', type=int, default=1000, help='The number of samples to generate') ap.add_argument('--classes', type=int, default=10, help='The number of classes') ap.add_argument('--density', type=float, default=.2, help='The average density of labels per sample') ap.add_argument('--plot', choices=['classes', 'density', 'samples'], default=None, help='Plot time with respect to this parameter varying ' 'up to the specified value') ap.add_argument('--n-steps', default=10, type=int, help='Plot this many points for each metric') ap.add_argument('--n-times', default=5, type=int, help="Time performance over n_times trials") args = ap.parse_args() if args.plot is not None: max_val = getattr(args, args.plot) if args.plot in ('classes', 'samples'): min_val = 2 else: min_val = 0 steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:] if args.plot in ('classes', 'samples'): steps = np.unique(np.round(steps).astype(int)) setattr(args, args.plot, steps) if args.metrics is None: args.metrics = sorted(METRICS) if args.formats is None: args.formats = sorted(FORMATS) results = benchmark([METRICS[k] for k in args.metrics], [FORMATS[k] for k in args.formats], args.samples, args.classes, args.density, args.n_times) _tabulate(results, args.metrics, args.formats) if args.plot is not None: print('Displaying plot', file=sys.stderr) title = ('Multilabel metrics with %s' % ', '.join('{0}={1}'.format(field, getattr(args, field)) for field in ['samples', 'classes', 'density'] if args.plot != field)) _plot(results, args.metrics, args.formats, title, steps, args.plot)
bsd-3-clause
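The timing pattern the benchmark uses, in isolation: bind a metric and its inputs with functools.partial, then let timeit call it a fixed number of times (the random indicator matrices are illustrative).

from functools import partial
from timeit import timeit

import numpy as np
from sklearn.metrics import f1_score

y_true = np.random.randint(0, 2, size=(1000, 4))
y_pred = np.random.randint(0, 2, size=(1000, 4))

t = timeit(partial(f1_score, y_true, y_pred, average='samples'), number=5)
print('5 calls took %.4f s' % t)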
dominicmeroux/Reading-In-and-Analyzing-Calendar-Data-by-Interfacing-Between-MySQL-and-Python
Utilization-Report-MySQL.py
1
18653
from __future__ import print_function from icalendar import * from datetime import date, datetime, timedelta import mysql.connector from mysql.connector import errorcode import pickle import csv import pandas from pandas.io import sql import matplotlib.pyplot as plt import xlsxwriter import numpy as np import os import re import glob import pytz from StringIO import StringIO #from zipfile import ZipFile from urllib import urlopen import calendar_parser as cp # for calendar_parser, I downloaded the Python file created for this package # https://github.com/oblique63/Python-GoogleCalendarParser/blob/master/calendar_parser.py # and saved it in the working directory with my Python file (Jupyter Notebook file). # In calendar_parser.py, their function _fix_timezone is very crucial for my code to # display the correct local time. USER = # enter database username PASS = # enter database password HOST = # enter hostname, e.g. '127.0.0.1' cnx = mysql.connector.connect(user=USER, password=PASS, host=HOST) cursor = cnx.cursor() # Approach / Code modified from MySQL Connector web page DB_NAME = "CalDb" # 1) Creates database if it doesn't already exist # 2) Then connects to the database def create_database(cursor): try: cursor.execute( "CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(DB_NAME)) except mysql.connector.Error as err: print("Failed creating database: {}".format(err)) exit(1) try: cnx.database = DB_NAME except mysql.connector.Error as err: if err.errno == errorcode.ER_BAD_DB_ERROR: create_database(cursor) cnx.database = DB_NAME else: print(err) exit(1) # Create table specifications TABLES = {} TABLES['eBike'] = ( "CREATE TABLE IF NOT EXISTS `eBike` (" " `eBikeName` varchar(10)," " `Organizer` varchar(100)," " `Created` datetime NOT NULL," " `Start` datetime NOT NULL," " `End` datetime NOT NULL" ") ENGINE=InnoDB") # If table does not already exist, this code will create it based on specifications for name, ddl in TABLES.iteritems(): try: print("Creating table {}: ".format(name), end='') cursor.execute(ddl) except mysql.connector.Error as err: if err.errno == errorcode.ER_TABLE_EXISTS_ERROR: print("already exists.") else: print(err.msg) else: print("OK") # Obtain current count from each calendar to read in and add additional entries only cursor.execute("SELECT COUNT(*) FROM eBike WHERE eBikeName = 'Gold'") GoldExistingCount = cursor.fetchall() cursor.execute("SELECT COUNT(*) FROM eBike WHERE eBikeName = 'Blue'") BlueExistingCount = cursor.fetchall() # Declare lists eBikeName = [] Organizer = [] DTcreated = [] DTstart = [] DTend = [] Counter = 0 Cal1URL = # Google Calendar URL (from Calendar Settings -> Private Address) Cal2URL = # URL of second Google Calendar...can scale this code to as many calendars as desired # at an extremily large number (e.g. entire company level), could modify and use parallel processing (e.g. 
PySpark) Blue = Cal1URL Gold = Cal2URL URL_list = [Blue, Gold] for i in URL_list: Counter = 0 b = urlopen(i) cal = Calendar.from_ical(b.read()) timezones = cal.walk('VTIMEZONE') if (i == Blue): BlueLen = len(cal.walk()) elif (i == Gold): GoldLen = len(cal.walk()) #print (cal) #print ("Stuff") #print (cal.subcomponents) for k in cal.walk(): if k.name == "VEVENT": Counter += 1 if (i == Blue): if BlueLen - Counter > GoldExistingCount[0][0]: eBikeName.append('Blue') Organizer.append( re.sub(r'mailto:', "", str(k.get('ORGANIZER') ) ) ) DTcreated.append( cp._fix_timezone( k.decoded('CREATED'), pytz.timezone(timezones[0]['TZID']) ) ) DTstart.append( cp._fix_timezone( k.decoded('DTSTART'), pytz.timezone(timezones[0]['TZID']) ) ) DTend.append( cp._fix_timezone( k.decoded('DTEND'), pytz.timezone(timezones[0]['TZID']) ) ) #print (k.property_items('ATTENDEE')) elif (i == Gold): if GoldLen - Counter > BlueExistingCount[0][0]: eBikeName.append('Gold') Organizer.append( re.sub(r'mailto:', "", str(k.get('ORGANIZER') ) ) ) DTcreated.append( cp._fix_timezone( k.decoded('CREATED'), pytz.timezone(timezones[0]['TZID']) ) ) DTstart.append( cp._fix_timezone( k.decoded('DTSTART'), pytz.timezone(timezones[0]['TZID']) ) ) DTend.append( cp._fix_timezone( k.decoded('DTEND'), pytz.timezone(timezones[0]['TZID']) ) ) b.close() # Now that calendar data is fully read in, create a list with data in a format for # entering into the MySQL database. # # At this point, if the MySQL Connector component is not desired, other approaches # include creating a Pandas dataframe or something else. # For reference, a Pandas dataframe could be created with the following command: # df = pandas.DataFrame({'ORGANIZER' : Organizer,'CREATED' : DTcreated, 'DTSTART' : DTstart,'DTEND': DTend}) eBikeData = [] ##################################################### for i in range(len(DTcreated)): # Add in condition that the organizer email address cannot be 'none' or any other P&T Management email if (Organizer[i] != 'None' and Organizer[i] != 'lauren.bennett@berkeley.edu' and Organizer[i] != 'dmeroux@berkeley.edu' and Organizer[i] != 'berkeley.edu_534da9tjgdsahifulshf42lfbo@group.calendar.google.com'): eBikeData.append((eBikeName[i], Organizer[i], DTcreated[i], DTstart[i], DTend[i])) # Insert calendar data into MySQL table eBike cursor.executemany("INSERT INTO eBike (eBikeName, Organizer, Created, Start, End) VALUES (%s, %s, %s, %s, %s)", eBikeData) cnx.commit() # Find emails associated with reservations created at latest 6 days ago cursor.execute("SELECT DISTINCT Organizer FROM eBike WHERE DATEDIFF(CURDATE(), Start) <= 6 AND DATEDIFF(CURDATE(), Start) >= 0") WeeklyEmail = cursor.fetchall() Email = [] for i in range(len(WeeklyEmail)): Email.append(WeeklyEmail[i][0]) if(Email[i] != 'None'): print(Email[i]) # https://xlsxwriter.readthedocs.org # Workbook Document Name workbook = xlsxwriter.Workbook('E-BikeUpdate' + datetime.strftime(datetime.now(), "%Y-%m-%d") + '.xlsx') # Define 'bold' format bold = workbook.add_format({'bold': True}) format1 = workbook.add_format({'bold': 1, 'bg_color': '#3CDAE5', 'font_color': '#092A51'}) format2 = workbook.add_format({'bold': 1, 'bg_color': '#DA7BD0', 'font_color': '#A50202'}) # Add Intro Sheet worksheet = workbook.add_worksheet('INTRO') worksheet.write('A1', 'Sheet', bold) worksheet.write('A2', 'Ebike_Rides_by_User') worksheet.write('A3', 'Trips_by_Res_Time') worksheet.write('A4', 'Trips_by_Weekday') worksheet.write('A5', 'Utilization') worksheet.write('A6', 'Aggregate_Advance_Reservation') 
worksheet.write('A7', 'Time_Series_Advance_Reservation') worksheet.write('B1', 'Description', bold) worksheet.write('B2', 'Total E-Bike Rides by User Email') worksheet.write('B3', 'Total E-Bike Rides by Reservation Hour') worksheet.write('B4', 'Total E-Bike Rides by Weekday') worksheet.write('B5', 'Average and Maximum Percent and Hours Utilization') worksheet.write('B6', 'Number of Days E-Bikes Were Reserved in Advance, by Count of Reservations') worksheet.write('B7', 'Number of Days E-Bikes Were Reserved in Advance, by Reservation Start Datetime') ### Total e-Bike Rides by User cursor.execute("SELECT Organizer, COUNT(*) AS Total_Rides FROM eBike GROUP BY Organizer ORDER BY Total_Rides DESC;") TotalRides_by_User = cursor.fetchall() # Worksheet Name worksheet1 = workbook.add_worksheet('Ebike_Rides_by_User') # Column Names worksheet1.write('A1', 'User', bold) worksheet1.write('B1', 'Total Rides', bold) # Declare Starting Point for row, col row = 1 col = 0 # Iterate over the data and write it out row by row for UserEmail, UserRideCount in (TotalRides_by_User): worksheet1.write(row, col, UserEmail) worksheet1.write(row, col + 1, UserRideCount) row += 1 # Conditional Formatting: E-bike Users with 20+ Rides worksheet1.conditional_format('B1:B9999', {'type': 'cell', 'criteria': '>=', 'value': 20, 'format': format1}) ### Total Trips by Reservation Time cursor.execute("SELECT EXTRACT(HOUR FROM Start) AS Hour_24, DATE_FORMAT(Start, '%h %p') AS Reservation_Time, COUNT(*) AS Total_Rides FROM eBike GROUP BY Reservation_Time, Hour_24 ORDER BY Hour_24 ASC") Trips_by_Time = cursor.fetchall() # Worksheet Name worksheet2 = workbook.add_worksheet('Trips_by_Res_Time') # Data. # Column Names worksheet2.write('A1', 'Reservation Start Time', bold) worksheet2.write('B1', 'Total Rides', bold) # Declare Starting Point for row, col row = 1 col = 0 # Iterate over the data and write it out row by row for Hour_24, Reservation_Time, Total_Rides in (Trips_by_Time): worksheet2.write(row, col, Reservation_Time) worksheet2.write(row, col + 1, Total_Rides) row += 1 # Add Chart chart = workbook.add_chart({'type': 'line'}) # Add Data to Chart chart.add_series({ 'categories': '=Trips_by_Res_Time!$A$2:$A$16', 'values': '=Trips_by_Res_Time!$B$2:$B$16', 'fill': {'color': '#791484'}, 'border': {'color': '#52B7CB'} }) # Format Chart chart.set_title({ 'name': 'Total Rides by Reservation Start Time', 'name_font': { 'name': 'Calibri', 'color': '#52B7CB', }, }) chart.set_x_axis({ 'name': 'Reservation Start Time', 'empty_cells': 'gaps', 'name_font': { 'name': 'Calibri', 'color': '#52B7CB' }, 'num_font': { 'name': 'Arial', 'color': '#52B7CB', }, }) chart.set_y_axis({ 'name': 'Total Rides', 'empty_cells': 'gaps', 'name_font': { 'name': 'Calibri', 'color': '#52B7CB' }, 'num_font': { 'italic': True, 'color': '#52B7CB', }, }) # Remove Legend chart.set_legend({'position': 'none'}) # Insert Chart worksheet2.insert_chart('E1', chart) # GO TO END OF DATA ### Total Trips by Weekday cursor.execute("SELECT DAYNAME(Start) AS Weekday, COUNT(*) AS Total_Rides FROM eBike GROUP BY Weekday ORDER BY FIELD(Weekday, 'MONDAY', 'TUESDAY', 'WEDNESDAY', 'THURSDAY', 'FRIDAY', 'SATURDAY', 'SUNDAY')") Trips_by_Weekday = cursor.fetchall() # Worksheet Name worksheet3 = workbook.add_worksheet('Trips_by_Weekday') # Column Names worksheet3.write('A1', 'Weekday', bold) worksheet3.write('B1', 'Total Rides', bold) # Declare Starting Point for row, col row = 1 col = 0 # Iterate over the data and write it out row by row for Weekday, Total_Rides_by_Weekday in 
(Trips_by_Weekday): worksheet3.write(row, col, Weekday) worksheet3.write(row, col + 1, Total_Rides_by_Weekday) row += 1 # Add Chart chart = workbook.add_chart({'type': 'line'}) # Add Data to Chart chart.add_series({ 'categories': '=Trips_by_Weekday!$A$2:$A$8)', 'values': '=Trips_by_Weekday!$B$2:$B$8)', 'fill': {'color': '#791484'}, 'border': {'color': '#52B7CB'} }) # Format Chart chart.set_title({ 'name': 'Total Rides by Weekday', 'name_font': { 'name': 'Calibri', 'color': '#52B7CB', }, }) chart.set_x_axis({ 'name': 'Weekday', 'name_font': { 'name': 'Calibri', 'color': '#52B7CB' }, 'num_font': { 'name': 'Arial', 'color': '#52B7CB', }, }) chart.set_y_axis({ 'name': 'Total Rides', 'name_font': { 'name': 'Calibri', 'color': '#52B7CB' }, 'num_font': { 'italic': True, 'color': '#52B7CB', }, }) # Remove Legend chart.set_legend({'position': 'none'}) # Insert Chart worksheet3.insert_chart('E1', chart) ### Average and Maximum Hours and Percent Utilization by Weekday cursor.execute("SELECT DAYNAME(Start) AS Weekday, MAX((HOUR(End - Start)*60 + MINUTE(End - Start))/60) AS Max_Hours, (MAX((HOUR(End - Start)*60 + MINUTE(End - Start))/60)/8)*100 AS Max_PCT_Utilization, AVG((HOUR(End - Start)*60 + MINUTE(End - Start))/60) AS Avg_Hours, (AVG((HOUR(End - Start)*60 + MINUTE(End - Start))/60)/8)*100 AS Avg_PCT_Utilization FROM eBike WHERE (((HOUR(End - Start)*60 + MINUTE(End - Start))/60)/8)*100 < 95 GROUP BY Weekday ORDER BY FIELD(Weekday, 'MONDAY', 'TUESDAY', 'WEDNESDAY', 'THURSDAY', 'FRIDAY', 'SATURDAY', 'SUNDAY')") Avg_Max_Hours_PCTutilization_by_Weekday = cursor.fetchall() # Worksheet Name worksheet4 = workbook.add_worksheet('Utilization') # Column Names worksheet4.write('A1', 'Weekday', bold) worksheet4.write('B1', 'Maximum Reservation Duration (hrs)', bold) worksheet4.write('C1', 'Maximum Percentage Utilization', bold) worksheet4.write('D1', 'Average Reservation Duration (hrs)', bold) worksheet4.write('E1', 'Average Percent Utilization', bold) worksheet4.write('F1', 'NOTE: A small handfull of outliers above 95% utilization are excluded', bold) # Declare Starting Point for row, col row = 1 col = 0 # Iterate over the data and write it out row by row for Weekday_AMH, Max_Hours, Max_PCT_Utilization, Avg_Hours, Avg_PCT_Utilization in (Avg_Max_Hours_PCTutilization_by_Weekday): worksheet4.write(row, col, Weekday_AMH) worksheet4.write(row, col + 1, Max_Hours) worksheet4.write(row, col + 2, Max_PCT_Utilization) worksheet4.write(row, col + 3, Avg_Hours) worksheet4.write(row, col + 4, Avg_PCT_Utilization) row += 1 # Conditional Formatting: Percent Utilization Greater Than 50 worksheet4.conditional_format('E2:E8', {'type': 'cell', 'criteria': '>=', 'value': 30, 'format': format1}) ############################################ cursor.execute("SELECT Start, End, DAYNAME(Start) AS Weekday, ((HOUR(End - Start)*60 + MINUTE(End - Start))/60) AS Hours, (((HOUR(End - Start)*60 + MINUTE(End - Start))/60)/8)*100 AS PCT_Utilization FROM eBike ORDER BY (((HOUR(End - Start)*60 + MINUTE(End - Start))/60)/8)*100 DESC") Utilization = cursor.fetchall() worksheet4.write('A11', 'Reservation Start', bold) worksheet4.write('B11', 'Reservation End', bold) worksheet4.write('C11', 'Weekday', bold) worksheet4.write('D11', 'Hours Reserved', bold) worksheet4.write('E11', 'Percent Utilization', bold) row += 3 col = 0 count = 12 for Start, End, Day, Hour, PCT_Utilization in (Utilization): worksheet4.write(row, col, Start) ########################## https://xlsxwriter.readthedocs.io/working_with_dates_and_time.html worksheet4.write(row, col + 
1, End) ##### worksheet4.write(row, col + 2, Day) ##### worksheet4.write(row, col + 3, Hour) worksheet4.write(row, col + 4, PCT_Utilization) row += 1 if (PCT_Utilization > 95.0): count += 1 # Add Chart chart = workbook.add_chart({'type': 'column'}) # Add Data to Chart chart.add_series({ 'values': '=Utilization!$E$'+str(count)+':$E$'+str(len(Utilization)), 'fill': {'color': '#52B7CB'}, 'border': {'color': '#52B7CB'} }) count = 0 # Format Chart chart.set_title({ 'name': 'Percent Utilization', 'name_font': { 'name': 'Calibri', 'color': '#52B7CB', }, }) chart.set_x_axis({ 'name': 'Reservation', 'name_font': { 'name': 'Calibri', 'color': '#52B7CB' }, 'num_font': { 'name': 'Arial', 'color': '#52B7CB', }, }) chart.set_y_axis({ 'name': 'Percent Utilization', 'name_font': { 'name': 'Calibri', 'color': '#52B7CB' }, 'num_font': { 'italic': True, 'color': '#52B7CB', }, }) # Remove Legend chart.set_legend({'position': 'none'}) # Insert Chart worksheet4.insert_chart('G4', chart) #### ### How far in advance reservations are created # How far in advance reservations are created cursor.execute("SELECT DATEDIFF(Start, Created) AS Days_Advance_Reservation, COUNT(*) AS Number_Reserved_Trips FROM eBike WHERE DATEDIFF(Start, Created) >= 0 GROUP BY Days_Advance_Reservation ORDER BY Days_Advance_Reservation DESC") Advance_Reservation = cursor.fetchall() # Worksheet Name worksheet5 = workbook.add_worksheet('Aggregate_Advance_Reservation') # Column Names worksheet5.write('A1', 'Days E-Bike was Reserved Ahead of Time', bold) worksheet5.write('B1', 'Total Reservations', bold) # Declare Starting Point for row, col row = 1 col = 0 # Iterate over the data and write it out row by row for Days_Advance_Reservation, Number_Reserved_Trips in (Advance_Reservation): worksheet5.write(row, col, Days_Advance_Reservation) worksheet5.write(row, col + 1, Number_Reserved_Trips) row += 1 worksheet5.conditional_format('B2:B9999', {'type': 'cell', 'criteria': '>=', 'value': 5, 'format': format2}) # Time series of how far in advance reservations are created cursor.execute("SELECT Start, DATEDIFF(Start, Created) AS Days_Advance_Reservation FROM eBike WHERE DATEDIFF(Start, Created) > 0 ORDER BY Start ASC") Time_Series_Advance_Reservation = cursor.fetchall() Starts = [] for i in range(0, len(Time_Series_Advance_Reservation)): Starts.append(str(Time_Series_Advance_Reservation[i][0])) # Worksheet Name worksheet6 = workbook.add_worksheet('Time_Series_Advance_Reservation') # Column Names worksheet6.write('A1', 'Reservation Start Date', bold) worksheet6.write('B1', 'Days E-Bike was Reserved Ahead of Time', bold) # Declare Starting Point for row, col row = 1 col = 0 # Iterate over the data and write it out row by row for StartVal in Starts: worksheet6.write(row, col, StartVal) row += 1 row = 1 for Start, Days_Advance_Reservation in (Time_Series_Advance_Reservation): worksheet6.write(row, col + 1, Days_Advance_Reservation) row += 1 # Add Chart chart = workbook.add_chart({'type': 'line'}) worksheet6.conditional_format('B2:B9999', {'type': 'cell', 'criteria': '>=', 'value': 5, 'format': format2}) workbook.close() cursor.close() cnx.close()
mit
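A hedged sketch of the icalendar pattern at the heart of this script: parse an .ics feed and walk its VEVENT components. The inline ICS text is a stand-in for the private Google Calendar URLs that are deliberately left blank above.

from icalendar import Calendar

ics_text = """BEGIN:VCALENDAR
BEGIN:VEVENT
SUMMARY:Blue e-bike reservation
DTSTART:20240102T090000Z
DTEND:20240102T110000Z
END:VEVENT
END:VCALENDAR"""

cal = Calendar.from_ical(ics_text)
for component in cal.walk():
    if component.name == "VEVENT":
        print(component.get('SUMMARY'),
              component.decoded('DTSTART'),
              component.decoded('DTEND'))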
gviejo/ThalamusPhysio
python/main_pop_pca.py
1
15802
import numpy as np import pandas as pd # from matplotlib.pyplot import plot,show,draw import scipy.io from functions import * import _pickle as cPickle import time import os, sys import ipyparallel import neuroseries as nts data_directory = '/mnt/DataGuillaume/MergedData/' datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#') # to know which neurons to keep theta_mod, theta_ses = loadThetaMod('/mnt/DataGuillaume/MergedData/THETA_THAL_mod.pickle', datasets, return_index=True) theta = pd.DataFrame( index = theta_ses['rem'], columns = ['phase', 'pvalue', 'kappa'], data = theta_mod['rem']) tmp2 = theta.index[theta.isnull().any(1)].values tmp3 = theta.index[(theta['pvalue'] > 0.01).values].values tmp = np.unique(np.concatenate([tmp2,tmp3])) theta_modth = theta.drop(tmp, axis = 0) neurons_index = theta_modth.index.values bins1 = np.arange(-1005, 1010, 25)*1000 times = np.floor(((bins1[0:-1] + (bins1[1] - bins1[0])/2)/1000)).astype('int') premeanscore = {i:{'rem':pd.DataFrame(index = [], columns = ['mean', 'std']),'rip':pd.DataFrame(index = times, columns = [])} for i in range(3)}# BAD posmeanscore = {i:{'rem':pd.DataFrame(index = [], columns = ['mean', 'std']),'rip':pd.DataFrame(index = times, columns = [])} for i in range(3)}# BAD bins2 = np.arange(-1012.5,1025,25)*1000 tsmax = {i:pd.DataFrame(columns = ['pre', 'pos']) for i in range(3)} clients = ipyparallel.Client() print(clients.ids) dview = clients.direct_view() def compute_pop_pca(session): data_directory = '/mnt/DataGuillaume/MergedData/' import numpy as np import scipy.io import scipy.stats import _pickle as cPickle import time import os, sys import neuroseries as nts from functions import loadShankStructure, loadSpikeData, loadEpoch, loadThetaMod, loadSpeed, loadXML, loadRipples, loadLFP, downsample, getPeaksandTroughs, butter_bandpass_filter import pandas as pd # to know which neurons to keep data_directory = '/mnt/DataGuillaume/MergedData/' datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#') theta_mod, theta_ses = loadThetaMod('/mnt/DataGuillaume/MergedData/THETA_THAL_mod.pickle', datasets, return_index=True) theta = pd.DataFrame( index = theta_ses['rem'], columns = ['phase', 'pvalue', 'kappa'], data = theta_mod['rem']) tmp2 = theta.index[theta.isnull().any(1)].values tmp3 = theta.index[(theta['pvalue'] > 0.01).values].values tmp = np.unique(np.concatenate([tmp2,tmp3])) theta_modth = theta.drop(tmp, axis = 0) neurons_index = theta_modth.index.values bins1 = np.arange(-1005, 1010, 25)*1000 times = np.floor(((bins1[0:-1] + (bins1[1] - bins1[0])/2)/1000)).astype('int') premeanscore = {i:{'rem':pd.DataFrame(index = [], columns = ['mean', 'std']),'rip':pd.DataFrame(index = times, columns = [])} for i in range(3)} posmeanscore = {i:{'rem':pd.DataFrame(index = [], columns = ['mean', 'std']),'rip':pd.DataFrame(index = times, columns = [])} for i in range(3)} bins2 = np.arange(-1012.5,1025,25)*1000 tsmax = {i:pd.DataFrame(columns = ['pre', 'pos']) for i in range(3)} # for session in datasets: # for session in datasets[0:15]: # for session in ['Mouse12/Mouse12-120815']: start_time = time.clock() print(session) generalinfo = scipy.io.loadmat(data_directory+session+'/Analysis/GeneralInfo.mat') shankStructure = loadShankStructure(generalinfo) if len(generalinfo['channelStructure'][0][0][1][0]) == 2: hpc_channel = generalinfo['channelStructure'][0][0][1][0][1][0][0] - 1 else: hpc_channel = 
generalinfo['channelStructure'][0][0][1][0][0][0][0] - 1 spikes,shank = loadSpikeData(data_directory+session+'/Analysis/SpikeData.mat', shankStructure['thalamus']) wake_ep = loadEpoch(data_directory+session, 'wake') sleep_ep = loadEpoch(data_directory+session, 'sleep') sws_ep = loadEpoch(data_directory+session, 'sws') rem_ep = loadEpoch(data_directory+session, 'rem') sleep_ep = sleep_ep.merge_close_intervals(threshold=1.e3) sws_ep = sleep_ep.intersect(sws_ep) rem_ep = sleep_ep.intersect(rem_ep) speed = loadSpeed(data_directory+session+'/Analysis/linspeed.mat').restrict(wake_ep) speed_ep = nts.IntervalSet(speed[speed>2.5].index.values[0:-1], speed[speed>2.5].index.values[1:]).drop_long_intervals(26000).merge_close_intervals(50000) wake_ep = wake_ep.intersect(speed_ep).drop_short_intervals(3000000) n_channel,fs, shank_to_channel = loadXML(data_directory+session+"/"+session.split("/")[1]+'.xml') rip_ep,rip_tsd = loadRipples(data_directory+session) hd_info = scipy.io.loadmat(data_directory+session+'/Analysis/HDCells.mat')['hdCellStats'][:,-1] hd_info_neuron = np.array([hd_info[n] for n in spikes.keys()]) all_neurons = np.array(list(spikes.keys())) mod_neurons = np.array([int(n.split("_")[1]) for n in neurons_index if session.split("/")[1] in n]) if len(sleep_ep) > 1: store = pd.HDFStore("/mnt/DataGuillaume/population_activity_25ms/"+session.split("/")[1]+".h5") # all_pop = store['allwake'] pre_pop = store['presleep'] pos_pop = store['postsleep'] store.close() store = pd.HDFStore("/mnt/DataGuillaume/population_activity_100ms/"+session.split("/")[1]+".h5") all_pop = store['allwake'] # pre_pop = store['presleep'] # pos_pop = store['postsleep'] store.close() def compute_eigen(popwak): popwak = popwak - popwak.mean(0) popwak = popwak / (popwak.std(0)+1e-8) from sklearn.decomposition import PCA pca = PCA(n_components = popwak.shape[1]) xy = pca.fit_transform(popwak.values) pc = pca.explained_variance_ > (1 + np.sqrt(1/(popwak.shape[0]/popwak.shape[1])))**2.0 eigen = pca.components_[pc] lambdaa = pca.explained_variance_[pc] return eigen, lambdaa def compute_score(ep_pop, eigen, lambdaa, thr): ep_pop = ep_pop - ep_pop.mean(0) ep_pop = ep_pop / (ep_pop.std(0)+1e-8) a = ep_pop.values score = np.zeros(len(ep_pop)) for i in range(len(eigen)): if lambdaa[i] >= thr: score += (np.dot(a, eigen[i])**2.0 - np.dot(a**2.0, eigen[i]**2.0)) score = nts.Tsd(t = ep_pop.index.values, d = score) return score def compute_rip_score(tsd, score, bins): times = np.floor(((bins[0:-1] + (bins[1] - bins[0])/2)/1000)).astype('int') rip_score = pd.DataFrame(index = times, columns = []) for r,i in zip(tsd.index.values,range(len(tsd))): xbins = (bins + r).astype('int') y = score.groupby(pd.cut(score.index.values, bins=xbins, labels = times)).mean() if ~y.isnull().any(): rip_score[r] = y return rip_score def get_xmin(ep, minutes): duree = (ep['end'] - ep['start'])/1000/1000/60 tmp = ep.iloc[np.where(np.ceil(duree.cumsum()) <= minutes + 1)[0]] return nts.IntervalSet(tmp['start'], tmp['end']) pre_ep = nts.IntervalSet(sleep_ep['start'][0], sleep_ep['end'][0]) post_ep = nts.IntervalSet(sleep_ep['start'][1], sleep_ep['end'][1]) pre_sws_ep = sws_ep.intersect(pre_ep) pos_sws_ep = sws_ep.intersect(post_ep) pre_sws_ep = get_xmin(pre_sws_ep.iloc[::-1], 30) pos_sws_ep = get_xmin(pos_sws_ep, 30) if pre_sws_ep.tot_length('s')/60 > 5.0 and pos_sws_ep.tot_length('s')/60 > 5.0: for hd in range(3): if hd == 0 or hd == 2: index = np.where(hd_info_neuron == 0)[0] elif hd == 1: index = np.where(hd_info_neuron == 1)[0] if hd == 0: index = 
np.intersect1d(index, mod_neurons) elif hd == 2: index = np.intersect1d(index, np.setdiff1d(all_neurons, mod_neurons)) allpop = all_pop[index].copy() prepop = nts.TsdFrame(pre_pop[index].copy()) pospop = nts.TsdFrame(pos_pop[index].copy()) # prepop25ms = nts.TsdFrame(pre_pop_25ms[index].copy()) # pospop25ms = nts.TsdFrame(pos_pop_25ms[index].copy()) if allpop.shape[1] and allpop.shape[1] > 5: eigen,lambdaa = compute_eigen(allpop) seuil = 1.2 if np.sum(lambdaa > seuil): pre_score = compute_score(prepop, eigen, lambdaa, seuil) pos_score = compute_score(pospop, eigen, lambdaa, seuil) prerip_score = compute_rip_score(rip_tsd.restrict(pre_sws_ep), pre_score, bins1) posrip_score = compute_rip_score(rip_tsd.restrict(pos_sws_ep), pos_score, bins1) # pre_score_25ms = compute_score(prepop25ms, eigen) # pos_score_25ms = compute_score(pospop25ms, eigen) # prerip25ms_score = compute_rip_score(rip_tsd.restrict(pre_ep), pre_score_25ms, bins2) # posrip25ms_score = compute_rip_score(rip_tsd.restrict(post_ep), pos_score_25ms, bins2) # prerip25ms_score = prerip25ms_score - prerip25ms_score.mean(0) # posrip25ms_score = posrip25ms_score - posrip25ms_score.mean(0) # prerip25ms_score = prerip25ms_score / prerip25ms_score.std(0) # posrip25ms_score = posrip25ms_score / posrip25ms_score.std(0) # prerip25ms_score = prerip25ms_score.loc[-500:500] # posrip25ms_score = posrip25ms_score.loc[-500:500] # sys.exit() # tmp = pd.concat([pd.DataFrame(prerip25ms_score.idxmax().values, columns = ['pre']),pd.DataFrame(posrip25ms_score.idxmax().values, columns = ['pos'])],axis = 1) # tmp = pd.DataFrame(data = [[prerip25ms_score.mean(1).idxmax(), posrip25ms_score.mean(1).idxmax()]], columns = ['pre', 'pos']) # tsmax[hd] = tsmax[hd].append(tmp, ignore_index = True) premeanscore[hd]['rip'][session] = prerip_score.mean(1) posmeanscore[hd]['rip'][session] = posrip_score.mean(1) # if len(rem_ep.intersect(pre_ep)) and len(rem_ep.intersect(post_ep)): # premeanscore[hd]['rem'].loc[session,'mean'] = pre_score.restrict(rem_ep.intersect(pre_ep)).mean() # posmeanscore[hd]['rem'].loc[session,'mean'] = pos_score.restrict(rem_ep.intersect(post_ep)).mean() # premeanscore[hd]['rem'].loc[session,'std'] = pre_score.restrict(rem_ep.intersect(pre_ep)).std() # posmeanscore[hd]['rem'].loc[session,'std'] = pos_score.restrict(rem_ep.intersect(post_ep)).std() return [premeanscore, posmeanscore, tsmax] # sys.exit() a = dview.map_sync(compute_pop_pca, datasets) prescore = {i:pd.DataFrame(index = times) for i in range(3)} posscore = {i:pd.DataFrame(index = times) for i in range(3)} for i in range(len(a)): for j in range(3): if len(a[i][0][j]['rip'].columns): s = a[i][0][j]['rip'].columns[0] prescore[j][s] = a[i][0][j]['rip'] posscore[j][s] = a[i][1][j]['rip'] # prescore = premeanscore # posscore = posmeanscore from pylab import * titles = ['non hd mod', 'hd', 'non hd non mod'] figure() for i in range(3): subplot(1,3,i+1) times = prescore[i].index.values # for s in premeanscore[i]['rip'].index.values: # plot(times, premeanscore[i]['rip'].loc[s].values, linewidth = 0.3, color = 'blue') # plot(times, posmeanscore[i]['rip'].loc[s].values, linewidth = 0.3, color = 'red') plot(times, gaussFilt(prescore[i].mean(1).values, (1,)), label = 'pre', color = 'blue', linewidth = 2) plot(times, gaussFilt(posscore[i].mean(1).values, (1,)), label = 'post', color = 'red', linewidth = 2) legend() title(titles[i]) show() sys.exit() ######################################### # search for peak in 25 ms array ######################################## tsmax = {i:pd.DataFrame(columns = 
['pre', 'pos']) for i in range(2)} for i in range(len(a)): for hd in range(2): tsmax[hd] = tsmax[hd].append(a[i][2][hd], ignore_index = True) from pylab import * plot(tsmax[0]['pos'], np.ones(len(tsmax[0]['pos'])), 'o') plot(tsmax[0]['pos'].mean(), [1], '|', markersize = 10) plot(tsmax[1]['pos'], np.zeros(len(tsmax[1]['pos'])), 'o') plot(tsmax[1]['pos'].mean(), [0], '|', markersize = 10) sys.exit() ######################################### # SAVING ######################################## store = pd.HDFStore("../figures/figures_articles/figure3/pca_analysis_3.h5") for i,j in zip(range(3),('nohd_mod', 'hd', 'nohd_nomod')): store.put(j+'pre_rip', prescore[i]) store.put(j+'pos_rip', posscore[i]) store.close() # a = dview.map_sync(compute_population_correlation, datasets[0:15]) # for i in range(len(a)): # if type(a[i]) is dict: # s = list(a[i].keys())[0] # premeanscore.loc[s] = a[i][s]['pre'] # posmeanscore.loc[s] = a[i][s]['pos'] from pylab import * titles = ['non hd', 'hd'] figure() for i in range(2): subplot(1,3,i+1) times = premeanscore[i]['rip'].columns.values # for s in premeanscore[i]['rip'].index.values: # plot(times, premeanscore[i]['rip'].loc[s].values, linewidth = 0.3, color = 'blue') # plot(times, posmeanscore[i]['rip'].loc[s].values, linewidth = 0.3, color = 'red') plot(times, gaussFilt(premeanscore[i]['rip'].mean(0).values, (1,)), label = 'pre', color = 'blue', linewidth = 2) plot(times, gaussFilt(posmeanscore[i]['rip'].mean(0).values, (1,)),label = 'post', color = 'red', linewidth = 2) legend() title(titles[i]) subplot(1,3,3) bar([1,2], [premeanscore[0]['rem'].mean(0)['mean'], premeanscore[1]['rem'].mean(0)['mean']]) bar([3,4], [posmeanscore[0]['rem'].mean(0)['mean'], posmeanscore[1]['rem'].mean(0)['mean']]) xticks([1,2], ['non hd', 'hd']) xticks([3,4], ['non hd', 'hd']) show() figure() subplot(121) times = premeanscore[0]['rip'].columns.values for s in premeanscore[0]['rip'].index.values: print(s) plot(times, premeanscore[0]['rip'].loc[s].values, linewidth = 1, color = 'blue') plot(premeanscore[0]['rip'].mean(0)) subplot(122) for s in posmeanscore[0]['rip'].index.values: plot(times, posmeanscore[0]['rip'].loc[s].values, linewidth = 1, color = 'red') plot(posmeanscore[0]['rip'].mean(0)) show()
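
# --- Added illustration (not part of the original analysis script) ------------------
# Self-contained sketch of the two core steps used in compute_eigen()/compute_score()
# above: (1) keep the principal components whose eigenvalues exceed the
# Marchenko-Pastur bound (1 + sqrt(n_cols/n_rows))**2 computed on the z-scored wake
# activity, and (2) score each time bin of another epoch by its projection onto those
# components with the diagonal (single-neuron) term removed.  Random data are used
# here purely to show the shapes involved.
def _reactivation_sketch(n_bins=5000, n_cells=40):
    from sklearn.decomposition import PCA
    rng = np.random.RandomState(0)
    wake = rng.randn(n_bins, n_cells)
    wake = (wake - wake.mean(0)) / (wake.std(0) + 1e-8)
    pca = PCA(n_components=n_cells).fit(wake)
    keep = pca.explained_variance_ > (1 + np.sqrt(n_cells / n_bins)) ** 2
    eigen = pca.components_[keep]
    sleep = rng.randn(1000, n_cells)  # stands in for the z-scored sleep-epoch activity
    score = np.zeros(len(sleep))
    for w in eigen:
        score += np.dot(sleep, w) ** 2 - np.dot(sleep ** 2, w ** 2)
    return score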
gpl-3.0
AstroFloyd/LearningPython
Fitting/scipy.optimize.least_squares.py
1
2724
#!/bin/env python3
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html

"""Solve a curve-fitting problem with scipy.optimize.least_squares, optionally using a
robust loss function to take care of outliers in the data.  The model function is
y = a + b*x + c*x**2, where x is a predictor variable, y is an observation and a, b, c
are the parameters to estimate.
"""

import numpy as np
from scipy.optimize import least_squares


# Function which generates the data with noise and outliers:
def gen_data(x, a, b, c, noise=0, n_outliers=0, random_state=0):
    y = a + b*x + c*x**2

    rnd = np.random.RandomState(random_state)
    error = noise * rnd.randn(x.size)
    outliers = rnd.randint(0, x.size, n_outliers)
    error[outliers] *= 10

    return y + error


# Function for computing residuals:
def resFun(c, x, y):
    return c[0] + c[1] * x + c[2] * x**2 - y


trueCoefs = [-5, 1, 3]
sigma = 1.5
print("True coefficients: ", trueCoefs)
print("Sigma: ", sigma)

f = np.poly1d(trueCoefs)
xDat = np.linspace(0, 2, 20)
errors = sigma*np.random.normal(size=len(xDat))
yDat = f(xDat) + errors

# Initial estimate of parameters:
# x0 = np.array([1.0, 1.0, 0.0])
x0 = np.array([-4.0, 2.0, 5.0])

# Compute a standard least-squares solution:
res = least_squares(resFun, x0, args=(xDat, yDat))

# print('res: ', res)
print('Success: ', res.success)
print('Cost: ', res.cost)
print('Optimality: ', res.optimality)
print('Coefficients: ', res.x)
print('Grad: ', res.grad)
print('Residuals: ', res.fun)

Chi2 = sum(res.fun**2)
redChi2 = Chi2/(len(xDat)-len(res.x))  # Reduced Chi^2 = Chi^2 / (n-m)
print("Chi2: ", Chi2, res.cost*2)
print("Red. Chi2: ", redChi2)

# Plot all the curves.  By selecting an appropriate loss function, the estimates stay
# close to optimal even in the presence of strong outliers.  Keep in mind that it is
# generally recommended to try the 'soft_l1' or 'huber' losses first (if at all
# necessary), as the other two options may cause difficulties in the optimization
# process.
y_true = gen_data(xDat, trueCoefs[2], trueCoefs[1], trueCoefs[0])
y_lsq = gen_data(xDat, *res.x)
print()
# exit()

import matplotlib.pyplot as plt
# plt.style.use('dark_background')  # Invert colours

# plt.plot(xDat, yDat, 'o')
plt.errorbar(xDat, yDat, yerr=errors, fmt='ro')  # Plot red circles with actual error bars
plt.plot(xDat, y_true, 'k', linewidth=2, label='true')
plt.plot(xDat, y_lsq, label='linear loss')

plt.xlabel("t")
plt.ylabel("y")
plt.legend()
plt.tight_layout()

# plt.show()
plt.savefig('scipy.optimize.least_squares.png')  # Save the plot as png
plt.close()  # Close the plot in order to start a new one later
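
# --- Added illustration (not part of the original script): robust loss ---------------
# A minimal sketch of the robust fit recommended in the comment above, reusing the same
# residual function.  The f_scale value (the soft margin between inlier and outlier
# residuals) is an assumed, data-dependent choice here.
res_robust = least_squares(resFun, x0, loss='soft_l1', f_scale=2*sigma, args=(xDat, yDat))
print('Robust (soft_l1) coefficients: ', res_robust.x)

plt.errorbar(xDat, yDat, yerr=errors, fmt='ro')
plt.plot(xDat, y_true, 'k', linewidth=2, label='true')
plt.plot(xDat, gen_data(xDat, *res_robust.x), label='soft_l1 loss')
plt.legend()
plt.tight_layout()
plt.savefig('scipy.optimize.least_squares_robust.png')
plt.close()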
gpl-3.0
scienceopen/spectral_analysis
scripts/FilterDesign.py
1
3846
#!/usr/bin/env python
"""
Design FIR filter coefficients using the Parks-McClellan or windowing algorithm
and plot the filter transfer function.

Michael Hirsch, Ph.D.

Example for the PiRadar CW prototype, writing filter coefficients for use by filters.f90:
./FilterDesign.py 9950 10050 100e3 -L 4096 -m firwin -o cwfir.asc

Refs:
http://www.iowahills.com/5FIRFiltersPage.html
"""
import numpy as np
from pathlib import Path
import scipy.signal as signal
from matplotlib.pyplot import show, figure
from argparse import ArgumentParser

from signal_subspace.plots import plotfilt

try:
    import seaborn as sns
    sns.set_context("talk")
except ImportError:
    pass


def computefir(fc, L: int, ofn, fs: int, method: str):
    """
    Bandpass FIR design.

    https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.firwin.html
    http://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.remez.html

    L: number of taps

    output:
    b: FIR filter coefficients
    """
    assert len(fc) == 2, "specify lower and upper bandpass filter corner frequencies in Hz."

    if method == "remez":
        b = signal.remez(numtaps=L,
                         bands=[0, 0.9 * fc[0], fc[0], fc[1], 1.1 * fc[1], 0.5 * fs],
                         desired=[0, 1, 0], Hz=fs)
    elif method == "firwin":
        b = signal.firwin(L, [fc[0], fc[1]], window="blackman", pass_zero=False, nyq=fs // 2)
    elif method == "firwin2":
        b = signal.firwin2(
            L,
            [0, fc[0], fc[1], fs // 2],
            [0, 1, 1, 0],
            window="blackman",
            nyq=fs // 2,
            # antisymmetric=True,
        )
    else:
        raise ValueError(f"unknown filter design method {method}")

    if ofn:
        ofn = Path(ofn).expanduser()
        print(f"writing {ofn}")
        # FIXME make binary
        with ofn.open("w") as h:
            h.write(f"{b.size}\n")  # first line is the number of coefficients
            b.tofile(h, sep=" ")  # second line is the space-delimited coefficients

    return b


def butterplot(fs, fc):
    """
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.butter.html
    """
    b, a = signal.butter(4, 100, "low", analog=True)
    w, h = signal.freqs(b, a)

    ax = figure().gca()
    ax.semilogx(fs * 0.5 / np.pi * w, 20 * np.log10(abs(h)))
    ax.set_title("Butterworth filter frequency response")
    ax.set_xlabel("Frequency [Hz]")
    ax.set_ylabel("Amplitude [dB]")
    ax.grid(which="both", axis="both")
    ax.axvline(fc, color="green")  # cutoff frequency
    ax.set_ylim(-50, 0)


def chebyshevplot(fs):
    """
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.cheby1.html#scipy.signal.cheby1
    """
    b, a = signal.cheby1(4, 5, 100, "high", analog=True)
    w, h = signal.freqs(b, a)

    ax = figure().gca()
    ax.semilogx(w, 20 * np.log10(abs(h)))
    ax.set_title("Chebyshev Type I frequency response (rp=5)")
    ax.set_xlabel("Frequency [radians / second]")
    ax.set_ylabel("Amplitude [dB]")
    ax.grid(which="both", axis="both")
    ax.axvline(100, color="green")  # cutoff frequency
    ax.axhline(-5, color="green")  # rp


def main():
    p = ArgumentParser()
    p.add_argument("fc", help="lower,upper bandpass filter corner frequencies [Hz]", nargs=2, type=float)
    p.add_argument("fs", help="sampling frequency [Hz]", type=float)
    p.add_argument("-o", "--ofn", help="output coefficient file to write")
    p.add_argument("-L", help="number of coefficients for FIR filter", type=int, default=63)
    p.add_argument("-m", "--method", help="filter design method [remez,firwin,firwin2]", default="firwin")
    p.add_argument("-k", "--filttype", help="filter type: low, high, bandpass", default="low")
    p = p.parse_args()

    b = computefir(p.fc, p.L, p.ofn, p.fs, p.method)

    plotfilt(b, p.fs, p.ofn)

    show()


if __name__ == "__main__":
    main()
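
# --- Added illustration (not part of the original module) ---------------------------
# A minimal sketch of how the FIR coefficients returned by computefir() could be
# applied to a signal directly in Python (the original project feeds them to
# filters.f90 instead).  The sample rate, corner frequencies, tap count and test
# signal below are assumed values chosen only for illustration.
def apply_fir_sketch():
    fs = 100e3
    b = computefir((9950.0, 10050.0), 4096, None, fs, "firwin")
    t = np.arange(0, 0.1, 1 / fs)
    x = np.sin(2 * np.pi * 10e3 * t) + 0.5 * np.random.randn(t.size)  # 10 kHz tone + noise
    return signal.lfilter(b, [1.0], x)  # denominator [1.0] -> pure FIR filtering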
mit
biocore/qiita
qiita_db/meta_util.py
2
20723
r""" Util functions (:mod: `qiita_db.meta_util`) =========================================== ..currentmodule:: qiita_db.meta_util This module provides utility functions that use the ORM objects. ORM objects CANNOT import from this file. Methods ------- ..autosummary:: :toctree: generated/ get_lat_longs """ # ----------------------------------------------------------------------------- # Copyright (c) 2014--, The Qiita Development Team. # # Distributed under the terms of the BSD 3-clause License. # # The full license is in the file LICENSE, distributed with this software. # ----------------------------------------------------------------------------- from os import stat, rename from os.path import join, relpath, basename from time import strftime, localtime import matplotlib.pyplot as plt import matplotlib as mpl from base64 import b64encode from urllib.parse import quote from io import BytesIO from datetime import datetime from collections import defaultdict, Counter from tarfile import open as topen, TarInfo from hashlib import md5 from re import sub from json import loads, dump, dumps from qiita_db.util import create_nested_path from qiita_core.qiita_settings import qiita_config, r_client from qiita_core.configuration_manager import ConfigurationManager import qiita_db as qdb def _get_data_fpids(constructor, object_id): """Small function for getting filepath IDS associated with data object Parameters ---------- constructor : a subclass of BaseData E.g., RawData, PreprocessedData, or ProcessedData object_id : int The ID of the data object Returns ------- set of int """ with qdb.sql_connection.TRN: obj = constructor(object_id) return {fpid for fpid, _, _ in obj.get_filepaths()} def validate_filepath_access_by_user(user, filepath_id): """Validates if the user has access to the filepath_id Parameters ---------- user : User object The user we are interested in filepath_id : int The filepath id Returns ------- bool If the user has access or not to the filepath_id Notes ----- Admins have access to all files so True is always returned """ TRN = qdb.sql_connection.TRN with TRN: if user.level == "admin": # admins have access all files return True sql = """SELECT (SELECT array_agg(artifact_id) FROM qiita.artifact_filepath WHERE filepath_id = {0}) AS artifact, (SELECT array_agg(study_id) FROM qiita.sample_template_filepath WHERE filepath_id = {0}) AS sample_info, (SELECT array_agg(prep_template_id) FROM qiita.prep_template_filepath WHERE filepath_id = {0}) AS prep_info, (SELECT array_agg(analysis_id) FROM qiita.analysis_filepath WHERE filepath_id = {0}) AS analysis""".format(filepath_id) TRN.add(sql) arid, sid, pid, anid = TRN.execute_fetchflatten() # artifacts if arid: # [0] cause we should only have 1 artifact = qdb.artifact.Artifact(arid[0]) if artifact.visibility == 'public': # TODO: https://github.com/biocore/qiita/issues/1724 if artifact.artifact_type in ['SFF', 'FASTQ', 'FASTA', 'FASTA_Sanger', 'per_sample_FASTQ']: study = artifact.study has_access = study.has_access(user, no_public=True) if (not study.public_raw_download and not has_access): return False return True else: study = artifact.study if study: # let's take the visibility via the Study return artifact.study.has_access(user) else: analysis = artifact.analysis return analysis in ( user.private_analyses | user.shared_analyses) # sample info files elif sid: # the visibility of the sample info file is given by the # study visibility # [0] cause we should only have 1 return qdb.study.Study(sid[0]).has_access(user) # prep info files elif 
pid: # the prep access is given by it's artifacts, if the user has # access to any artifact, it should have access to the prep # [0] cause we should only have 1 pt = qdb.metadata_template.prep_template.PrepTemplate( pid[0]) a = pt.artifact # however, the prep info file could not have any artifacts attached # , in that case we will use the study access level if a is None: return qdb.study.Study(pt.study_id).has_access(user) else: if (a.visibility == 'public' or a.study.has_access(user)): return True else: for c in a.descendants.nodes(): if ((c.visibility == 'public' or c.study.has_access(user))): return True return False # analyses elif anid: # [0] cause we should only have 1 aid = anid[0] analysis = qdb.analysis.Analysis(aid) return analysis in ( user.private_analyses | user.shared_analyses) return False def update_redis_stats(): """Generate the system stats and save them in redis Returns ------- list of str artifact filepaths that are not present in the file system """ STUDY = qdb.study.Study number_studies = {'public': 0, 'private': 0, 'sandbox': 0} number_of_samples = {'public': 0, 'private': 0, 'sandbox': 0} num_studies_ebi = 0 num_samples_ebi = 0 number_samples_ebi_prep = 0 stats = [] missing_files = [] per_data_type_stats = Counter() for study in STUDY.iter(): st = study.sample_template if st is None: continue # counting samples submitted to EBI-ENA len_samples_ebi = sum([esa is not None for esa in st.ebi_sample_accessions.values()]) if len_samples_ebi != 0: num_studies_ebi += 1 num_samples_ebi += len_samples_ebi samples_status = defaultdict(set) for pt in study.prep_templates(): pt_samples = list(pt.keys()) pt_status = pt.status if pt_status == 'public': per_data_type_stats[pt.data_type()] += len(pt_samples) samples_status[pt_status].update(pt_samples) # counting experiments (samples in preps) submitted to EBI-ENA number_samples_ebi_prep += sum([ esa is not None for esa in pt.ebi_experiment_accessions.values()]) # counting studies if 'public' in samples_status: number_studies['public'] += 1 elif 'private' in samples_status: number_studies['private'] += 1 else: # note that this is a catch all for other status; at time of # writing there is status: awaiting_approval number_studies['sandbox'] += 1 # counting samples; note that some of these lines could be merged with # the block above but I decided to split it in 2 for clarity if 'public' in samples_status: number_of_samples['public'] += len(samples_status['public']) if 'private' in samples_status: number_of_samples['private'] += len(samples_status['private']) if 'sandbox' in samples_status: number_of_samples['sandbox'] += len(samples_status['sandbox']) # processing filepaths for artifact in study.artifacts(): for adata in artifact.filepaths: try: s = stat(adata['fp']) except OSError: missing_files.append(adata['fp']) else: stats.append( (adata['fp_type'], s.st_size, strftime('%Y-%m', localtime(s.st_mtime)))) num_users = qdb.util.get_count('qiita.qiita_user') num_processing_jobs = qdb.util.get_count('qiita.processing_job') lat_longs = dumps(get_lat_longs()) summary = {} all_dates = [] # these are some filetypes that are too small to plot alone so we'll merge # in other group_other = {'html_summary', 'tgz', 'directory', 'raw_fasta', 'log', 'biom', 'raw_sff', 'raw_qual', 'qza', 'html_summary_dir', 'qza', 'plain_text', 'raw_barcodes'} for ft, size, ym in stats: if ft in group_other: ft = 'other' if ft not in summary: summary[ft] = {} if ym not in summary[ft]: summary[ft][ym] = 0 all_dates.append(ym) summary[ft][ym] += size all_dates = 
sorted(set(all_dates)) # sorting summaries ordered_summary = {} for dt in summary: new_list = [] current_value = 0 for ad in all_dates: if ad in summary[dt]: current_value += summary[dt][ad] new_list.append(current_value) ordered_summary[dt] = new_list plot_order = sorted([(k, ordered_summary[k][-1]) for k in ordered_summary], key=lambda x: x[1]) # helper function to generate y axis, modified from: # http://stackoverflow.com/a/1094933 def sizeof_fmt(value, position): number = None for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']: if abs(value) < 1024.0: number = "%3.1f%s" % (value, unit) break value /= 1024.0 if number is None: number = "%.1f%s" % (value, 'Yi') return number all_dates_axis = range(len(all_dates)) plt.locator_params(axis='y', nbins=10) plt.figure(figsize=(20, 10)) for k, v in plot_order: plt.plot(all_dates_axis, ordered_summary[k], linewidth=2, label=k) plt.xticks(all_dates_axis, all_dates) plt.legend() plt.grid() ax = plt.gca() ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(sizeof_fmt)) plt.xticks(rotation=90) plt.xlabel('Date') plt.ylabel('Storage space per data type') plot = BytesIO() plt.savefig(plot, format='png') plot.seek(0) img = 'data:image/png;base64,' + quote(b64encode(plot.getbuffer())) time = datetime.now().strftime('%m-%d-%y %H:%M:%S') portal = qiita_config.portal # making sure per_data_type_stats has some data so hmset doesn't fail if per_data_type_stats == {}: per_data_type_stats['No data'] = 0 vals = [ ('number_studies', number_studies, r_client.hmset), ('number_of_samples', number_of_samples, r_client.hmset), ('per_data_type_stats', dict(per_data_type_stats), r_client.hmset), ('num_users', num_users, r_client.set), ('lat_longs', (lat_longs), r_client.set), ('num_studies_ebi', num_studies_ebi, r_client.set), ('num_samples_ebi', num_samples_ebi, r_client.set), ('number_samples_ebi_prep', number_samples_ebi_prep, r_client.set), ('img', img, r_client.set), ('time', time, r_client.set), ('num_processing_jobs', num_processing_jobs, r_client.set)] for k, v, f in vals: redis_key = '%s:stats:%s' % (portal, k) # important to "flush" variables to avoid errors r_client.delete(redis_key) f(redis_key, v) # preparing vals to insert into DB vals = dumps(dict([x[:-1] for x in vals])) sql = """INSERT INTO qiita.stats_daily (stats, stats_timestamp) VALUES (%s, NOW())""" qdb.sql_connection.perform_as_transaction(sql, [vals]) return missing_files def get_lat_longs(): """Retrieve the latitude and longitude of all the public samples in the DB Returns ------- list of [float, float] The latitude and longitude for each sample in the database """ with qdb.sql_connection.TRN: # getting all the public studies studies = qdb.study.Study.get_by_status('public') results = [] if studies: # we are going to create multiple union selects to retrieve the # latigute and longitude of all available studies. 
Note that # UNION in PostgreSQL automatically removes duplicates sql_query = """ SELECT {0}, CAST(sample_values->>'latitude' AS FLOAT), CAST(sample_values->>'longitude' AS FLOAT) FROM qiita.sample_{0} WHERE sample_values->>'latitude' != 'NaN' AND sample_values->>'longitude' != 'NaN' AND isnumeric(sample_values->>'latitude') AND isnumeric(sample_values->>'longitude')""" sql = [sql_query.format(s.id) for s in studies] sql = ' UNION '.join(sql) qdb.sql_connection.TRN.add(sql) # note that we are returning set to remove duplicates results = qdb.sql_connection.TRN.execute_fetchindex() return results def generate_biom_and_metadata_release(study_status='public'): """Generate a list of biom/meatadata filepaths and a tgz of those files Parameters ---------- study_status : str, optional The study status to search for. Note that this should always be set to 'public' but having this exposed helps with testing. The other options are 'private' and 'sandbox' """ studies = qdb.study.Study.get_by_status(study_status) qiita_config = ConfigurationManager() working_dir = qiita_config.working_dir portal = qiita_config.portal bdir = qdb.util.get_db_files_base_dir() time = datetime.now().strftime('%m-%d-%y %H:%M:%S') data = [] for s in studies: # [0] latest is first, [1] only getting the filepath sample_fp = relpath(s.sample_template.get_filepaths()[0][1], bdir) for a in s.artifacts(artifact_type='BIOM'): if a.processing_parameters is None or a.visibility != study_status: continue merging_schemes, parent_softwares = a.merging_scheme software = a.processing_parameters.command.software software = '%s v%s' % (software.name, software.version) for x in a.filepaths: if x['fp_type'] != 'biom' or 'only-16s' in x['fp']: continue fp = relpath(x['fp'], bdir) for pt in a.prep_templates: categories = pt.categories() platform = '' target_gene = '' if 'platform' in categories: platform = ', '.join( set(pt.get_category('platform').values())) if 'target_gene' in categories: target_gene = ', '.join( set(pt.get_category('target_gene').values())) for _, prep_fp in pt.get_filepaths(): if 'qiime' not in prep_fp: break prep_fp = relpath(prep_fp, bdir) # format: (biom_fp, sample_fp, prep_fp, qiita_artifact_id, # platform, target gene, merging schemes, # artifact software/version, # parent sofware/version) data.append((fp, sample_fp, prep_fp, a.id, platform, target_gene, merging_schemes, software, parent_softwares)) # writing text and tgz file ts = datetime.now().strftime('%m%d%y-%H%M%S') tgz_dir = join(working_dir, 'releases') create_nested_path(tgz_dir) tgz_name = join(tgz_dir, '%s-%s-building.tgz' % (portal, study_status)) tgz_name_final = join(tgz_dir, '%s-%s.tgz' % (portal, study_status)) txt_lines = [ "biom fp\tsample fp\tprep fp\tqiita artifact id\tplatform\t" "target gene\tmerging scheme\tartifact software\tparent software"] with topen(tgz_name, "w|gz") as tgz: for biom_fp, sample_fp, prep_fp, aid, pform, tg, ms, asv, psv in data: txt_lines.append("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % ( biom_fp, sample_fp, prep_fp, aid, pform, tg, ms, asv, psv)) tgz.add(join(bdir, biom_fp), arcname=biom_fp, recursive=False) tgz.add(join(bdir, sample_fp), arcname=sample_fp, recursive=False) tgz.add(join(bdir, prep_fp), arcname=prep_fp, recursive=False) info = TarInfo(name='%s-%s-%s.txt' % (portal, study_status, ts)) txt_hd = BytesIO() txt_hd.write(bytes('\n'.join(txt_lines), 'ascii')) txt_hd.seek(0) info.size = len(txt_hd.read()) txt_hd.seek(0) tgz.addfile(tarinfo=info, fileobj=txt_hd) with open(tgz_name, "rb") as f: md5sum = md5() for c in 
iter(lambda: f.read(4096), b""): md5sum.update(c) rename(tgz_name, tgz_name_final) vals = [ ('filepath', tgz_name_final[len(working_dir):], r_client.set), ('md5sum', md5sum.hexdigest(), r_client.set), ('time', time, r_client.set)] for k, v, f in vals: redis_key = '%s:release:%s:%s' % (portal, study_status, k) # important to "flush" variables to avoid errors r_client.delete(redis_key) f(redis_key, v) def generate_plugin_releases(): """Generate releases for plugins """ ARCHIVE = qdb.archive.Archive qiita_config = ConfigurationManager() working_dir = qiita_config.working_dir commands = [c for s in qdb.software.Software.iter(active=True) for c in s.commands if c.post_processing_cmd is not None] tnow = datetime.now() ts = tnow.strftime('%m%d%y-%H%M%S') tgz_dir = join(working_dir, 'releases', 'archive') create_nested_path(tgz_dir) tgz_dir_release = join(tgz_dir, ts) create_nested_path(tgz_dir_release) for cmd in commands: cmd_name = cmd.name mschemes = [v for _, v in ARCHIVE.merging_schemes().items() if cmd_name in v] for ms in mschemes: ms_name = sub('[^0-9a-zA-Z]+', '', ms) ms_fp = join(tgz_dir_release, ms_name) create_nested_path(ms_fp) pfp = join(ms_fp, 'archive.json') archives = {k: loads(v) for k, v in ARCHIVE.retrieve_feature_values( archive_merging_scheme=ms).items() if v != ''} with open(pfp, 'w') as f: dump(archives, f) # now let's run the post_processing_cmd ppc = cmd.post_processing_cmd # concatenate any other parameters into a string params = ' '.join(["%s=%s" % (k, v) for k, v in ppc['script_params'].items()]) # append archives file and output dir parameters params = ("%s --fp_archive=%s --output_dir=%s" % ( params, pfp, ms_fp)) ppc_cmd = "%s %s %s" % ( ppc['script_env'], ppc['script_path'], params) p_out, p_err, rv = qdb.processing_job._system_call(ppc_cmd) p_out = p_out.rstrip() if rv != 0: raise ValueError('Error %d: %s' % (rv, p_out)) p_out = loads(p_out) # tgz-ing all files tgz_name = join(tgz_dir, 'archive-%s-building.tgz' % ts) tgz_name_final = join(tgz_dir, 'archive.tgz') with topen(tgz_name, "w|gz") as tgz: tgz.add(tgz_dir_release, arcname=basename(tgz_dir_release)) # getting the release md5 with open(tgz_name, "rb") as f: md5sum = md5() for c in iter(lambda: f.read(4096), b""): md5sum.update(c) rename(tgz_name, tgz_name_final) vals = [ ('filepath', tgz_name_final[len(working_dir):], r_client.set), ('md5sum', md5sum.hexdigest(), r_client.set), ('time', tnow.strftime('%m-%d-%y %H:%M:%S'), r_client.set)] for k, v, f in vals: redis_key = 'release-archive:%s' % k # important to "flush" variables to avoid errors r_client.delete(redis_key) f(redis_key, v)
bsd-3-clause
pratapvardhan/scikit-learn
examples/decomposition/plot_faces_decomposition.py
103
4394
""" ============================ Faces dataset decompositions ============================ This example applies to :ref:`olivetti_faces` different unsupervised matrix decomposition (dimension reduction) methods from the module :py:mod:`sklearn.decomposition` (see the documentation chapter :ref:`decompositions`) . """ print(__doc__) # Authors: Vlad Niculae, Alexandre Gramfort # License: BSD 3 clause import logging from time import time from numpy.random import RandomState import matplotlib.pyplot as plt from sklearn.datasets import fetch_olivetti_faces from sklearn.cluster import MiniBatchKMeans from sklearn import decomposition # Display progress logs on stdout logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s') n_row, n_col = 2, 3 n_components = n_row * n_col image_shape = (64, 64) rng = RandomState(0) ############################################################################### # Load faces data dataset = fetch_olivetti_faces(shuffle=True, random_state=rng) faces = dataset.data n_samples, n_features = faces.shape # global centering faces_centered = faces - faces.mean(axis=0) # local centering faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1) print("Dataset consists of %d faces" % n_samples) ############################################################################### def plot_gallery(title, images, n_col=n_col, n_row=n_row): plt.figure(figsize=(2. * n_col, 2.26 * n_row)) plt.suptitle(title, size=16) for i, comp in enumerate(images): plt.subplot(n_row, n_col, i + 1) vmax = max(comp.max(), -comp.min()) plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray, interpolation='nearest', vmin=-vmax, vmax=vmax) plt.xticks(()) plt.yticks(()) plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.) ############################################################################### # List of the different estimators, whether to center and transpose the # problem, and whether the transformer uses the clustering API. estimators = [ ('Eigenfaces - RandomizedPCA', decomposition.RandomizedPCA(n_components=n_components, whiten=True), True), ('Non-negative components - NMF', decomposition.NMF(n_components=n_components, init='nndsvda', tol=5e-3), False), ('Independent components - FastICA', decomposition.FastICA(n_components=n_components, whiten=True), True), ('Sparse comp. - MiniBatchSparsePCA', decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8, n_iter=100, batch_size=3, random_state=rng), True), ('MiniBatchDictionaryLearning', decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1, n_iter=50, batch_size=3, random_state=rng), True), ('Cluster centers - MiniBatchKMeans', MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20, max_iter=50, random_state=rng), True), ('Factor Analysis components - FA', decomposition.FactorAnalysis(n_components=n_components, max_iter=2), True), ] ############################################################################### # Plot a sample of the input data plot_gallery("First centered Olivetti faces", faces_centered[:n_components]) ############################################################################### # Do the estimation and plot it for name, estimator, center in estimators: print("Extracting the top %d %s..." 
% (n_components, name)) t0 = time() data = faces if center: data = faces_centered estimator.fit(data) train_time = (time() - t0) print("done in %0.3fs" % train_time) if hasattr(estimator, 'cluster_centers_'): components_ = estimator.cluster_centers_ else: components_ = estimator.components_ if hasattr(estimator, 'noise_variance_'): plot_gallery("Pixelwise variance", estimator.noise_variance_.reshape(1, -1), n_col=1, n_row=1) plot_gallery('%s - Train time %.1fs' % (name, train_time), components_[:n_components]) plt.show()
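
# --- Added note (not part of the original example) ----------------------------------
# RandomizedPCA was deprecated in scikit-learn 0.18 and removed in a later release; in
# recent versions the same estimator is expressed through PCA with the randomized SVD
# solver.  A drop-in sketch for the 'Eigenfaces' entry in the estimators list above:
#
# ('Eigenfaces - PCA using randomized SVD',
#  decomposition.PCA(n_components=n_components, svd_solver='randomized', whiten=True),
#  True),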
bsd-3-clause
NunoEdgarGub1/scikit-learn
examples/cross_decomposition/plot_compare_cross_decomposition.py
142
4761
""" =================================== Compare cross decomposition methods =================================== Simple usage of various cross decomposition algorithms: - PLSCanonical - PLSRegression, with multivariate response, a.k.a. PLS2 - PLSRegression, with univariate response, a.k.a. PLS1 - CCA Given 2 multivariate covarying two-dimensional datasets, X, and Y, PLS extracts the 'directions of covariance', i.e. the components of each datasets that explain the most shared variance between both datasets. This is apparent on the **scatterplot matrix** display: components 1 in dataset X and dataset Y are maximally correlated (points lie around the first diagonal). This is also true for components 2 in both dataset, however, the correlation across datasets for different components is weak: the point cloud is very spherical. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA ############################################################################### # Dataset based latent variables model n = 500 # 2 latents vars: l1 = np.random.normal(size=n) l2 = np.random.normal(size=n) latents = np.array([l1, l1, l2, l2]).T X = latents + np.random.normal(size=4 * n).reshape((n, 4)) Y = latents + np.random.normal(size=4 * n).reshape((n, 4)) X_train = X[:n / 2] Y_train = Y[:n / 2] X_test = X[n / 2:] Y_test = Y[n / 2:] print("Corr(X)") print(np.round(np.corrcoef(X.T), 2)) print("Corr(Y)") print(np.round(np.corrcoef(Y.T), 2)) ############################################################################### # Canonical (symmetric) PLS # Transform data # ~~~~~~~~~~~~~~ plsca = PLSCanonical(n_components=2) plsca.fit(X_train, Y_train) X_train_r, Y_train_r = plsca.transform(X_train, Y_train) X_test_r, Y_test_r = plsca.transform(X_test, Y_test) # Scatter plot of scores # ~~~~~~~~~~~~~~~~~~~~~~ # 1) On diagonal plot X vs Y scores on each components plt.figure(figsize=(12, 8)) plt.subplot(221) plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train") plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test") plt.xlabel("x scores") plt.ylabel("y scores") plt.title('Comp. 1: X vs Y (test corr = %.2f)' % np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1]) plt.xticks(()) plt.yticks(()) plt.legend(loc="best") plt.subplot(224) plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train") plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test") plt.xlabel("x scores") plt.ylabel("y scores") plt.title('Comp. 2: X vs Y (test corr = %.2f)' % np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1]) plt.xticks(()) plt.yticks(()) plt.legend(loc="best") # 2) Off diagonal plot components 1 vs 2 for X and Y plt.subplot(222) plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train") plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test") plt.xlabel("X comp. 1") plt.ylabel("X comp. 2") plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)' % np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1]) plt.legend(loc="best") plt.xticks(()) plt.yticks(()) plt.subplot(223) plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train") plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test") plt.xlabel("Y comp. 1") plt.ylabel("Y comp. 2") plt.title('Y comp. 1 vs Y comp. 
2 , (test corr = %.2f)' % np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1]) plt.legend(loc="best") plt.xticks(()) plt.yticks(()) plt.show() ############################################################################### # PLS regression, with multivariate response, a.k.a. PLS2 n = 1000 q = 3 p = 10 X = np.random.normal(size=n * p).reshape((n, p)) B = np.array([[1, 2] + [0] * (p - 2)] * q).T # each Yj = 1*X1 + 2*X2 + noize Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5 pls2 = PLSRegression(n_components=3) pls2.fit(X, Y) print("True B (such that: Y = XB + Err)") print(B) # compare pls2.coefs with B print("Estimated B") print(np.round(pls2.coefs, 1)) pls2.predict(X) ############################################################################### # PLS regression, with univariate response, a.k.a. PLS1 n = 1000 p = 10 X = np.random.normal(size=n * p).reshape((n, p)) y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5 pls1 = PLSRegression(n_components=3) pls1.fit(X, y) # note that the number of compements exceeds 1 (the dimension of y) print("Estimated betas") print(np.round(pls1.coefs, 1)) ############################################################################### # CCA (PLS mode B with symmetric deflation) cca = CCA(n_components=2) cca.fit(X_train, Y_train) X_train_r, Y_train_r = plsca.transform(X_train, Y_train) X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
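
# --- Added note (not part of the original example) ----------------------------------
# The train/test split near the top of this example indexes with `n / 2`, which is only
# valid on Python 2 (integer division); on Python 3 the slice index must be an integer:
#
# half = n // 2
# X_train, Y_train = X[:half], Y[:half]
# X_test, Y_test = X[half:], Y[half:]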
bsd-3-clause
numairmansur/RoBO
examples/example_bagged_nets.py
1
1284
import sys
import logging

import numpy as np
import matplotlib.pyplot as plt

import robo.models.neural_network as robo_net
import robo.models.bagged_networks as bn
from robo.initial_design.init_random_uniform import init_random_uniform

logging.basicConfig(stream=sys.stdout, level=logging.INFO)


def f(x):
    return np.sinc(x * 10 - 5).sum(axis=1)[:, None]


rng = np.random.RandomState(42)

X = init_random_uniform(np.zeros(1), np.ones(1), 20, rng).astype(np.float32)
Y = f(X)

x = np.linspace(0, 1, 512, dtype=np.float32)[:, None]
vals = f(x).astype(np.float32)

plt.grid()
plt.plot(x[:, 0], f(x)[:, 0], label="true", color="green")
plt.plot(X[:, 0], Y[:, 0], "ro")

model = bn.BaggedNets(robo_net.SGDNet, num_models=16, bootstrap_with_replacement=True,
                      n_epochs=16384, error_threshold=1e-3,
                      n_units=[32, 32, 32], dropout=0, batch_size=10,
                      learning_rate=1e-3, shuffle_batches=True)

m = model.train(X, Y)

mean_pred, var_pred = model.predict(x)
std_pred = np.sqrt(var_pred)

plt.plot(x[:, 0], mean_pred[:, 0], label="bagged nets", color="blue")
plt.fill_between(x[:, 0], mean_pred[:, 0] + std_pred[:, 0], mean_pred[:, 0] - std_pred[:, 0],
                 alpha=0.2, color="blue")

plt.legend()
plt.show()
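
# --- Added illustration (not part of the original example) --------------------------
# The shaded band above comes from the spread of the bootstrap ensemble: roughly, the
# predictive mean is the average of the member networks' outputs and the predictive
# variance reflects their disagreement (RoBO's exact combination may differ).  A
# self-contained numpy sketch of that basic bagging rule, where the toy `member_preds`
# array stands in for the real per-network predictions on the test grid `x`:
member_preds = np.stack([vals + 0.05 * np.random.randn(*vals.shape) for _ in range(16)])
mean_manual = member_preds.mean(axis=0)  # analogous to the mean returned by model.predict()
var_manual = member_preds.var(axis=0)    # analogous to the variance returned by model.predict()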
bsd-3-clause
apaloczy/ap_tools
utils.py
1
54151
# Description: General-purpose functions for personal use. # Author: André Palóczy # E-mail: paloczy@gmail.com __all__ = ['seasonal_avg', 'seasonal_std', 'deseason', 'blkavg', 'blkavgdir', 'blkavgt', 'blkapply', 'stripmsk', 'pydatetime2m_arr', 'm2pydatetime_arr', 'npdt2dt', 'dt2sfloat', 'doy2date', 'flowfun', 'cumsimp', 'rot_vec', 'avgdir', 'lon180to360', 'lon360to180', 'bbox2ij', 'xy2dist', 'get_xtrackline', 'get_arrdepth', 'fpointsbox', 'near', 'near2', 'mnear', 'refine', 'denan', 'standardize', 'linear_trend', 'thomas', 'point_in_poly', 'get_mask_from_poly', 'sphericalpolygon_area', 'greatCircleBearing', 'weim', 'smoo2', 'topo_slope', 'curvature_geometric', 'get_isobath', 'angle_isobath', 'isopyc_depth', 'whiten_zero', 'wind2stress', 'gen_dates', 'fmt_isobath', 'float2latex', 'mat2npz', 'bb_map', 'dots_dualcolor'] from os import system import numpy as np import matplotlib.pyplot as plt import matplotlib from matplotlib import path from mpl_toolkits.basemap import Basemap from datetime import datetime, timedelta from dateutil import rrule, parser from scipy.io import loadmat, savemat from scipy import signal from scipy.signal import savgol_filter from glob import glob from netCDF4 import Dataset, num2date, date2num # from pandas import rolling_window # FIXME, new pandas way of doing this is, e.g., arr = Series(...).rolling(...).mean() from pandas import Timestamp from gsw import distance from pygeodesy import Datums, VincentyError from pygeodesy.ellipsoidalVincenty import LatLon as LatLon from pygeodesy.sphericalNvector import LatLon as LatLon_sphere def seasonal_avg(t, F): """ USAGE ----- F_seasonal = seasonal_avg(t, F) Calculates the seasonal average of variable F(t). Assumes 't' is a 'datetime.datetime' object. """ tmo = np.array([ti.month for ti in t]) ftmo = [tmo==mo for mo in range(1, 13)] return np.array([F[ft].mean() for ft in ftmo]) def seasonal_std(t, F): """ USAGE ----- F_seasonal = seasonal_std(t, F) Calculates the seasonal standard deviation of variable F(t). Assumes 't' is a 'datetime.datetime' object. """ tmo = np.array([ti.month for ti in t]) ftmo = [tmo==mo for mo in range(1, 13)] return np.array([F[ft].std() for ft in ftmo]) def deseason(t, F): """ USAGE ----- F_nonssn = deseason(t, F) Removes the seasonal signal of variable F(t). Assumes 't' is a 'datetime.datetime' object. Also assumes that F is sampled monthly and only for complete years (i.e., t.size is a multiple of 12). """ Fssn = seasonal_avg(t, F) nyears = int(t.size/12) aux = np.array([]) for n in range(nyears): aux = np.concatenate((aux, Fssn)) return F - aux def blkavg(x, y, every=2): """ Block-averages a variable y(x). Returns its block average and standard deviation and new x axis. """ nx = x.size xblk, yblk, yblkstd = np.array([]), np.array([]), np.array([]) for i in range(every, nx+every, every): yi = y[i-every:i] xblk = np.append(xblk, np.nanmean(x[i-every:i])) yblk = np.append(yblk, np.nanmean(yi)) yblkstd = np.append(yblkstd, np.nanstd(yi)) return xblk, yblk, yblkstd def blkavgdir(x, ydir, every=2, degrees=False, axis=None): """ Block-averages a PERIODIC variable ydir(x). Returns its block average and new x axis. """ nx = x.size xblk, yblk, yblkstd = np.array([]), np.array([]), np.array([]) for i in range(every, nx+every, every): xblk = np.append(xblk, np.nanmean(x[i-every:i])) yblk = np.append(yblk, avgdir(ydir[i-every:i], degrees=degrees, axis=axis)) return xblk, yblk def blkavgt(t, x, every=2): """ Block-averages a variable x(t). Returns its block average and the new t axis. 
""" nt = t.size units = 'days since 01-01-01' calendar = 'proleptic_gregorian' t = date2num(t, units=units, calendar=calendar) tblk, xblk = np.array([]), np.array([]) for i in range(every, nt+every, every): xi = x[i-every:i] tblk = np.append(tblk, np.nanmean(t[i-every:i])) xblk = np.append(xblk, np.nanmean(xi)) tblk = num2date(tblk, units=units, calendar=calendar) return tblk, xblk def blkapply(x, f, nblks, overlap=0, demean=False, detrend=False, verbose=True): """ Divides array 'x' in 'nblks' blocks and applies function 'f' = f(x) on each block. """ x = np.array(x) assert callable(f), "f must be a function" nx = x.size ni = int(nx/nblks) # Number of data points in each chunk. y = np.zeros(ni) # Array that will receive each block. dn = int(round(ni - overlap*ni)) # How many indices to move forward with # each chunk (depends on the % overlap). # Demean/detrend the full record first (removes the lowest frequencies). # Then, also demean/detrend each block beffore applying f(). if demean: x = x - x.mean() if detrend: x = signal.detrend(x, type='linear') n=0 il, ir = 0, ni while ir<=nx: xn = x[il:ir] if demean: xn = xn - xn.mean() if detrend: xn = signal.detrend(xn, type='linear') y = y + f(xn) # Apply function and accumulate the current bock. il+=dn; ir+=dn n+=1 y /= n # Divide by number of blocks actually used. ncap = nx - il # Number of points left out at the end of array. if verbose: print("") print("Left last %d data points out (%.1f %% of all points)."%(ncap,100*ncap/nx)) if overlap>0: print("") print("Intended %d blocks, but could fit %d blocks, with"%(nblks,n)) print('overlap of %.1f %%, %d points per block.'%(100*overlap,dn)) print("") return y def stripmsk(arr, mask_invalid=False): if mask_invalid: arr = np.ma.masked_invalid(arr) if np.ma.isMA(arr): msk = arr.mask arr = arr.data arr[msk] = np.nan return arr def pydatetime2m_arr(pydt_arr): pydt_arr = np.array(pydt_arr) secperyr = 86400.0 timedt = timedelta(days=366) matdt = [] for pydt in pydt_arr.tolist(): m = pydt.toordinal() + timedt dfrac = pydt - datetime(pydt.year,pydt.month,pydt.day,0,0,0).seconds/secperyr matdt.append(m.toordinal() + dfrac) return np.array(matdt) def m2pydatetime_arr(mdatenum_arr): mdatenum_arr = np.array(mdatenum_arr) timedt = timedelta(days=366) pydt = [] for mdt in mdatenum_arr.tolist(): d = datetime.fromordinal(int(mdt)) dfrac = timedelta(days=mdt%1) - timedt pydt.append(d + dfrac) return np.array(pydt) def npdt2dt(tnp): """ USAGE ----- t_datetime = npdt2dt(t_numpydatetime64) Convert an array of numpy.datetime64 timestamps to datetime.datetime. """ return np.array([Timestamp(ti).to_pydatetime() for ti in tnp]) def dt2sfloat(t): """ USAGE ----- t_float = dt2sfloat(t_datetime) Convert an array of datetime.datetime timestamps to an array of floats representing elapsed seconds since the first timestamp. """ t = np.array(t) t0 = t[0] return np.array([(tn - t0).total_seconds() for tn in t]) def doy2date(doy, year=2017): """ USAGE ----- t = doy2date(doy, year=2017) Convert an array `doy` of decimal yeardays to an array of datetime.datetime timestamps. """ doy = np.array(doy)*86400 # [seconds/day]. 
tunit = 'seconds since %d-01-01 00:00:00'%year return np.array([num2date(dn, tunit) for dn in doy]) def flowfun(x, y, u, v, variable='psi', geographic=True): """ FLOWFUN Computes the potential PHI and the streamfunction PSI of a 2-dimensional flow defined by the matrices of velocity components U and V, so that d(PHI) d(PSI) d(PHI) d(PSI) u = ----- - ----- , v = ----- + ----- dx dy dx dy P = FLOWFUN(x,y,u,v) returns an array P of the same size as u and v, which can be the velocity potential (PHI) or the streamfunction (PSI) Because these scalar fields are defined up to the integration constant, their absolute values are such that PHI[0,0] = PSI[0,0] = 0. For a potential (irrotational) flow PSI = 0, and the Laplacian of PSI is equal to the divergence of the velocity field. A solenoidal (non-divergent) flow can be described by the streamfunction alone, and the Laplacian of the streamfunction is equal to the vorticity (curl) of the velocity field. The units of the grid coordinates are assumed to be consistent with the units of the velocity components, e.g., [m] and [m/s]. If variable=='psi', the streamfunction (PSI) is returned. If variable=='phi', the velocity potential (PHI) is returned. If geographic==True (default), (x,y) are assumed to be (longitude,latitude) and are converted to meters before computing (dx,dy). If geographic==False, (x,y) are assumed to be in meters. Uses function 'cumsimp()' (Simpson rule summation). Author: Kirill K. Pankratov, March 7, 1994. Source: http://www-pord.ucsd.edu/~matlab/stream.htm Translated to Python by André Palóczy, January 15, 2015. Modified by André Palóczy on January 15, 2015. """ x,y,u,v = map(np.asanyarray, (x,y,u,v)) if not x.shape==y.shape==u.shape==v.shape: print("Error: Arrays (x, y, u, v) must be of equal shape.") return ## Calculating grid spacings. if geographic: dlat, _ = np.gradient(y) _, dlon = np.gradient(x) deg2m = 111120.0 # [m/deg] dx = dlon*deg2m*np.cos(y*np.pi/180.) # [m] dy = dlat*deg2m # [m] else: dy, _ = np.gradient(y) _, dx = np.gradient(x) ly, lx = x.shape # Shape of the (x,y,u,v) arrays. ## Now the main computations. ## Integrate velocity fields to get potential and streamfunction. ## Use Simpson rule summation (function CUMSIMP). ## Compute velocity potential PHI (non-rotating part). if variable=='phi': cx = cumsimp(u[0,:]*dx[0,:]) # Compute x-integration constant cy = cumsimp(v[:,0]*dy[:,0]) # Compute y-integration constant cx = np.expand_dims(cx, 0) cy = np.expand_dims(cy, 1) phiy = cumsimp(v*dy) + np.tile(cx, (ly,1)) phix = cumsimp(u.T*dx.T).T + np.tile(cy, (1,lx)) phi = (phix + phiy)/2. return phi ## Compute streamfunction PSI (non-divergent part). if variable=='psi': cx = cumsimp(v[0,:]*dx[0,:]) # Compute x-integration constant cy = cumsimp(u[:,0]*dy[:,0]) # Compute y-integration constant cx = np.expand_dims(cx, 0) cy = np.expand_dims(cy, 1) psix = -cumsimp(u*dy) + np.tile(cx, (ly,1)) psiy = cumsimp(v.T*dx.T).T - np.tile(cy, (1,lx)) psi = (psix + psiy)/2. return psi def cumsimp(y): """ F = CUMSIMP(Y) Simpson-rule column-wise cumulative summation. Numerical approximation of a function F(x) such that Y(X) = dF/dX. Each column of the input matrix Y represents the value of the integrand Y(X) at equally spaced points X = 0,1,...size(Y,1). The output is a matrix F of the same size as Y. The first row of F is equal to zero and each following row is the approximation of the integral of each column of matrix Y up to the givem row. CUMSIMP assumes continuity of each column of the function Y(X) and uses Simpson rule summation. 
Similar to the command F = CUMSUM(Y), exept for zero first row and more accurate summation (under the assumption of continuous integrand Y(X)). Author: Kirill K. Pankratov, March 7, 1994. Source: http://www-pord.ucsd.edu/~matlab/stream.htm Translated to Python by André Palóczy, January 15, 2015. """ y = np.asanyarray(y) ## 3-point interpolation coefficients to midpoints. ## Second-order polynomial (parabolic) interpolation coefficients ## from Xbasis = [0 1 2] to Xint = [.5 1.5] c1 = 3/8. c2 = 6/8. c3 = -1/8. if y.ndim==1: y = np.expand_dims(y,1) f = np.zeros((y.size,1)) # Initialize summation array. squeeze_after = True elif y.ndim==2: f = np.zeros(y.shape) # Initialize summation array. squeeze_after = False else: print("Error: Input array has more than 2 dimensions.") return if y.size==2: # If only 2 elements in columns - simple average. f[1,:] = (y[0,:] + y[1,:])/2. return f else: # If more than two elements in columns - Simpson summation. ## Interpolate values of y to all midpoints. f[1:-1,:] = c1*y[:-2,:] + c2*y[1:-1,:] + c3*y[2:,:] f[2:,:] = f[2:,:] + c3*y[:-2,:] + c2*y[1:-1,:] + c1*y[2:,:] f[1,:] = f[1,:]*2 f[-1,:] = f[-1,:]*2 ## Simpson (1,4,1) rule. f[1:,:] = 2*f[1:,:] + y[:-1,:] + y[1:,:] f = np.cumsum(f, axis=0)/6. # Cumulative sum, 6 - denominator from the Simpson rule. if squeeze_after: f = f.squeeze() return f def rot_vec(u, v, angle=-45, degrees=True): """ USAGE ----- u_rot,v_rot = rot_vec(u,v,angle=-45.,degrees=True) Returns the rotated vector components (`u_rot`,`v_rot`) from the zonal-meridional input vector components (`u`,`v`). The rotation is done using the angle `angle` positive counterclockwise (trigonometric convention). If `degrees` is set to `True``(default), then `angle` is converted to radians. is Example ------- >>> from matplotlib.pyplot import quiver >>> from ap_tools.utils import rot_vec >>> u = -1. >>> v = -1. >>> u2,v2 = rot_vec(u,v, angle=-30.) """ u,v = map(np.asanyarray, (u,v)) if degrees: angle = angle*np.pi/180. # Degrees to radians. u_rot = +u*np.cos(angle) + v*np.sin(angle) # Usually the across-shore component. v_rot = -u*np.sin(angle) + v*np.cos(angle) # Usually the along-shore component. return u_rot,v_rot def avgdir(dirs, degrees=False, axis=None): """ USAGE ----- dirm = avgdir(dirs, degrees=False, axis=None) Calculate the mean direction of an array of directions 'dirs'. If 'degrees' is 'False' (default), the input directions must be in radians. If 'degrees' is 'True', the input directions must be in degrees. The direction angle is measured from the ZONAL axis, i.e., (0, 90, -90) deg are (Eastward, Northward, Southward). 180 and -180 deg are both Westward. If 'axis' is 'None' (default) the mean is calculated on the flattened array. Otherwise, 'axis' is the index of the axis to calculate the mean over. """ dirs = np.array(dirs) if degrees: dirs = dirs*np.pi/180 # Degrees to radians. uxs = np.cos(dirs) vys = np.sin(dirs) dirm = np.arctan2(vys.sum(axis=axis), uxs.sum(axis=axis)) if degrees: dirm = dirm*180/np.pi # From radians to degrees. return dirm def lon180to360(lon): """ Converts longitude values in the range [-180,+180] to longitude values in the range [0,360]. """ lon = np.asanyarray(lon) return (lon + 360.0) % 360.0 def lon360to180(lon): """ Converts longitude values in the range [0,360] to longitude values in the range [-180,+180]. """ lon = np.asanyarray(lon) return ((lon + 180.) % 360.) - 180. 
def bbox2ij(lon, lat, bbox=[-135., -85., -76., -64.], FIX_IDL=True): """ USAGE ----- ilon_start, ilon_end, jlat_start, jlat_end = bbox2ij(lon, lat, bbox=[-135., -85., -76., -64.], FIX_IDL=True) OR (ilon_start_left, ilon_end_left, jlat_start, jlat_end), (ilon_start_right, ilon_end_right, jlat_start, jlat_end) = ... ... bbox2ij(lon, lat, bbox=[-135., -85., -76., -64.], FIX_IDL=True) Return indices for i,j that will completely cover the specified bounding box. 'lon' and 'lat' are 2D coordinate arrays (generated by meshgrid), and 'bbox' is a list like [lon_start, lon_end, lat_start, lat_end] describing the desired longitude-latitude box. If the specified bbox is such that it crosses the edges of the longitude array, two tuples of indices are returned. The first (second) tuple traces out the left (right) part of the bbox. If FIX_IDL is set to 'True' (default), the indices returned correspond to the "short route" around the globe, which amounts to assuming that the specified bbox crosses the International Date. If FIX_IDL is set to 'False', the "long route" is used instead. Example ------- >>> import numpy as np >>> import matplotlib.pyplot as plt >>> lon = np.arange(-180., 180.25, 0.25) >>> lat = np.arange(-90., 90.25, 0.25) >>> lon, lat = np.meshgrid(lon, lat) >>> h = np.sin(lon) + np.cos(lat) >>> i0, i1, j0, j1 = bbox2ij(lon, lat, bbox=[-71, -63., 39., 46]) >>> h_subset = h[j0:j1,i0:i1] >>> lon_subset = lon[j0:j1,i0:i1] >>> lat_subset = lat[j0:j1,i0:i1] >>> fig, ax = plt.subplots() >>> ax.pcolor(lon_subset,lat_subset,h_subset) >>> plt.axis('tight') Original function downloaded from http://gis.stackexchange.com/questions/71630/subsetting-a-curvilinear-netcdf-file-roms-model-output-using-a-lon-lat-boundin Modified by André Palóczy on August 20, 2016 to handle bboxes that cross the International Date Line or the edges of the longitude array. """ lon, lat, bbox = map(np.asanyarray, (lon, lat, bbox)) # Test whether the wanted bbox crosses the International Date Line (brach cut of the longitude array). dlon = bbox[:2].ptp() IDL_BBOX=dlon>180. IDL_BBOX=np.logical_and(IDL_BBOX, FIX_IDL) mypath = np.array([bbox[[0,1,1,0]], bbox[[2,2,3,3]]]).T p = path.Path(mypath) points = np.vstack((lon.flatten(), lat.flatten())).T n, m = lon.shape inside = p.contains_points(points).reshape((n, m)) # Fix mask if bbox goes throught the International Date Line. if IDL_BBOX: fcol=np.all(~inside, axis=0) flin=np.any(inside, axis=1) fcol, flin = map(np.expand_dims, (fcol, flin), (0, 1)) fcol = np.tile(fcol, (n, 1)) flin = np.tile(flin, (1, m)) inside=np.logical_and(flin, fcol) print("Bbox crosses the International Date Line.") ii, jj = np.meshgrid(range(m), range(n)) iiin, jjin = ii[inside], jj[inside] i0, i1, j0, j1 = min(iiin), max(iiin), min(jjin), max(jjin) SPLIT_BBOX=(i1-i0)==(m-1) # Test whether the wanted bbox crosses edges of the longitude array. # If wanted bbox crosses edges of the longitude array, return indices for the two boxes separately. if SPLIT_BBOX: Iiin = np.unique(iiin) ib0 = np.diff(Iiin).argmax() # Find edge of the inner side of the left bbox. ib1 = ib0 + 1 # Find edge of the inner side of the right bbox. Il, Ir = Iiin[ib0], Iiin[ib1] # Indices of the columns that bound the inner side of the two bboxes. print("Bbox crosses edges of the longitude array. 
Returning two sets of indices.") return (i0, Il, j0, j1), (Ir, i1, j0, j1) else: return i0, i1, j0, j1 def xy2dist(x, y, cyclic=False, datum='WGS84'): """ USAGE ----- d = xy2dist(x, y, cyclic=False, datum='WGS84') Calculates a distance axis from a line defined by longitudes and latitudes 'x' and 'y', using either the Vicenty formulae on an ellipsoidal earth (ellipsoid defaults to WGS84) or on a sphere (if datum=='Sphere'). Example ------- >>> yi, yf = -23.550520, 32.71573800 >>> xi, xf = -46.633309, -117.161084 >>> x, y = np.linspace(xi, xf), np.linspace(yi, yf) >>> d_ellipse = xy2dist(x, y, datum='WGS84')[-1]*1e-3 # [km]. >>> d_sphere = xy2dist(x, y, datum='Sphere')[-1]*1e-3 # [km]. >>> dd = np.abs(d_ellipse - d_sphere) >>> dperc = 100*dd/d_ellipse >>> msg = 'Difference of %.1f km over a %.0f km-long line (%.3f %% difference)'%(dd, d_ellipse, dperc) >>> print(msg) """ if datum!="Sphere": xy = [LatLon(y0, x0, datum=Datums[datum]) for x0, y0 in zip(x, y)] else: xy = [LatLon_sphere(y0, x0) for x0, y0 in zip(x, y)] d = np.array([xy[n].distanceTo(xy[n+1]) for n in range(len(xy)-1)]) return np.append(0, np.cumsum(d)) def get_xtrackline(lon1, lon2, lat1, lat2, L=200, dL=10): """ USAGE ----- lonp, latp = get_xtrackline(lon1, lon2, lat1, lat2, L=200, dL=13) Generates a great-circle line with length 2L (with L in km) that is perpendicular to the great-circle line defined by the input points (lon1, lat1) and (lon2, lat2). The spacing between the points along the output line is dL km. Assumes a spherical Earth. """ km2m = 1e3 L, dL = L*km2m, dL*km2m nh = int(L/dL) p1, p2 = LatLon_sphere(lat1, lon1), LatLon_sphere(lat2, lon2) angperp = p1.initialBearingTo(p2) + 90 angperpb = angperp + 180 pm = p1.midpointTo(p2) # Create perpendicular line starting from the midpoint. N = range(1, nh + 1) pperp = [] _ = [pperp.append(pm.destination(dL*n, angperpb)) for n in N] pperp.reverse() pperp.append(pm) _ = [pperp.append(pm.destination(dL*n, angperp)) for n in N] lonperp = np.array([p.lon for p in pperp]) latperp = np.array([p.lat for p in pperp]) return lonperp, latperp def get_arrdepth(arr): """ USAGE ----- arr_depths = get_arrdepth(arr) Determine number of nested levels in each element of an array of arrays of arrays... (or other array-like objects). """ arr = np.array(arr) # Make sure first level is an array. all_nlevs = [] for i in range(arr.size): nlev=0 wrk_arr = arr[i] while np.size(wrk_arr)>0: try: wrk_arr = np.array(wrk_arr[i]) except Exception: all_nlevs.append(nlev) nlev=0 break nlev+=1 return np.array(all_nlevs) def fpointsbox(x, y, fig, ax, nboxes=1, plot=True, pause_secs=5, return_index=True): """ USAGE ----- fpts = fpointsbox(x, y, fig, ax, nboxes=1, plot=True, pause_secs=5, return_index=True) Find points in a rectangle made with 2 ginput points. """ fpts = np.array([]) for n in range(nboxes): box = np.array(fig.ginput(n=2, timeout=0)) try: xb, yb = box[:,0], box[:,1] except IndexError: print("No points selected. 
Skipping box \# %d."%(n+1)) continue xl, xr, yd, yu = xb.min(), xb.max(), yb.min(), yb.max() xbox = np.array([xl, xr, xr, xl, xl]) ybox = np.array([yd, yd, yu, yu, yd]) fxbox, fybox = np.logical_and(x>xl, x<xr), np.logical_and(y>yd, y<yu) fptsi = np.logical_and(fxbox, fybox) if return_index: fptsi = np.where(fptsi)[0] fpts = np.append(fpts, fptsi) if plot: ax.plot(xbox, ybox, 'r', linestyle='solid', marker='o', ms=4) ax.plot(x[fptsi], y[fptsi], 'r', linestyle='none', marker='+', ms=5) plt.draw() fig.show() else: fig.close() if plot: plt.draw() fig.show() system("sleep %d"%pause_secs) return fpts def near(x, x0, npts=1, return_index=False): """ USAGE ----- xnear = near(x, x0, npts=1, return_index=False) Finds 'npts' points (defaults to 1) in array 'x' that are closest to a specified 'x0' point. If 'return_index' is True (defauts to False), then the indices of the closest points are returned. The indices are ordered in order of closeness. """ x = list(x) xnear = [] xidxs = [] for n in range(npts): idx = np.nanargmin(np.abs(np.array(x)-x0)) xnear.append(x.pop(idx)) if return_index: xidxs.append(idx) if return_index: # Sort indices according to the proximity of wanted points. xidxs = [xidxs[i] for i in np.argsort(xnear).tolist()] xnear.sort() if npts==1: xnear = xnear[0] if return_index: xidxs = xidxs[0] else: xnear = np.array(xnear) if return_index: return xidxs else: return xnear def near2(x, y, x0, y0, npts=1, return_index=False): """ USAGE ----- xnear, ynear = near2(x, y, x0, y0, npts=1, return_index=False) Finds 'npts' points (defaults to 1) in arrays 'x' and 'y' that are closest to a specified '(x0, y0)' point. If 'return_index' is True (defauts to False), then the indices of the closest point(s) are returned. Example ------- >>> x = np.arange(0., 100., 0.25) >>> y = np.arange(0., 100., 0.25) >>> x, y = np.meshgrid(x, y) >>> x0, y0 = 44.1, 30.9 >>> xn, yn = near2(x, y, x0, y0, npts=1) >>> print("(x0, y0) = (%f, %f)"%(x0, y0)) >>> print("(xn, yn) = (%f, %f)"%(xn, yn)) """ x, y = map(np.array, (x, y)) shp = x.shape xynear = [] xyidxs = [] dx = x - x0 dy = y - y0 dr = dx**2 + dy**2 for n in range(npts): xyidx = np.unravel_index(np.nanargmin(dr), dims=shp) if return_index: xyidxs.append(xyidx) xyn = (x[xyidx], y[xyidx]) xynear.append(xyn) dr[xyidx] = np.nan if npts==1: xynear = xynear[0] if return_index: xyidxs = xyidxs[0] if return_index: return xyidxs else: return xynear def mnear(x, y, x0, y0): """ USAGE ----- xmin,ymin = mnear(x, y, x0, y0) Finds the the point in a (lons,lats) line that is closest to a specified (lon0,lat0) point. """ x,y,x0,y0 = map(np.asanyarray, (x,y,x0,y0)) point = (x0,y0) d = np.array([]) for n in range(x.size): xn,yn = x[n],y[n] dn = distance((xn,x0),(yn,y0)) # Calculate distance point-wise. d = np.append(d,dn) idx = d.argmin() return x[idx],y[idx] def refine(line, nref=100, close=True): """ USAGE ----- ref_line = refine(line, nref=100, close=True) Given a 1-D sequence of points 'line', returns a new sequence 'ref_line', which is built by linearly interpolating 'nref' points between each pair of subsequent points in the original line. If 'close' is True (default), the first value of the original line is repeated at the end of the refined line, as in a closed polygon. 
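Example
-------
A minimal illustration with arbitrary values:

>>> coarse = np.array([0., 1., 2.])
>>> fine = refine(coarse, nref=10, close=False)  # Two segments, 10 interpolated points each.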
""" line = np.squeeze(np.asanyarray(line)) if close: line = np.append(line,line[0]) ref_line = np.array([]) for n in range(line.shape[0]-1): xi, xf = line[n], line[n+1] xref = np.linspace(xi,xf,nref) ref_line = np.append(ref_line, xref) return ref_line def point_in_poly(x,y,poly): """ USAGE ----- isinside = point_in_poly(x,y,poly) Determine if a point is inside a given polygon or not Polygon is a list of (x,y) pairs. This fuction returns True or False. The algorithm is called 'Ray Casting Method'. Source: http://pseentertainmentcorp.com/smf/index.php?topic=545.0 """ n = len(poly) inside = False p1x,p1y = poly[0] for i in range(n+1): p2x,p2y = poly[i % n] if y > min(p1y,p2y): if y <= max(p1y,p2y): if x <= max(p1x,p2x): if p1y != p2y: xinters = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x if p1x == p2x or x <= xinters: inside = not inside p1x,p1y = p2x,p2y return inside def get_mask_from_poly(xp, yp, poly, verbose=False): """ USAGE ----- mask = get_mask_from_poly(xp, yp, poly, verbose=False) Given two arrays 'xp' and 'yp' of (x,y) coordinates (generated by meshgrid) and a polygon defined by an array of (x,y) coordinates 'poly', with shape = (n,2), return a boolean array 'mask', where points that lie inside 'poly' are set to 'True'. """ print('Building the polygon mask...') jmax, imax = xp.shape mask = np.zeros((jmax,imax)) for j in range(jmax): if verbose: print("Row %s of %s"%(j+1,jmax)) for i in range(imax): px, py = xp[j,i], yp[j,i] # Test if this point is within the polygon. mask[j,i] = point_in_poly(px, py, poly) return mask def sphericalpolygon_area(lons, lats, R=6371000.): """ USAGE ----- area = sphericalpolygon_area(lons, lats, R=6371000.) Calculates the area of a polygon on the surface of a sphere of radius R using Girard's Theorem, which states that the area of a polygon of great circles is R**2 times the sum of the angles between the polygons minus (N-2)*pi, where N is number of corners. R = 6371000 m (6371 km, default) is a typical value for the mean radius of the Earth. Source: http://stackoverflow.com/questions/4681737/how-to-calculate-the-area-of-a-polygon-on-the-earths-surface-using-python """ lons, lats = map(np.asanyarray, (lons, lats)) N = lons.size angles = np.empty(N) for i in range(N): phiB1, phiA, phiB2 = np.roll(lats, i)[:3] LB1, LA, LB2 = np.roll(lons, i)[:3] # calculate angle with north (eastward) beta1 = greatCircleBearing(LA, phiA, LB1, phiB1) beta2 = greatCircleBearing(LA, phiA, LB2, phiB2) # calculate angle between the polygons and add to angle array angles[i] = np.arccos(np.cos(-beta1)*np.cos(-beta2) + np.sin(-beta1)*np.sin(-beta2)) return (np.sum(angles) - (N-2)*np.pi)*R**2 def greatCircleBearing(lon1, lat1, lon2, lat2): """ USAGE ----- angle = greatCircleBearing(lon1, lat1, lon2, lat2) Calculates the angle (positive eastward) a great circle passing through points (lon1,lat1) and (lon2,lat2) makes with true nirth. Source: http://stackoverflow.com/questions/4681737/how-to-calculate-the-area-of-a-polygon-on-the-earths-surface-using-python """ lon1, lat1, lon2, lat2 = map(np.asanyarray, (lon1, lat1, lon2, lat2)) dLong = lon1 - lon2 d2r = np.pi/180. s = np.cos(d2r*lat2)*np.sin(d2r*dLong) c = np.cos(d2r*lat1)*np.sin(d2r*lat2) - np.sin(lat1*d2r)*np.cos(d2r*lat2)*np.cos(d2r*dLong) return np.arctan2(s, c) def weim(x, N, kind='hann', badflag=-9999, beta=14): """ Usage ----- xs = weim(x, N, kind='hann', badflag=-9999, beta=14) Description ----------- Calculates the smoothed array 'xs' from the original array 'x' using the specified window of type 'kind' and size 'N'. 
'N' must be an odd number. Parameters ---------- x : 1D array Array to be smoothed. N : integer Window size. Must be odd. kind : string, optional One of the window types available in the numpy module: hann (default) : Gaussian-like. The weight decreases toward the ends. Its end-points are zeroed. hamming : Similar to the hann window. Its end-points are not zeroed, therefore it is discontinuous at the edges, and may produce undesired artifacts. blackman : Similar to the hann and hamming windows, with sharper ends. bartlett : Triangular-like. Its end-points are zeroed. kaiser : Flexible shape. Takes the optional parameter "beta" as a shape parameter. For beta=0, the window is rectangular. As beta increases, the window gets narrower. Refer to the numpy functions for details about each window type. badflag : float, optional The bad data flag. Elements of the input array 'A' holding this value are ignored. beta : float, optional Shape parameter for the kaiser window. For windows other than the kaiser window, this parameter does nothing. Returns ------- xs : 1D array The smoothed array. --------------------------------------- André Palóczy Filho (paloczy@gmail.com) June 2012 ============================================================================================================== """ ########################################### ### Checking window type and dimensions ### ########################################### kinds = ['hann', 'hamming', 'blackman', 'bartlett', 'kaiser'] if ( kind not in kinds ): raise ValueError('Invalid window type requested: %s'%kind) if np.mod(N,2) == 0: raise ValueError('Window size must be odd') ########################### ### Creating the window ### ########################### if ( kind == 'kaiser' ): # If the window kind is kaiser (beta is required). wstr = 'np.kaiser(N, beta)' else: # If the window kind is hann, hamming, blackman or bartlett (beta is not required). if kind == 'hann': kind = 'hanning' wstr = 'np.' + kind + '(N)' w = eval(wstr) x = np.asarray(x).flatten() Fnan = np.isnan(x).flatten() ln = (N-1)/2 lx = x.size lf = lx - ln xs = np.nan*np.ones(lx) # Eliminating bad data from mean computation. fbad=x==badflag x[fbad] = np.nan for i in range(lx): if i <= ln: xx = x[:ln+i+1] ww = w[ln-i:] elif i >= lf: xx = x[i-ln:] ww = w[:lf-i-1] else: xx = x[i-ln:i+ln+1] ww = w.copy() f = ~np.isnan(xx) # Counting only NON-NaNs, both in the input array and in the window points. xx = xx[f] ww = ww[f] if f.sum() == 0: # Thou shalt not divide by zero. xs[i] = x[i] else: xs[i] = np.sum(xx*ww)/np.sum(ww) xs[Fnan] = np.nan # Assigning NaN to the positions holding NaNs in the input array. return xs def smoo2(A, hei, wid, kind='hann', badflag=-9999, beta=14): """ Usage ----- As = smoo2(A, hei, wid, kind='hann', badflag=-9999, beta=14) Description ----------- Calculates the smoothed array 'As' from the original array 'A' using the specified window of type 'kind' and shape ('hei','wid'). Parameters ---------- A : 2D array Array to be smoothed. hei : integer Window height. Must be odd and greater than or equal to 3. wid : integer Window width. Must be odd and greater than or equal to 3. kind : string, optional One of the window types available in the numpy module: hann (default) : Gaussian-like. The weight decreases toward the ends. Its end-points are zeroed. hamming : Similar to the hann window. Its end-points are not zeroed, therefore it is discontinuous at the edges, and may produce undesired artifacts. blackman : Similar to the hann and hamming windows, with sharper ends. 
bartlett : Triangular-like. Its end-points are zeroed. kaiser : Flexible shape. Takes the optional parameter "beta" as a shape parameter. For beta=0, the window is rectangular. As beta increases, the window gets narrower. Refer to the numpy functions for details about each window type. badflag : float, optional The bad data flag. Elements of the input array 'A' holding this value are ignored. beta : float, optional Shape parameter for the kaiser window. For windows other than the kaiser window, this parameter does nothing. Returns ------- As : 2D array The smoothed array. --------------------------------------- André Palóczy Filho (paloczy@gmail.com) April 2012 ============================================================================================================== """ ########################################### ### Checking window type and dimensions ### ########################################### kinds = ['hann', 'hamming', 'blackman', 'bartlett', 'kaiser'] if ( kind not in kinds ): raise ValueError('Invalid window type requested: %s'%kind) if ( np.mod(hei,2) == 0 ) or ( np.mod(wid,2) == 0 ): raise ValueError('Window dimensions must be odd') if (hei <= 1) or (wid <= 1): raise ValueError('Window shape must be (3,3) or greater') ############################## ### Creating the 2D window ### ############################## if ( kind == 'kaiser' ): # If the window kind is kaiser (beta is required). wstr = 'np.outer(np.kaiser(hei, beta), np.kaiser(wid, beta))' else: # If the window kind is hann, hamming, blackman or bartlett (beta is not required). if kind == 'hann': kind = 'hanning' # computing outer product to make a 2D window out of the original 1d windows. wstr = 'np.outer(np.' + kind + '(hei), np.' + kind + '(wid))' wdw = eval(wstr) A = np.asanyarray(A) Fnan = np.isnan(A) imax, jmax = A.shape As = np.nan*np.ones( (imax, jmax) ) for i in range(imax): for j in range(jmax): ### Default window parameters. wupp = 0 wlow = hei wlef = 0 wrig = wid lh = np.floor(hei/2) lw = np.floor(wid/2) ### Default array ranges (functions of the i,j indices). upp = i-lh low = i+lh+1 lef = j-lw rig = j+lw+1 ################################################## ### Tiling window and input array at the edges ### ################################################## # Upper edge. if upp < 0: wupp = wupp-upp upp = 0 # Left edge. if lef < 0: wlef = wlef-lef lef = 0 # Bottom edge. if low > imax: ex = low-imax wlow = wlow-ex low = imax # Right edge. if rig > jmax: ex = rig-jmax wrig = wrig-ex rig = jmax ############################################### ### Computing smoothed value at point (i,j) ### ############################################### Ac = A[upp:low, lef:rig] wdwc = wdw[wupp:wlow, wlef:wrig] fnan = np.isnan(Ac) Ac[fnan] = 0; wdwc[fnan] = 0 # Eliminating NaNs from mean computation. fbad = Ac==badflag wdwc[fbad] = 0 # Eliminating bad data from mean computation. a = Ac * wdwc As[i,j] = a.sum() / wdwc.sum() As[Fnan] = np.nan # Assigning NaN to the positions holding NaNs in the input array. return As def denan(arr): """ USAGE ----- denaned_arr = denan(arr) Remove the NaNs from an array. """ f = np.isnan(arr) return arr[~f] def standardize(series): """ USAGE ----- series2 = standardize(series) Standardizes a series by subtracting its mean value and dividing by its standard deviation. The result is a dimensionless series. Inputs can be of type "np.array", or "Pandas.Series"/"Pandas.TimeSeries". 
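Example
-------
A minimal illustration with synthetic data:

>>> x = 3. + 5.*np.random.randn(1000)
>>> xstd = standardize(x)  # 'xstd' has approximately zero mean and unit standard deviation.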
""" Mean, Std = series.mean(), series.std() return (series - Mean)/Std def linear_trend(series, return_line=True): """ USAGE ----- line = linear_trend(series, return_line=True) OR b, a, x = linear_trend(series, return_line=False) Returns the linear fit (line = b*x + a) associated with the 'series' array. Adapted from pylab.detrend_linear. """ series = np.asanyarray(series) x = np.arange(series.size, dtype=np.float_) C = np.cov(x, series, bias=1) # Covariance matrix. b = C[0, 1]/C[0, 0] # Angular coefficient. a = series.mean() - b*x.mean() # Linear coefficient. line = b*x + a if return_line: return line else: return b, a, x def thomas(A, b): """ USAGE ----- x = thomas(A,b) Solve Ax = b (where A is a tridiagonal matrix) using the Thomas Algorithm. References ---------- For a step-by-step derivation of the algorithm, see e.g., http://www3.ul.ie/wlee/ms6021_thomas.pdf """ # Step 1: Sweep rows from top to bottom, # calculating gammas and rhos along the way. N = b.size gam = [float(A[0,1]/A[0,0])] rho = [float(b[0]/A[0,0])] for i in range(0, N): rho.append(float((b[i] - A[i,i-1]*rho[-1])/(A[i,i] - A[i,i-1]*gam[-1]))) if i<N-1: # No gamma in the last row. gam.append(float(A[i,i+1]/(A[i,i] - A[i,i-1]*gam[-1]))) # Step 2: Substitute solutions for unknowns # starting from the bottom row all the way up. x = [] # Vector of unknowns. x.append(rho.pop()) # Last row is already solved. for i in range(N-2, -1, -1): x.append(float(rho.pop() - gam.pop()*x[-1])) x.reverse() return np.array(x) def topo_slope(lon, lat, h): """ USAGE ----- lons, lats, slope = topo_slope(lon, lat, h) Calculates bottom slope for a topography fields 'h' at coordinates ('lon', 'lat') using first-order finite differences. The output arrays have shape (M-1,L-1), where M,L = h.shape(). """ lon,lat,h = map(np.asanyarray, (lon,lat,h)) deg2m = 1852.*60. # m/deg. deg2rad = np.pi/180. # rad/deg. x = lon*deg2m*np.cos(lat*deg2rad) y = lat*deg2m # First-order differences, accurate to O(dx) and O(dy), # respectively. sx = (h[:,1:] - h[:,:-1]) / (x[:,1:] - x[:,:-1]) sy = (h[1:,:] - h[:-1,:]) / (y[1:,:] - y[:-1,:]) # Finding the values of the derivatives sx and sy # at the same location in physical space. sx = 0.5*(sx[1:,:]+sx[:-1,:]) sy = 0.5*(sy[:,1:]+sy[:,:-1]) # Calculating the bottom slope. slope = np.sqrt(sx**2 + sy**2) # Finding the lon,lat coordinates of the # values of the derivatives sx and sy. lons = 0.5*(lon[1:,:]+lon[:-1,:]) lats = 0.5*(lat[1:,:]+lat[:-1,:]) lons = 0.5*(lons[:,1:]+lons[:,:-1]) lats = 0.5*(lats[:,1:]+lats[:,:-1]) return lons, lats, slope def curvature_geometric(x, y): """ USAGE ----- k = curvature_geometric(x, y) Estimates the curvature k of a 2D curve (x,y) using a geometric method. If your curve is given by two arrays, x and y, you can approximate its curvature at each point by the reciprocal of the radius of a circumscribing triangle with that point, the preceding point, and the succeeding point as vertices. The radius of such a triangle is one fourth the product of the three sides divided by its area. The curvature will be positive for curvature to the left and negative for curvature to the right as you advance along the curve. Note that if your data are too closely spaced together or subject to substantial noise errors, this formula will not be very accurate. Author: Roger Stafford Source: http://www.mathworks.com/matlabcentral/newsreader/view_thread/125637 Translated to Python by André Palóczy, January 19, 2015. 
""" x,y = map(np.asanyarray, (x,y)) x1 = x[:-2]; x2 = x[1:-1]; x3 = x[2:] y1 = y[:-2]; y2 = y[1:-1]; y3 = y[2:] ## a, b, and c are the three sides of the triangle. a = np.sqrt((x3-x2)**2 + (y3-y2)**2) b = np.sqrt((x1-x3)**2 + (y1-y3)**2) c = np.sqrt((x2-x1)**2 + (y2-y1)**2) ## A is the area of the triangle. A = 0.5*(x1*y2 + x2*y3 + x3*y1 - x1*y3 - x2*y1 - x3*y2) ## The reciprocal of the circumscribed radius, i.e., the curvature. k = 4.0*A/(a*b*c) return np.squeeze(k) def get_isobath(lon, lat, topo, iso, cyclic=False, smooth_isobath=False, window_length=21, win_type='barthann', **kw): """ USAGE ----- lon_isob, lat_isob = get_isobath(lon, lat, topo, iso, cyclic=False, smooth_isobath=False, window_length=21, win_type='barthann', **kw) Retrieves the 'lon_isob','lat_isob' coordinates of a wanted 'iso' isobath from a topography array 'topo', with 'lon_topo','lat_topo' coordinates. """ lon, lat, topo = map(np.array, (lon, lat, topo)) fig, ax = plt.subplots() cs = ax.contour(lon, lat, topo, [iso]) coll = cs.collections[0] ## Test all lines to find thel ongest one. ## This is assumed to be the wanted isobath. ncoll = len(coll.get_paths()) siz = np.array([]) for n in range(ncoll): path = coll.get_paths()[n] siz = np.append(siz, path.vertices.shape[0]) f = siz.argmax() xiso = coll.get_paths()[f].vertices[:, 0] yiso = coll.get_paths()[f].vertices[:, 1] plt.close() # Smooth the isobath with a moving window. # Periodize according to window length to avoid losing edges. if smooth_isobath: fleft = window_length//2 fright = -window_length//2 + 1 if cyclic: xl = xiso[:fleft] + 360 xr = xiso[fright:] - 360 yl = yiso[:fleft] yr = yiso[fright:] xiso = np.concatenate((xr, xiso, xl)) yiso = np.concatenate((yr, yiso, yl)) # xiso = rolling_window(xiso, window=window_length, win_type=win_type, center=True, **kw)[fleft:fright] # FIXME # yiso = rolling_window(yiso, window=window_length, win_type=win_type, center=True, **kw)[fleft:fright] # FIXME # else: # xiso = rolling_window(xiso, window=window_length, win_type=win_type, center=True, **kw) # FIXME # yiso = rolling_window(yiso, window=window_length, win_type=win_type, center=True, **kw) # FIXME return xiso, yiso def angle_isobath(lon, lat, h, isobath=100, cyclic=False, smooth_isobath=True, window_length=21, win_type='barthann', plot_map=False, **kw): """ USAGE ----- lon_isob, lat_isob, angle = angle_isobath(lon, lat, h, isobath=100, cyclic=False, smooth_isobath=True, window_length=21, win_type='barthann', plot_map=False, **kw) Returns the coordinates ('lon_isob', 'lat_isob') and the angle an isobath makes with the zonal direction for a topography array 'h' at coordinates ('lon', 'lat'). Defaults to the 100 m isobath. If 'smooth_isobath'==True, smooths the isobath with a rolling window of type 'win_type' and 'window_length' points wide. All keyword arguments are passed to 'pandas.rolling_window()'. If 'plot_map'==True, plots a map showing the isobath (and its soothed version if smooth_isobath==True). """ lon, lat, h = map(np.array, (lon, lat, h)) R = 6371000.0 # Mean radius of the earth in meters (6371 km), from gsw.constants.earth_radius. deg2rad = np.pi/180. # [rad/deg] # Extract isobath coordinates xiso, yiso = get_isobath(lon, lat, h, isobath) if cyclic: # Add cyclic point. xiso = np.append(xiso, xiso[0]) yiso = np.append(yiso, yiso[0]) # Smooth the isobath with a moving window. 
if smooth_isobath: xiso = rolling_window(xiso, window=window_length, win_type=win_type, **kw) yiso = rolling_window(yiso, window=window_length, win_type=win_type, **kw) # From the coordinates of the isobath, find the angle it forms with the # zonal axis, using points k+1 and k. shth = yiso.size-1 theta = np.zeros(shth) for k in range(shth): dyk = R*(yiso[k+1]-yiso[k]) dxk = R*(xiso[k+1]-xiso[k])*np.cos(yiso[k]*deg2rad) theta[k] = np.arctan2(dyk,dxk) xisom = 0.5*(xiso[1:] + xiso[:-1]) yisom = 0.5*(yiso[1:] + yiso[:-1]) # Plots map showing the extracted isobath. if plot_map: fig, ax = plt.subplots() m = bb_map([lon.min(), lon.max()], [lat.min(), lat.max()], projection='cyl', resolution='h', ax=ax) m.plot(xisom, yisom, color='b', linestyle='-', zorder=3, latlon=True) input("Press any key to continue.") plt.close() return xisom, yisom, theta def isopyc_depth(z, dens0, isopyc=1027.75, dzref=1.): """ USAGE ----- hisopyc = isopyc_depth(z, dens0, isopyc=1027.75) Calculates the spatial distribution of the depth of a specified isopycnal 'isopyc' (defaults to 1027.75 kg/m3) from a 3D density array rho0 (in kg/m3) with shape (nz,ny,nx) and a 1D depth array 'z' (in m) with shape (nz). 'dzref' is the desired resolution for the refined depth array (defaults to 1 m) which is generated for calculating the depth of the isopycnal. The smaller 'dzref', the smoother the resolution of the returned isopycnal depth array 'hisopyc'. """ z, dens0 = map(np.asanyarray, (z, dens0)) ny, nx = dens0.shape[1:] zref = np.arange(z.min(), z.max(), dzref) if np.ma.isMaskedArray(dens0): dens0 = np.ma.filled(dens0, np.nan) hisopyc = np.nan*np.ones((ny,nx)) for j in range(ny): for i in range(nx): dens0ij = dens0[:,j,i] if np.logical_or(np.logical_or(isopyc<np.nanmin(dens0ij), np.nanmax(dens0ij)<isopyc), np.isnan(dens0ij).all()): continue else: dens0ref = np.interp(zref, z, dens0ij) # Refined density profile. dens0refn = near(dens0ref, isopyc) fz=dens0ref==dens0refn try: hisopyc[j,i] = zref[fz] except ValueError: print("Warning: More than 1 (%d) nearest depths found. Using the median of the depths for point (j=%d,i=%d)."%(fz.sum(), j, i)) hisopyc[j,i] = np.nanmedian(zref[fz]) return hisopyc def whiten_zero(x, y, z, ax, cs, n=1, cmap=plt.cm.RdBu_r, zorder=9): """ USAGE ----- whiten_zero(x, y, z, ax, cs, n=1, cmap=plt.cm.RdBu_r, zorder=9) Changes to white the color of the 'n' (defaults to 1) neighboring patches about the zero contour created by a command like 'cs = ax.contourf(x, y, z)'. """ x, y, z = map(np.asanyarray, (x,y,z)) white = (1.,1.,1.) cslevs = cs.levels assert 0. in cslevs f0=np.where(cslevs==0.)[0][0] f0m, f0p = f0-n, f0+n c0m, c0p = cslevs[f0m], cslevs[f0p] ax.contourf(x, y, z, levels=[c0m, c0p], linestyles='none', colors=[white, white], cmap=None, zorder=zorder) def wind2stress(u, v, formula='large_pond1981-modified'): """ USAGE ----- taux,tauy = wind2stress(u, v, formula='mellor2004') Converts u,v wind vector components to taux,tauy wind stress vector components. """ rho_air = 1.226 # kg/m3 mag = np.sqrt(u**2+v**2) # m/s Cd = np.zeros( mag.shape ) # Drag coefficient. if formula=='large_pond1981-modified': # Large and Pond (1981) formula # modified for light winds, as # in Trenberth et al. (1990). f=mag<=1. Cd[f] = 2.18e-3 f=np.logical_and(mag>1.,mag<3.) Cd[f] = (0.62+1.56/mag[f])*1e-3 f=np.logical_and(mag>=3.,mag<10.) Cd[f] = 1.14e-3 f=mag>=10. 
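        # Large and Pond (1981) drag coefficient for winds of 10 m/s and above.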
Cd[f] = (0.49 + 0.065*mag[f])*1e-3 elif formula=='mellor2004': Cd = 7.5e-4 + 6.7e-5*mag else: np.disp('Unknown formula for Cd.') pass # Computing wind stress [N/m2] taux = rho_air*Cd*mag*u tauy = rho_air*Cd*mag*v return taux,tauy def gen_dates(start, end, dt='day', input_datetime=False): """ Returns a list of datetimes within the date range from `start` to `end`, at a `dt` time interval. `dt` can be 'second', 'minute', 'hour', 'day', 'week', 'month' or 'year'. If `input_datetime` is False (default), `start` and `end` must be a date in string form. If `input_datetime` is True, `start` and `end` must be datetime objects. Note ---- Modified from original function by Filipe Fernandes (ocefpaf@gmail.com). Example ------- >>> from ap_tools.utils import gen_dates >>> from datetime import datetime >>> start = '1989-08-19' >>> end = datetime.utcnow().strftime("%Y-%m-%d") >>> gen_dates(start, end, dt='day') """ DT = dict(second=rrule.SECONDLY, minute=rrule.MINUTELY, hour=rrule.HOURLY, day=rrule.DAILY, week=rrule.WEEKLY, month=rrule.MONTHLY, year=rrule.YEARLY) dt = DT[dt] if input_datetime: # Input are datetime objects. No parsing needed. dates = rrule.rrule(dt, dtstart=start, until=end) else: # Input in string form, parse into datetime objects. dates = rrule.rrule(dt, dtstart=parser.parse(start), until=parser.parse(end)) return list(dates) def fmt_isobath(cs, fontsize=8, fmt='%g', inline=True, inline_spacing=7, manual=True, **kw): """ Formats the labels of isobath contours. `manual` is set to `True` by default, but can be `False`, or a tuple/list of tuples with the coordinates of the labels. All options are passed to plt.clabel(). """ isobstrH = plt.clabel(cs, fontsize=fontsize, fmt=fmt, inline=inline, \ inline_spacing=inline_spacing, manual=manual, **kw) for ih in range(0, len(isobstrH)): # Appends 'm' for meters at the end of the label. isobstrh = isobstrH[ih] isobstr = isobstrh.get_text() isobstr = isobstr.replace('-','') + ' m' isobstrh.set_text(isobstr) def float2latex(f, ndigits=1): """ USAGE ----- texstr = float2latex(f, ndigits=1) Converts a float input into a latex-formatted string with 'ndigits' (defaults to 1). Adapted from: http://stackoverflow.com/questions/13490292/format-number-using-latex-notation-in-python """ float_str = "{0:.%se}"%ndigits float_str = float_str.format(f) base, exponent = float_str.split("e") return "${0} \times 10^{{{1}}}$".format(base, int(exponent)) def mat2npz(matname): """ USAGE ----- mat2npz(matname) Extract variables stored in a .mat file, and saves them in a .npz file. """ d = loadmat(matname) _ = d.pop('__header__') _ = d.pop('__globals__') _ = d.pop('__version__') npzname = matname[:-4] + '.npz' np.savez(npzname,**d) return None def bb_map(lons, lats, ax, projection='merc', resolution='i', drawparallels=True, drawmeridians=True): """ USAGE ----- m = bb_map(lons, lats, **kwargs) Returns a Basemap instance with lon,lat bounding limits inferred from the input arrays `lons`,`lats`. Coastlines, countries, states, parallels and meridians are drawn, and continents are filled. """ lons,lats = map(np.asanyarray, (lons,lats)) lonmin,lonmax = lons.min(),lons.max() latmin,latmax = lats.min(),lats.max() m = Basemap(llcrnrlon=lonmin, urcrnrlon=lonmax, llcrnrlat=latmin, urcrnrlat=latmax, projection=projection, resolution=resolution, ax=ax) plt.ioff() # Avoid showing the figure. 
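    # Fill the continents and draw coastlines, state and country borders on top of the map background.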
m.fillcontinents(color='0.9', zorder=9) m.drawcoastlines(zorder=10) m.drawstates(zorder=10) m.drawcountries(linewidth=2.0, zorder=10) m.drawmapboundary(zorder=9999) if drawmeridians: m.drawmeridians(np.arange(np.floor(lonmin), np.ceil(lonmax), 1), linewidth=0.15, labels=[1, 0, 1, 0], zorder=12) if drawparallels: m.drawparallels(np.arange(np.floor(latmin), np.ceil(latmax), 1), linewidth=0.15, labels=[1, 0, 0, 0], zorder=12) plt.ion() return m def dots_dualcolor(x, y, z, thresh=20., color_low='b', color_high='r', marker='o', markersize=5): """ USAGE ----- dots_dualcolor(x, y, z, thresh=20., color_low='b', color_high='r') Plots dots colored with a dual-color criterion, separated by a threshold value. """ ax = plt.gca() # Below-threshold dots. f=z<=thresh ax.plot(x[f], y[f], lw=0, marker=marker, ms=markersize, mfc=color_low, mec=color_low) # Above-threshold dots. f=z>thresh ax.plot(x[f], y[f], lw=0, marker=marker, ms=markersize, mfc=color_high, mec=color_high) if __name__=='__main__': import doctest doctest.testmod()
mit
MichaelAquilina/numpy
numpy/lib/npyio.py
42
71218
from __future__ import division, absolute_import, print_function import sys import os import re import itertools import warnings import weakref from operator import itemgetter import numpy as np from . import format from ._datasource import DataSource from numpy.core.multiarray import packbits, unpackbits from ._iotools import ( LineSplitter, NameValidator, StringConverter, ConverterError, ConverterLockError, ConversionWarning, _is_string_like, has_nested_fields, flatten_dtype, easy_dtype, _bytes_to_name ) from numpy.compat import ( asbytes, asstr, asbytes_nested, bytes, basestring, unicode ) if sys.version_info[0] >= 3: import pickle else: import cPickle as pickle from future_builtins import map loads = pickle.loads __all__ = [ 'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt', 'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez', 'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource' ] class BagObj(object): """ BagObj(obj) Convert attribute look-ups to getitems on the object passed in. Parameters ---------- obj : class instance Object on which attribute look-up is performed. Examples -------- >>> from numpy.lib.npyio import BagObj as BO >>> class BagDemo(object): ... def __getitem__(self, key): # An instance of BagObj(BagDemo) ... # will call this method when any ... # attribute look-up is required ... result = "Doesn't matter what you want, " ... return result + "you're gonna get this" ... >>> demo_obj = BagDemo() >>> bagobj = BO(demo_obj) >>> bagobj.hello_there "Doesn't matter what you want, you're gonna get this" >>> bagobj.I_can_be_anything "Doesn't matter what you want, you're gonna get this" """ def __init__(self, obj): # Use weakref to make NpzFile objects collectable by refcount self._obj = weakref.proxy(obj) def __getattribute__(self, key): try: return object.__getattribute__(self, '_obj')[key] except KeyError: raise AttributeError(key) def __dir__(self): """ Enables dir(bagobj) to list the files in an NpzFile. This also enables tab-completion in an interpreter or IPython. """ return object.__getattribute__(self, '_obj').keys() def zipfile_factory(*args, **kwargs): import zipfile kwargs['allowZip64'] = True return zipfile.ZipFile(*args, **kwargs) class NpzFile(object): """ NpzFile(fid) A dictionary-like object with lazy-loading of files in the zipped archive provided on construction. `NpzFile` is used to load files in the NumPy ``.npz`` data archive format. It assumes that files in the archive have a ``.npy`` extension, other files are ignored. The arrays and file strings are lazily loaded on either getitem access using ``obj['key']`` or attribute lookup using ``obj.f.key``. A list of all files (without ``.npy`` extensions) can be obtained with ``obj.files`` and the ZipFile object itself using ``obj.zip``. Attributes ---------- files : list of str List of all files in the archive with a ``.npy`` extension. zip : ZipFile instance The ZipFile object initialized with the zipped archive. f : BagObj instance An object on which attribute can be performed as an alternative to getitem access on the `NpzFile` instance itself. allow_pickle : bool, optional Allow loading pickled data. Default: True pickle_kwargs : dict, optional Additional keyword arguments to pass on to pickle.load. These are only useful when loading object arrays saved on Python 2 when using Python 3. Parameters ---------- fid : file or str The zipped archive to open. This is either a file-like object or a string containing the path to the archive. 
own_fid : bool, optional Whether NpzFile should close the file handle. Requires that `fid` is a file-like object. Examples -------- >>> from tempfile import TemporaryFile >>> outfile = TemporaryFile() >>> x = np.arange(10) >>> y = np.sin(x) >>> np.savez(outfile, x=x, y=y) >>> outfile.seek(0) >>> npz = np.load(outfile) >>> isinstance(npz, np.lib.io.NpzFile) True >>> npz.files ['y', 'x'] >>> npz['x'] # getitem access array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) >>> npz.f.x # attribute lookup array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) """ def __init__(self, fid, own_fid=False, allow_pickle=True, pickle_kwargs=None): # Import is postponed to here since zipfile depends on gzip, an # optional component of the so-called standard library. _zip = zipfile_factory(fid) self._files = _zip.namelist() self.files = [] self.allow_pickle = allow_pickle self.pickle_kwargs = pickle_kwargs for x in self._files: if x.endswith('.npy'): self.files.append(x[:-4]) else: self.files.append(x) self.zip = _zip self.f = BagObj(self) if own_fid: self.fid = fid else: self.fid = None def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() def close(self): """ Close the file. """ if self.zip is not None: self.zip.close() self.zip = None if self.fid is not None: self.fid.close() self.fid = None self.f = None # break reference cycle def __del__(self): self.close() def __getitem__(self, key): # FIXME: This seems like it will copy strings around # more than is strictly necessary. The zipfile # will read the string and then # the format.read_array will copy the string # to another place in memory. # It would be better if the zipfile could read # (or at least uncompress) the data # directly into the array memory. member = 0 if key in self._files: member = 1 elif key in self.files: member = 1 key += '.npy' if member: bytes = self.zip.open(key) magic = bytes.read(len(format.MAGIC_PREFIX)) bytes.close() if magic == format.MAGIC_PREFIX: bytes = self.zip.open(key) return format.read_array(bytes, allow_pickle=self.allow_pickle, pickle_kwargs=self.pickle_kwargs) else: return self.zip.read(key) else: raise KeyError("%s is not a file in the archive" % key) def __iter__(self): return iter(self.files) def items(self): """ Return a list of tuples, with each tuple (filename, array in file). """ return [(f, self[f]) for f in self.files] def iteritems(self): """Generator that returns tuples (filename, array in file).""" for f in self.files: yield (f, self[f]) def keys(self): """Return files in the archive with a ``.npy`` extension.""" return self.files def iterkeys(self): """Return an iterator over the files in the archive.""" return self.__iter__() def __contains__(self, key): return self.files.__contains__(key) def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True, encoding='ASCII'): """ Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files. Parameters ---------- file : file-like object or string The file to read. File-like objects must support the ``seek()`` and ``read()`` methods. Pickled files require that the file-like object support the ``readline()`` method as well. mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional If not None, then memory-map the file, using the given mode (see `numpy.memmap` for a detailed description of the modes). A memory-mapped array is kept on disk. However, it can be accessed and sliced like any ndarray. Memory mapping is especially useful for accessing small fragments of large files without reading the entire file into memory. 
allow_pickle : bool, optional Allow loading pickled object arrays stored in npy files. Reasons for disallowing pickles include security, as loading pickled data can execute arbitrary code. If pickles are disallowed, loading object arrays will fail. Default: True fix_imports : bool, optional Only useful when loading Python 2 generated pickled files on Python 3, which includes npy/npz files containing object arrays. If `fix_imports` is True, pickle will try to map the old Python 2 names to the new names used in Python 3. encoding : str, optional What encoding to use when reading Python 2 strings. Only useful when loading Python 2 generated pickled files on Python 3, which includes npy/npz files containing object arrays. Values other than 'latin1', 'ASCII', and 'bytes' are not allowed, as they can corrupt numerical data. Default: 'ASCII' Returns ------- result : array, tuple, dict, etc. Data stored in the file. For ``.npz`` files, the returned instance of NpzFile class must be closed to avoid leaking file descriptors. Raises ------ IOError If the input file does not exist or cannot be read. ValueError The file contains an object array, but allow_pickle=False given. See Also -------- save, savez, savez_compressed, loadtxt memmap : Create a memory-map to an array stored in a file on disk. Notes ----- - If the file contains pickle data, then whatever object is stored in the pickle is returned. - If the file is a ``.npy`` file, then a single array is returned. - If the file is a ``.npz`` file, then a dictionary-like object is returned, containing ``{filename: array}`` key-value pairs, one for each file in the archive. - If the file is a ``.npz`` file, the returned value supports the context manager protocol in a similar fashion to the open function:: with load('foo.npz') as data: a = data['a'] The underlying file descriptor is closed when exiting the 'with' block. Examples -------- Store data to disk, and load it again: >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]])) >>> np.load('/tmp/123.npy') array([[1, 2, 3], [4, 5, 6]]) Store compressed data to disk, and load it again: >>> a=np.array([[1, 2, 3], [4, 5, 6]]) >>> b=np.array([1, 2]) >>> np.savez('/tmp/123.npz', a=a, b=b) >>> data = np.load('/tmp/123.npz') >>> data['a'] array([[1, 2, 3], [4, 5, 6]]) >>> data['b'] array([1, 2]) >>> data.close() Mem-map the stored array, and then access the second row directly from disk: >>> X = np.load('/tmp/123.npy', mmap_mode='r') >>> X[1, :] memmap([4, 5, 6]) """ import gzip own_fid = False if isinstance(file, basestring): fid = open(file, "rb") own_fid = True else: fid = file if encoding not in ('ASCII', 'latin1', 'bytes'): # The 'encoding' value for pickle also affects what encoding # the serialized binary data of Numpy arrays is loaded # in. Pickle does not pass on the encoding information to # Numpy. The unpickling code in numpy.core.multiarray is # written to assume that unicode data appearing where binary # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'. # # Other encoding values can corrupt binary data, and we # purposefully disallow them. For the same reason, the errors= # argument is not exposed, as values other than 'strict' # result can similarly silently corrupt numerical data. raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'") if sys.version_info[0] >= 3: pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports) else: # Nothing to do on Python 2 pickle_kwargs = {} try: # Code to distinguish from NumPy binary files and pickles. 
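        # Zip archives (and therefore .npz files) start with the local file header signature 'PK\x03\x04'.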
_ZIP_PREFIX = asbytes('PK\x03\x04') N = len(format.MAGIC_PREFIX) magic = fid.read(N) fid.seek(-N, 1) # back-up if magic.startswith(_ZIP_PREFIX): # zip-file (assume .npz) # Transfer file ownership to NpzFile tmp = own_fid own_fid = False return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle, pickle_kwargs=pickle_kwargs) elif magic == format.MAGIC_PREFIX: # .npy file if mmap_mode: return format.open_memmap(file, mode=mmap_mode) else: return format.read_array(fid, allow_pickle=allow_pickle, pickle_kwargs=pickle_kwargs) else: # Try a pickle if not allow_pickle: raise ValueError("allow_pickle=False, but file does not contain " "non-pickled data") try: return pickle.load(fid, **pickle_kwargs) except: raise IOError( "Failed to interpret file %s as a pickle" % repr(file)) finally: if own_fid: fid.close() def save(file, arr, allow_pickle=True, fix_imports=True): """ Save an array to a binary file in NumPy ``.npy`` format. Parameters ---------- file : file or str File or filename to which the data is saved. If file is a file-object, then the filename is unchanged. If file is a string, a ``.npy`` extension will be appended to the file name if it does not already have one. allow_pickle : bool, optional Allow saving object arrays using Python pickles. Reasons for disallowing pickles include security (loading pickled data can execute arbitrary code) and portability (pickled objects may not be loadable on different Python installations, for example if the stored objects require libraries that are not available, and not all pickled data is compatible between Python 2 and Python 3). Default: True fix_imports : bool, optional Only useful in forcing objects in object arrays on Python 3 to be pickled in a Python 2 compatible way. If `fix_imports` is True, pickle will try to map the new Python 3 names to the old module names used in Python 2, so that the pickle data stream is readable with Python 2. arr : array_like Array data to be saved. See Also -------- savez : Save several arrays into a ``.npz`` archive savetxt, load Notes ----- For a description of the ``.npy`` format, see the module docstring of `numpy.lib.format` or the Numpy Enhancement Proposal http://docs.scipy.org/doc/numpy/neps/npy-format.html Examples -------- >>> from tempfile import TemporaryFile >>> outfile = TemporaryFile() >>> x = np.arange(10) >>> np.save(outfile, x) >>> outfile.seek(0) # Only needed here to simulate closing & reopening file >>> np.load(outfile) array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) """ own_fid = False if isinstance(file, basestring): if not file.endswith('.npy'): file = file + '.npy' fid = open(file, "wb") own_fid = True else: fid = file if sys.version_info[0] >= 3: pickle_kwargs = dict(fix_imports=fix_imports) else: # Nothing to do on Python 2 pickle_kwargs = None try: arr = np.asanyarray(arr) format.write_array(fid, arr, allow_pickle=allow_pickle, pickle_kwargs=pickle_kwargs) finally: if own_fid: fid.close() def savez(file, *args, **kwds): """ Save several arrays into a single file in uncompressed ``.npz`` format. If arguments are passed in with no keywords, the corresponding variable names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword arguments are given, the corresponding variable names, in the ``.npz`` file will match the keyword names. Parameters ---------- file : str or file Either the file name (string) or an open file (file-like object) where the data will be saved. If file is a string, the ``.npz`` extension will be appended to the file name if it is not already there. 
args : Arguments, optional Arrays to save to the file. Since it is not possible for Python to know the names of the arrays outside `savez`, the arrays will be saved with names "arr_0", "arr_1", and so on. These arguments can be any expression. kwds : Keyword arguments, optional Arrays to save to the file. Arrays will be saved in the file with the keyword names. Returns ------- None See Also -------- save : Save a single array to a binary file in NumPy format. savetxt : Save an array to a file as plain text. savez_compressed : Save several arrays into a compressed ``.npz`` archive Notes ----- The ``.npz`` file format is a zipped archive of files named after the variables they contain. The archive is not compressed and each file in the archive contains one variable in ``.npy`` format. For a description of the ``.npy`` format, see `numpy.lib.format` or the Numpy Enhancement Proposal http://docs.scipy.org/doc/numpy/neps/npy-format.html When opening the saved ``.npz`` file with `load` a `NpzFile` object is returned. This is a dictionary-like object which can be queried for its list of arrays (with the ``.files`` attribute), and for the arrays themselves. Examples -------- >>> from tempfile import TemporaryFile >>> outfile = TemporaryFile() >>> x = np.arange(10) >>> y = np.sin(x) Using `savez` with \\*args, the arrays are saved with default names. >>> np.savez(outfile, x, y) >>> outfile.seek(0) # Only needed here to simulate closing & reopening file >>> npzfile = np.load(outfile) >>> npzfile.files ['arr_1', 'arr_0'] >>> npzfile['arr_0'] array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) Using `savez` with \\**kwds, the arrays are saved with the keyword names. >>> outfile = TemporaryFile() >>> np.savez(outfile, x=x, y=y) >>> outfile.seek(0) >>> npzfile = np.load(outfile) >>> npzfile.files ['y', 'x'] >>> npzfile['x'] array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) """ _savez(file, args, kwds, False) def savez_compressed(file, *args, **kwds): """ Save several arrays into a single file in compressed ``.npz`` format. If keyword arguments are given, then filenames are taken from the keywords. If arguments are passed in with no keywords, then stored file names are arr_0, arr_1, etc. Parameters ---------- file : str File name of ``.npz`` file. args : Arguments Function arguments. kwds : Keyword arguments Keywords. See Also -------- numpy.savez : Save several arrays into an uncompressed ``.npz`` file format numpy.load : Load the files created by savez_compressed. """ _savez(file, args, kwds, True) def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None): # Import is postponed to here since zipfile depends on gzip, an optional # component of the so-called standard library. import zipfile # Import deferred for startup time improvement import tempfile if isinstance(file, basestring): if not file.endswith('.npz'): file = file + '.npz' namedict = kwds for i, val in enumerate(args): key = 'arr_%d' % i if key in namedict.keys(): raise ValueError( "Cannot use un-named variables and keyword %s" % key) namedict[key] = val if compress: compression = zipfile.ZIP_DEFLATED else: compression = zipfile.ZIP_STORED zipf = zipfile_factory(file, mode="w", compression=compression) # Stage arrays in a temporary file on disk, before writing to zip. 
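    # The same temporary file is reused for every array: each array is written to it, copied into
    # the archive as '<key>.npy', and the temporary file is removed once all arrays have been added.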
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy') os.close(fd) try: for key, val in namedict.items(): fname = key + '.npy' fid = open(tmpfile, 'wb') try: format.write_array(fid, np.asanyarray(val), allow_pickle=allow_pickle, pickle_kwargs=pickle_kwargs) fid.close() fid = None zipf.write(tmpfile, arcname=fname) finally: if fid: fid.close() finally: os.remove(tmpfile) zipf.close() def _getconv(dtype): """ Find the correct dtype converter. Adapted from matplotlib """ def floatconv(x): x.lower() if b'0x' in x: return float.fromhex(asstr(x)) return float(x) typ = dtype.type if issubclass(typ, np.bool_): return lambda x: bool(int(x)) if issubclass(typ, np.uint64): return np.uint64 if issubclass(typ, np.int64): return np.int64 if issubclass(typ, np.integer): return lambda x: int(float(x)) elif issubclass(typ, np.floating): return floatconv elif issubclass(typ, np.complex): return lambda x: complex(asstr(x)) elif issubclass(typ, np.bytes_): return bytes else: return str def loadtxt(fname, dtype=float, comments='#', delimiter=None, converters=None, skiprows=0, usecols=None, unpack=False, ndmin=0): """ Load data from a text file. Each row in the text file must have the same number of values. Parameters ---------- fname : file or str File, filename, or generator to read. If the filename extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note that generators should return byte strings for Python 3k. dtype : data-type, optional Data-type of the resulting array; default: float. If this is a structured data-type, the resulting array will be 1-dimensional, and each row will be interpreted as an element of the array. In this case, the number of columns used must match the number of fields in the data-type. comments : str or sequence, optional The characters or list of characters used to indicate the start of a comment; default: '#'. delimiter : str, optional The string used to separate values. By default, this is any whitespace. converters : dict, optional A dictionary mapping column number to a function that will convert that column to a float. E.g., if column 0 is a date string: ``converters = {0: datestr2num}``. Converters can also be used to provide a default value for missing data (but see also `genfromtxt`): ``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None. skiprows : int, optional Skip the first `skiprows` lines; default: 0. usecols : sequence, optional Which columns to read, with 0 being the first. For example, ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. The default, None, results in all columns being read. unpack : bool, optional If True, the returned array is transposed, so that arguments may be unpacked using ``x, y, z = loadtxt(...)``. When used with a structured data-type, arrays are returned for each field. Default is False. ndmin : int, optional The returned array will have at least `ndmin` dimensions. Otherwise mono-dimensional axes will be squeezed. Legal values: 0 (default), 1 or 2. .. versionadded:: 1.6.0 Returns ------- out : ndarray Data read from the text file. See Also -------- load, fromstring, fromregex genfromtxt : Load data with missing values handled as specified. scipy.io.loadmat : reads MATLAB data files Notes ----- This function aims to be a fast reader for simply formatted files. The `genfromtxt` function provides more sophisticated handling of, e.g., lines with missing values. .. versionadded:: 1.10.0 The strings produced by the Python float.hex method can be used as input for floats. 
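    For example, with a float dtype the field ``'0x1.8p+1'`` is read as ``3.0``.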
Examples -------- >>> from io import StringIO # StringIO behaves like a file object >>> c = StringIO("0 1\\n2 3") >>> np.loadtxt(c) array([[ 0., 1.], [ 2., 3.]]) >>> d = StringIO("M 21 72\\nF 35 58") >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'), ... 'formats': ('S1', 'i4', 'f4')}) array([('M', 21, 72.0), ('F', 35, 58.0)], dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')]) >>> c = StringIO("1,0,2\\n3,0,4") >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True) >>> x array([ 1., 3.]) >>> y array([ 2., 4.]) """ # Type conversions for Py3 convenience if comments is not None: if isinstance(comments, (basestring, bytes)): comments = [asbytes(comments)] else: comments = [asbytes(comment) for comment in comments] # Compile regex for comments beforehand comments = (re.escape(comment) for comment in comments) regex_comments = re.compile(asbytes('|').join(comments)) user_converters = converters if delimiter is not None: delimiter = asbytes(delimiter) if usecols is not None: usecols = list(usecols) fown = False try: if _is_string_like(fname): fown = True if fname.endswith('.gz'): import gzip fh = iter(gzip.GzipFile(fname)) elif fname.endswith('.bz2'): import bz2 fh = iter(bz2.BZ2File(fname)) elif sys.version_info[0] == 2: fh = iter(open(fname, 'U')) else: fh = iter(open(fname)) else: fh = iter(fname) except TypeError: raise ValueError('fname must be a string, file handle, or generator') X = [] def flatten_dtype(dt): """Unpack a structured data-type, and produce re-packing info.""" if dt.names is None: # If the dtype is flattened, return. # If the dtype has a shape, the dtype occurs # in the list more than once. shape = dt.shape if len(shape) == 0: return ([dt.base], None) else: packing = [(shape[-1], list)] if len(shape) > 1: for dim in dt.shape[-2::-1]: packing = [(dim*packing[0][0], packing*dim)] return ([dt.base] * int(np.prod(dt.shape)), packing) else: types = [] packing = [] for field in dt.names: tp, bytes = dt.fields[field] flat_dt, flat_packing = flatten_dtype(tp) types.extend(flat_dt) # Avoid extra nesting for subarrays if len(tp.shape) > 0: packing.extend(flat_packing) else: packing.append((len(flat_dt), flat_packing)) return (types, packing) def pack_items(items, packing): """Pack items into nested lists based on re-packing info.""" if packing is None: return items[0] elif packing is tuple: return tuple(items) elif packing is list: return list(items) else: start = 0 ret = [] for length, subpacking in packing: ret.append(pack_items(items[start:start+length], subpacking)) start += length return tuple(ret) def split_line(line): """Chop off comments, strip, and split at delimiter. Note that although the file is opened as text, this function returns bytes. """ line = asbytes(line) if comments is not None: line = regex_comments.split(asbytes(line), maxsplit=1)[0] line = line.strip(asbytes('\r\n')) if line: return line.split(delimiter) else: return [] try: # Make sure we're dealing with a proper dtype dtype = np.dtype(dtype) defconv = _getconv(dtype) # Skip the first `skiprows` lines for i in range(skiprows): next(fh) # Read until we find a line with some values, and use # it to estimate the number of columns, N. 
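        # Lines that are empty after comment stripping are skipped here; if the file contains no
        # data at all, only a warning is issued and an empty array is returned.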
first_vals = None try: while not first_vals: first_line = next(fh) first_vals = split_line(first_line) except StopIteration: # End of lines reached first_line = '' first_vals = [] warnings.warn('loadtxt: Empty input file: "%s"' % fname) N = len(usecols or first_vals) dtype_types, packing = flatten_dtype(dtype) if len(dtype_types) > 1: # We're dealing with a structured array, each field of # the dtype matches a column converters = [_getconv(dt) for dt in dtype_types] else: # All fields have the same dtype converters = [defconv for i in range(N)] if N > 1: packing = [(N, tuple)] # By preference, use the converters specified by the user for i, conv in (user_converters or {}).items(): if usecols: try: i = usecols.index(i) except ValueError: # Unused converter specified continue converters[i] = conv # Parse each line, including the first for i, line in enumerate(itertools.chain([first_line], fh)): vals = split_line(line) if len(vals) == 0: continue if usecols: vals = [vals[i] for i in usecols] if len(vals) != N: line_num = i + skiprows + 1 raise ValueError("Wrong number of columns at line %d" % line_num) # Convert each value according to its column and store items = [conv(val) for (conv, val) in zip(converters, vals)] # Then pack it according to the dtype's nesting items = pack_items(items, packing) X.append(items) finally: if fown: fh.close() X = np.array(X, dtype) # Multicolumn data are returned with shape (1, N, M), i.e. # (1, 1, M) for a single row - remove the singleton dimension there if X.ndim == 3 and X.shape[:2] == (1, 1): X.shape = (1, -1) # Verify that the array has at least dimensions `ndmin`. # Check correctness of the values of `ndmin` if ndmin not in [0, 1, 2]: raise ValueError('Illegal value of ndmin keyword: %s' % ndmin) # Tweak the size and shape of the arrays - remove extraneous dimensions if X.ndim > ndmin: X = np.squeeze(X) # and ensure we have the minimum number of dimensions asked for # - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0 if X.ndim < ndmin: if ndmin == 1: X = np.atleast_1d(X) elif ndmin == 2: X = np.atleast_2d(X).T if unpack: if len(dtype_types) > 1: # For structured arrays, return an array for each field. return [X[field] for field in dtype.names] else: return X.T else: return X def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='', footer='', comments='# '): """ Save an array to a text file. Parameters ---------- fname : filename or file handle If the filename ends in ``.gz``, the file is automatically saved in compressed gzip format. `loadtxt` understands gzipped files transparently. X : array_like Data to be saved to a text file. fmt : str or sequence of strs, optional A single format (%10.5f), a sequence of formats, or a multi-format string, e.g. 'Iteration %d -- %10.5f', in which case `delimiter` is ignored. For complex `X`, the legal options for `fmt` are: a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted like `' (%s+%sj)' % (fmt, fmt)` b) a full string specifying every real and imaginary part, e.g. `' %.4e %+.4j %.4e %+.4j %.4e %+.4j'` for 3 columns c) a list of specifiers, one per column - in this case, the real and imaginary part must have separate specifiers, e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns delimiter : str, optional String or character separating columns. newline : str, optional String or character separating lines. .. versionadded:: 1.5.0 header : str, optional String that will be written at the beginning of the file. .. 
versionadded:: 1.7.0 footer : str, optional String that will be written at the end of the file. .. versionadded:: 1.7.0 comments : str, optional String that will be prepended to the ``header`` and ``footer`` strings, to mark them as comments. Default: '# ', as expected by e.g. ``numpy.loadtxt``. .. versionadded:: 1.7.0 See Also -------- save : Save an array to a binary file in NumPy ``.npy`` format savez : Save several arrays into an uncompressed ``.npz`` archive savez_compressed : Save several arrays into a compressed ``.npz`` archive Notes ----- Further explanation of the `fmt` parameter (``%[flag]width[.precision]specifier``): flags: ``-`` : left justify ``+`` : Forces to precede result with + or -. ``0`` : Left pad the number with zeros instead of space (see width). width: Minimum number of characters to be printed. The value is not truncated if it has more characters. precision: - For integer specifiers (eg. ``d,i,o,x``), the minimum number of digits. - For ``e, E`` and ``f`` specifiers, the number of digits to print after the decimal point. - For ``g`` and ``G``, the maximum number of significant digits. - For ``s``, the maximum number of characters. specifiers: ``c`` : character ``d`` or ``i`` : signed decimal integer ``e`` or ``E`` : scientific notation with ``e`` or ``E``. ``f`` : decimal floating point ``g,G`` : use the shorter of ``e,E`` or ``f`` ``o`` : signed octal ``s`` : string of characters ``u`` : unsigned decimal integer ``x,X`` : unsigned hexadecimal integer This explanation of ``fmt`` is not complete, for an exhaustive specification see [1]_. References ---------- .. [1] `Format Specification Mini-Language <http://docs.python.org/library/string.html# format-specification-mini-language>`_, Python Documentation. Examples -------- >>> x = y = z = np.arange(0.0,5.0,1.0) >>> np.savetxt('test.out', x, delimiter=',') # X is an array >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays >>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation """ # Py3 conversions first if isinstance(fmt, bytes): fmt = asstr(fmt) delimiter = asstr(delimiter) own_fh = False if _is_string_like(fname): own_fh = True if fname.endswith('.gz'): import gzip fh = gzip.open(fname, 'wb') else: if sys.version_info[0] >= 3: fh = open(fname, 'wb') else: fh = open(fname, 'w') elif hasattr(fname, 'write'): fh = fname else: raise ValueError('fname must be a string or file handle') try: X = np.asarray(X) # Handle 1-dimensional arrays if X.ndim == 1: # Common case -- 1d array of numbers if X.dtype.names is None: X = np.atleast_2d(X).T ncol = 1 # Complex dtype -- each field indicates a separate column else: ncol = len(X.dtype.descr) else: ncol = X.shape[1] iscomplex_X = np.iscomplexobj(X) # `fmt` can be a string with multiple insertion points or a # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '$10d') if type(fmt) in (list, tuple): if len(fmt) != ncol: raise AttributeError('fmt has wrong shape. 
%s' % str(fmt)) format = asstr(delimiter).join(map(asstr, fmt)) elif isinstance(fmt, str): n_fmt_chars = fmt.count('%') error = ValueError('fmt has wrong number of %% formats: %s' % fmt) if n_fmt_chars == 1: if iscomplex_X: fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol else: fmt = [fmt, ] * ncol format = delimiter.join(fmt) elif iscomplex_X and n_fmt_chars != (2 * ncol): raise error elif ((not iscomplex_X) and n_fmt_chars != ncol): raise error else: format = fmt else: raise ValueError('invalid fmt: %r' % (fmt,)) if len(header) > 0: header = header.replace('\n', '\n' + comments) fh.write(asbytes(comments + header + newline)) if iscomplex_X: for row in X: row2 = [] for number in row: row2.append(number.real) row2.append(number.imag) fh.write(asbytes(format % tuple(row2) + newline)) else: for row in X: try: fh.write(asbytes(format % tuple(row) + newline)) except TypeError: raise TypeError("Mismatch between array dtype ('%s') and " "format specifier ('%s')" % (str(X.dtype), format)) if len(footer) > 0: footer = footer.replace('\n', '\n' + comments) fh.write(asbytes(comments + footer + newline)) finally: if own_fh: fh.close() def fromregex(file, regexp, dtype): """ Construct an array from a text file, using regular expression parsing. The returned array is always a structured array, and is constructed from all matches of the regular expression in the file. Groups in the regular expression are converted to fields of the structured array. Parameters ---------- file : str or file File name or file object to read. regexp : str or regexp Regular expression used to parse the file. Groups in the regular expression correspond to fields in the dtype. dtype : dtype or list of dtypes Dtype for the structured array. Returns ------- output : ndarray The output array, containing the part of the content of `file` that was matched by `regexp`. `output` is always a structured array. Raises ------ TypeError When `dtype` is not a valid dtype for a structured array. See Also -------- fromstring, loadtxt Notes ----- Dtypes for structured arrays can be specified in several forms, but all forms specify at least the data type and field name. For details see `doc.structured_arrays`. Examples -------- >>> f = open('test.dat', 'w') >>> f.write("1312 foo\\n1534 bar\\n444 qux") >>> f.close() >>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything] >>> output = np.fromregex('test.dat', regexp, ... [('num', np.int64), ('key', 'S3')]) >>> output array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')], dtype=[('num', '<i8'), ('key', '|S3')]) >>> output['num'] array([1312, 1534, 444], dtype=int64) """ own_fh = False if not hasattr(file, "read"): file = open(file, 'rb') own_fh = True try: if not hasattr(regexp, 'match'): regexp = re.compile(asbytes(regexp)) if not isinstance(dtype, np.dtype): dtype = np.dtype(dtype) seq = regexp.findall(file.read()) if seq and not isinstance(seq[0], tuple): # Only one group is in the regexp. # Create the new array as a single data-type and then # re-interpret as a single-field structured array. 
newdtype = np.dtype(dtype[dtype.names[0]]) output = np.array(seq, dtype=newdtype) output.dtype = dtype else: output = np.array(seq, dtype=dtype) return output finally: if own_fh: file.close() #####-------------------------------------------------------------------------- #---- --- ASCII functions --- #####-------------------------------------------------------------------------- def genfromtxt(fname, dtype=float, comments='#', delimiter=None, skip_header=0, skip_footer=0, converters=None, missing_values=None, filling_values=None, usecols=None, names=None, excludelist=None, deletechars=None, replace_space='_', autostrip=False, case_sensitive=True, defaultfmt="f%i", unpack=None, usemask=False, loose=True, invalid_raise=True, max_rows=None): """ Load data from a text file, with missing values handled as specified. Each line past the first `skip_header` lines is split at the `delimiter` character, and characters following the `comments` character are discarded. Parameters ---------- fname : file or str File, filename, or generator to read. If the filename extension is `.gz` or `.bz2`, the file is first decompressed. Note that generators must return byte strings in Python 3k. dtype : dtype, optional Data type of the resulting array. If None, the dtypes will be determined by the contents of each column, individually. comments : str, optional The character used to indicate the start of a comment. All the characters occurring on a line after a comment are discarded delimiter : str, int, or sequence, optional The string used to separate values. By default, any consecutive whitespaces act as delimiter. An integer or sequence of integers can also be provided as width(s) of each field. skiprows : int, optional `skiprows` was removed in numpy 1.10. Please use `skip_header` instead. skip_header : int, optional The number of lines to skip at the beginning of the file. skip_footer : int, optional The number of lines to skip at the end of the file. converters : variable, optional The set of functions that convert the data of a column to a value. The converters can also be used to provide a default value for missing data: ``converters = {3: lambda s: float(s or 0)}``. missing : variable, optional `missing` was removed in numpy 1.10. Please use `missing_values` instead. missing_values : variable, optional The set of strings corresponding to missing data. filling_values : variable, optional The set of values to be used as default when the data are missing. usecols : sequence, optional Which columns to read, with 0 being the first. For example, ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns. names : {None, True, str, sequence}, optional If `names` is True, the field names are read from the first valid line after the first `skip_header` lines. If `names` is a sequence or a single-string of comma-separated names, the names will be used to define the field names in a structured dtype. If `names` is None, the names of the dtype fields will be used, if any. excludelist : sequence, optional A list of names to exclude. This list is appended to the default list ['return','file','print']. Excluded names are appended an underscore: for example, `file` would become `file_`. deletechars : str, optional A string combining invalid characters that must be deleted from the names. defaultfmt : str, optional A format used to define default field names, such as "f%i" or "f_%02i". autostrip : bool, optional Whether to automatically strip white spaces from the variables. 
replace_space : char, optional Character(s) used in replacement of white spaces in the variables names. By default, use a '_'. case_sensitive : {True, False, 'upper', 'lower'}, optional If True, field names are case sensitive. If False or 'upper', field names are converted to upper case. If 'lower', field names are converted to lower case. unpack : bool, optional If True, the returned array is transposed, so that arguments may be unpacked using ``x, y, z = loadtxt(...)`` usemask : bool, optional If True, return a masked array. If False, return a regular array. loose : bool, optional If True, do not raise errors for invalid values. invalid_raise : bool, optional If True, an exception is raised if an inconsistency is detected in the number of columns. If False, a warning is emitted and the offending lines are skipped. max_rows : int, optional The maximum number of rows to read. Must not be used with skip_footer at the same time. If given, the value must be at least 1. Default is to read the entire file. .. versionadded:: 1.10.0 Returns ------- out : ndarray Data read from the text file. If `usemask` is True, this is a masked array. See Also -------- numpy.loadtxt : equivalent function when no data is missing. Notes ----- * When spaces are used as delimiters, or when no delimiter has been given as input, there should not be any missing data between two fields. * When the variables are named (either by a flexible dtype or with `names`, there must not be any header in the file (else a ValueError exception is raised). * Individual values are not stripped of spaces by default. When using a custom converter, make sure the function does remove spaces. References ---------- .. [1] Numpy User Guide, section `I/O with Numpy <http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_. Examples --------- >>> from io import StringIO >>> import numpy as np Comma delimited file with mixed dtype >>> s = StringIO("1,1.3,abcde") >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'), ... ('mystring','S5')], delimiter=",") >>> data array((1, 1.3, 'abcde'), dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')]) Using dtype = None >>> s.seek(0) # needed for StringIO example only >>> data = np.genfromtxt(s, dtype=None, ... names = ['myint','myfloat','mystring'], delimiter=",") >>> data array((1, 1.3, 'abcde'), dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')]) Specifying dtype and names >>> s.seek(0) >>> data = np.genfromtxt(s, dtype="i8,f8,S5", ... names=['myint','myfloat','mystring'], delimiter=",") >>> data array((1, 1.3, 'abcde'), dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')]) An example with fixed-width columns >>> s = StringIO("11.3abcde") >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'], ... 
delimiter=[1,3,5]) >>> data array((1, 1.3, 'abcde'), dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')]) """ if max_rows is not None: if skip_footer: raise ValueError( "The keywords 'skip_footer' and 'max_rows' can not be " "specified at the same time.") if max_rows < 1: raise ValueError("'max_rows' must be at least 1.") # Py3 data conversions to bytes, for convenience if comments is not None: comments = asbytes(comments) if isinstance(delimiter, unicode): delimiter = asbytes(delimiter) if isinstance(missing_values, (unicode, list, tuple)): missing_values = asbytes_nested(missing_values) # if usemask: from numpy.ma import MaskedArray, make_mask_descr # Check the input dictionary of converters user_converters = converters or {} if not isinstance(user_converters, dict): raise TypeError( "The input argument 'converter' should be a valid dictionary " "(got '%s' instead)" % type(user_converters)) # Initialize the filehandle, the LineSplitter and the NameValidator own_fhd = False try: if isinstance(fname, basestring): if sys.version_info[0] == 2: fhd = iter(np.lib._datasource.open(fname, 'rbU')) else: fhd = iter(np.lib._datasource.open(fname, 'rb')) own_fhd = True else: fhd = iter(fname) except TypeError: raise TypeError( "fname must be a string, filehandle, or generator. " "(got %s instead)" % type(fname)) split_line = LineSplitter(delimiter=delimiter, comments=comments, autostrip=autostrip)._handyman validate_names = NameValidator(excludelist=excludelist, deletechars=deletechars, case_sensitive=case_sensitive, replace_space=replace_space) # Skip the first `skip_header` rows for i in range(skip_header): next(fhd) # Keep on until we find the first valid values first_values = None try: while not first_values: first_line = next(fhd) if names is True: if comments in first_line: first_line = ( asbytes('').join(first_line.split(comments)[1:])) first_values = split_line(first_line) except StopIteration: # return an empty array if the datafile is empty first_line = asbytes('') first_values = [] warnings.warn('genfromtxt: Empty input file: "%s"' % fname) # Should we take the first values as names ? 
if names is True: fval = first_values[0].strip() if fval in comments: del first_values[0] # Check the columns to use: make sure `usecols` is a list if usecols is not None: try: usecols = [_.strip() for _ in usecols.split(",")] except AttributeError: try: usecols = list(usecols) except TypeError: usecols = [usecols, ] nbcols = len(usecols or first_values) # Check the names and overwrite the dtype.names if needed if names is True: names = validate_names([_bytes_to_name(_.strip()) for _ in first_values]) first_line = asbytes('') elif _is_string_like(names): names = validate_names([_.strip() for _ in names.split(',')]) elif names: names = validate_names(names) # Get the dtype if dtype is not None: dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names, excludelist=excludelist, deletechars=deletechars, case_sensitive=case_sensitive, replace_space=replace_space) # Make sure the names is a list (for 2.5) if names is not None: names = list(names) if usecols: for (i, current) in enumerate(usecols): # if usecols is a list of names, convert to a list of indices if _is_string_like(current): usecols[i] = names.index(current) elif current < 0: usecols[i] = current + len(first_values) # If the dtype is not None, make sure we update it if (dtype is not None) and (len(dtype) > nbcols): descr = dtype.descr dtype = np.dtype([descr[_] for _ in usecols]) names = list(dtype.names) # If `names` is not None, update the names elif (names is not None) and (len(names) > nbcols): names = [names[_] for _ in usecols] elif (names is not None) and (dtype is not None): names = list(dtype.names) # Process the missing values ............................... # Rename missing_values for convenience user_missing_values = missing_values or () # Define the list of missing_values (one column: one list) missing_values = [list([asbytes('')]) for _ in range(nbcols)] # We have a dictionary: process it field by field if isinstance(user_missing_values, dict): # Loop on the items for (key, val) in user_missing_values.items(): # Is the key a string ? if _is_string_like(key): try: # Transform it into an integer key = names.index(key) except ValueError: # We couldn't find it: the name must have been dropped continue # Redefine the key as needed if it's a column number if usecols: try: key = usecols.index(key) except ValueError: pass # Transform the value as a list of string if isinstance(val, (list, tuple)): val = [str(_) for _ in val] else: val = [str(val), ] # Add the value(s) to the current list of missing if key is None: # None acts as default for miss in missing_values: miss.extend(val) else: missing_values[key].extend(val) # We have a sequence : each item matches a column elif isinstance(user_missing_values, (list, tuple)): for (value, entry) in zip(user_missing_values, missing_values): value = str(value) if value not in entry: entry.append(value) # We have a string : apply it to all entries elif isinstance(user_missing_values, bytes): user_value = user_missing_values.split(asbytes(",")) for entry in missing_values: entry.extend(user_value) # We have something else: apply it to all entries else: for entry in missing_values: entry.extend([str(user_missing_values)]) # Process the filling_values ............................... 
# Rename the input for convenience user_filling_values = filling_values if user_filling_values is None: user_filling_values = [] # Define the default filling_values = [None] * nbcols # We have a dictionary : update each entry individually if isinstance(user_filling_values, dict): for (key, val) in user_filling_values.items(): if _is_string_like(key): try: # Transform it into an integer key = names.index(key) except ValueError: # We couldn't find it: the name must have been dropped, continue # Redefine the key if it's a column number and usecols is defined if usecols: try: key = usecols.index(key) except ValueError: pass # Add the value to the list filling_values[key] = val # We have a sequence : update on a one-to-one basis elif isinstance(user_filling_values, (list, tuple)): n = len(user_filling_values) if (n <= nbcols): filling_values[:n] = user_filling_values else: filling_values = user_filling_values[:nbcols] # We have something else : use it for all entries else: filling_values = [user_filling_values] * nbcols # Initialize the converters ................................ if dtype is None: # Note: we can't use a [...]*nbcols, as we would have 3 times the same # ... converter, instead of 3 different converters. converters = [StringConverter(None, missing_values=miss, default=fill) for (miss, fill) in zip(missing_values, filling_values)] else: dtype_flat = flatten_dtype(dtype, flatten_base=True) # Initialize the converters if len(dtype_flat) > 1: # Flexible type : get a converter from each dtype zipit = zip(dtype_flat, missing_values, filling_values) converters = [StringConverter(dt, locked=True, missing_values=miss, default=fill) for (dt, miss, fill) in zipit] else: # Set to a default converter (but w/ different missing values) zipit = zip(missing_values, filling_values) converters = [StringConverter(dtype, locked=True, missing_values=miss, default=fill) for (miss, fill) in zipit] # Update the converters to use the user-defined ones uc_update = [] for (j, conv) in user_converters.items(): # If the converter is specified by column names, use the index instead if _is_string_like(j): try: j = names.index(j) i = j except ValueError: continue elif usecols: try: i = usecols.index(j) except ValueError: # Unused converter specified continue else: i = j # Find the value to test - first_line is not filtered by usecols: if len(first_line): testing_value = first_values[j] else: testing_value = None converters[i].update(conv, locked=True, testing_value=testing_value, default=filling_values[i], missing_values=missing_values[i],) uc_update.append((i, conv)) # Make sure we have the corrected keys in user_converters... user_converters.update(uc_update) # Fixme: possible error as following variable never used. #miss_chars = [_.missing_values for _ in converters] # Initialize the output lists ... # ... rows rows = [] append_to_rows = rows.append # ... masks if usemask: masks = [] append_to_masks = masks.append # ... 
invalid invalid = [] append_to_invalid = invalid.append # Parse each line for (i, line) in enumerate(itertools.chain([first_line, ], fhd)): values = split_line(line) nbvalues = len(values) # Skip an empty line if nbvalues == 0: continue if usecols: # Select only the columns we need try: values = [values[_] for _ in usecols] except IndexError: append_to_invalid((i + skip_header + 1, nbvalues)) continue elif nbvalues != nbcols: append_to_invalid((i + skip_header + 1, nbvalues)) continue # Store the values append_to_rows(tuple(values)) if usemask: append_to_masks(tuple([v.strip() in m for (v, m) in zip(values, missing_values)])) if len(rows) == max_rows: break if own_fhd: fhd.close() # Upgrade the converters (if needed) if dtype is None: for (i, converter) in enumerate(converters): current_column = [itemgetter(i)(_m) for _m in rows] try: converter.iterupgrade(current_column) except ConverterLockError: errmsg = "Converter #%i is locked and cannot be upgraded: " % i current_column = map(itemgetter(i), rows) for (j, value) in enumerate(current_column): try: converter.upgrade(value) except (ConverterError, ValueError): errmsg += "(occurred line #%i for value '%s')" errmsg %= (j + 1 + skip_header, value) raise ConverterError(errmsg) # Check that we don't have invalid values nbinvalid = len(invalid) if nbinvalid > 0: nbrows = len(rows) + nbinvalid - skip_footer # Construct the error message template = " Line #%%i (got %%i columns instead of %i)" % nbcols if skip_footer > 0: nbinvalid_skipped = len([_ for _ in invalid if _[0] > nbrows + skip_header]) invalid = invalid[:nbinvalid - nbinvalid_skipped] skip_footer -= nbinvalid_skipped # # nbrows -= skip_footer # errmsg = [template % (i, nb) # for (i, nb) in invalid if i < nbrows] # else: errmsg = [template % (i, nb) for (i, nb) in invalid] if len(errmsg): errmsg.insert(0, "Some errors were detected !") errmsg = "\n".join(errmsg) # Raise an exception ? if invalid_raise: raise ValueError(errmsg) # Issue a warning ? else: warnings.warn(errmsg, ConversionWarning) # Strip the last skip_footer data if skip_footer > 0: rows = rows[:-skip_footer] if usemask: masks = masks[:-skip_footer] # Convert each value according to the converter: # We want to modify the list in place to avoid creating a new one... if loose: rows = list( zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)] for (i, conv) in enumerate(converters)])) else: rows = list( zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)] for (i, conv) in enumerate(converters)])) # Reset the dtype data = rows if dtype is None: # Get the dtypes from the types of the converters column_types = [conv.type for conv in converters] # Find the columns with strings... strcolidx = [i for (i, v) in enumerate(column_types) if v in (type('S'), np.string_)] # ... and take the largest number of chars. 
for i in strcolidx: column_types[i] = "|S%i" % max(len(row[i]) for row in data) # if names is None: # If the dtype is uniform, don't define names, else use '' base = set([c.type for c in converters if c._checked]) if len(base) == 1: (ddtype, mdtype) = (list(base)[0], np.bool) else: ddtype = [(defaultfmt % i, dt) for (i, dt) in enumerate(column_types)] if usemask: mdtype = [(defaultfmt % i, np.bool) for (i, dt) in enumerate(column_types)] else: ddtype = list(zip(names, column_types)) mdtype = list(zip(names, [np.bool] * len(column_types))) output = np.array(data, dtype=ddtype) if usemask: outputmask = np.array(masks, dtype=mdtype) else: # Overwrite the initial dtype names if needed if names and dtype.names: dtype.names = names # Case 1. We have a structured type if len(dtype_flat) > 1: # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])] # First, create the array using a flattened dtype: # [('a', int), ('b1', int), ('b2', float)] # Then, view the array using the specified dtype. if 'O' in (_.char for _ in dtype_flat): if has_nested_fields(dtype): raise NotImplementedError( "Nested fields involving objects are not supported...") else: output = np.array(data, dtype=dtype) else: rows = np.array(data, dtype=[('', _) for _ in dtype_flat]) output = rows.view(dtype) # Now, process the rowmasks the same way if usemask: rowmasks = np.array( masks, dtype=np.dtype([('', np.bool) for t in dtype_flat])) # Construct the new dtype mdtype = make_mask_descr(dtype) outputmask = rowmasks.view(mdtype) # Case #2. We have a basic dtype else: # We used some user-defined converters if user_converters: ishomogeneous = True descr = [] for i, ttype in enumerate([conv.type for conv in converters]): # Keep the dtype of the current converter if i in user_converters: ishomogeneous &= (ttype == dtype.type) if ttype == np.string_: ttype = "|S%i" % max(len(row[i]) for row in data) descr.append(('', ttype)) else: descr.append(('', dtype)) # So we changed the dtype ? if not ishomogeneous: # We have more than one field if len(descr) > 1: dtype = np.dtype(descr) # We have only one field: drop the name if not needed. else: dtype = np.dtype(ttype) # output = np.array(data, dtype) if usemask: if dtype.names: mdtype = [(_, np.bool) for _ in dtype.names] else: mdtype = np.bool outputmask = np.array(masks, dtype=mdtype) # Try to take care of the missing data we missed names = output.dtype.names if usemask and names: for (name, conv) in zip(names or (), converters): missing_values = [conv(_) for _ in conv.missing_values if _ != asbytes('')] for mval in missing_values: outputmask[name] |= (output[name] == mval) # Construct the final array if usemask: output = output.view(MaskedArray) output._mask = outputmask if unpack: return output.squeeze().T return output.squeeze() def ndfromtxt(fname, **kwargs): """ Load ASCII data stored in a file and return it as a single array. Parameters ---------- fname, kwargs : For a description of input parameters, see `genfromtxt`. See Also -------- numpy.genfromtxt : generic function. """ kwargs['usemask'] = False return genfromtxt(fname, **kwargs) def mafromtxt(fname, **kwargs): """ Load ASCII data stored in a text file and return a masked array. Parameters ---------- fname, kwargs : For a description of input parameters, see `genfromtxt`. See Also -------- numpy.genfromtxt : generic function to load ASCII data. """ kwargs['usemask'] = True return genfromtxt(fname, **kwargs) def recfromtxt(fname, **kwargs): """ Load ASCII data from a file and return it in a record array. 
If ``usemask=False`` a standard `recarray` is returned, if ``usemask=True`` a MaskedRecords array is returned. Parameters ---------- fname, kwargs : For a description of input parameters, see `genfromtxt`. See Also -------- numpy.genfromtxt : generic function Notes ----- By default, `dtype` is None, which means that the data-type of the output array will be determined from the data. """ kwargs.setdefault("dtype", None) usemask = kwargs.get('usemask', False) output = genfromtxt(fname, **kwargs) if usemask: from numpy.ma.mrecords import MaskedRecords output = output.view(MaskedRecords) else: output = output.view(np.recarray) return output def recfromcsv(fname, **kwargs): """ Load ASCII data stored in a comma-separated file. The returned array is a record array (if ``usemask=False``, see `recarray`) or a masked record array (if ``usemask=True``, see `ma.mrecords.MaskedRecords`). Parameters ---------- fname, kwargs : For a description of input parameters, see `genfromtxt`. See Also -------- numpy.genfromtxt : generic function to load ASCII data. Notes ----- By default, `dtype` is None, which means that the data-type of the output array will be determined from the data. """ # Set default kwargs for genfromtxt as relevant to csv import. kwargs.setdefault("case_sensitive", "lower") kwargs.setdefault("names", True) kwargs.setdefault("delimiter", ",") kwargs.setdefault("dtype", None) output = genfromtxt(fname, **kwargs) usemask = kwargs.get("usemask", False) if usemask: from numpy.ma.mrecords import MaskedRecords output = output.view(MaskedRecords) else: output = output.view(np.recarray) return output
bsd-3-clause
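The file above implements NumPy's text I/O routines (loadtxt, savetxt, fromregex, genfromtxt). A minimal usage sketch of the public API it defines follows; the file name demo.txt and the values are purely illustrative.

import numpy as np
from io import BytesIO

# Write a small 2-column array, then read it back with loadtxt.
data = np.array([[1.0, 2.0], [3.0, 4.5]])
np.savetxt("demo.txt", data, fmt="%.3f", delimiter=",", header="a,b")

# loadtxt skips the '#'-prefixed header line that savetxt wrote.
back = np.loadtxt("demo.txt", delimiter=",")
assert np.allclose(data, back)

# genfromtxt tolerates missing fields and can fill them in.
raw = BytesIO(b"1,2\n3,\n")
filled = np.genfromtxt(raw, delimiter=",", filling_values=0.0)
print(filled)   # [[1. 2.]
                #  [3. 0.]]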
fivejjs/AD3
python/example.py
3
2817
import matplotlib.pyplot as plt
import numpy as np

from ad3 import simple_grid, general_graph


def example_binary():
    # generate trivial data
    x = np.ones((10, 10))
    x[:, 5:] = -1
    x_noisy = x + np.random.normal(0, 0.8, size=x.shape)
    x_thresh = x_noisy > .0

    # create unaries
    unaries = x_noisy
    # as we convert to int, we need to multiply to get sensible values
    unaries = np.dstack([-unaries, unaries])
    # create potts pairwise
    pairwise = np.eye(2)

    # do simple cut
    result = np.argmax(simple_grid(unaries, pairwise)[0], axis=-1)

    # use the general graph algorithm
    # first, we construct the grid graph
    inds = np.arange(x.size).reshape(x.shape)
    horz = np.c_[inds[:, :-1].ravel(), inds[:, 1:].ravel()]
    vert = np.c_[inds[:-1, :].ravel(), inds[1:, :].ravel()]
    edges = np.vstack([horz, vert])
    # we flatten the unaries
    pairwise_per_edge = np.repeat(pairwise[np.newaxis, :, :], edges.shape[0],
                                  axis=0)
    result_graph = np.argmax(general_graph(unaries.reshape(-1, 2), edges,
                                           pairwise_per_edge)[0], axis=-1)

    # plot results
    plt.subplot(231, title="original")
    plt.imshow(x, interpolation='nearest')
    plt.subplot(232, title="noisy version")
    plt.imshow(x_noisy, interpolation='nearest')
    plt.subplot(234, title="thresholding result")
    plt.imshow(x_thresh, interpolation='nearest')
    plt.subplot(235, title="cut_simple")
    plt.imshow(result, interpolation='nearest')
    plt.subplot(236, title="cut_from_graph")
    plt.imshow(result_graph.reshape(x.shape), interpolation='nearest')
    plt.show()


def example_multinomial():
    # generate dataset with three stripes
    np.random.seed(4)
    x = np.zeros((10, 12, 3))
    x[:, :4, 0] = 1
    x[:, 4:8, 1] = 1
    x[:, 8:, 2] = 1
    unaries = x + 1.5 * np.random.normal(size=x.shape)
    x = np.argmax(x, axis=2)
    x_thresh = np.argmax(unaries, axis=2)

    # potts potential
    pairwise_potts = 2 * np.eye(3)
    result = np.argmax(simple_grid(unaries, pairwise_potts)[0], axis=-1)
    # potential that penalizes 0-1 and 1-2 less than 0-2
    pairwise_1d = 2 * np.eye(3) + 2
    pairwise_1d[-1, 0] = 0
    pairwise_1d[0, -1] = 0
    print(pairwise_1d)
    result_1d = np.argmax(simple_grid(unaries, pairwise_1d)[0], axis=-1)

    plt.subplot(141, title="original")
    plt.imshow(x, interpolation="nearest")
    plt.subplot(142, title="thresholded unaries")
    plt.imshow(x_thresh, interpolation="nearest")
    plt.subplot(143, title="potts potentials")
    plt.imshow(result, interpolation="nearest")
    plt.subplot(144, title="1d topology potentials")
    plt.imshow(result_1d, interpolation="nearest")
    plt.show()


#example_binary()
example_multinomial()
lgpl-3.0
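The AD3 example above builds a 4-connected grid graph before calling general_graph. That edge construction is plain NumPy and can be checked in isolation; the sketch below reproduces it on a tiny 2x3 grid (the ad3 calls themselves are left out).

import numpy as np

# Index each pixel of a 2x3 grid, then connect horizontal and vertical neighbours.
shape = (2, 3)
inds = np.arange(np.prod(shape)).reshape(shape)
horz = np.c_[inds[:, :-1].ravel(), inds[:, 1:].ravel()]   # left-right edges
vert = np.c_[inds[:-1, :].ravel(), inds[1:, :].ravel()]   # top-bottom edges
edges = np.vstack([horz, vert])
print(edges)
# [[0 1] [1 2] [3 4] [4 5] [0 3] [1 4] [2 5]]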
zseder/hunmisc
hunmisc/utils/plotting/matplotlib_simple_xy.py
1
1535
""" Copyright 2011-13 Attila Zseder Email: zseder@gmail.com This file is part of hunmisc project url: https://github.com/zseder/hunmisc hunmisc is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """ import sys import matplotlib.pyplot as plt from matplotlib import rc def read_data(istream): r = [[],[],[],[],[]] for l in istream: le = l.strip().split() [r[i].append(le[i]) for i in xrange(len(le))] return r def main(): d = read_data(open(sys.argv[1])) rc('font', size=14) ax = plt.subplot(111) ax.plot(d[0], d[1], label="$M$", linewidth=2) ax.plot(d[0], d[2], label="$l KL$", linewidth=2) ax.plot(d[0], d[3], label="$l (H_q+KL)$", linewidth=2) ax.plot(d[0], d[4], label="$M + l (H_q+KL)$", linewidth=2) plt.xlabel("Bits") ax.legend(loc=7) plt.show() #plt.savefig("fig.png") if __name__ == "__main__": main()
gpl-3.0
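The script above expects a whitespace-separated file with five numeric columns (x plus four series). A short sketch of an equivalent plot, assuming a hypothetical input file data.txt, is:

import numpy as np
import matplotlib.pyplot as plt

# Five whitespace-separated columns: x followed by four y-series.
d = np.loadtxt("data.txt", unpack=True)   # hypothetical input file

ax = plt.subplot(111)
for ys, label in zip(d[1:], ["$M$", "$l KL$", "$l (H_q+KL)$", "$M + l (H_q+KL)$"]):
    ax.plot(d[0], ys, label=label, linewidth=2)
plt.xlabel("Bits")
ax.legend(loc="center right")             # loc=7 in the original script
plt.show()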
cs591B1-Project/Social-Media-Impact-on-Stock-Market-and-Price
data/07 exxon/dataAnalysis.py
26
6163
import numpy from numpy import * from operator import truediv from ast import literal_eval import matplotlib.pyplot as plt import statsmodels.tsa.stattools as st import scipy.stats as scit def calCorrelation(s,v): # STEP 1: Read all data files p = [line.rstrip('\n') for line in open("positive.txt")] n = [line.rstrip('\n') for line in open("negative.txt")] a = [line.rstrip('\n') for line in open("all.txt")] p_social = [line.rstrip('\n') for line in open("positive_social.txt")] n_social = [line.rstrip('\n') for line in open("negative_social.txt")] a_social = [line.rstrip('\n') for line in open("all_social.txt")] p_election = [line.rstrip('\n') for line in open("positive_election.txt")] n_election = [line.rstrip('\n') for line in open("negative_election.txt")] a_election = [line.rstrip('\n') for line in open("all_election.txt")] p_trump = [line.rstrip('\n') for line in open("positive_trump.txt")] n_trump = [line.rstrip('\n') for line in open("negative_trump.txt")] a_trump = [line.rstrip('\n') for line in open("all_trump.txt")] p_clinton = [line.rstrip('\n') for line in open("positive_clinton.txt")] n_clinton = [line.rstrip('\n') for line in open("negative_clinton.txt")] a_clinton = [line.rstrip('\n') for line in open("all_clinton.txt")] # STEP 2: Convert into numpy.array and float + bias of 1 for 0 data to avoid divided by 0 erro pInt = numpy.array(map(float, p))+1 nInt = numpy.array(map(float, n))+1 aInt = numpy.array(map(float, a))+1 p_s_Int = numpy.array(map(float, p_social))+1 n_s_Int = numpy.array(map(float, n_social))+1 a_s_Int = numpy.array(map(float, a_social))+1 p_elec = numpy.array(map(float, p_election))+1 n_elec = numpy.array(map(float, n_election))+1 a_elec = numpy.array(map(float, a_election))+1 p_tr = numpy.array(map(float, p_trump))+1 n_tr = numpy.array(map(float, n_trump))+1 a_tr = numpy.array(map(float, a_trump))+1 p_hc = numpy.array(map(float, p_clinton))+1 n_hc = numpy.array(map(float, n_clinton))+1 a_hc = numpy.array(map(float, a_clinton))+1 # STEP 3: Grab only 30 data since those are only needed for samples to calculate correlation pInt = pInt[0:30] nInt = nInt[0:30] aInt = aInt[0:30] p_s_Int = p_s_Int[0:30] n_s_Int = n_s_Int[0:30] a_s_Int = a_s_Int[0:30] p_elec = p_elec[0:30] n_elec = n_elec[0:30] a_elec = a_elec[0:30] p_tr = p_tr[0:30] n_tr = n_tr[0:30] a_tr = a_tr[0:30] p_hc = p_hc[0:30] n_hc = n_hc[0:30] a_hc = a_hc[0:30] print n_tr print p_tr print a_tr print n_elec print p_elec print a_elec # STEP 4: Simple Correlation of Data against Stock Market Prices p_corr = numpy.corrcoef([pInt, s]) n_corr = numpy.corrcoef([nInt, s]) a_corr = numpy.corrcoef([aInt, s]) print "Positive Sentiment Corr: " + str(p_corr) print "Negative Sentiment Corr: " + str(n_corr) print "Neutral Sentiment Corr: " + str(a_corr) cross_corr = numpy.correlate(pInt, s) print cross_corr # STEP 5: Simple Correlation of Social Medica Data against Stock Market Prices p_s_corr = numpy.corrcoef([p_s_Int, s]) n_s_corr = numpy.corrcoef([n_s_Int, s]) a_s_corr = numpy.corrcoef([a_s_Int, s]) print "Positive Social Sentiment Corr: " + str(p_s_corr) print "Negative Social Sentiment Corr: " + str(n_s_corr) print "Neutral Social Sentiment Corr: " + str(a_s_corr) # above caculation does not tell us much # STEP 6: Sentiment with Social Medica Factor Corr p_allcorr = numpy.corrcoef([numpy.add(pInt, p_s_Int), s]) n_allcorr = numpy.corrcoef([numpy.add(nInt, n_s_Int), s]) a_allcorr = numpy.corrcoef([numpy.add(aInt, a_s_Int), s]) print "Positive Articles + Social Sentiment Corr: " + str(p_allcorr) print "Negative 
Articles + Sentiment Corr: " + str(n_allcorr) print "Neutral Articles + Sentiment Corr: " + str(a_allcorr) # STEP 7: Volumn & News Article Correlation p_v_corr = numpy.corrcoef([pInt, v]) n_v_corr = numpy.corrcoef([nInt, v]) a_v_corr = numpy.corrcoef([aInt, v]) print "Positive Articles Sentiment - Volume Corr: " + str(p_v_corr) print "Negative Sentiment - Volume Corr: " + str(n_v_corr) print "Neutral Sentiment - Volume Corr: " + str(a_v_corr) # with social media p_all_v_corr = numpy.corrcoef([numpy.add(pInt, p_s_Int), v]) n_all_v_corr = numpy.corrcoef([numpy.add(nInt, n_s_Int), v]) a_all_v_corr = numpy.corrcoef([numpy.add(aInt, a_s_Int), v]) print "Positive Articles + Social Sentiment Corr: " + str(p_all_v_corr) print "Negative Articles + Sentiment Corr: " + str(n_all_v_corr) print "Neutral Articles + Sentiment Corr: " + str(a_all_v_corr) pn_ratio = pInt/nInt print pn_ratio pnr_corr = numpy.corrcoef([pn_ratio, s]) print "PNR Corr: " + str(pnr_corr) tr_corr = numpy.corrcoef([n_tr+p_tr+a_tr, s]) print tr_corr elec_corr = numpy.corrcoef([n_elec + p_elec + a_elec, s]) print elec_corr n_elec_corr = numpy.corrcoef([n_elec, s]) print n_elec_corr print "PNR Corr: " + str(pnr_corr) p_sum = numpy.add(pInt, p_s_Int) n_sum = numpy.add(nInt, n_s_Int) print "P_SUM:" + str(p_sum) print "N_SUM:" + str(n_sum) beta = 1 alpha = 1 p_sum = pInt*numpy.array(p_s_Int)*beta n_sum = nInt*numpy.array(n_s_Int)*alpha pn_sum_ratio = p_sum/n_sum pnr_sum_corr = numpy.corrcoef([pn_sum_ratio, s]) print "PNR Sum Corr: " + str(pnr_sum_corr) #spearman_r = scit.spearmanr(pn_sum_ratio, s) #print "Spearman's Correlation: " + str(spearman_r) #cross_corr = numpy.correlate(pn_sum_ratio, s) #print cross_corr print "Null Hypothesis - Postive Sentiment does not cause stock market price" testVector = numpy.column_stack((s, pInt)) st.grangercausalitytests(testVector, 8, verbose = True) print "Null Hypothesis - Negative Sentiment does not cause stock market price" testVector = numpy.column_stack((s, nInt)) st.grangercausalitytests(testVector, 8, verbose = True) print "Null Hypothesis - Neutral Sentiment does not cause stock market price" testVector = numpy.column_stack((s, aInt)) st.grangercausalitytests(testVector, 8, verbose = True) testVector = numpy.column_stack((s, pn_ratio)) st.grangercausalitytests(testVector, 8, verbose = True) testVector = numpy.column_stack((pn_ratio, s)) st.grangercausalitytests(testVector, 8, verbose = True)
mit
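The analysis above leans on two tools: numpy.corrcoef for Pearson correlation and the Granger-causality test from statsmodels. A self-contained sketch on synthetic series follows; the series and lag structure are invented purely for illustration.

import numpy as np
from statsmodels.tsa.stattools import grangercausalitytests

rng = np.random.default_rng(0)
sentiment = rng.normal(size=30)
# A price series that lags sentiment by one step, plus noise.
price = np.concatenate([[0.0], sentiment[:-1]]) + 0.1 * rng.normal(size=30)

print(np.corrcoef(sentiment, price)[0, 1])   # contemporaneous Pearson r

# Column order matters: the test asks whether column 2 Granger-causes column 1.
pair = np.column_stack([price, sentiment])
grangercausalitytests(pair, maxlag=2)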
yarikoptic/NiPy-OLD
nipy/neurospin/viz/activation_maps.py
1
25526
#!/usr/bin/env python """ Functions to do automatic visualization of activation-like maps. For 2D-only visualization, only matplotlib is required. For 3D visualization, Mayavi, version 3.0 or greater, is required. """ # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org> # License: BSD # Standard library imports import os import sys # Standard scientific libraries imports (more specific imports are # delayed, so that the part module can be used without them). import numpy as np import matplotlib as mp import pylab as pl # Local imports from nipy.neurospin.utils.mask import compute_mask from nipy.io.imageformats import load from anat_cache import mni_sform, mni_sform_inv, _AnatCache from coord_tools import coord_transform, find_activation, \ find_cut_coords class SformError(Exception): pass class NiftiIndexError(IndexError): pass ################################################################################ # Colormaps def _rotate_cmap(cmap, name=None, swap_order=('green', 'red', 'blue')): """ Utility function to swap the colors of a colormap. """ orig_cdict = cmap._segmentdata.copy() cdict = dict() cdict['green'] = [(p, c1, c2) for (p, c1, c2) in orig_cdict[swap_order[0]]] cdict['blue'] = [(p, c1, c2) for (p, c1, c2) in orig_cdict[swap_order[1]]] cdict['red'] = [(p, c1, c2) for (p, c1, c2) in orig_cdict[swap_order[2]]] if name is None: name = '%s_rotated' % cmap.name return mp.colors.LinearSegmentedColormap(name, cdict, 512) def _pigtailed_cmap(cmap, name=None, swap_order=('green', 'red', 'blue')): """ Utility function to make a new colormap by concatenating a colormap with its reverse. """ orig_cdict = cmap._segmentdata.copy() cdict = dict() cdict['green'] = [(0.5*(1-p), c1, c2) for (p, c1, c2) in reversed(orig_cdict[swap_order[0]])] cdict['blue'] = [(0.5*(1-p), c1, c2) for (p, c1, c2) in reversed(orig_cdict[swap_order[1]])] cdict['red'] = [(0.5*(1-p), c1, c2) for (p, c1, c2) in reversed(orig_cdict[swap_order[2]])] for color in ('red', 'green', 'blue'): cdict[color].extend([(0.5*(1+p), c1, c2) for (p, c1, c2) in orig_cdict[color]]) if name is None: name = '%s_reversed' % cmap.name return mp.colors.LinearSegmentedColormap(name, cdict, 512) # Using a dict as a namespace, to micmic matplotlib's cm _cm = dict( cold_hot = _pigtailed_cmap(pl.cm.hot, name='cold_hot'), brown_blue = _pigtailed_cmap(pl.cm.bone, name='brown_blue'), cyan_copper = _pigtailed_cmap(pl.cm.copper, name='cyan_copper'), cyan_orange = _pigtailed_cmap(pl.cm.YlOrBr_r, name='cyan_orange'), blue_red = _pigtailed_cmap(pl.cm.Reds_r, name='blue_red'), brown_cyan = _pigtailed_cmap(pl.cm.Blues_r, name='brown_cyan'), purple_green = _pigtailed_cmap(pl.cm.Greens_r, name='purple_green', swap_order=('red', 'blue', 'green')), purple_blue = _pigtailed_cmap(pl.cm.Blues_r, name='purple_blue', swap_order=('red', 'blue', 'green')), blue_orange = _pigtailed_cmap(pl.cm.Oranges_r, name='blue_orange', swap_order=('green', 'red', 'blue')), black_blue = _rotate_cmap(pl.cm.hot, name='black_blue'), black_purple = _rotate_cmap(pl.cm.hot, name='black_purple', swap_order=('blue', 'red', 'green')), black_pink = _rotate_cmap(pl.cm.hot, name='black_pink', swap_order=('blue', 'green', 'red')), black_green = _rotate_cmap(pl.cm.hot, name='black_green', swap_order=('red', 'blue', 'green')), black_red = pl.cm.hot, ) _cm.update(pl.cm.datad) class _CM(dict): def __init__(self, *args, **kwargs): dict.__init__(self, *args, **kwargs) self.__dict__.update(self) cm = _CM(**_cm) 
################################################################################ # 2D plotting of activation maps ################################################################################ def plot_map_2d(map, sform, cut_coords, anat=None, anat_sform=None, vmin=None, figure_num=None, axes=None, title='', mask=None, **kwargs): """ Plot three cuts of a given activation map (Frontal, Axial, and Lateral) Parameters ---------- map : 3D ndarray The activation map, as a 3D image. sform : 4x4 ndarray The affine matrix going from image voxel space to MNI space. cut_coords: 3-tuple of floats The MNI coordinates of the point where the cut is performed, in MNI coordinates and order. anat : 3D ndarray, optional or False The anatomical image to be used as a background. If None, the MNI152 T1 1mm template is used. If False, no anat is displayed. anat_sform : 4x4 ndarray, optional The affine matrix going from the anatomical image voxel space to MNI space. This parameter is not used when the default anatomical is used, but it is compulsory when using an explicite anatomical image. vmin : float, optional The lower threshold of the positive activation. This parameter is used to threshold the activation map. figure_num : integer, optional The number of the matplotlib figure used. If None is given, a new figure is created. axes : 4 tuple of float: (xmin, xmax, ymin, ymin), optional The coordinates, in matplotlib figure space, of the axes used to display the plot. If None, the complete figure is used. title : string, optional The title dispayed on the figure. mask : 3D ndarray, boolean, optional The brain mask. If None, the mask is computed from the map.* kwargs: extra keyword arguments, optional Extra keyword arguments passed to pylab.imshow Notes ----- All the 3D arrays are in numpy convention: (x, y, z) Cut coordinates are in Talairach coordinates. Warning: Talairach coordinates are (y, x, z), if (x, y, z) are in voxel-ordering convention. """ if anat is None: anat, anat_sform, vmax_anat = _AnatCache.get_anat() elif anat is not False: vmax_anat = anat.max() if mask is not None and ( np.all(mask) or np.all(np.logical_not(mask))): mask = None vmin_map = map.min() vmax_map = map.max() if vmin is not None and np.isfinite(vmin): map = np.ma.masked_less(map, vmin) elif mask is not None and not isinstance(map, np.ma.masked_array): map = np.ma.masked_array(map, np.logical_not(mask)) vmin_map = map.min() vmax_map = map.max() if isinstance(map, np.ma.core.MaskedArray): use_mask = False if map._mask is False or np.all(np.logical_not(map._mask)): map = np.asarray(map) elif map._mask is True or np.all(map._mask): map = np.asarray(map) if use_mask and mask is not None: map = np.ma.masked_array(map, np.logical_not(mask)) # Calculate the bounds if anat is not False: anat_bounds = np.zeros((4, 6)) anat_bounds[:3, -3:] = np.identity(3)*anat.shape anat_bounds[-1, :] = 1 anat_bounds = np.dot(anat_sform, anat_bounds) map_bounds = np.zeros((4, 6)) map_bounds[:3, -3:] = np.identity(3)*map.shape map_bounds[-1, :] = 1 map_bounds = np.dot(sform, map_bounds) # The coordinates of the center of the cut in different spaces. y, x, z = cut_coords x_map, y_map, z_map = [int(round(c)) for c in coord_transform(x, y, z, np.linalg.inv(sform))] if anat is not False: x_anat, y_anat, z_anat = [int(round(c)) for c in coord_transform(x, y, z, np.linalg.inv(anat_sform))] fig = pl.figure(figure_num, figsize=(6.6, 2.6)) if axes is None: axes = (0., 1., 0., 1.) 
pl.clf() ax_xmin, ax_xmax, ax_ymin, ax_ymax = axes ax_width = ax_xmax - ax_xmin ax_height = ax_ymax - ax_ymin # Calculate the axes ratio size in a 'clever' way if anat is not False: shapes = np.array(anat.shape, 'f') else: shapes = np.array(map.shape, 'f') shapes *= ax_width/shapes.sum() ########################################################################### # Frontal pl.axes([ax_xmin, ax_ymin, shapes[0], ax_height]) if anat is not False: if y_anat < anat.shape[1]: pl.imshow(np.rot90(anat[:, y_anat, :]), cmap=pl.cm.gray, vmin=-.5*vmax_anat, vmax=vmax_anat, extent=(anat_bounds[0, 3], anat_bounds[0, 0], anat_bounds[2, 0], anat_bounds[2, 5])) if y_map < map.shape[1]: pl.imshow(np.rot90(map[:, y_map, :]), vmin=vmin_map, vmax=vmax_map, extent=(map_bounds[0, 3], map_bounds[0, 0], map_bounds[2, 0], map_bounds[2, 5]), **kwargs) pl.text(ax_xmin +shapes[0] + shapes[1] - 0.01, ax_ymin + 0.07, '%i' % x, horizontalalignment='right', verticalalignment='bottom', transform=fig.transFigure) xmin, xmax = pl.xlim() ymin, ymax = pl.ylim() pl.hlines(z, xmin, xmax, color=(.5, .5, .5)) pl.vlines(-x, ymin, ymax, color=(.5, .5, .5)) pl.axis('off') ########################################################################### # Lateral pl.axes([ax_xmin + shapes[0], ax_ymin, shapes[1], ax_height]) if anat is not False: if x_anat < anat.shape[0]: pl.imshow(np.rot90(anat[x_anat, ...]), cmap=pl.cm.gray, vmin=-.5*vmax_anat, vmax=vmax_anat, extent=(anat_bounds[1, 0], anat_bounds[1, 4], anat_bounds[2, 0], anat_bounds[2, 5])) if x_map < map.shape[0]: pl.imshow(np.rot90(map[x_map, ...]), vmin=vmin_map, vmax=vmax_map, extent=(map_bounds[1, 0], map_bounds[1, 4], map_bounds[2, 0], map_bounds[2, 5]), **kwargs) pl.text(ax_xmin + shapes[-1] - 0.01, ax_ymin + 0.07, '%i' % y, horizontalalignment='right', verticalalignment='bottom', transform=fig.transFigure) xmin, xmax = pl.xlim() ymin, ymax = pl.ylim() pl.hlines(z, xmin, xmax, color=(.5, .5, .5)) pl.vlines(y, ymin, ymax, color=(.5, .5, .5)) pl.axis('off') ########################################################################### # Axial pl.axes([ax_xmin + shapes[0] + shapes[1], ax_ymin, shapes[-1], ax_height]) if anat is not False: if z_anat < anat.shape[2]: pl.imshow(np.rot90(anat[..., z_anat]), cmap=pl.cm.gray, vmin=-.5*vmax_anat, vmax=vmax_anat, extent=(anat_bounds[0, 0], anat_bounds[0, 3], anat_bounds[1, 0], anat_bounds[1, 4])) if z_map < map.shape[2]: pl.imshow(np.rot90(map[..., z_map]), vmin=vmin_map, vmax=vmax_map, extent=(map_bounds[0, 0], map_bounds[0, 3], map_bounds[1, 0], map_bounds[1, 4]), **kwargs) pl.text(ax_xmax - 0.01, ax_ymin + 0.07, '%i' % z, horizontalalignment='right', verticalalignment='bottom', transform=fig.transFigure) xmin, xmax = pl.xlim() ymin, ymax = pl.ylim() pl.hlines(y, xmin, xmax, color=(.5, .5, .5)) pl.vlines(x, ymin, ymax, color=(.5, .5, .5)) pl.axis('off') pl.text(ax_xmin + 0.01, ax_ymax - 0.01, title, horizontalalignment='left', verticalalignment='top', transform=fig.transFigure) pl.axis('off') def demo_plot_map_2d(): map = np.zeros((182, 218, 182)) # Color a asymetric rectangle around Broadman area 26: x, y, z = -6, -53, 9 x_map, y_map, z_map = coord_transform(x, y, z, mni_sform_inv) map[x_map-30:x_map+30, y_map-3:y_map+3, z_map-10:z_map+10] = 1 map = np.ma.masked_less(map, 0.5) plot_map_2d(map, mni_sform, cut_coords=(x, y, z), figure_num=512) def plot_map(map, sform, cut_coords, anat=None, anat_sform=None, vmin=None, figure_num=None, title='', mask=None): """ Plot a together a 3D volume rendering view of the activation, with an outline of 
the brain, and 2D cuts. If Mayavi is not installed, falls back to 2D views only. Parameters ---------- map : 3D ndarray The activation map, as a 3D image. sform : 4x4 ndarray The affine matrix going from image voxel space to MNI space. cut_coords: 3-tuple of floats, optional The MNI coordinates of the cut to perform, in MNI coordinates and order. If None is given, the cut_coords are automaticaly estimated. anat : 3D ndarray, optional The anatomical image to be used as a background. If None, the MNI152 T1 1mm template is used. anat_sform : 4x4 ndarray, optional The affine matrix going from the anatomical image voxel space to MNI space. This parameter is not used when the default anatomical is used, but it is compulsory when using an explicite anatomical image. vmin : float, optional The lower threshold of the positive activation. This parameter is used to threshold the activation map. figure_num : integer, optional The number of the matplotlib and Mayavi figures used. If None is given, a new figure is created. title : string, optional The title dispayed on the figure. mask : 3D ndarray, boolean, optional The brain mask. If None, the mask is computed from the map. Notes ----- All the 3D arrays are in numpy convention: (x, y, z) Cut coordinates are in Talairach coordinates. Warning: Talairach coordinates are (y, x, z), if (x, y, z) are in voxel-ordering convention. """ try: from enthought.mayavi import version if not int(version.version[0]) > 2: raise ImportError except ImportError: print >> sys.stderr, 'Mayavi > 3.x not installed, plotting only 2D' return plot_map_2d(map, sform, cut_coords=cut_coords, anat=anat, anat_sform=anat_sform, vmin=vmin, title=title, figure_num=figure_num, mask=mask) from .maps_3d import plot_map_3d, m2screenshot plot_map_3d(map, sform, cut_coords=cut_coords, anat=anat, anat_sform=anat_sform, vmin=vmin, figure_num=figure_num, mask=mask) fig = pl.figure(figure_num, figsize=(10.6, 2.6)) ax = pl.axes((-0.01, 0, 0.3, 1)) m2screenshot(mpl_axes=ax) plot_map_2d(map, sform, cut_coords=cut_coords, anat=anat, anat_sform=anat_sform, vmin=vmin, mask=mask, figure_num=fig.number, axes=(0.28, 1, 0, 1.), title=title) def demo_plot_map(): map = np.zeros((182, 218, 182)) # Color a asymetric rectangle around Broadman area 26: x, y, z = -6, -53, 9 x_map, y_map, z_map = coord_transform(x, y, z, mni_sform_inv) map[x_map-30:x_map+30, y_map-3:y_map+3, z_map-10:z_map+10] = 1 plot_map(map, mni_sform, cut_coords=(x, y, z), vmin=0.5, figure_num=512) def auto_plot_map(map, sform, vmin=None, cut_coords=None, do3d=False, anat=None, anat_sform=None, title='', figure_num=None, mask=None, auto_sign=True): """ Automatic plotting of an activation map. Plot a together a 3D volume rendering view of the activation, with an outline of the brain, and 2D cuts. If Mayavi is not installed, falls back to 2D views only. Parameters ---------- map : 3D ndarray The activation map, as a 3D image. sform : 4x4 ndarray The affine matrix going from image voxel space to MNI space. vmin : float, optional The lower threshold of the positive activation. This parameter is used to threshold the activation map. cut_coords: 3-tuple of floats, optional The MNI coordinates of the point where the cut is performed, in MNI coordinates and order. If None is given, the cut_coords are automaticaly estimated. do3d : boolean, optional If do3d is True, a 3D plot is created if Mayavi is installed. anat : 3D ndarray, optional The anatomical image to be used as a background. If None, the MNI152 T1 1mm template is used. 
anat_sform : 4x4 ndarray, optional The affine matrix going from the anatomical image voxel space to MNI space. This parameter is not used when the default anatomical is used, but it is compulsory when using an explicite anatomical image. title : string, optional The title dispayed on the figure. figure_num : integer, optional The number of the matplotlib and Mayavi figures used. If None is given, a new figure is created. mask : 3D ndarray, boolean, optional The brain mask. If None, the mask is computed from the map. auto_sign : boolean, optional If auto_sign is True, the sign of the activation is automaticaly computed: negative activation can thus be plotted. Returns ------- vmin : float The lower threshold of the activation used. cut_coords : 3-tuple of floats The Talairach coordinates of the cut performed for the 2D view. Notes ----- All the 3D arrays are in numpy convention: (x, y, z) Cut coordinates are in Talairach coordinates. Warning: Talairach coordinates are (y, x, z), if (x, y, z) are in voxel-ordering convention. """ if do3d: if do3d == 'offscreen': try: from enthought.mayavi import mlab mlab.options.offscreen = True except: pass plotter = plot_map else: plotter = plot_map_2d if mask is None: mask = compute_mask(map) if vmin is None: vmin = np.inf pvalue = 0.04 while not np.isfinite(vmin): pvalue *= 1.25 vmax, vmin = find_activation(map, mask=mask, pvalue=pvalue) if not np.isfinite(vmin) and auto_sign: if np.isfinite(vmax): vmin = -vmax if mask is not None: map[mask] *= -1 else: map *= -1 if cut_coords is None: x, y, z = find_cut_coords(map, activation_threshold=vmin) # XXX: Careful with Voxel/MNI ordering y, x, z = coord_transform(x, y, z, sform) cut_coords = (x, y, z) plotter(map, sform, vmin=vmin, cut_coords=cut_coords, anat=anat, anat_sform=anat_sform, title=title, figure_num=figure_num, mask=mask) return vmin, cut_coords def plot_niftifile(filename, outputname=None, do3d=False, vmin=None, cut_coords=None, anat_filename=None, figure_num=None, mask_filename=None, auto_sign=True): """ Given a nifti filename, plot a view of it to a file (png by default). Parameters ---------- filename : string The name of the Nifti file of the map to be plotted outputname : string, optional The file name of the output file created. By default the name of the input file with a png extension is used. do3d : boolean, optional If do3d is True, a 3D plot is created if Mayavi is installed. vmin : float, optional The lower threshold of the positive activation. This parameter is used to threshold the activation map. cut_coords: 3-tuple of floats, optional The MNI coordinates of the point where the cut is performed, in MNI coordinates and order. If None is given, the cut_coords are automaticaly estimated. anat : string, optional Name of the Nifti image file to be used as a background. If None, the MNI152 T1 1mm template is used. title : string, optional The title dispayed on the figure. figure_num : integer, optional The number of the matplotlib and Mayavi figures used. If None is given, a new figure is created. mask_filename : string, optional Name of the Nifti file to be used as brain mask. If None, the mask is computed from the map. auto_sign : boolean, optional If auto_sign is True, the sign of the activation is automaticaly computed: negative activation can thus be plotted. Notes ----- Cut coordinates are in Talairach coordinates. Warning: Talairach coordinates are (y, x, z), if (x, y, z) are in voxel-ordering convention. 
""" if outputname is None: outputname = os.path.splitext(filename)[0] + '.png' if not os.path.exists(filename): raise OSError, 'File %s does not exist' % filename nim = load(filename) sform = nim.get_affine() if any(np.linalg.eigvals(sform)==0): raise SformError, "sform affine is not inversible" if anat_filename is not None: anat_im = load(anat_filename) anat = anat_im.data anat_sform = anat_im.get_affine() else: anat = None anat_sform = None if mask_filename is not None: mask_im = load(mask_filename) mask = mask_im.data.astype(np.bool) if not np.allclose(mask_im.get_affine(), sform): raise SformError, 'Mask does not have same sform as image' if not np.allclose(mask.shape, nim.data.shape[:3]): raise NiftiIndexError, 'Mask does not have same shape as image' else: mask = None output_files = list() if nim.data.ndim == 3: map = nim.data.T auto_plot_map(map, sform, vmin=vmin, cut_coords=cut_coords, do3d=do3d, anat=anat, anat_sform=anat_sform, mask=mask, title=os.path.basename(filename), figure_num=figure_num, auto_sign=auto_sign) pl.savefig(outputname) output_files.append(outputname) elif nim.data.ndim == 4: outputname, outputext = os.path.splitext(outputname) if len(nim.data) < 10: fmt = '%s_%i%s' elif len(nim.data) < 100: fmt = '%s_%02i%s' elif len(nim.data) < 1000: fmt = '%s_%03i%s' else: fmt = '%s_%04i%s' if mask is None: mask = compute_mask(nim.data.mean(axis=0)).T for index, data in enumerate(nim.data): map = data.T auto_plot_map(map, sform, vmin=vmin, cut_coords=cut_coords, do3d=do3d, anat=anat, anat_sform=anat_sform, title='%s, %i' % (os.path.basename(filename), index), figure_num=figure_num, mask=mask, auto_sign=auto_sign) this_outputname = fmt % (outputname, index, outputext) pl.savefig(this_outputname) pl.clf() output_files.append(this_outputname) else: raise NiftiIndexError, 'File %s: incorrect number of dimensions' return output_files
bsd-3-clause
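The nipy module above lays out frontal, lateral, and axial cuts with pylab axes and imshow. Independent of nipy, the basic idea can be sketched with plain matplotlib on a synthetic volume; the blob and cut coordinates below are made up for illustration.

import numpy as np
import matplotlib.pyplot as plt

vol = np.zeros((40, 50, 40))
vol[10:20, 25:30, 15:25] = 1.0          # a fake "activation" blob
x, y, z = 15, 27, 20                    # voxel coordinates of the cut

fig, axes = plt.subplots(1, 3, figsize=(9, 3))
for ax, (cut, title) in zip(axes, [(vol[:, y, :], "frontal"),
                                   (vol[x, :, :], "lateral"),
                                   (vol[:, :, z], "axial")]):
    ax.imshow(np.rot90(cut), interpolation="nearest")
    ax.set_title(title)
    ax.axis("off")
plt.show()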
chongyangtao/gmmreg
Python/_plotting.py
14
2435
#!/usr/bin/env python
#coding=utf-8
##====================================================
## $Author$
## $Date$
## $Revision$
##====================================================

from pylab import *
from configobj import ConfigObj
import matplotlib.pyplot as plt


def display2Dpointset(A):
    fig = plt.figure()
    ax = fig.add_subplot(111)
    #ax.grid(True)
    ax.plot(A[:,0],A[:,1],'yo',markersize=8,mew=1)
    labels = plt.getp(plt.gca(), 'xticklabels')
    plt.setp(labels, color='k', fontweight='bold')
    labels = plt.getp(plt.gca(), 'yticklabels')
    plt.setp(labels, color='k', fontweight='bold')
    for i,x in enumerate(A):
        ax.annotate('%d'%(i+1), xy = x, xytext = x + 0)
    ax.set_axis_off()
    #fig.show()


def display2Dpointsets(A, B, ax = None):
    """ display a pair of 2D point sets """
    if not ax:
        fig = plt.figure()
        ax = fig.add_subplot(111)
    ax.plot(A[:,0],A[:,1],'yo',markersize=8,mew=1)
    ax.plot(B[:,0],B[:,1],'b+',markersize=8,mew=1)
    #pylab.setp(pylab.gca(), 'xlim', [-0.15,0.6])
    labels = plt.getp(plt.gca(), 'xticklabels')
    plt.setp(labels, color='k', fontweight='bold')
    labels = plt.getp(plt.gca(), 'yticklabels')
    plt.setp(labels, color='k', fontweight='bold')


def display3Dpointsets(A,B,ax):
    #ax.plot3d(A[:,0],A[:,1],A[:,2],'yo',markersize=10,mew=1)
    #ax.plot3d(B[:,0],B[:,1],B[:,2],'b+',markersize=10,mew=1)
    ax.scatter(A[:,0],A[:,1],A[:,2], c = 'y', marker = 'o')
    ax.scatter(B[:,0],B[:,1],B[:,2], c = 'b', marker = '+')
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')


from mpl_toolkits.mplot3d import Axes3D


def displayABC(A,B,C):
    fig = plt.figure()
    dim = A.shape[1]
    if dim==2:
        ax = plt.subplot(121)
        display2Dpointsets(A, B, ax)
        ax = plt.subplot(122)
        display2Dpointsets(C, B, ax)
    if dim==3:
        plot1 = plt.subplot(1,2,1)
        ax = Axes3D(fig, rect = plot1.get_position())
        display3Dpointsets(A,B,ax)
        plot2 = plt.subplot(1,2,2)
        ax = Axes3D(fig, rect = plot2.get_position())
        display3Dpointsets(C,B,ax)
    plt.show()


def display_pts(f_config):
    config = ConfigObj(f_config)
    file_section = config['FILES']
    mf = file_section['model']
    sf = file_section['scene']
    tf = file_section['transformed_model']
    m = np.loadtxt(mf)
    s = np.loadtxt(sf)
    t = np.loadtxt(tf)
    displayABC(m,s,t)
gpl-3.0
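A hedged usage sketch for the module above. It assumes _plotting.py is importable from the working directory; the point sets are random stand-ins, so this only illustrates the call pattern of displayABC (for display_pts, the [FILES] section of the config must name model, scene and transformed_model text files loadable with np.loadtxt).

import numpy as np
from _plotting import displayABC  # assumes gmmreg/Python is the cwd or on sys.path

model = np.random.rand(50, 2)            # model point set
scene = model + 0.05                     # scene: shifted copy of the model
transformed = model + 0.01               # stand-in for the registered model
displayABC(model, scene, transformed)    # left: model vs scene, right: transformed vs scene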
liang42hao/bokeh
bokeh/compat/mplexporter/renderers/base.py
44
14355
import warnings import itertools from contextlib import contextmanager import numpy as np from matplotlib import transforms from .. import utils from .. import _py3k_compat as py3k class Renderer(object): @staticmethod def ax_zoomable(ax): return bool(ax and ax.get_navigate()) @staticmethod def ax_has_xgrid(ax): return bool(ax and ax.xaxis._gridOnMajor and ax.yaxis.get_gridlines()) @staticmethod def ax_has_ygrid(ax): return bool(ax and ax.yaxis._gridOnMajor and ax.yaxis.get_gridlines()) @property def current_ax_zoomable(self): return self.ax_zoomable(self._current_ax) @property def current_ax_has_xgrid(self): return self.ax_has_xgrid(self._current_ax) @property def current_ax_has_ygrid(self): return self.ax_has_ygrid(self._current_ax) @contextmanager def draw_figure(self, fig, props): if hasattr(self, "_current_fig") and self._current_fig is not None: warnings.warn("figure embedded in figure: something is wrong") self._current_fig = fig self._fig_props = props self.open_figure(fig=fig, props=props) yield self.close_figure(fig=fig) self._current_fig = None self._fig_props = {} @contextmanager def draw_axes(self, ax, props): if hasattr(self, "_current_ax") and self._current_ax is not None: warnings.warn("axes embedded in axes: something is wrong") self._current_ax = ax self._ax_props = props self.open_axes(ax=ax, props=props) yield self.close_axes(ax=ax) self._current_ax = None self._ax_props = {} @contextmanager def draw_legend(self, legend, props): self._current_legend = legend self._legend_props = props self.open_legend(legend=legend, props=props) yield self.close_legend(legend=legend) self._current_legend = None self._legend_props = {} # Following are the functions which should be overloaded in subclasses def open_figure(self, fig, props): """ Begin commands for a particular figure. Parameters ---------- fig : matplotlib.Figure The Figure which will contain the ensuing axes and elements props : dictionary The dictionary of figure properties """ pass def close_figure(self, fig): """ Finish commands for a particular figure. Parameters ---------- fig : matplotlib.Figure The figure which is finished being drawn. """ pass def open_axes(self, ax, props): """ Begin commands for a particular axes. Parameters ---------- ax : matplotlib.Axes The Axes which will contain the ensuing axes and elements props : dictionary The dictionary of axes properties """ pass def close_axes(self, ax): """ Finish commands for a particular axes. Parameters ---------- ax : matplotlib.Axes The Axes which is finished being drawn. """ pass def open_legend(self, legend, props): """ Beging commands for a particular legend. Parameters ---------- legend : matplotlib.legend.Legend The Legend that will contain the ensuing elements props : dictionary The dictionary of legend properties """ pass def close_legend(self, legend): """ Finish commands for a particular legend. Parameters ---------- legend : matplotlib.legend.Legend The Legend which is finished being drawn """ pass def draw_marked_line(self, data, coordinates, linestyle, markerstyle, label, mplobj=None): """Draw a line that also has markers. If this isn't reimplemented by a renderer object, by default, it will make a call to BOTH draw_line and draw_markers when both markerstyle and linestyle are not None in the same Line2D object. 
""" if linestyle is not None: self.draw_line(data, coordinates, linestyle, label, mplobj) if markerstyle is not None: self.draw_markers(data, coordinates, markerstyle, label, mplobj) def draw_line(self, data, coordinates, style, label, mplobj=None): """ Draw a line. By default, draw the line via the draw_path() command. Some renderers might wish to override this and provide more fine-grained behavior. In matplotlib, lines are generally created via the plt.plot() command, though this command also can create marker collections. Parameters ---------- data : array_like A shape (N, 2) array of datapoints. coordinates : string A string code, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. style : dictionary a dictionary specifying the appearance of the line. mplobj : matplotlib object the matplotlib plot element which generated this line """ pathcodes = ['M'] + (data.shape[0] - 1) * ['L'] pathstyle = dict(facecolor='none', **style) pathstyle['edgecolor'] = pathstyle.pop('color') pathstyle['edgewidth'] = pathstyle.pop('linewidth') self.draw_path(data=data, coordinates=coordinates, pathcodes=pathcodes, style=pathstyle, mplobj=mplobj) @staticmethod def _iter_path_collection(paths, path_transforms, offsets, styles): """Build an iterator over the elements of the path collection""" N = max(len(paths), len(offsets)) if not path_transforms: path_transforms = [np.eye(3)] edgecolor = styles['edgecolor'] if np.size(edgecolor) == 0: edgecolor = ['none'] facecolor = styles['facecolor'] if np.size(facecolor) == 0: facecolor = ['none'] elements = [paths, path_transforms, offsets, edgecolor, styles['linewidth'], facecolor] it = itertools return it.islice(py3k.zip(*py3k.map(it.cycle, elements)), N) def draw_path_collection(self, paths, path_coordinates, path_transforms, offsets, offset_coordinates, offset_order, styles, mplobj=None): """ Draw a collection of paths. The paths, offsets, and styles are all iterables, and the number of paths is max(len(paths), len(offsets)). By default, this is implemented via multiple calls to the draw_path() function. For efficiency, Renderers may choose to customize this implementation. Examples of path collections created by matplotlib are scatter plots, histograms, contour plots, and many others. Parameters ---------- paths : list list of tuples, where each tuple has two elements: (data, pathcodes). See draw_path() for a description of these. path_coordinates: string the coordinates code for the paths, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. path_transforms: array_like an array of shape (*, 3, 3), giving a series of 2D Affine transforms for the paths. These encode translations, rotations, and scalings in the standard way. offsets: array_like An array of offsets of shape (N, 2) offset_coordinates : string the coordinates code for the offsets, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. offset_order : string either "before" or "after". This specifies whether the offset is applied before the path transform, or after. The matplotlib backend equivalent is "before"->"data", "after"->"screen". styles: dictionary A dictionary in which each value is a list of length N, containing the style(s) for the paths. 
mplobj : matplotlib object the matplotlib plot element which generated this collection """ if offset_order == "before": raise NotImplementedError("offset before transform") for tup in self._iter_path_collection(paths, path_transforms, offsets, styles): (path, path_transform, offset, ec, lw, fc) = tup vertices, pathcodes = path path_transform = transforms.Affine2D(path_transform) vertices = path_transform.transform(vertices) # This is a hack: if path_coordinates == "figure": path_coordinates = "points" style = {"edgecolor": utils.color_to_hex(ec), "facecolor": utils.color_to_hex(fc), "edgewidth": lw, "dasharray": "10,0", "alpha": styles['alpha'], "zorder": styles['zorder']} self.draw_path(data=vertices, coordinates=path_coordinates, pathcodes=pathcodes, style=style, offset=offset, offset_coordinates=offset_coordinates, mplobj=mplobj) def draw_markers(self, data, coordinates, style, label, mplobj=None): """ Draw a set of markers. By default, this is done by repeatedly calling draw_path(), but renderers should generally overload this method to provide a more efficient implementation. In matplotlib, markers are created using the plt.plot() command. Parameters ---------- data : array_like A shape (N, 2) array of datapoints. coordinates : string A string code, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. style : dictionary a dictionary specifying the appearance of the markers. mplobj : matplotlib object the matplotlib plot element which generated this marker collection """ vertices, pathcodes = style['markerpath'] pathstyle = dict((key, style[key]) for key in ['alpha', 'edgecolor', 'facecolor', 'zorder', 'edgewidth']) pathstyle['dasharray'] = "10,0" for vertex in data: self.draw_path(data=vertices, coordinates="points", pathcodes=pathcodes, style=pathstyle, offset=vertex, offset_coordinates=coordinates, mplobj=mplobj) def draw_text(self, text, position, coordinates, style, text_type=None, mplobj=None): """ Draw text on the image. Parameters ---------- text : string The text to draw position : tuple The (x, y) position of the text coordinates : string A string code, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. style : dictionary a dictionary specifying the appearance of the text. text_type : string or None if specified, a type of text such as "xlabel", "ylabel", "title" mplobj : matplotlib object the matplotlib plot element which generated this text """ raise NotImplementedError() def draw_path(self, data, coordinates, pathcodes, style, offset=None, offset_coordinates="data", mplobj=None): """ Draw a path. In matplotlib, paths are created by filled regions, histograms, contour plots, patches, etc. Parameters ---------- data : array_like A shape (N, 2) array of datapoints. coordinates : string A string code, which should be either 'data' for data coordinates, 'figure' for figure (pixel) coordinates, or "points" for raw point coordinates (useful in conjunction with offsets, below). pathcodes : list A list of single-character SVG pathcodes associated with the data. Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't', 'S', 's', 'C', 'c', 'Z', 'z'] See the SVG specification for details. Note that some path codes consume more than one datapoint (while 'Z' consumes none), so in general, the length of the pathcodes list will not be the same as that of the data array. style : dictionary a dictionary specifying the appearance of the line. offset : list (optional) the (x, y) offset of the path. 
If not given, no offset will be used. offset_coordinates : string (optional) A string code, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. mplobj : matplotlib object the matplotlib plot element which generated this path """ raise NotImplementedError() def draw_image(self, imdata, extent, coordinates, style, mplobj=None): """ Draw an image. Parameters ---------- imdata : string base64 encoded png representation of the image extent : list the axes extent of the image: [xmin, xmax, ymin, ymax] coordinates: string A string code, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. style : dictionary a dictionary specifying the appearance of the image mplobj : matplotlib object the matplotlib plot object which generated this image """ raise NotImplementedError()
bsd-3-clause
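A minimal sketch of subclassing the Renderer above: because draw_line and draw_markers both delegate to draw_path, overriding draw_path (and draw_text) is enough to capture every primitive. The class and its print format are illustrative only, and the import path should be adjusted to wherever mplexporter lives in your tree.

import numpy as np
from mplexporter.renderers.base import Renderer  # adjust to your package layout

class LoggingRenderer(Renderer):
    """Report each primitive instead of drawing it."""
    def draw_path(self, data, coordinates, pathcodes, style,
                  offset=None, offset_coordinates="data", mplobj=None):
        print("path: %d vertices, codes=%s, coords=%s"
              % (len(data), "".join(pathcodes), coordinates))

    def draw_text(self, text, position, coordinates, style,
                  text_type=None, mplobj=None):
        print("text: %r at %s" % (text, position))

r = LoggingRenderer()
pts = np.column_stack([np.linspace(0, 1, 5), np.linspace(0, 1, 5) ** 2])
# draw_line builds 'M' + 'L'*(N-1) pathcodes and forwards them to draw_path
r.draw_line(pts, "data", {"color": "#0000ff", "linewidth": 2}, label="demo")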
bovulpes/AliceO2
Detectors/FIT/benchmark/process.py
6
12238
# load modules import re import sys import pandas as pd import numpy as np import matplotlib.pyplot as plt from matplotlib.ticker import AutoMinorLocator # use classic plot style plt.style.use('classic') # read and save user input filenames mem_filename = sys.argv[1] cpu_filename = sys.argv[2] # save the process id names process_id_mem = re.findall('mem_evolution_(\\d+)', mem_filename)[0] process_id_cpu = re.findall('cpu_evolution_(\\d+)', cpu_filename)[0] # check that the process id names are the same if not process_id_mem==process_id_cpu: # throw error if true and exit program sys.stderr.write("The memory and cpu process filenames do not match...\n") print("input memory filename: ",mem_filename) print("inpu cpu filename: ",cpu_filename) exit(1) # save the main process id (driver application) process_id = process_id_mem + '.txt' # as string '<PID>.txt' # save the same process id (driver application), but as a float driver = float(process_id_mem) # load the o2 command given with open(mem_filename) as f: title = f.readline() # extract the command given title = re.findall('#command line: (\\w.+)', title)[0] # declare string variables for different runs simulation = 'o2-sim ' serial = 'o2-sim-serial' digitization = 'o2-sim-digitizer-workflow' # print the command for the user print("\nYour command was: ", title) # check what type of command and parse it to a logfile variable if title.find(simulation) == 0: print("You have monitored o2 simulation in parallel.\n") command=simulation logfilename = 'o2sim.log' elif title.find(serial) == 0: print("You have monitored o2 simulation in serial.\n") command=serial logfilename = 'o2sim.log' elif title.find(digitization) == 0: command=digitization print("You have monitored o2 digitization.\n") logfilename = 'o2digi.log' else : print("I do not know this type of simulation.\n") exit(1) ################################################# # # # Extract the PIDs from logfile # # # ################################################# if command==simulation: # True if you typed o2-sim try: # open o2sim.log file name with open(logfilename) as logfile: # read and save the first 6 lines in o2sim.log loglines = [next(logfile) for line in range(6)] # print("*******************************\n") # print("Driver application PID is: ", driver) # find the PID for the event generator (o2-sim-primary-..) 
eventgenerator_line = re.search('Spawning particle server on PID (.*); Redirect output to serverlog\n',loglines[3]) event_gen = float(eventgenerator_line.group(1)) # print("Eventgenerator PID is: ", event_gen) # find the PID for sim worker 0 (o2-sim-device-runner) sim_worker_line = re.search('Spawning sim worker 0 on PID (.*); Redirect output to workerlog0\n',loglines[4]) sim_worker = float(sim_worker_line.group(1)) # print("SimWorker 0 PID is: ", sim_worker) # find the PID for the hitmerger (o2-sim-hitmerger) hitmerger_line = re.search('Spawning hit merger on PID (.*); Redirect output to mergerlog\n',loglines[5]) hit_merger = float(hitmerger_line.group(1)) # print("Hitmerger PID is: ", hit_merger, "\n") # print("*******************************\n") # find the number of simulation workers n_workers = int(re.findall('Running with (\\d+)', loglines[1])[0]) # save into a list pid_names = ['driver','event gen','sim worker 0','hit merger'] pid_vals = [driver,event_gen,sim_worker,hit_merger] # append pid names for remaining workers for i in range(n_workers-1): pid_names.append(f"sim worker {i+1}") no_log = False except IOError: print("There exists no o2sim.log..") print("No details of devices will be provided.") no_log = True elif command==digitization: # True if you typed o2-sim-digitizer-workflow try: # open o2digi.log file name with open(logfilename) as logfile: # save the first 100 lines in o2digi.log loglines = [next(logfile) for line in range(100)] # declare list for PID numbers and names pid_vals = [] pid_names = [] # loop through lines to find PIDs for line_num,line in enumerate(loglines): pid_line = re.findall('Starting (\\w.+) on pid (\\d+)',line) if pid_line: # True if the line contains 'Start <PID name> on pid <PID number>' # assign the name and value to variables pid_name = pid_line[0][0] pid_val = float(pid_line[0][1]) # save to list pid_names.append(pid_name) pid_vals.append(pid_val) # insert driver application name and value pid_names.insert(0,'driver') pid_vals.insert(0,driver) # for id in range(len(pid_names)): # print(pid_names[id],"PID is: ",pid_vals[id]) # print(pid_vals[pid]) # print("*******************************\n") no_log = False except IOError: print("There exists no o2digi.log..") print("No details of devices will be provided.") no_log = True elif command==serial: print("*******************************\n") print("Driver application PID is: ", driver) print("There are no other PIDs") no_log = False else : print("Something went wrong.. 
exiting") exit(1) ############### End of PID extraction ################# # get time and PID filenames time_filename = 'time_evolution_' + process_id pid_filename = 'pid_evolution_' + process_id # load data as pandas DataFrame (DataFrame due to uneven number of coloumns in file) mem = pd.read_csv(mem_filename, skiprows=2, sep=" +", engine="python",header=None) cpu = pd.read_csv(cpu_filename, skiprows=2, sep=" +", engine="python",header=None) pid = pd.read_csv(pid_filename, skiprows=2, sep=" +", engine="python",header=None) t = np.loadtxt(time_filename) # time in ms (mili-seconds) # extract values from the DataFrame mem = mem[1:].values cpu = cpu[1:].values pid = pid[1:].values # process time series t = t-t[0] # rescale time such that t_start=0 t = t*10**(-3) # convert mili-seconds to seconds # replace 'Nones' (empty) elements w/ zeros and convert string values to floats mem = np.nan_to_num(mem.astype(np.float)) cpu = np.nan_to_num(cpu.astype(np.float)) pid = np.nan_to_num(pid.astype(np.float)) # find all process identifaction numbers involved (PIDs), the index of their first # occurence (index) for an unraveled array and the total number of apperances (counts) in the process PIDs, index, counts = np.unique(pid,return_index=True,return_counts=True) # NOTE: we don't want to count 'fake' PIDs. These are PIDs that spawns only once not taking # any memory or cpu. Due to their appearence they shift the colomns in all monitored files. # This needs to be taken care of and they are therefore deleted from the removed. # return the index of the fake pids fake = np.where(counts==1) # delete the fake pids from PIDs list PIDs = np.delete(PIDs,fake) index = np.delete(index,fake) counts = np.delete(counts,fake) # we also dele PID=0, as this is not a real PID PIDs = np.delete(PIDs,0) index = np.delete(index,0) counts = np.delete(counts,0) # get number of real PIDs nPIDs = len(PIDs) # dimension of data dim = pid.shape # could also use from time series # NOTE: dimensiton is always (n_steps, 40) # because of '#' characters in ./monitor.sh # number of steps in simulation for o2-sim steps = len(pid[:,0]) # could also use from time series # declare final lists m = [] # memory c = [] # cpu p = [] # process for i in range(nPIDs): # loop through all valid PIDs # find the number of zeros to pad with init_zeros, _ = np.unravel_index(index[i],dim) # pad the 'initial' zeros (begining) mem_dummy = np.hstack((np.zeros(init_zeros),mem[pid==PIDs[i]])) cpu_dummy = np.hstack((np.zeros(init_zeros),cpu[pid==PIDs[i]])) pid_dummy = np.hstack((np.zeros(init_zeros),pid[pid==PIDs[i]])) # find the difference in final steps n_diff = steps - len(mem_dummy) # pad the ending w/ zeros mem_dummy = np.hstack((mem_dummy,np.zeros(n_diff))) cpu_dummy = np.hstack((cpu_dummy,np.zeros(n_diff))) pid_dummy = np.hstack((pid_dummy,np.zeros(n_diff))) # save to list m.append(mem_dummy) c.append(cpu_dummy) p.append(pid_dummy) #print("PID is: ",PIDs[i]) #print("initial number of zeros to pad: ", init_zeros) #print("final number of zeros to pad: ", n_diff) #print("**************\n") # convert to array and assure correct shape of arrays m = np.asarray(m).T c = np.asarray(c).T p = np.asarray(p).T ################################### # # # COMPUTATIONS # # # ################################### print("********************************") # compute average memory and maximum memory M = np.sum(m,axis=1) # sum all processes memory max_mem = np.max(M) # find maximum mean_mem = np.mean(M) # find mean print(f"max mem: {max_mem:.2f} MB") print(f"mean mem: 
{mean_mem:.2f} MB") C = np.sum(c,axis=1) # compute total cpu max_cpu = np.max(C) print(f"max cpu: {max_cpu:.2f}s") # print total wall clock time wall_clock = t[-1] print(f"Total wall clock time: {wall_clock:.2f} s") # print ratio ratio = np.max(C)/t[-1] print(f"Ratio (cpu time) / (wall clock time) : {ratio:.2f}") print("********************************") ################################### # # # PLOTTING # # # ################################### if no_log: # True if user hasn't provided logfiles # plot of total, max and mean memory fig,ax = plt.subplots(dpi=125,facecolor="white") ax.plot(t,M,'-k',label='total memory'); ax.hlines(np.mean(M),np.min(t),np.max(t),color='blue',linestyles='--',label='mean memory'); ax.hlines(np.max(M),np.min(t),np.max(t),color='red',linestyles='--',label='max memory'); ax.set_title(title) ax.set_xlabel("Time [s]") ax.set_ylabel("Memory [MB]") ax.xaxis.set_minor_locator(AutoMinorLocator()) ax.yaxis.set_minor_locator(AutoMinorLocator()) ax.legend(prop={'size': 10},loc='best') ax.grid(); # plot of total, max and mean CPU fig1,ax1 = plt.subplots(dpi=125,facecolor="white") ax1.plot(t,C,'-k',label='total cpu'); ax1.hlines(np.mean(C),np.min(t),np.max(t),color='blue',linestyles='--',label='mean cpu'); ax1.hlines(np.max(C),np.min(t),np.max(t),color='red',linestyles='--',label='max cpu'); ax1.set_title(title) ax1.set_xlabel("Time [s]") ax1.set_ylabel("CPU [s]") ax1.xaxis.set_minor_locator(AutoMinorLocator()) ax1.yaxis.set_minor_locator(AutoMinorLocator()) ax1.legend(prop={'size': 10},loc='best'); ax1.grid() plt.show(); else : # details about the PID exists (from logfiles) # # convert to pid info lists to arrays # pid_vals = np.asarray(pid_vals) # pid_names = np.asarray(pid_names) # # # be sure of the correct ordering of pids # pid_placement = np.where(pid_vals==PIDs) # plot memory fig,ax = plt.subplots(dpi=125,facecolor="white") ax.plot(t,m); # some features for the plot ax.set_title(title) ax.set_xlabel("Time [s]") ax.set_ylabel("Memory [MB]") ax.xaxis.set_minor_locator(AutoMinorLocator()) ax.yaxis.set_minor_locator(AutoMinorLocator()) ax.legend(pid_names,prop={'size': 10},loc='best') ax.grid(); # plot cpu fig1,ax1 = plt.subplots(dpi=125,facecolor="white") ax1.plot(t,c); # some features for the plot ax1.set_title(title) ax1.set_xlabel("Time [s]") ax1.set_ylabel("CPU [s]") ax1.xaxis.set_minor_locator(AutoMinorLocator()) ax1.yaxis.set_minor_locator(AutoMinorLocator()) ax1.legend(pid_names,prop={'size': 10},loc='best'); ax1.grid() plt.show();
gpl-3.0
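A self-contained toy version of the padding step above: each PID's samples are stitched into a full-length series by padding zeros before its first appearance and after its last one. The numbers are invented for illustration.

import numpy as np

# toy monitor output: rows are time steps, columns are slots; 0 means "no process here"
pid = np.array([[101.,   0.],
                [101., 202.],
                [101., 202.],
                [  0., 202.]])
mem = np.array([[10., 0.],
                [11., 5.],
                [12., 6.],
                [ 0., 7.]])

steps, dim = pid.shape[0], pid.shape
series = {}
PIDs, index = np.unique(pid, return_index=True)
for p, idx in zip(PIDs, index):
    if p == 0:                                   # skip the "empty slot" marker, as above
        continue
    init_zeros, _ = np.unravel_index(idx, dim)   # steps before the PID first appears
    vals = np.hstack((np.zeros(init_zeros), mem[pid == p]))
    vals = np.hstack((vals, np.zeros(steps - len(vals))))  # pad the tail
    series[int(p)] = vals

print(series[101], series[202])   # [10. 11. 12.  0.] [0. 5. 6. 7.]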
drabastomek/practicalDataAnalysisCookbook
Codes/Chapter07/ts_detrendAndRemoveSeasonality.py
1
2625
import pandas as pd import numpy as np import matplotlib import matplotlib.pyplot as plt # change the font size matplotlib.rc('xtick', labelsize=9) matplotlib.rc('ytick', labelsize=9) matplotlib.rc('font', size=14) # time series tools import statsmodels.api as sm def period_mean(data, freq): ''' Method to calculate mean for each frequency ''' return np.array( [np.mean(data[i::freq]) for i in range(freq)]) # folder with data data_folder = '../../Data/Chapter07/' # colors colors = ['#FF6600', '#000000', '#29407C', '#660000'] # read the data riverFlows = pd.read_csv(data_folder + 'combined_flow.csv', index_col=0, parse_dates=[0]) # detrend the data detrended = sm.tsa.tsatools.detrend(riverFlows, order=1, axis=0) # create a data frame with the detrended data detrended = pd.DataFrame(detrended, index=riverFlows.index, columns=['american_flow_d', 'columbia_flow_d']) # join to the main dataset riverFlows = riverFlows.join(detrended) # calculate trend riverFlows['american_flow_t'] = riverFlows['american_flow'] \ - riverFlows['american_flow_d'] riverFlows['columbia_flow_t'] = riverFlows['columbia_flow'] \ - riverFlows['columbia_flow_d'] # number of observations and frequency of seasonal component nobs = len(riverFlows) freq = 12 # yearly seasonality # remove the seasonality for col in ['american_flow_d', 'columbia_flow_d']: period_averages = period_mean(riverFlows[col], freq) riverFlows[col[:-2]+'_s'] = np.tile(period_averages, nobs // freq + 1)[:nobs] riverFlows[col[:-2]+'_r'] = np.array(riverFlows[col]) \ - np.array(riverFlows[col[:-2]+'_s']) # save the decomposed dataset with open(data_folder + 'combined_flow_d.csv', 'w') as o: o.write(riverFlows.to_csv(index=False)) # plot the data fig, ax = plt.subplots(2, 3, sharex=True, sharey=True) # set the size of the figure explicitly fig.set_size_inches(12, 7) # plot the charts for american ax[0, 0].plot(riverFlows['american_flow_t'], colors[0]) ax[0, 1].plot(riverFlows['american_flow_s'], colors[1]) ax[0, 2].plot(riverFlows['american_flow_r'], colors[2]) # plot the charts for columbia ax[1, 0].plot(riverFlows['columbia_flow_t'], colors[0]) ax[1, 1].plot(riverFlows['columbia_flow_s'], colors[1]) ax[1, 2].plot(riverFlows['columbia_flow_r'], colors[2]) # set titles for columns ax[0, 0].set_title('Trend') ax[0, 1].set_title('Seasonality') ax[0, 2].set_title('Residuals') # set titles for rows ax[0, 0].set_ylabel('American') ax[1, 0].set_ylabel('Columbia') # save the chart plt.savefig(data_folder + 'charts/detrended.png', dpi=300)
gpl-2.0
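A stand-alone illustration of the seasonal step above: period_mean averages every freq-th observation of the detrended series, and tiling those averages back to full length gives the seasonal component. The synthetic series is invented for the example.

import numpy as np

def period_mean(data, freq):
    # one mean per position in the seasonal cycle
    return np.array([np.mean(data[i::freq]) for i in range(freq)])

rng = np.random.default_rng(0)
freq, nobs = 12, 60                                    # five years of monthly data
true_season = 10 * np.sin(2 * np.pi * np.arange(freq) / freq)
detrended = np.tile(true_season, nobs // freq) + rng.normal(0, 1, nobs)

period_averages = period_mean(detrended, freq)
seasonal = np.tile(period_averages, nobs // freq + 1)[:nobs]
residuals = detrended - seasonal
print(np.round(period_averages, 1))                    # close to the true monthly pattern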
jlegendary/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/widgets.py
69
40833
""" GUI Neutral widgets All of these widgets require you to predefine an Axes instance and pass that as the first arg. matplotlib doesn't try to be too smart in layout -- you have to figure out how wide and tall you want your Axes to be to accommodate your widget. """ import numpy as np from mlab import dist from patches import Circle, Rectangle from lines import Line2D from transforms import blended_transform_factory class LockDraw: """ some widgets, like the cursor, draw onto the canvas, and this is not desirable under all circumstaces, like when the toolbar is in zoom-to-rect mode and drawing a rectangle. The module level "lock" allows someone to grab the lock and prevent other widgets from drawing. Use matplotlib.widgets.lock(someobj) to pr """ def __init__(self): self._owner = None def __call__(self, o): 'reserve the lock for o' if not self.available(o): raise ValueError('already locked') self._owner = o def release(self, o): 'release the lock' if not self.available(o): raise ValueError('you do not own this lock') self._owner = None def available(self, o): 'drawing is available to o' return not self.locked() or self.isowner(o) def isowner(self, o): 'o owns the lock' return self._owner is o def locked(self): 'the lock is held' return self._owner is not None class Widget: """ OK, I couldn't resist; abstract base class for mpl GUI neutral widgets """ drawon = True eventson = True class Button(Widget): """ A GUI neutral button The following attributes are accesible ax - the Axes the button renders into label - a text.Text instance color - the color of the button when not hovering hovercolor - the color of the button when hovering Call "on_clicked" to connect to the button """ def __init__(self, ax, label, image=None, color='0.85', hovercolor='0.95'): """ ax is the Axes instance the button will be placed into label is a string which is the button text image if not None, is an image to place in the button -- can be any legal arg to imshow (numpy array, matplotlib Image instance, or PIL image) color is the color of the button when not activated hovercolor is the color of the button when the mouse is over it """ if image is not None: ax.imshow(image) self.label = ax.text(0.5, 0.5, label, verticalalignment='center', horizontalalignment='center', transform=ax.transAxes) self.cnt = 0 self.observers = {} self.ax = ax ax.figure.canvas.mpl_connect('button_press_event', self._click) ax.figure.canvas.mpl_connect('motion_notify_event', self._motion) ax.set_navigate(False) ax.set_axis_bgcolor(color) ax.set_xticks([]) ax.set_yticks([]) self.color = color self.hovercolor = hovercolor self._lastcolor = color def _click(self, event): if event.inaxes != self.ax: return if not self.eventson: return for cid, func in self.observers.items(): func(event) def _motion(self, event): if event.inaxes==self.ax: c = self.hovercolor else: c = self.color if c != self._lastcolor: self.ax.set_axis_bgcolor(c) self._lastcolor = c if self.drawon: self.ax.figure.canvas.draw() def on_clicked(self, func): """ When the button is clicked, call this func with event A connection id is returned which can be used to disconnect """ cid = self.cnt self.observers[cid] = func self.cnt += 1 return cid def disconnect(self, cid): 'remove the observer with connection id cid' try: del self.observers[cid] except KeyError: pass class Slider(Widget): """ A slider representing a floating point range The following attributes are defined ax : the slider axes.Axes instance val : the current slider value vline : a Line2D instance representing the 
initial value poly : A patch.Polygon instance which is the slider valfmt : the format string for formatting the slider text label : a text.Text instance, the slider label closedmin : whether the slider is closed on the minimum closedmax : whether the slider is closed on the maximum slidermin : another slider - if not None, this slider must be > slidermin slidermax : another slider - if not None, this slider must be < slidermax dragging : allow for mouse dragging on slider Call on_changed to connect to the slider event """ def __init__(self, ax, label, valmin, valmax, valinit=0.5, valfmt='%1.2f', closedmin=True, closedmax=True, slidermin=None, slidermax=None, dragging=True, **kwargs): """ Create a slider from valmin to valmax in axes ax; valinit - the slider initial position label - the slider label valfmt - used to format the slider value closedmin and closedmax - indicate whether the slider interval is closed slidermin and slidermax - be used to contrain the value of this slider to the values of other sliders. additional kwargs are passed on to self.poly which is the matplotlib.patches.Rectangle which draws the slider. See the matplotlib.patches.Rectangle documentation for legal property names (eg facecolor, edgecolor, alpha, ...) """ self.ax = ax self.valmin = valmin self.valmax = valmax self.val = valinit self.valinit = valinit self.poly = ax.axvspan(valmin,valinit,0,1, **kwargs) self.vline = ax.axvline(valinit,0,1, color='r', lw=1) self.valfmt=valfmt ax.set_yticks([]) ax.set_xlim((valmin, valmax)) ax.set_xticks([]) ax.set_navigate(False) ax.figure.canvas.mpl_connect('button_press_event', self._update) if dragging: ax.figure.canvas.mpl_connect('motion_notify_event', self._update) self.label = ax.text(-0.02, 0.5, label, transform=ax.transAxes, verticalalignment='center', horizontalalignment='right') self.valtext = ax.text(1.02, 0.5, valfmt%valinit, transform=ax.transAxes, verticalalignment='center', horizontalalignment='left') self.cnt = 0 self.observers = {} self.closedmin = closedmin self.closedmax = closedmax self.slidermin = slidermin self.slidermax = slidermax def _update(self, event): 'update the slider position' if event.button !=1: return if event.inaxes != self.ax: return val = event.xdata if not self.closedmin and val<=self.valmin: return if not self.closedmax and val>=self.valmax: return if self.slidermin is not None: if val<=self.slidermin.val: return if self.slidermax is not None: if val>=self.slidermax.val: return self.set_val(val) def set_val(self, val): xy = self.poly.xy xy[-1] = val, 0 xy[-2] = val, 1 self.poly.xy = xy self.valtext.set_text(self.valfmt%val) if self.drawon: self.ax.figure.canvas.draw() self.val = val if not self.eventson: return for cid, func in self.observers.items(): func(val) def on_changed(self, func): """ When the slider valud is changed, call this func with the new slider position A connection id is returned which can be used to disconnect """ cid = self.cnt self.observers[cid] = func self.cnt += 1 return cid def disconnect(self, cid): 'remove the observer with connection id cid' try: del self.observers[cid] except KeyError: pass def reset(self): "reset the slider to the initial value if needed" if (self.val != self.valinit): self.set_val(self.valinit) class CheckButtons(Widget): """ A GUI neutral radio button The following attributes are exposed ax - the Axes instance the buttons are in labels - a list of text.Text instances lines - a list of (line1, line2) tuples for the x's in the check boxes. 
These lines exist for each box, but have set_visible(False) when box is not checked rectangles - a list of patch.Rectangle instances Connect to the CheckButtons with the on_clicked method """ def __init__(self, ax, labels, actives): """ Add check buttons to axes.Axes instance ax labels is a len(buttons) list of labels as strings actives is a len(buttons) list of booleans indicating whether the button is active """ ax.set_xticks([]) ax.set_yticks([]) ax.set_navigate(False) if len(labels)>1: dy = 1./(len(labels)+1) ys = np.linspace(1-dy, dy, len(labels)) else: dy = 0.25 ys = [0.5] cnt = 0 axcolor = ax.get_axis_bgcolor() self.labels = [] self.lines = [] self.rectangles = [] lineparams = {'color':'k', 'linewidth':1.25, 'transform':ax.transAxes, 'solid_capstyle':'butt'} for y, label in zip(ys, labels): t = ax.text(0.25, y, label, transform=ax.transAxes, horizontalalignment='left', verticalalignment='center') w, h = dy/2., dy/2. x, y = 0.05, y-h/2. p = Rectangle(xy=(x,y), width=w, height=h, facecolor=axcolor, transform=ax.transAxes) l1 = Line2D([x, x+w], [y+h, y], **lineparams) l2 = Line2D([x, x+w], [y, y+h], **lineparams) l1.set_visible(actives[cnt]) l2.set_visible(actives[cnt]) self.labels.append(t) self.rectangles.append(p) self.lines.append((l1,l2)) ax.add_patch(p) ax.add_line(l1) ax.add_line(l2) cnt += 1 ax.figure.canvas.mpl_connect('button_press_event', self._clicked) self.ax = ax self.cnt = 0 self.observers = {} def _clicked(self, event): if event.button !=1 : return if event.inaxes != self.ax: return for p,t,lines in zip(self.rectangles, self.labels, self.lines): if (t.get_window_extent().contains(event.x, event.y) or p.get_window_extent().contains(event.x, event.y) ): l1, l2 = lines l1.set_visible(not l1.get_visible()) l2.set_visible(not l2.get_visible()) thist = t break else: return if self.drawon: self.ax.figure.canvas.draw() if not self.eventson: return for cid, func in self.observers.items(): func(thist.get_text()) def on_clicked(self, func): """ When the button is clicked, call this func with button label A connection id is returned which can be used to disconnect """ cid = self.cnt self.observers[cid] = func self.cnt += 1 return cid def disconnect(self, cid): 'remove the observer with connection id cid' try: del self.observers[cid] except KeyError: pass class RadioButtons(Widget): """ A GUI neutral radio button The following attributes are exposed ax - the Axes instance the buttons are in activecolor - the color of the button when clicked labels - a list of text.Text instances circles - a list of patch.Circle instances Connect to the RadioButtons with the on_clicked method """ def __init__(self, ax, labels, active=0, activecolor='blue'): """ Add radio buttons to axes.Axes instance ax labels is a len(buttons) list of labels as strings active is the index into labels for the button that is active activecolor is the color of the button when clicked """ self.activecolor = activecolor ax.set_xticks([]) ax.set_yticks([]) ax.set_navigate(False) dy = 1./(len(labels)+1) ys = np.linspace(1-dy, dy, len(labels)) cnt = 0 axcolor = ax.get_axis_bgcolor() self.labels = [] self.circles = [] for y, label in zip(ys, labels): t = ax.text(0.25, y, label, transform=ax.transAxes, horizontalalignment='left', verticalalignment='center') if cnt==active: facecolor = activecolor else: facecolor = axcolor p = Circle(xy=(0.15, y), radius=0.05, facecolor=facecolor, transform=ax.transAxes) self.labels.append(t) self.circles.append(p) ax.add_patch(p) cnt += 1 ax.figure.canvas.mpl_connect('button_press_event', 
self._clicked) self.ax = ax self.cnt = 0 self.observers = {} def _clicked(self, event): if event.button !=1 : return if event.inaxes != self.ax: return xy = self.ax.transAxes.inverted().transform_point((event.x, event.y)) pclicked = np.array([xy[0], xy[1]]) def inside(p): pcirc = np.array([p.center[0], p.center[1]]) return dist(pclicked, pcirc) < p.radius for p,t in zip(self.circles, self.labels): if t.get_window_extent().contains(event.x, event.y) or inside(p): inp = p thist = t break else: return for p in self.circles: if p==inp: color = self.activecolor else: color = self.ax.get_axis_bgcolor() p.set_facecolor(color) if self.drawon: self.ax.figure.canvas.draw() if not self.eventson: return for cid, func in self.observers.items(): func(thist.get_text()) def on_clicked(self, func): """ When the button is clicked, call this func with button label A connection id is returned which can be used to disconnect """ cid = self.cnt self.observers[cid] = func self.cnt += 1 return cid def disconnect(self, cid): 'remove the observer with connection id cid' try: del self.observers[cid] except KeyError: pass class SubplotTool(Widget): """ A tool to adjust to subplot params of fig """ def __init__(self, targetfig, toolfig): """ targetfig is the figure to adjust toolfig is the figure to embed the the subplot tool into. If None, a default pylab figure will be created. If you are using this from the GUI """ self.targetfig = targetfig toolfig.subplots_adjust(left=0.2, right=0.9) class toolbarfmt: def __init__(self, slider): self.slider = slider def __call__(self, x, y): fmt = '%s=%s'%(self.slider.label.get_text(), self.slider.valfmt) return fmt%x self.axleft = toolfig.add_subplot(711) self.axleft.set_title('Click on slider to adjust subplot param') self.axleft.set_navigate(False) self.sliderleft = Slider(self.axleft, 'left', 0, 1, targetfig.subplotpars.left, closedmax=False) self.sliderleft.on_changed(self.funcleft) self.axbottom = toolfig.add_subplot(712) self.axbottom.set_navigate(False) self.sliderbottom = Slider(self.axbottom, 'bottom', 0, 1, targetfig.subplotpars.bottom, closedmax=False) self.sliderbottom.on_changed(self.funcbottom) self.axright = toolfig.add_subplot(713) self.axright.set_navigate(False) self.sliderright = Slider(self.axright, 'right', 0, 1, targetfig.subplotpars.right, closedmin=False) self.sliderright.on_changed(self.funcright) self.axtop = toolfig.add_subplot(714) self.axtop.set_navigate(False) self.slidertop = Slider(self.axtop, 'top', 0, 1, targetfig.subplotpars.top, closedmin=False) self.slidertop.on_changed(self.functop) self.axwspace = toolfig.add_subplot(715) self.axwspace.set_navigate(False) self.sliderwspace = Slider(self.axwspace, 'wspace', 0, 1, targetfig.subplotpars.wspace, closedmax=False) self.sliderwspace.on_changed(self.funcwspace) self.axhspace = toolfig.add_subplot(716) self.axhspace.set_navigate(False) self.sliderhspace = Slider(self.axhspace, 'hspace', 0, 1, targetfig.subplotpars.hspace, closedmax=False) self.sliderhspace.on_changed(self.funchspace) # constraints self.sliderleft.slidermax = self.sliderright self.sliderright.slidermin = self.sliderleft self.sliderbottom.slidermax = self.slidertop self.slidertop.slidermin = self.sliderbottom bax = toolfig.add_axes([0.8, 0.05, 0.15, 0.075]) self.buttonreset = Button(bax, 'Reset') sliders = (self.sliderleft, self.sliderbottom, self.sliderright, self.slidertop, self.sliderwspace, self.sliderhspace, ) def func(event): thisdrawon = self.drawon self.drawon = False # store the drawon state of each slider bs = [] for slider 
in sliders: bs.append(slider.drawon) slider.drawon = False # reset the slider to the initial position for slider in sliders: slider.reset() # reset drawon for slider, b in zip(sliders, bs): slider.drawon = b # draw the canvas self.drawon = thisdrawon if self.drawon: toolfig.canvas.draw() self.targetfig.canvas.draw() # during reset there can be a temporary invalid state # depending on the order of the reset so we turn off # validation for the resetting validate = toolfig.subplotpars.validate toolfig.subplotpars.validate = False self.buttonreset.on_clicked(func) toolfig.subplotpars.validate = validate def funcleft(self, val): self.targetfig.subplots_adjust(left=val) if self.drawon: self.targetfig.canvas.draw() def funcright(self, val): self.targetfig.subplots_adjust(right=val) if self.drawon: self.targetfig.canvas.draw() def funcbottom(self, val): self.targetfig.subplots_adjust(bottom=val) if self.drawon: self.targetfig.canvas.draw() def functop(self, val): self.targetfig.subplots_adjust(top=val) if self.drawon: self.targetfig.canvas.draw() def funcwspace(self, val): self.targetfig.subplots_adjust(wspace=val) if self.drawon: self.targetfig.canvas.draw() def funchspace(self, val): self.targetfig.subplots_adjust(hspace=val) if self.drawon: self.targetfig.canvas.draw() class Cursor: """ A horizontal and vertical line span the axes that and move with the pointer. You can turn off the hline or vline spectively with the attributes horizOn =True|False: controls visibility of the horizontal line vertOn =True|False: controls visibility of the horizontal line And the visibility of the cursor itself with visible attribute """ def __init__(self, ax, useblit=False, **lineprops): """ Add a cursor to ax. If useblit=True, use the backend dependent blitting features for faster updates (GTKAgg only now). lineprops is a dictionary of line properties. See examples/widgets/cursor.py. 
""" self.ax = ax self.canvas = ax.figure.canvas self.canvas.mpl_connect('motion_notify_event', self.onmove) self.canvas.mpl_connect('draw_event', self.clear) self.visible = True self.horizOn = True self.vertOn = True self.useblit = useblit self.lineh = ax.axhline(ax.get_ybound()[0], visible=False, **lineprops) self.linev = ax.axvline(ax.get_xbound()[0], visible=False, **lineprops) self.background = None self.needclear = False def clear(self, event): 'clear the cursor' if self.useblit: self.background = self.canvas.copy_from_bbox(self.ax.bbox) self.linev.set_visible(False) self.lineh.set_visible(False) def onmove(self, event): 'on mouse motion draw the cursor if visible' if event.inaxes != self.ax: self.linev.set_visible(False) self.lineh.set_visible(False) if self.needclear: self.canvas.draw() self.needclear = False return self.needclear = True if not self.visible: return self.linev.set_xdata((event.xdata, event.xdata)) self.lineh.set_ydata((event.ydata, event.ydata)) self.linev.set_visible(self.visible and self.vertOn) self.lineh.set_visible(self.visible and self.horizOn) self._update() def _update(self): if self.useblit: if self.background is not None: self.canvas.restore_region(self.background) self.ax.draw_artist(self.linev) self.ax.draw_artist(self.lineh) self.canvas.blit(self.ax.bbox) else: self.canvas.draw_idle() return False class MultiCursor: """ Provide a vertical line cursor shared between multiple axes from matplotlib.widgets import MultiCursor from pylab import figure, show, nx t = nx.arange(0.0, 2.0, 0.01) s1 = nx.sin(2*nx.pi*t) s2 = nx.sin(4*nx.pi*t) fig = figure() ax1 = fig.add_subplot(211) ax1.plot(t, s1) ax2 = fig.add_subplot(212, sharex=ax1) ax2.plot(t, s2) multi = MultiCursor(fig.canvas, (ax1, ax2), color='r', lw=1) show() """ def __init__(self, canvas, axes, useblit=True, **lineprops): self.canvas = canvas self.axes = axes xmin, xmax = axes[-1].get_xlim() xmid = 0.5*(xmin+xmax) self.lines = [ax.axvline(xmid, visible=False, **lineprops) for ax in axes] self.visible = True self.useblit = useblit self.background = None self.needclear = False self.canvas.mpl_connect('motion_notify_event', self.onmove) self.canvas.mpl_connect('draw_event', self.clear) def clear(self, event): 'clear the cursor' if self.useblit: self.background = self.canvas.copy_from_bbox(self.canvas.figure.bbox) for line in self.lines: line.set_visible(False) def onmove(self, event): if event.inaxes is None: return if not self.canvas.widgetlock.available(self): return self.needclear = True if not self.visible: return for line in self.lines: line.set_xdata((event.xdata, event.xdata)) line.set_visible(self.visible) self._update() def _update(self): if self.useblit: if self.background is not None: self.canvas.restore_region(self.background) for ax, line in zip(self.axes, self.lines): ax.draw_artist(line) self.canvas.blit(self.canvas.figure.bbox) else: self.canvas.draw_idle() class SpanSelector: """ Select a min/max range of the x or y axes for a matplotlib Axes Example usage: ax = subplot(111) ax.plot(x,y) def onselect(vmin, vmax): print vmin, vmax span = SpanSelector(ax, onselect, 'horizontal') onmove_callback is an optional callback that will be called on mouse move with the span range """ def __init__(self, ax, onselect, direction, minspan=None, useblit=False, rectprops=None, onmove_callback=None): """ Create a span selector in ax. When a selection is made, clear the span and call onselect with onselect(vmin, vmax) and clear the span. 
direction must be 'horizontal' or 'vertical' If minspan is not None, ignore events smaller than minspan The span rect is drawn with rectprops; default rectprops = dict(facecolor='red', alpha=0.5) set the visible attribute to False if you want to turn off the functionality of the span selector """ if rectprops is None: rectprops = dict(facecolor='red', alpha=0.5) assert direction in ['horizontal', 'vertical'], 'Must choose horizontal or vertical for direction' self.direction = direction self.ax = None self.canvas = None self.visible = True self.cids=[] self.rect = None self.background = None self.pressv = None self.rectprops = rectprops self.onselect = onselect self.onmove_callback = onmove_callback self.useblit = useblit self.minspan = minspan # Needed when dragging out of axes self.buttonDown = False self.prev = (0, 0) self.new_axes(ax) def new_axes(self,ax): self.ax = ax if self.canvas is not ax.figure.canvas: for cid in self.cids: self.canvas.mpl_disconnect(cid) self.canvas = ax.figure.canvas self.cids.append(self.canvas.mpl_connect('motion_notify_event', self.onmove)) self.cids.append(self.canvas.mpl_connect('button_press_event', self.press)) self.cids.append(self.canvas.mpl_connect('button_release_event', self.release)) self.cids.append(self.canvas.mpl_connect('draw_event', self.update_background)) if self.direction == 'horizontal': trans = blended_transform_factory(self.ax.transData, self.ax.transAxes) w,h = 0,1 else: trans = blended_transform_factory(self.ax.transAxes, self.ax.transData) w,h = 1,0 self.rect = Rectangle( (0,0), w, h, transform=trans, visible=False, **self.rectprops ) if not self.useblit: self.ax.add_patch(self.rect) def update_background(self, event): 'force an update of the background' if self.useblit: self.background = self.canvas.copy_from_bbox(self.ax.bbox) def ignore(self, event): 'return True if event should be ignored' return event.inaxes!=self.ax or not self.visible or event.button !=1 def press(self, event): 'on button press event' if self.ignore(event): return self.buttonDown = True self.rect.set_visible(self.visible) if self.direction == 'horizontal': self.pressv = event.xdata else: self.pressv = event.ydata return False def release(self, event): 'on button release event' if self.pressv is None or (self.ignore(event) and not self.buttonDown): return self.buttonDown = False self.rect.set_visible(False) self.canvas.draw() vmin = self.pressv if self.direction == 'horizontal': vmax = event.xdata or self.prev[0] else: vmax = event.ydata or self.prev[1] if vmin>vmax: vmin, vmax = vmax, vmin span = vmax - vmin if self.minspan is not None and span<self.minspan: return self.onselect(vmin, vmax) self.pressv = None return False def update(self): 'draw using newfangled blit or oldfangled draw depending on useblit' if self.useblit: if self.background is not None: self.canvas.restore_region(self.background) self.ax.draw_artist(self.rect) self.canvas.blit(self.ax.bbox) else: self.canvas.draw_idle() return False def onmove(self, event): 'on motion notify event' if self.pressv is None or self.ignore(event): return x, y = event.xdata, event.ydata self.prev = x, y if self.direction == 'horizontal': v = x else: v = y minv, maxv = v, self.pressv if minv>maxv: minv, maxv = maxv, minv if self.direction == 'horizontal': self.rect.set_x(minv) self.rect.set_width(maxv-minv) else: self.rect.set_y(minv) self.rect.set_height(maxv-minv) if self.onmove_callback is not None: vmin = self.pressv if self.direction == 'horizontal': vmax = event.xdata or self.prev[0] else: vmax = event.ydata 
or self.prev[1] if vmin>vmax: vmin, vmax = vmax, vmin self.onmove_callback(vmin, vmax) self.update() return False # For backwards compatibility only! class HorizontalSpanSelector(SpanSelector): def __init__(self, ax, onselect, **kwargs): import warnings warnings.warn('Use SpanSelector instead!', DeprecationWarning) SpanSelector.__init__(self, ax, onselect, 'horizontal', **kwargs) class RectangleSelector: """ Select a min/max range of the x axes for a matplotlib Axes Example usage:: from matplotlib.widgets import RectangleSelector from pylab import * def onselect(eclick, erelease): 'eclick and erelease are matplotlib events at press and release' print ' startposition : (%f, %f)' % (eclick.xdata, eclick.ydata) print ' endposition : (%f, %f)' % (erelease.xdata, erelease.ydata) print ' used button : ', eclick.button def toggle_selector(event): print ' Key pressed.' if event.key in ['Q', 'q'] and toggle_selector.RS.active: print ' RectangleSelector deactivated.' toggle_selector.RS.set_active(False) if event.key in ['A', 'a'] and not toggle_selector.RS.active: print ' RectangleSelector activated.' toggle_selector.RS.set_active(True) x = arange(100)/(99.0) y = sin(x) fig = figure ax = subplot(111) ax.plot(x,y) toggle_selector.RS = RectangleSelector(ax, onselect, drawtype='line') connect('key_press_event', toggle_selector) show() """ def __init__(self, ax, onselect, drawtype='box', minspanx=None, minspany=None, useblit=False, lineprops=None, rectprops=None, spancoords='data'): """ Create a selector in ax. When a selection is made, clear the span and call onselect with onselect(pos_1, pos_2) and clear the drawn box/line. There pos_i are arrays of length 2 containing the x- and y-coordinate. If minspanx is not None then events smaller than minspanx in x direction are ignored(it's the same for y). The rect is drawn with rectprops; default rectprops = dict(facecolor='red', edgecolor = 'black', alpha=0.5, fill=False) The line is drawn with lineprops; default lineprops = dict(color='black', linestyle='-', linewidth = 2, alpha=0.5) Use type if you want the mouse to draw a line, a box or nothing between click and actual position ny setting drawtype = 'line', drawtype='box' or drawtype = 'none'. spancoords is one of 'data' or 'pixels'. 
If 'data', minspanx and minspanx will be interpreted in the same coordinates as the x and ya axis, if 'pixels', they are in pixels """ self.ax = ax self.visible = True self.canvas = ax.figure.canvas self.canvas.mpl_connect('motion_notify_event', self.onmove) self.canvas.mpl_connect('button_press_event', self.press) self.canvas.mpl_connect('button_release_event', self.release) self.canvas.mpl_connect('draw_event', self.update_background) self.active = True # for activation / deactivation self.to_draw = None self.background = None if drawtype == 'none': drawtype = 'line' # draw a line but make it self.visible = False # invisible if drawtype == 'box': if rectprops is None: rectprops = dict(facecolor='white', edgecolor = 'black', alpha=0.5, fill=False) self.rectprops = rectprops self.to_draw = Rectangle((0,0), 0, 1,visible=False,**self.rectprops) self.ax.add_patch(self.to_draw) if drawtype == 'line': if lineprops is None: lineprops = dict(color='black', linestyle='-', linewidth = 2, alpha=0.5) self.lineprops = lineprops self.to_draw = Line2D([0,0],[0,0],visible=False,**self.lineprops) self.ax.add_line(self.to_draw) self.onselect = onselect self.useblit = useblit self.minspanx = minspanx self.minspany = minspany assert(spancoords in ('data', 'pixels')) self.spancoords = spancoords self.drawtype = drawtype # will save the data (position at mouseclick) self.eventpress = None # will save the data (pos. at mouserelease) self.eventrelease = None def update_background(self, event): 'force an update of the background' if self.useblit: self.background = self.canvas.copy_from_bbox(self.ax.bbox) def ignore(self, event): 'return True if event should be ignored' # If RectangleSelector is not active : if not self.active: return True # If canvas was locked if not self.canvas.widgetlock.available(self): return True # If no button was pressed yet ignore the event if it was out # of the axes if self.eventpress == None: return event.inaxes!= self.ax # If a button was pressed, check if the release-button is the # same. return (event.inaxes!=self.ax or event.button != self.eventpress.button) def press(self, event): 'on button press event' # Is the correct button pressed within the correct axes? if self.ignore(event): return # make the drawed box/line visible get the click-coordinates, # button, ... self.to_draw.set_visible(self.visible) self.eventpress = event return False def release(self, event): 'on button release event' if self.eventpress is None or self.ignore(event): return # make the box/line invisible again self.to_draw.set_visible(False) self.canvas.draw() # release coordinates, button, ... 
self.eventrelease = event if self.spancoords=='data': xmin, ymin = self.eventpress.xdata, self.eventpress.ydata xmax, ymax = self.eventrelease.xdata, self.eventrelease.ydata # calculate dimensions of box or line get values in the right # order elif self.spancoords=='pixels': xmin, ymin = self.eventpress.x, self.eventpress.y xmax, ymax = self.eventrelease.x, self.eventrelease.y else: raise ValueError('spancoords must be "data" or "pixels"') if xmin>xmax: xmin, xmax = xmax, xmin if ymin>ymax: ymin, ymax = ymax, ymin spanx = xmax - xmin spany = ymax - ymin xproblems = self.minspanx is not None and spanx<self.minspanx yproblems = self.minspany is not None and spany<self.minspany if (self.drawtype=='box') and (xproblems or yproblems): """Box to small""" # check if drawed distance (if it exists) is return # not to small in neither x nor y-direction if (self.drawtype=='line') and (xproblems and yproblems): """Line to small""" # check if drawed distance (if it exists) is return # not to small in neither x nor y-direction self.onselect(self.eventpress, self.eventrelease) # call desired function self.eventpress = None # reset the variables to their self.eventrelease = None # inital values return False def update(self): 'draw using newfangled blit or oldfangled draw depending on useblit' if self.useblit: if self.background is not None: self.canvas.restore_region(self.background) self.ax.draw_artist(self.to_draw) self.canvas.blit(self.ax.bbox) else: self.canvas.draw_idle() return False def onmove(self, event): 'on motion notify event if box/line is wanted' if self.eventpress is None or self.ignore(event): return x,y = event.xdata, event.ydata # actual position (with # (button still pressed) if self.drawtype == 'box': minx, maxx = self.eventpress.xdata, x # click-x and actual mouse-x miny, maxy = self.eventpress.ydata, y # click-y and actual mouse-y if minx>maxx: minx, maxx = maxx, minx # get them in the right order if miny>maxy: miny, maxy = maxy, miny self.to_draw.set_x(minx) # set lower left of box self.to_draw.set_y(miny) self.to_draw.set_width(maxx-minx) # set width and height of box self.to_draw.set_height(maxy-miny) self.update() return False if self.drawtype == 'line': self.to_draw.set_data([self.eventpress.xdata, x], [self.eventpress.ydata, y]) self.update() return False def set_active(self, active): """ Use this to activate / deactivate the RectangleSelector from your program with an boolean variable 'active'. 
""" self.active = active def get_active(self): """ to get status of active mode (boolean variable)""" return self.active class Lasso(Widget): def __init__(self, ax, xy, callback=None, useblit=True): self.axes = ax self.figure = ax.figure self.canvas = self.figure.canvas self.useblit = useblit if useblit: self.background = self.canvas.copy_from_bbox(self.axes.bbox) x, y = xy self.verts = [(x,y)] self.line = Line2D([x], [y], linestyle='-', color='black', lw=2) self.axes.add_line(self.line) self.callback = callback self.cids = [] self.cids.append(self.canvas.mpl_connect('button_release_event', self.onrelease)) self.cids.append(self.canvas.mpl_connect('motion_notify_event', self.onmove)) def onrelease(self, event): if self.verts is not None: self.verts.append((event.xdata, event.ydata)) if len(self.verts)>2: self.callback(self.verts) self.axes.lines.remove(self.line) self.verts = None for cid in self.cids: self.canvas.mpl_disconnect(cid) def onmove(self, event): if self.verts is None: return if event.inaxes != self.axes: return if event.button!=1: return self.verts.append((event.xdata, event.ydata)) self.line.set_data(zip(*self.verts)) if self.useblit: self.canvas.restore_region(self.background) self.axes.draw_artist(self.line) self.canvas.blit(self.axes.bbox) else: self.canvas.draw_idle()
gpl-3.0
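A short usage sketch for the Button and Slider classes defined above, written against the current matplotlib.widgets module rather than this bundled python2.6 copy, so minor details (for example axis background colours) may differ.

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Button, Slider

fig, ax = plt.subplots()
fig.subplots_adjust(bottom=0.25)                 # leave room for the widgets
t = np.linspace(0, 1, 500)
line, = ax.plot(t, np.sin(2 * np.pi * 3 * t))

ax_freq = fig.add_axes([0.20, 0.10, 0.55, 0.03])
freq = Slider(ax_freq, 'freq', 0.5, 10.0, valinit=3.0)

ax_reset = fig.add_axes([0.82, 0.09, 0.12, 0.05])
reset = Button(ax_reset, 'Reset')

def update(val):
    line.set_ydata(np.sin(2 * np.pi * freq.val * t))
    fig.canvas.draw_idle()

freq.on_changed(update)
reset.on_clicked(lambda event: freq.reset())
plt.show()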
murrayrm/python-control
examples/pvtol-nested.py
2
4551
# pvtol-nested.py - inner/outer design for vectored thrust aircraft # RMM, 5 Sep 09 # # This file works through a fairly complicated control design and # analysis, corresponding to the planar vertical takeoff and landing # (PVTOL) aircraft in Astrom and Murray, Chapter 11. It is intended # to demonstrate the basic functionality of the python-control # package. # from __future__ import print_function import os import matplotlib.pyplot as plt # MATLAB plotting functions from control.matlab import * # MATLAB-like functions import numpy as np # System parameters m = 4 # mass of aircraft J = 0.0475 # inertia around pitch axis r = 0.25 # distance to center of force g = 9.8 # gravitational constant c = 0.05 # damping factor (estimated) # Transfer functions for dynamics Pi = tf([r], [J, 0, 0]) # inner loop (roll) Po = tf([1], [m, c, 0]) # outer loop (position) # # Inner loop control design # # This is the controller for the pitch dynamics. Goal is to have # fast response for the pitch dynamics so that we can use this as a # control for the lateral dynamics # # Design a simple lead controller for the system k, a, b = 200, 2, 50 Ci = k*tf([1, a], [1, b]) # lead compensator Li = Pi*Ci # Bode plot for the open loop process plt.figure(1) bode(Pi) # Bode plot for the loop transfer function, with margins plt.figure(2) bode(Li) # Compute out the gain and phase margins #! Not implemented # gm, pm, wcg, wcp = margin(Li) # Compute the sensitivity and complementary sensitivity functions Si = feedback(1, Li) Ti = Li*Si # Check to make sure that the specification is met plt.figure(3) gangof4(Pi, Ci) # Compute out the actual transfer function from u1 to v1 (see L8.2 notes) # Hi = Ci*(1-m*g*Pi)/(1+Ci*Pi) Hi = parallel(feedback(Ci, Pi), -m*g*feedback(Ci*Pi, 1)) plt.figure(4) plt.clf() plt.subplot(221) bode(Hi) # Now design the lateral control system a, b, K = 0.02, 5, 2 Co = -K*tf([1, 0.3], [1, 10]) # another lead compensator Lo = -m*g*Po*Co plt.figure(5) bode(Lo) # margin(Lo) # Finally compute the real outer-loop loop gain + responses L = Co*Hi*Po S = feedback(1, L) T = feedback(L, 1) # Compute stability margins gm, pm, wgc, wpc = margin(L) print("Gain margin: %g at %g" % (gm, wgc)) print("Phase margin: %g at %g" % (pm, wpc)) plt.figure(6) plt.clf() bode(L, np.logspace(-4, 3)) # Add crossover line to the magnitude plot # # Note: in matplotlib before v2.1, the following code worked: # # plt.subplot(211); hold(True); # loglog([1e-4, 1e3], [1, 1], 'k-') # # In later versions of matplotlib the call to plt.subplot will clear the # axes and so we have to extract the axes that we want to use by hand. # In addition, hold() is deprecated so we no longer require it. 
# for ax in plt.gcf().axes: if ax.get_label() == 'control-bode-magnitude': break ax.semilogx([1e-4, 1e3], 20*np.log10([1, 1]), 'k-') # # Replot phase starting at -90 degrees # # Get the phase plot axes for ax in plt.gcf().axes: if ax.get_label() == 'control-bode-phase': break # Recreate the frequency response and shift the phase mag, phase, w = freqresp(L, np.logspace(-4, 3)) phase = phase - 360 # Replot the phase by hand ax.semilogx([1e-4, 1e3], [-180, -180], 'k-') ax.semilogx(w, np.squeeze(phase), 'b-') ax.axis([1e-4, 1e3, -360, 0]) plt.xlabel('Frequency [deg]') plt.ylabel('Phase [deg]') # plt.set(gca, 'YTick', [-360, -270, -180, -90, 0]) # plt.set(gca, 'XTick', [10^-4, 10^-2, 1, 100]) # # Nyquist plot for complete design # plt.figure(7) plt.clf() nyquist(L, (0.0001, 1000)) # Add a box in the region we are going to expand plt.plot([-2, -2, 1, 1, -2], [-4, 4, 4, -4, -4], 'r-') # Expanded region plt.figure(8) plt.clf() nyquist(L) plt.axis([-2, 1, -4, 4]) # set up the color color = 'b' # Add arrows to the plot # H1 = L.evalfr(0.4); H2 = L.evalfr(0.41); # arrow([real(H1), imag(H1)], [real(H2), imag(H2)], AM_normal_arrowsize, \ # 'EdgeColor', color, 'FaceColor', color); # H1 = freqresp(L, 0.35); H2 = freqresp(L, 0.36); # arrow([real(H2), -imag(H2)], [real(H1), -imag(H1)], AM_normal_arrowsize, \ # 'EdgeColor', color, 'FaceColor', color); plt.figure(9) Yvec, Tvec = step(T, np.linspace(0, 20)) plt.plot(Tvec.T, Yvec.T) Yvec, Tvec = step(Co*S, np.linspace(0, 20)) plt.plot(Tvec.T, Yvec.T) plt.figure(10) plt.clf() P, Z = pzmap(T, plot=True, grid=True) print("Closed loop poles and zeros: ", P, Z) # Gang of Four plt.figure(11) plt.clf() gangof4(Hi*Po, Co) if 'PYCONTROL_TEST_EXAMPLES' not in os.environ: plt.show()
bsd-3-clause
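A minimal sketch of the inner-loop design step from pvtol-nested.py above, reusing the script's parameter values to build the lead compensator and check the closed-loop steady-state gains. The tf and feedback calls are the ones the script already uses; dcgain is assumed to be available from control.matlab as well.

from control.matlab import tf, feedback, dcgain

J, r = 0.0475, 0.25                 # inertia and moment arm from the script
Pi = tf([r], [J, 0, 0])             # roll dynamics: r / (J s^2)
k, a, b = 200, 2, 50
Ci = k * tf([1, a], [1, b])         # lead compensator: k (s + a) / (s + b)
Li = Pi * Ci

Si = feedback(1, Li)                # sensitivity 1 / (1 + Li)
Ti = feedback(Li, 1)                # complementary sensitivity Li / (1 + Li)
print(dcgain(Si), dcgain(Ti))       # expect roughly 0 and 1: good steady-state tracking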
rc/sfepy
script/plot_mesh.py
4
4164
#!/usr/bin/env python """ Plot mesh connectivities, facet orientations, global and local DOF ids etc. To switch off plotting some mesh entities, set the corresponding color to `None`. """ from __future__ import absolute_import import sys sys.path.append('.') from argparse import ArgumentParser import matplotlib.pyplot as plt from sfepy.base.base import output from sfepy.base.conf import dict_from_string from sfepy.discrete.fem import Mesh, FEDomain import sfepy.postprocess.plot_cmesh as pc helps = { 'vertex_opts' : 'plotting options for mesh vertices' ' [default: %(default)s]', 'edge_opts' : 'plotting options for mesh edges' ' [default: %(default)s]', 'face_opts' : 'plotting options for mesh faces' ' [default: %(default)s]', 'cell_opts' : 'plotting options for mesh cells' ' [default: %(default)s]', 'wireframe_opts' : 'plotting options for mesh wireframe' ' [default: %(default)s]', 'no_axes' : 'do not show the figure axes', 'no_show' : 'do not show the mesh plot figure', } def main(): default_vertex_opts = """color='k', label_global=12, label_local=8""" default_edge_opts = """color='b', label_global=12, label_local=8""" default_face_opts = """color='g', label_global=12, label_local=8""" default_cell_opts = """color='r', label_global=12""" default_wireframe_opts = "color='k'" parser = ArgumentParser(description=__doc__) parser.add_argument('--version', action='version', version='%(prog)s') parser.add_argument('--vertex-opts', metavar='dict-like', action='store', dest='vertex_opts', default=default_vertex_opts, help=helps['vertex_opts']) parser.add_argument('--edge-opts', metavar='dict-like', action='store', dest='edge_opts', default=default_edge_opts, help=helps['edge_opts']) parser.add_argument('--face-opts', metavar='dict-like', action='store', dest='face_opts', default=default_face_opts, help=helps['face_opts']) parser.add_argument('--cell-opts', metavar='dict-like', action='store', dest='cell_opts', default=default_cell_opts, help=helps['cell_opts']) parser.add_argument('--wireframe-opts', metavar='dict-like', action='store', dest='wireframe_opts', default=default_wireframe_opts, help=helps['wireframe_opts']) parser.add_argument('--no-axes', action='store_false', dest='axes', help=helps['no_axes']) parser.add_argument('-n', '--no-show', action='store_false', dest='show', help=helps['no_show']) parser.add_argument('filename') parser.add_argument('figname', nargs='?') options = parser.parse_args() entities_opts = [ dict_from_string(options.vertex_opts), dict_from_string(options.edge_opts), dict_from_string(options.face_opts), dict_from_string(options.cell_opts), ] wireframe_opts = dict_from_string(options.wireframe_opts) filename = options.filename mesh = Mesh.from_file(filename) output('Mesh:') output(' dimension: %d, vertices: %d, elements: %d' % (mesh.dim, mesh.n_nod, mesh.n_el)) domain = FEDomain('domain', mesh) output(domain.cmesh) domain.cmesh.cprint(1) dim = domain.cmesh.dim if dim == 2: entities_opts.pop(2) ax = pc.plot_cmesh(None, domain.cmesh, wireframe_opts=wireframe_opts, entities_opts=entities_opts) ax.axis('image') if not options.axes: ax.axis('off') plt.tight_layout() if options.figname: fig = ax.figure fig.savefig(options.figname, bbox_inches='tight') if options.show: plt.show() if __name__ == '__main__': main()
bsd-3-clause
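plot_mesh.py above is a thin command-line wrapper: the positional argument is the mesh file, an optional second argument saves the figure, and each --*-opts value is a dict-like string. A minimal sketch of how such a string becomes keyword options; the printed dict is an expectation based on the default strings above, since dict_from_string is only imported, not defined, in this script.

from sfepy.base.conf import dict_from_string

edge_opts = dict_from_string("color='b', label_global=12, label_local=8")
print(edge_opts)  # expected: {'color': 'b', 'label_global': 12, 'label_local': 8}
# Per the module docstring, setting any color to None switches that entity off.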
adamginsburg/APEX_CMZ_H2CO
plot_codes/tmap_figure.py
2
12670
import pylab as pl import numpy as np import aplpy import os import copy from astropy import log from paths import h2copath, figurepath import paths import matplotlib from scipy import stats as ss from astropy.io import fits matplotlib.rc_file(paths.pcpath('pubfiguresrc')) pl.ioff() # Close these figures so we can remake them in the appropriate size for fignum in (4,5,6,7): pl.close(fignum) cmap = pl.cm.RdYlBu_r figsize = (20,10) small_recen = dict(x=0.3, y=-0.03,width=1.05,height=0.27) big_recen = dict(x=0.55, y=-0.075,width=2.3,height=0.40) sgrb2x = [000.6773, 0.6578, 0.6672] sgrb2y = [-00.0290, -00.0418, -00.0364] vmin=10 vmax = 200 dustcolumn = '/Users/adam/work/gc/gcmosaic_column_conv36.fits' # most of these come from make_ratiotem_cubesims toloop = zip(( 'H2CO_321220_to_303202{0}_bl_integ_temperature_dens3e4.fits', 'H2CO_321220_to_303202{0}_bl_integ_weighted_temperature_dens3e4.fits', 'H2CO_321220_to_303202{0}_bl_integ_temperature_dens1e4.fits', 'H2CO_321220_to_303202{0}_bl_integ_weighted_temperature_dens1e4.fits', 'H2CO_321220_to_303202{0}_bl_integ_temperature_dens1e4_abund1e-8.fits', 'H2CO_321220_to_303202{0}_bl_integ_weighted_temperature_dens1e4_abund1e-8.fits', 'H2CO_321220_to_303202{0}_bl_integ_temperature_dens1e4_abund1e-10.fits', 'H2CO_321220_to_303202{0}_bl_integ_weighted_temperature_dens1e4_abund1e-10.fits', 'H2CO_321220_to_303202{0}_bl_integ_temperature_dens1e5.fits', 'H2CO_321220_to_303202{0}_bl_integ_weighted_temperature_dens1e5.fits', 'H2CO_321220_to_303202{0}_bl_integ_temperature_dens1e4_masked.fits', 'H2CO_321220_to_303202{0}_bl_integ_weighted_temperature_dens1e4_masked.fits', 'H2CO_321220_to_303202{0}_bl_integ_temperature_dens3e4_masked.fits', 'H2CO_321220_to_303202{0}_bl_integ_weighted_temperature_dens3e4_masked.fits', 'H2CO_321220_to_303202{0}_bl_integ_temperature_dens1e5_masked.fits', 'H2CO_321220_to_303202{0}_bl_integ_weighted_temperature_dens1e5_masked.fits', 'TemperatureCube_DendrogramObjects{0}_leaves_integ.fits', 'TemperatureCube_DendrogramObjects{0}_leaves_integ_weighted.fits', 'TemperatureCube_DendrogramObjects{0}_integ.fits', 'TemperatureCube_DendrogramObjects{0}_integ_weighted.fits'), ('dens3e4', 'dens3e4_weighted', 'dens1e4', 'dens1e4_weighted', 'dens1e4_abund1e-8', 'dens1e4_abund1e-8_weighted', 'dens1e4_abund1e-10', 'dens1e4_abund1e-10_weighted', 'dens1e5', 'dens1e5_weighted', 'dens1e4_masked','dens1e4_weighted_masked', 'dens3e4_masked','dens3e4_weighted_masked', 'dens1e5_masked','dens1e5_weighted_masked', 'dendro_leaf','dendro_leaf_weighted', 'dendro','dendro_weighted')) #for vmax,vmax_str in zip((100,200),("_vmax100","")): for vmax,vmax_str in zip((200,),("",)): for ftemplate,outtype in toloop: for smooth in ("","_smooth",):#"_vsmooth"): log.info(ftemplate.format(smooth)+" "+outtype) fig = pl.figure(4, figsize=figsize) fig.clf() F = aplpy.FITSFigure(h2copath+ftemplate.format(smooth), convention='calabretta', figure=fig) cm = copy.copy(cmap) cm.set_bad((0.5,)*3) F.show_colorscale(cmap=cm,vmin=vmin,vmax=vmax) F.set_tick_labels_format('d.dd','d.dd') F.recenter(**small_recen) peaksn = os.path.join(h2copath,'APEX_H2CO_303_202{0}_bl_mask_integ.fits'.format(smooth)) #F.show_contour(peaksn, levels=[4,7,11,20,38], colors=[(0.25,0.25,0.25,0.5)]*5, #smooth=3, # linewidths=[1.0]*5, # zorder=10, convention='calabretta') #color = (0.25,)*3 #F.show_contour(peaksn, levels=[4,7,11,20,38], colors=[color + (alpha,) for alpha in (0.9,0.6,0.3,0.1,0.0)], #smooth=3, # filled=True, # #linewidths=[1.0]*5, # zorder=10, convention='calabretta') color = (0.5,)*3 # should be same 
as background #888 F.show_contour(peaksn, levels=[-1,0]+np.logspace(0.20,2).tolist(), colors=[(0.5,0.5,0.5,1)]*2 + [color + (alpha,) for alpha in np.exp(-(np.logspace(0.20,2)-1.7)**2/(2.5**2*2.))], #smooth=3, filled=True, #linewidths=[1.0]*5, layer='mask', zorder=10, convention='calabretta', rasterized=True) F.add_colorbar() F.colorbar.set_axis_label_text('T (K)') F.colorbar.set_axis_label_font(size=18) F.colorbar.set_label_properties(size=16) F.show_markers(sgrb2x, sgrb2y, color='k', facecolor='k', s=250, edgecolor='k', alpha=0.9) F.save(os.path.join(figurepath, "big_maps", 'lores{0}{1}{2}_tmap_withmask.pdf'.format(smooth, outtype, vmax_str))) F.recenter(**big_recen) F.save(os.path.join(figurepath, "big_maps", 'big_lores{0}{1}{2}_tmap_withmask.pdf'.format(smooth, outtype, vmax_str))) log.info(os.path.join(figurepath, "big_maps", 'big_lores{0}{1}{2}_tmap_withmask.pdf'.format(smooth, outtype, vmax_str))) F.show_contour(dustcolumn, levels=[5], colors=[(0,0,0,0.5)], zorder=15, alpha=0.5, linewidths=[0.5], layer='dustcontour') F.recenter(**small_recen) F.save(os.path.join(figurepath, "big_maps", 'lores{0}{1}{2}_tmap_withcontours.pdf'.format(smooth, outtype, vmax_str))) F.recenter(**big_recen) F.save(os.path.join(figurepath, "big_maps", 'big_lores{0}{1}{2}_tmap_withcontours.pdf'.format(smooth, outtype, vmax_str))) log.info(os.path.join(figurepath, "big_maps", 'big_lores{0}{1}{2}_tmap_withcontours.pdf'.format(smooth, outtype, vmax_str))) F.hide_layer('mask') F.recenter(**small_recen) F.save(os.path.join(figurepath, "big_maps", 'lores{0}{1}{2}_tmap_nomask_withcontours.pdf'.format(smooth, outtype, vmax_str))) F.recenter(**big_recen) F.save(os.path.join(figurepath, "big_maps", 'big_lores{0}{1}{2}_tmap_nomask_withcontours.pdf'.format(smooth, outtype, vmax_str))) fig7 = pl.figure(7, figsize=figsize) fig7.clf() Fsn = aplpy.FITSFigure(peaksn, convention='calabretta', figure=fig7) Fsn.show_grayscale(vmin=0, vmax=10, stretch='linear', invert=True) Fsn.add_colorbar() Fsn.colorbar.set_axis_label_text('Peak S/N') Fsn.colorbar.set_axis_label_font(size=18) Fsn.colorbar.set_label_properties(size=16) Fsn.set_tick_labels_format('d.dd','d.dd') Fsn.recenter(**big_recen) Fsn.save(os.path.join(figurepath, "big_maps", 'big_lores{0}{1}{2}_peaksn.pdf'.format(smooth, outtype, vmax_str))) F.hide_layer('dustcontour') dusttemperature = '/Users/adam/work/gc/gcmosaic_temp_conv36.fits' F.show_contour(dusttemperature, levels=[20,25], colors=[(0,0,x,0.5) for x in [0.9,0.7,0.6,0.2]], zorder=20) F.recenter(**small_recen) F.save(os.path.join(figurepath, "big_maps",'lores{0}{1}{2}_tmap_withtdustcontours.pdf'.format(smooth, outtype, vmax_str))) F.recenter(**big_recen) F.save(os.path.join(figurepath, "big_maps",'big_lores{0}{1}{2}_tmap_withtdustcontours.pdf'.format(smooth, outtype, vmax_str))) log.info(os.path.join(figurepath, "big_maps",'big_lores{0}{1}{2}_tmap_withtdustcontours.pdf'.format(smooth, outtype, vmax_str))) im = fits.getdata(h2copath+ftemplate.format(smooth)) data = im[np.isfinite(im)] fig9 = pl.figure(9) fig9.clf() ax9 = fig9.gca() h,l,p = ax9.hist(data, bins=np.linspace(0,300), alpha=0.5) shape, loc, scale = ss.lognorm.fit(data, floc=0) # from http://nbviewer.ipython.org/url/xweb.geos.ed.ac.uk/~jsteven5/blog/lognormal_distributions.ipynb mu = np.log(scale) # Mean of log(X) [but I want mean(x)] sigma = shape # Standard deviation of log(X) M = np.exp(mu) # Geometric mean == median s = np.exp(sigma) # Geometric standard deviation lnf = ss.lognorm(s=shape, loc=loc, scale=scale) pdf = lnf.pdf(np.arange(300)) label1 = 
("$\sigma_{{\mathrm{{ln}} x}} = {0:0.2f}$\n" "$\mu_x = {1:0.2f}$\n" "$\sigma_x = {2:0.2f}$".format(sigma, scale,s)) pm = np.abs(ss.lognorm.interval(0.683, s=shape, loc=0, scale=scale) - scale) label2 = ("$x = {0:0.1f}^{{+{1:0.1f}}}_{{-{2:0.1f}}}$\n" "$\sigma_{{\mathrm{{ln}} x}} = {3:0.1f}$\n" .format(scale, pm[1], pm[0], sigma, )) ax9.plot(np.arange(300), pdf*h.max()/pdf.max(), linewidth=4, alpha=0.5, label=label2) ax9.legend(loc='best') ax9.set_xlim(0,300) fig9.savefig(os.path.join(figurepath, "big_maps", 'histogram_{0}{1}{2}_tmap.pdf'.format(smooth, outtype, vmax_str)), bbox_inches='tight') #F.show_contour('h2co218222_all.fits', levels=[1,7,11,20,38], colors=['g']*5, smooth=1, zorder=5) #F.show_contour(datapath+'APEX_H2CO_merge_high_smooth_noise.fits', levels=[0.05,0.1], colors=['#0000FF']*2, zorder=3, convention='calabretta') #F.show_contour(datapath+'APEX_H2CO_merge_high_nhits.fits', levels=[9], colors=['#0000FF']*2, zorder=3, convention='calabretta',smooth=3) #F.show_regions('2014_expansion_targets_simpler.reg') #F.save('CMZ_H2CO_observed_planned.pdf') #F.show_rgb(background, wcs=wcs) #F.save('CMZ_H2CO_observed_planned_colorful.pdf') fig = pl.figure(5, figsize=figsize) fig.clf() F2 = aplpy.FITSFigure(dusttemperature, convention='calabretta', figure=fig) F2.show_colorscale(cmap=pl.cm.hot, vmin=10, vmax=40) F2.add_colorbar() F2.show_contour(h2copath+'H2CO_321220_to_303202_smooth_bl_integ_temperature.fits', convention='calabretta', levels=[30,75,100,150], cmap=pl.cm.BuGn) F2.recenter(**small_recen) F2.show_markers(sgrb2x, sgrb2y, color='k', facecolor='k', s=250, edgecolor='k', alpha=0.9) F2.save(os.path.join(figurepath, "big_maps",'H2COtemperatureOnDust.pdf')) F2.recenter(**big_recen) F2.save(os.path.join(figurepath, "big_maps",'big_H2COtemperatureOnDust.pdf')) for vmax in (100,200): fig = pl.figure(6, figsize=figsize) fig.clf() F = aplpy.FITSFigure('/Users/adam/work/gc/Tkin-GC.fits.gz', convention='calabretta', figure=fig) cm = copy.copy(cmap) cm.set_bad((0.5,)*3) F.show_colorscale(cmap=cm,vmin=vmin,vmax=vmax) F.set_tick_labels_format('d.dd','d.dd') F.recenter(**small_recen) F.add_colorbar() F.colorbar.set_axis_label_text('T (K)') F.colorbar.set_axis_label_font(size=18) F.colorbar.set_label_properties(size=16) F.show_markers(sgrb2x, sgrb2y, color='k', facecolor='k', s=250, edgecolor='k', alpha=0.9) F.save(os.path.join(figurepath, "big_maps", 'ott2014_nh3_tmap_15to{0}.pdf'.format(vmax))) F.show_colorscale(cmap=cm,vmin=vmin,vmax=80) F.save(os.path.join(figurepath, "big_maps", 'ott2014_nh3_tmap_15to80.pdf')) F.show_contour(dustcolumn, levels=[5], colors=[(0,0,0,0.5)], zorder=15, alpha=0.5, linewidths=[0.5], layer='dustcontour') F.save(os.path.join(figurepath, "big_maps", 'ott2014_nh3_tmap_15to80_withcontours.pdf')) F.show_colorscale(cmap=cm,vmin=vmin,vmax=vmax) F.save(os.path.join(figurepath, "big_maps", 'ott2014_nh3_tmap_15to{0}_withcontours.pdf'.format(vmax)))
bsd-3-clause
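The histogram block near the end of tmap_figure.py above fits a lognormal distribution to the temperature map and reports the geometric mean together with an asymmetric 68.3% interval. A self-contained sketch of that conversion on synthetic data; the lognormal parameters below are invented for illustration.

import numpy as np
from scipy import stats as ss

# synthetic stand-in for the temperature-map pixels (parameters are made up)
data = np.random.lognormal(mean=np.log(60.), sigma=0.4, size=10000)

shape, loc, scale = ss.lognorm.fit(data, floc=0)
mu, sigma = np.log(scale), shape          # mean and std of log(x)
M, s = np.exp(mu), np.exp(sigma)          # geometric mean (= median) and geometric std
lo, hi = ss.lognorm.interval(0.683, s=shape, loc=0, scale=scale)
print("median %.1f (+%.1f / -%.1f), sigma_lnx = %.2f" % (M, hi - M, M - lo, sigma))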
drammock/mne-python
mne/viz/backends/_abstract.py
4
24939
"""ABCs.""" # Authors: Guillaume Favelier <guillaume.favelier@gmail.com # Eric Larson <larson.eric.d@gmail.com> # # License: Simplified BSD from abc import ABC, abstractmethod, abstractclassmethod from contextlib import nullcontext import warnings from ..utils import tight_layout class _AbstractRenderer(ABC): @abstractclassmethod def __init__(self, fig=None, size=(600, 600), bgcolor=(0., 0., 0.), name=None, show=False, shape=(1, 1)): """Set up the scene.""" pass @abstractclassmethod def subplot(self, x, y): """Set the active subplot.""" pass @abstractclassmethod def scene(self): """Return scene handle.""" pass @abstractclassmethod def set_interaction(self, interaction): """Set interaction mode.""" pass @abstractclassmethod def mesh(self, x, y, z, triangles, color, opacity=1.0, shading=False, backface_culling=False, scalars=None, colormap=None, vmin=None, vmax=None, interpolate_before_map=True, representation='surface', line_width=1., normals=None, polygon_offset=None, **kwargs): """Add a mesh in the scene. Parameters ---------- x : array, shape (n_vertices,) The array containing the X component of the vertices. y : array, shape (n_vertices,) The array containing the Y component of the vertices. z : array, shape (n_vertices,) The array containing the Z component of the vertices. triangles : array, shape (n_polygons, 3) The array containing the indices of the polygons. color : tuple | str The color of the mesh as a tuple (red, green, blue) of float values between 0 and 1 or a valid color name (i.e. 'white' or 'w'). opacity : float The opacity of the mesh. shading : bool If True, enable the mesh shading. backface_culling : bool If True, enable backface culling on the mesh. scalars : ndarray, shape (n_vertices,) The scalar valued associated to the vertices. vmin : float | None vmin is used to scale the colormap. If None, the min of the data will be used vmax : float | None vmax is used to scale the colormap. If None, the max of the data will be used colormap : The colormap to use. interpolate_before_map : Enabling makes for a smoother scalars display. Default is True. When False, OpenGL will interpolate the mapped colors which can result is showing colors that are not present in the color map. representation : str The representation of the mesh: either 'surface' or 'wireframe'. line_width : int The width of the lines when representation='wireframe'. normals : array, shape (n_vertices, 3) The array containing the normal of each vertex. polygon_offset : float If not None, the factor used to resolve coincident topology. kwargs : args The arguments to pass to triangular_mesh Returns ------- surface : Handle of the mesh in the scene. """ pass @abstractclassmethod def contour(self, surface, scalars, contours, width=1.0, opacity=1.0, vmin=None, vmax=None, colormap=None, normalized_colormap=False, kind='line', color=None): """Add a contour in the scene. Parameters ---------- surface : surface object The mesh to use as support for contour. scalars : ndarray, shape (n_vertices,) The scalar valued associated to the vertices. contours : int | list Specifying a list of values will only give the requested contours. width : float The width of the lines or radius of the tubes. opacity : float The opacity of the contour. vmin : float | None vmin is used to scale the colormap. If None, the min of the data will be used vmax : float | None vmax is used to scale the colormap. If None, the max of the data will be used colormap : The colormap to use. 
normalized_colormap : bool Specify if the values of the colormap are between 0 and 1. kind : 'line' | 'tube' The type of the primitives to use to display the contours. color : The color of the mesh as a tuple (red, green, blue) of float values between 0 and 1 or a valid color name (i.e. 'white' or 'w'). """ pass @abstractclassmethod def surface(self, surface, color=None, opacity=1.0, vmin=None, vmax=None, colormap=None, normalized_colormap=False, scalars=None, backface_culling=False, polygon_offset=None): """Add a surface in the scene. Parameters ---------- surface : surface object The information describing the surface. color : tuple | str The color of the surface as a tuple (red, green, blue) of float values between 0 and 1 or a valid color name (i.e. 'white' or 'w'). opacity : float The opacity of the surface. vmin : float | None vmin is used to scale the colormap. If None, the min of the data will be used vmax : float | None vmax is used to scale the colormap. If None, the max of the data will be used colormap : The colormap to use. scalars : ndarray, shape (n_vertices,) The scalar valued associated to the vertices. backface_culling : bool If True, enable backface culling on the surface. polygon_offset : float If not None, the factor used to resolve coincident topology. """ pass @abstractclassmethod def sphere(self, center, color, scale, opacity=1.0, resolution=8, backface_culling=False, radius=None): """Add sphere in the scene. Parameters ---------- center : ndarray, shape(n_center, 3) The list of centers to use for the sphere(s). color : tuple | str The color of the sphere as a tuple (red, green, blue) of float values between 0 and 1 or a valid color name (i.e. 'white' or 'w'). scale : float The scaling applied to the spheres. The given value specifies the maximum size in drawing units. opacity : float The opacity of the sphere(s). resolution : int The resolution of the sphere created. This is the number of divisions along theta and phi. backface_culling : bool If True, enable backface culling on the sphere(s). radius : float | None Replace the glyph scaling by a fixed radius value for each sphere (not supported by mayavi). """ pass @abstractclassmethod def tube(self, origin, destination, radius=0.001, color='white', scalars=None, vmin=None, vmax=None, colormap='RdBu', normalized_colormap=False, reverse_lut=False): """Add tube in the scene. Parameters ---------- origin : array, shape(n_lines, 3) The coordinates of the first end of the tube(s). destination : array, shape(n_lines, 3) The coordinates of the other end of the tube(s). radius : float The radius of the tube(s). color : tuple | str The color of the tube as a tuple (red, green, blue) of float values between 0 and 1 or a valid color name (i.e. 'white' or 'w'). scalars : array, shape (n_quivers,) | None The optional scalar data to use. vmin : float | None vmin is used to scale the colormap. If None, the min of the data will be used vmax : float | None vmax is used to scale the colormap. If None, the max of the data will be used colormap : The colormap to use. opacity : float The opacity of the tube(s). backface_culling : bool If True, enable backface culling on the tube(s). reverse_lut : bool If True, reverse the lookup table. Returns ------- surface : Handle of the tube in the scene. 
""" pass @abstractclassmethod def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8, glyph_height=None, glyph_center=None, glyph_resolution=None, opacity=1.0, scale_mode='none', scalars=None, backface_culling=False, colormap=None, vmin=None, vmax=None, line_width=2., name=None): """Add quiver3d in the scene. Parameters ---------- x : array, shape (n_quivers,) The X component of the position of the quiver. y : array, shape (n_quivers,) The Y component of the position of the quiver. z : array, shape (n_quivers,) The Z component of the position of the quiver. u : array, shape (n_quivers,) The last X component of the quiver. v : array, shape (n_quivers,) The last Y component of the quiver. w : array, shape (n_quivers,) The last Z component of the quiver. color : tuple | str The color of the quiver as a tuple (red, green, blue) of float values between 0 and 1 or a valid color name (i.e. 'white' or 'w'). scale : float The scaling applied to the glyphs. The size of the glyph is by default calculated from the inter-glyph spacing. The given value specifies the maximum glyph size in drawing units. mode : 'arrow', 'cone' or 'cylinder' The type of the quiver. resolution : int The resolution of the glyph created. Depending on the type of glyph, it represents the number of divisions in its geometric representation. glyph_height : float The height of the glyph used with the quiver. glyph_center : tuple The center of the glyph used with the quiver: (x, y, z). glyph_resolution : float The resolution of the glyph used with the quiver. opacity : float The opacity of the quiver. scale_mode : 'vector', 'scalar' or 'none' The scaling mode for the glyph. scalars : array, shape (n_quivers,) | None The optional scalar data to use. backface_culling : bool If True, enable backface culling on the quiver. colormap : The colormap to use. vmin : float | None vmin is used to scale the colormap. If None, the min of the data will be used vmax : float | None vmax is used to scale the colormap. If None, the max of the data will be used line_width : float The width of the 2d arrows. """ pass @abstractclassmethod def text2d(self, x_window, y_window, text, size=14, color='white'): """Add 2d text in the scene. Parameters ---------- x : float The X component to use as position of the text in the window coordinates system (window_width, window_height). y : float The Y component to use as position of the text in the window coordinates system (window_width, window_height). text : str The content of the text. size : int The size of the font. color : tuple | str The color of the text as a tuple (red, green, blue) of float values between 0 and 1 or a valid color name (i.e. 'white' or 'w'). """ pass @abstractclassmethod def text3d(self, x, y, z, text, width, color='white'): """Add 2d text in the scene. Parameters ---------- x : float The X component to use as position of the text. y : float The Y component to use as position of the text. z : float The Z component to use as position of the text. text : str The content of the text. width : float The width of the text. color : tuple | str The color of the text as a tuple (red, green, blue) of float values between 0 and 1 or a valid color name (i.e. 'white' or 'w'). """ pass @abstractclassmethod def scalarbar(self, source, color="white", title=None, n_labels=4, bgcolor=None): """Add a scalar bar in the scene. Parameters ---------- source : The object of the scene used for the colormap. color : The color of the label text. title : str | None The title of the scalar bar. 
n_labels : int | None The number of labels to display on the scalar bar. bgcolor : The color of the background when there is transparency. """ pass @abstractclassmethod def show(self): """Render the scene.""" pass @abstractclassmethod def close(self): """Close the scene.""" pass @abstractclassmethod def set_camera(self, azimuth=None, elevation=None, distance=None, focalpoint=None, roll=None, reset_camera=True): """Configure the camera of the scene. Parameters ---------- azimuth : float The azimuthal angle of the camera. elevation : float The zenith angle of the camera. distance : float The distance to the focal point. focalpoint : tuple The focal point of the camera: (x, y, z). roll : float The rotation of the camera along its axis. reset_camera : bool If True, reset the camera properties beforehand. """ pass @abstractclassmethod def reset_camera(self): """Reset the camera properties.""" pass @abstractclassmethod def screenshot(self, mode='rgb', filename=None): """Take a screenshot of the scene. Parameters ---------- mode : str Either 'rgb' or 'rgba' for values to return. Default is 'rgb'. filename : str | None If not None, save the figure to the disk. """ pass @abstractclassmethod def project(self, xyz, ch_names): """Convert 3d points to a 2d perspective. Parameters ---------- xyz : array, shape(n_points, 3) The points to project. ch_names : array, shape(_n_points,) Names of the channels. """ pass @abstractclassmethod def enable_depth_peeling(self): """Enable depth peeling.""" pass @abstractclassmethod def remove_mesh(self, mesh_data): """Remove the given mesh from the scene. Parameters ---------- mesh_data : tuple | Surface The mesh to remove. """ pass class _AbstractToolBar(ABC): @abstractmethod def _tool_bar_load_icons(self): pass @abstractmethod def _tool_bar_initialize(self, name="default", window=None): pass @abstractmethod def _tool_bar_add_button(self, name, desc, func, icon_name=None, shortcut=None): pass @abstractmethod def _tool_bar_update_button_icon(self, name, icon_name): pass @abstractmethod def _tool_bar_add_text(self, name, value, placeholder): pass @abstractmethod def _tool_bar_add_spacer(self): pass @abstractmethod def _tool_bar_add_file_button(self, name, desc, func, shortcut=None): pass @abstractmethod def _tool_bar_add_play_button(self, name, desc, func, shortcut=None): pass @abstractmethod def _tool_bar_set_theme(self, theme): pass class _AbstractDock(ABC): @abstractmethod def _dock_initialize(self, window=None): pass @abstractmethod def _dock_finalize(self): pass @abstractmethod def _dock_show(self): pass @abstractmethod def _dock_hide(self): pass @abstractmethod def _dock_add_stretch(self, layout): pass @abstractmethod def _dock_add_layout(self, vertical=True): pass @abstractmethod def _dock_add_label(self, value, align=False, layout=None): pass @abstractmethod def _dock_add_button(self, name, callback, layout=None): pass @abstractmethod def _dock_named_layout(self, name, layout, compact): pass @abstractmethod def _dock_add_slider(self, name, value, rng, callback, compact=True, double=False, layout=None): pass @abstractmethod def _dock_add_spin_box(self, name, value, rng, callback, compact=True, double=True, layout=None): pass @abstractmethod def _dock_add_combo_box(self, name, value, rng, callback, compact=True, layout=None): pass @abstractmethod def _dock_add_group_box(self, name, layout=None): pass class _AbstractMenuBar(ABC): @abstractmethod def _menu_initialize(self, window=None): pass @abstractmethod def _menu_add_submenu(self, name, desc): pass 
@abstractmethod def _menu_add_button(self, menu_name, name, desc, func): pass class _AbstractStatusBar(ABC): @abstractmethod def _status_bar_initialize(self, window=None): pass @abstractmethod def _status_bar_add_label(self, value, stretch=0): pass @abstractmethod def _status_bar_add_progress_bar(self, stretch=0): pass @abstractmethod def _status_bar_update(self): pass class _AbstractPlayback(ABC): @abstractmethod def _playback_initialize(self, func, timeout, value, rng, time_widget, play_widget): pass class _AbstractLayout(ABC): @abstractmethod def _layout_initialize(self, max_width): pass @abstractmethod def _layout_add_widget(self, layout, widget, stretch=0): pass class _AbstractWidget(ABC): def __init__(self, widget): self._widget = widget @property def widget(self): return self._widget @abstractmethod def set_value(self, value): pass @abstractmethod def get_value(self): pass @abstractmethod def set_range(self, rng): pass @abstractmethod def show(self): pass @abstractmethod def hide(self): pass @abstractmethod def update(self, repaint=True): pass class _AbstractMplInterface(ABC): @abstractmethod def _mpl_initialize(): pass class _AbstractMplCanvas(ABC): def __init__(self, width, height, dpi): """Initialize the MplCanvas.""" from matplotlib import rc_context from matplotlib.figure import Figure # prefer constrained layout here but live with tight_layout otherwise context = nullcontext self._extra_events = ('resize',) try: context = rc_context({'figure.constrained_layout.use': True}) self._extra_events = () except KeyError: pass with context: self.fig = Figure(figsize=(width, height), dpi=dpi) self.axes = self.fig.add_subplot(111) self.axes.set(xlabel='Time (sec)', ylabel='Activation (AU)') self.manager = None def _connect(self): for event in ('button_press', 'motion_notify') + self._extra_events: self.canvas.mpl_connect( event + '_event', getattr(self, 'on_' + event)) def plot(self, x, y, label, update=True, **kwargs): """Plot a curve.""" line, = self.axes.plot( x, y, label=label, **kwargs) if update: self.update_plot() return line def plot_time_line(self, x, label, update=True, **kwargs): """Plot the vertical line.""" line = self.axes.axvline(x, label=label, **kwargs) if update: self.update_plot() return line def update_plot(self): """Update the plot.""" with warnings.catch_warnings(record=True): warnings.filterwarnings('ignore', 'constrained_layout') self.canvas.draw() def set_color(self, bg_color, fg_color): """Set the widget colors.""" self.axes.set_facecolor(bg_color) self.axes.xaxis.label.set_color(fg_color) self.axes.yaxis.label.set_color(fg_color) self.axes.spines['top'].set_color(fg_color) self.axes.spines['bottom'].set_color(fg_color) self.axes.spines['left'].set_color(fg_color) self.axes.spines['right'].set_color(fg_color) self.axes.tick_params(axis='x', colors=fg_color) self.axes.tick_params(axis='y', colors=fg_color) self.fig.patch.set_facecolor(bg_color) def show(self): """Show the canvas.""" if self.manager is None: self.canvas.show() else: self.manager.show() def close(self): """Close the canvas.""" self.canvas.close() def clear(self): """Clear internal variables.""" self.close() self.axes.clear() self.fig.clear() self.canvas = None self.manager = None def on_resize(self, event): """Handle resize events.""" tight_layout(fig=self.axes.figure) class _AbstractBrainMplCanvas(_AbstractMplCanvas): def __init__(self, brain, width, height, dpi): """Initialize the MplCanvas.""" super().__init__(width, height, dpi) self.brain = brain self.time_func = brain.callbacks["time"] def 
update_plot(self): """Update the plot.""" leg = self.axes.legend( prop={'family': 'monospace', 'size': 'small'}, framealpha=0.5, handlelength=1., facecolor=self.brain._bg_color) for text in leg.get_texts(): text.set_color(self.brain._fg_color) super().update_plot() def on_button_press(self, event): """Handle button presses.""" # left click (and maybe drag) in progress in axes if (event.inaxes != self.axes or event.button != 1): return self.time_func( event.xdata, update_widget=True, time_as_index=False) on_motion_notify = on_button_press # for now they can be the same def clear(self): """Clear internal variables.""" super().clear() self.brain = None class _AbstractWindow(ABC): def _window_initialize(self): self._window = None self._interactor = None self._mplcanvas = None self._show_traces = None self._separate_canvas = None self._interactor_fraction = None @abstractmethod def _window_close_connect(self, func): pass @abstractmethod def _window_get_dpi(self): pass @abstractmethod def _window_get_size(self): pass def _window_get_mplcanvas_size(self, fraction): ratio = (1 - fraction) / fraction dpi = self._window_get_dpi() w, h = self._window_get_size() h /= ratio return (w / dpi, h / dpi) @abstractmethod def _window_get_simple_canvas(self, width, height, dpi): pass @abstractmethod def _window_get_mplcanvas(self, brain, interactor_fraction, show_traces, separate_canvas): pass @abstractmethod def _window_adjust_mplcanvas_layout(self): pass @abstractmethod def _window_get_cursor(self): pass @abstractmethod def _window_set_cursor(self, cursor): pass @abstractmethod def _window_new_cursor(self, name): pass @abstractmethod def _window_ensure_minimum_sizes(self): pass @abstractmethod def _window_set_theme(self, theme): pass
bsd-3-clause
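The module above only declares interfaces: a concrete plotting backend must subclass these ABCs and implement every abstract method before it can be instantiated. A toy sketch of that contract for the _AbstractWidget class; the dict-backed widget is a hypothetical stand-in, not one of MNE's real backends, and the import path is assumed from the record's file path.

from mne.viz.backends._abstract import _AbstractWidget

class _DictWidget(_AbstractWidget):
    """Toy backend: stores everything in the plain dict passed as `widget`."""

    def set_value(self, value):
        self._widget['value'] = value

    def get_value(self):
        return self._widget.get('value')

    def set_range(self, rng):
        self._widget['range'] = tuple(rng)

    def show(self):
        self._widget['visible'] = True

    def hide(self):
        self._widget['visible'] = False

    def update(self, repaint=True):
        pass  # nothing to repaint in a dict

w = _DictWidget({})
w.set_value(3)
print(w.get_value(), w.widget)   # -> 3 {'value': 3}
# Instantiating _AbstractWidget({}) directly would raise TypeError,
# because its abstract methods are not implemented.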
RPGroup-PBoC/gist_pboc_2017
code/inclass/phase_portrait_in_class.py
1
1286
# Phase portrait for two mutually repressing species R1 and R2 (in-class demo).
import numpy as np
import matplotlib.pyplot as plt
import seaborn

plt.close('all')

# Define the parameters
r = 20          # the production rate
gamma = 1 / 30  # the degradation rate
k = 200         # in units of concentration
max_R = 1000    # maximum number of R1 and R2
R1 = np.linspace(0, max_R, 500)
R2 = np.linspace(0, max_R, 500)

# Compute the nullclines.
R1_null = (r / gamma) / (1 + (R2 / k)**2)   # R1 value where dR1/dt = 0, as a function of R2
R2_null = (r / gamma) / (1 + (R1 / k)**2)   # R2 value where dR2/dt = 0, as a function of R1

# Plot the nullclines (R1 on the x axis, R2 on the y axis).
plt.figure()
plt.plot(R1_null, R2, label='dR1/dt = 0')
plt.plot(R1, R2_null, label='dR2/dt = 0')
plt.xlabel('R1')
plt.ylabel('R2')
plt.legend()
plt.show()

# Generate the vector fields
R1_m, R2_m = np.meshgrid(R1[1::30], R2[1::30])

# Compute the derivatives
dR1_dt = -gamma * R1_m + r / (1 + (R2_m / k)**2)
dR2_dt = -gamma * R2_m + r / (1 + (R1_m / k)**2)

# Plot the vector fields!!
plt.quiver(R1_m, R2_m, dR1_dt, dR2_dt)
plt.show()

# Plot the orbit.
time = 200
R1 = 800
R2 = 400

# Loop through time and integrate (forward Euler with dt = 1).
for t in range(time):
    dR1 = -gamma * R1 + r / (1 + (R2 / k)**2)
    dR2 = -gamma * R2 + r / (1 + (R1 / k)**2)

    # Add this change to our current position
    R1 = R1 + dR1
    R2 += dR2  # same update as above, written with the in-place operator

    plt.plot(R1, R2, 'ro')
    plt.show()
    plt.pause(0.05)
mit
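The two nullclines in phase_portrait_in_class.py above intersect on the diagonal where production balances degradation. A short numerical check of that symmetric fixed point with the same parameter values; brentq is just one convenient root finder and is not part of the original demo.

import numpy as np
from scipy.optimize import brentq

r, gamma, k = 20, 1. / 30, 200

def f(R):
    # net rate along the diagonal R1 = R2 = R
    return -gamma * R + r / (1 + (R / k)**2)

R_star = brentq(f, 1, 1000)      # sign change between the endpoints guarantees a root
print("symmetric fixed point at R1 = R2 = %.1f" % R_star)   # roughly 240-245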
glouppe/scikit-learn
benchmarks/bench_isotonic.py
268
3046
""" Benchmarks of isotonic regression performance. We generate a synthetic dataset of size 10^n, for n in [min, max], and examine the time taken to run isotonic regression over the dataset. The timings are then output to stdout, or visualized on a log-log scale with matplotlib. This alows the scaling of the algorithm with the problem size to be visualized and understood. """ from __future__ import print_function import numpy as np import gc from datetime import datetime from sklearn.isotonic import isotonic_regression from sklearn.utils.bench import total_seconds import matplotlib.pyplot as plt import argparse def generate_perturbed_logarithm_dataset(size): return np.random.randint(-50, 50, size=n) \ + 50. * np.log(1 + np.arange(n)) def generate_logistic_dataset(size): X = np.sort(np.random.normal(size=size)) return np.random.random(size=size) < 1.0 / (1.0 + np.exp(-X)) DATASET_GENERATORS = { 'perturbed_logarithm': generate_perturbed_logarithm_dataset, 'logistic': generate_logistic_dataset } def bench_isotonic_regression(Y): """ Runs a single iteration of isotonic regression on the input data, and reports the total time taken (in seconds). """ gc.collect() tstart = datetime.now() isotonic_regression(Y) delta = datetime.now() - tstart return total_seconds(delta) if __name__ == '__main__': parser = argparse.ArgumentParser( description="Isotonic Regression benchmark tool") parser.add_argument('--iterations', type=int, required=True, help="Number of iterations to average timings over " "for each problem size") parser.add_argument('--log_min_problem_size', type=int, required=True, help="Base 10 logarithm of the minimum problem size") parser.add_argument('--log_max_problem_size', type=int, required=True, help="Base 10 logarithm of the maximum problem size") parser.add_argument('--show_plot', action='store_true', help="Plot timing output with matplotlib") parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(), required=True) args = parser.parse_args() timings = [] for exponent in range(args.log_min_problem_size, args.log_max_problem_size): n = 10 ** exponent Y = DATASET_GENERATORS[args.dataset](n) time_per_iteration = \ [bench_isotonic_regression(Y) for i in range(args.iterations)] timing = (n, np.mean(time_per_iteration)) timings.append(timing) # If we're not plotting, dump the timing to stdout if not args.show_plot: print(n, np.mean(time_per_iteration)) if args.show_plot: plt.plot(*zip(*timings)) plt.title("Average time taken running isotonic regression") plt.xlabel('Number of observations') plt.ylabel('Time (s)') plt.axis('tight') plt.loglog() plt.show()
bsd-3-clause
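A minimal sketch of the call that bench_isotonic.py above is timing, applied to one instance of the perturbed-logarithm dataset; the size and the assertion are illustrative only.

import numpy as np
from sklearn.isotonic import isotonic_regression

n = 10 ** 4
y = np.random.randint(-50, 50, size=n) + 50. * np.log(1 + np.arange(n))
y_fit = isotonic_regression(y)            # pool-adjacent-violators fit
assert np.all(np.diff(y_fit) >= 0)        # output is monotonically non-decreasing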
nmartensen/pandas
scripts/file_sizes.py
7
4949
from __future__ import print_function import os import sys import numpy as np import matplotlib.pyplot as plt from pandas import DataFrame from pandas.util.testing import set_trace from pandas import compat dirs = [] names = [] lengths = [] if len(sys.argv) > 1: loc = sys.argv[1] else: loc = '.' walked = os.walk(loc) def _should_count_file(path): return path.endswith('.py') or path.endswith('.pyx') def _is_def_line(line): """def/cdef/cpdef, but not `cdef class`""" return (line.endswith(':') and not 'class' in line.split() and (line.startswith('def ') or line.startswith('cdef ') or line.startswith('cpdef ') or ' def ' in line or ' cdef ' in line or ' cpdef ' in line)) class LengthCounter(object): """ should add option for subtracting nested function lengths?? """ def __init__(self, lines): self.lines = lines self.pos = 0 self.counts = [] self.n = len(lines) def get_counts(self): self.pos = 0 self.counts = [] while self.pos < self.n: line = self.lines[self.pos] self.pos += 1 if _is_def_line(line): level = _get_indent_level(line) self._count_function(indent_level=level) return self.counts def _count_function(self, indent_level=1): indent = ' ' * indent_level def _end_of_function(line): return (line != '' and not line.startswith(indent) and not line.startswith('#')) start_pos = self.pos while self.pos < self.n: line = self.lines[self.pos] if _end_of_function(line): self._push_count(start_pos) return self.pos += 1 if _is_def_line(line): self._count_function(indent_level=indent_level + 1) # end of file self._push_count(start_pos) def _push_count(self, start_pos): func_lines = self.lines[start_pos:self.pos] if len(func_lines) > 300: set_trace() # remove blank lines at end while len(func_lines) > 0 and func_lines[-1] == '': func_lines = func_lines[:-1] # remove docstrings and comments clean_lines = [] in_docstring = False for line in func_lines: line = line.strip() if in_docstring and _is_triplequote(line): in_docstring = False continue if line.startswith('#'): continue if _is_triplequote(line): in_docstring = True continue self.counts.append(len(func_lines)) def _get_indent_level(line): level = 0 while line.startswith(' ' * level): level += 1 return level def _is_triplequote(line): return line.startswith('"""') or line.startswith("'''") def _get_file_function_lengths(path): lines = [x.rstrip() for x in open(path).readlines()] counter = LengthCounter(lines) return counter.get_counts() # def test_get_function_lengths(): text = """ class Foo: def foo(): def bar(): a = 1 b = 2 c = 3 foo = 'bar' def x(): a = 1 b = 3 c = 7 pass """ expected = [5, 8, 7] lines = [x.rstrip() for x in text.splitlines()] counter = LengthCounter(lines) result = counter.get_counts() assert(result == expected) def doit(): for directory, _, files in walked: print(directory) for path in files: if not _should_count_file(path): continue full_path = os.path.join(directory, path) print(full_path) lines = len(open(full_path).readlines()) dirs.append(directory) names.append(path) lengths.append(lines) result = DataFrame({'dirs': dirs, 'names': names, 'lengths': lengths}) def doit2(): counts = {} for directory, _, files in walked: print(directory) for path in files: if not _should_count_file(path) or path.startswith('test_'): continue full_path = os.path.join(directory, path) counts[full_path] = _get_file_function_lengths(full_path) return counts counts = doit2() # counts = _get_file_function_lengths('pandas/tests/test_series.py') all_counts = [] for k, v in compat.iteritems(counts): all_counts.extend(v) all_counts = 
np.array(all_counts) fig = plt.figure(figsize=(10, 5)) ax = fig.add_subplot(111) ax.hist(all_counts, bins=100) n = len(all_counts) nmore = (all_counts > 50).sum() ax.set_title('%s function lengths, n=%d' % ('pandas', n)) ax.set_ylabel('N functions') ax.set_xlabel('Function length') ax.text(100, 300, '%.3f%% with > 50 lines' % ((n - nmore) / float(n)), fontsize=18) plt.show()
bsd-3-clause
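The heart of file_sizes.py above is the rule for spotting function definitions (including Cython's cdef/cpdef) without a full parser. A small self-contained restatement of that rule, runnable on its own because importing the script itself would kick off the directory walk it performs at module level.

def _is_def_line(line):
    """def/cdef/cpdef, but not `cdef class` (restated from the script above)."""
    return (line.endswith(':') and
            'class' not in line.split() and
            (line.startswith('def ') or
             line.startswith('cdef ') or
             line.startswith('cpdef ') or
             ' def ' in line or ' cdef ' in line or ' cpdef ' in line))

for example in ['def f(x):', '    def g(self):', 'cpdef int h(y):',
                'cdef class Foo:', 'x = 1']:
    print(repr(example), '->', _is_def_line(example))
# expected: True, True, True, False, False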
jakobkolb/MayaSim
mayasim/model/ModelCore.py
1
66303
from __future__ import print_function import datetime import operator import os import sys import traceback import warnings from itertools import compress import networkx as nx import numpy as np import pandas import pkg_resources import scipy.ndimage as ndimage import scipy.sparse as sparse try: import cPickle as pkl except ImportError: import pickle as pkl if __name__ == "__main__": from ModelParameters import ModelParameters as Parameters from f90routines import f90routines else: from .f90routines import f90routines from .ModelParameters import ModelParameters as Parameters class ModelCore(Parameters): def __init__(self, n=30, output_data_location=None, debug=False, output_trajectory=True, **kwargs): """ Instance of the MayaSim model. Parameters ---------- n: int number of settlements to initialize, output_data_location: path_like string stating the folder path to which the output files will be writen, debug: bool switch for debugging output from model, output_trajectory: bool switch for output of trajectory data, output_settlement_data: bool switch for output of settlement data, output_geographic_data: bool switch for output of geographic data. """ # Input/Output settings: # Set path to static input files input_data_location = pkg_resources. \ resource_filename('mayasim', 'input_data/') # Debugging settings self.debug = debug # In debug mode, allways print stack for warnings and errors. def warn_with_traceback(message, category, filename, lineno, file=None, line=None): log = file if hasattr(file, 'write') else sys.stderr traceback.print_stack(file=log) log.write( warnings.formatwarning(message, category, filename, lineno, line)) if self.debug: warnings.showwarning = warn_with_traceback # ******************************************************************* # MODEL PARAMETERS (to be varied) # ******************************************************************* self.output_trajectory = output_trajectory # Settlement and geographic data will be written to files in each time step, # Trajectory data will be kept in one data structure to be read out, when # the model run finished. if output_data_location != 0: # remove file ending self.output_data_location = output_data_location.rsplit('.', 1)[0] # create callable output paths self.settlement_output_path = \ lambda i: self.output_data_location + \ f'settlement_data_{i:03d}.pkl' self.geographic_output_path = \ lambda i: self.output_data_location + \ f'geographic_data_{i:03d}.pkl' # set switches for output generation self.output_geographic_data = True self.output_settlement_data = True else: self.output_geographic_data = False self.output_settlement_data = False self.trajectory = [] self.traders_trajectory = [] # ******************************************************************* # MODEL DATA SOURCES # ******************************************************************* # documentation for TEMPERATURE and PRECIPITATION data can be found # here: http://www.worldclim.org/formats # apparently temperature data is given in x*10 format to allow for # smaller file sizes. # original version of mayasim divides temperature by 12 though self.temp = np.load(input_data_location + '0_RES_432x400_temp.npy') / 12. 
# precipitation in mm or liters per square meter # (comparing the numbers to numbers from Wikipedia suggests # that it is given per year) self.precip = np.load(input_data_location + '0_RES_432x400_precip.npy') # in meters above sea level self.elev = np.load(input_data_location + '0_RES_432x400_elev.npy') self.slope = np.load(input_data_location + '0_RES_432x400_slope.npy') # documentation for SOIL PRODUCTIVITY is given at: # http://www.fao.org/geonetwork/srv/en/ # main.home?uuid=f7a2b3c0-bdbf-11db-a0f6-000d939bc5d8 # The soil production index considers the suitability # of the best adapted crop to each soils # condition in an area and makes a weighted average for # all soils present in a pixel based # on the formula: 0.9 * VS + 0.6 * S + 0.3 * MS + 0 * NS. # Values range from 0 (bad) to 6 (good) self.soilprod = np.load(input_data_location + '0_RES_432x400_soil.npy') # it also sets soil productivity to 1.5 where the elevation is <= 1 # self.soilprod[self.elev <= 1] = 1.5 # complains because there is nans in elev for ind, x in np.ndenumerate(self.elev): if not np.isnan(x): if x <= 1.: self.soilprod[ind] = 1.5 # smoothen soil productivity dataset self.soilprod = ndimage.gaussian_filter(self.soilprod, sigma=(2, 2), order=0) # and set to zero for non land cells self.soilprod[np.isnan(self.elev)] = 0 # ******************************************************************* # MODEL MAP INITIALIZATION # ******************************************************************* # dimensions of the map self.rows, self.columns = self.precip.shape self.height, self.width = 914., 840. # height and width in km self.pixel_dim = self.width / self.columns self.cell_width = self.width / self.columns self.cell_height = self.height / self.rows self.land_patches = np.asarray(np.where(np.isfinite(self.elev))) self.number_of_land_patches = self.land_patches.shape[1] # lengh unit - total map is about 500 km wide self.area = 516484. / len(self.land_patches[0]) self.elev[:, 0] = np.inf self.elev[:, -1] = np.inf self.elev[0, :] = np.inf self.elev[-1, :] = np.inf # create a list of the index values i = (x, y) of the land # patches with finite elevation h self.list_of_land_patches = [ i for i, h in np.ndenumerate(self.elev) if np.isfinite(self.elev[i]) ] # initialize soil degradation and population # gradient (influencing the forest) # ******************************************************************* # INITIALIZE ECOSYSTEM # ******************************************************************* # Soil (influencing primary production and agricultural productivity) self.soil_deg = np.zeros((self.rows, self.columns)) # Forest self.forest_state = np.ones((self.rows, self.columns), dtype=int) self.forest_state[np.isnan(self.elev)] = 0 self.forest_memory = np.zeros((self.rows, self.columns), dtype=int) self.cleared_land_neighbours = np.zeros((self.rows, self.columns), dtype=int) # The forest has three states: 3=climax forest, # 2=secondary regrowth, 1=cleared land. 
for i in self.list_of_land_patches: self.forest_state[i] = 3 # Variables describing total amount of water and water flow self.water = np.zeros((self.rows, self.columns)) self.flow = np.zeros((self.rows, self.columns)) self.spaciotemporal_precipitation = np.zeros((self.rows, self.columns)) # initialize the trajectories of the water drops self.x = np.zeros((self.rows, self.columns), dtype="int") self.y = np.zeros((self.rows, self.columns), dtype="int") # define relative coordinates of the neighbourhood of a cell self.neighbourhood = [(i, j) for i in [-1, 0, 1] for j in [-1, 0, 1]] self.f90neighbourhood = np.asarray(self.neighbourhood).T # ******************************************************************* # INITIALIZE SOCIETY # ******************************************************************* # Population gradient (influencing the forest) self.pop_gradient = np.zeros((self.rows, self.columns)) self.number_settlements = n # distribute specified number of settlements on the map self.settlement_positions = self.land_patches[:, np.random.choice( len(self. land_patches[1]), n).astype('int')] self.age = [0] * n # demographic variables self.birth_rate = [self.birth_rate_parameter] * n self.death_rate = [0.1 + 0.05 * r for r in list(np.random.random(n))] self.population = list( np.random.randint(self.min_init_inhabitants, self.max_init_inhabitants, n).astype(float)) self.mig_rate = [0.] * n self.out_mig = [0] * n self.migrants = [0] * n self.pioneer_set = [] self.failed = 0 # index list for populated and abandoned cities # used until removal of dead cities is implemented. self.populated_cities = range(n) self.dead_cities = [] # agricultural influence self.number_cells_in_influence = [0] * n self.area_of_influence = [0.] * n self.coordinates = np.indices((self.rows, self.columns)) self.cells_in_influence = [None] * n # will be a list of arrays self.cropped_cells = [None] * n # for now, cropped cells are only the city positions. # first cropped cells are added at the first call of # get_cropped_cells() for city in self.populated_cities: self.cropped_cells[city] = [[self.settlement_positions[0, city]], [self.settlement_positions[1, city]]] # print(self.cropped_cells[1]) self.occupied_cells = np.zeros((self.rows, self.columns)) self.number_cropped_cells = [0] * n self.crop_yield = [0.] * n self.eco_benefit = [0.] * n self.available = 0 # details of income from ecosystems services self.s_es_ag = [0.] * n self.s_es_wf = [0.] * n self.s_es_fs = [0.] * n self.s_es_sp = [0.] * n self.s_es_pg = [0.] 
* n self.es_ag = np.zeros((self.rows, self.columns), dtype=float) self.es_wf = np.zeros((self.rows, self.columns), dtype=float) self.es_fs = np.zeros((self.rows, self.columns), dtype=float) self.es_sp = np.zeros((self.rows, self.columns), dtype=float) self.es_pg = np.zeros((self.rows, self.columns), dtype=float) # Trade Variables self.adjacency = np.zeros((n, n)) self.rank = [0] * n self.degree = [0] * n self.comp_size = [0] * n self.centrality = [0] * n self.trade_income = [0] * n self.max_cluster_size = 0 # total real income per capita self.real_income_pc = [0] * n def _get_run_variables(self): """ Saves all variables and values of the class instance 'self' in a dictionary file at the location given by 'path' Parameters: ----------- self: class instance class instance whose variables are saved """ dictionary = { attr: getattr(self, attr) for attr in dir(self) if not attr.startswith('__') and not callable(getattr(self, attr)) } return dictionary def update_precipitation(self, t): """ Modulates the initial precip dataset with a 24 timestep period. Returns a field of rainfall values for each cell. If veg_rainfall > 0, cleared_land_neighbours decreases rain. TO DO: The original Model increases specialization every time rainfall decreases, assuming that trade gets more important to compensate for agriculture decline """ if self.precipitation_modulation: self.spaciotemporal_precipitation = \ self.precip * ( 1 + self.precipitation_amplitude * self.precipitation_variation[ (np.ceil(t / self.climate_var) % 8).astype(int)]) \ - self.veg_rainfall * self.cleared_land_neighbours else: self.spaciotemporal_precipitation = \ self.precip * (1 - self.veg_rainfall * self.cleared_land_neighbours) # check if system time is in drought period drought = False for drought_time in self.drought_times: if drought_time[0] < t <= drought_time[1]: drought = True # if so, decrease precipitation by factor percentage given by # drought severity if drought: self.spaciotemporal_precipitation *= \ (1. - self.drought_severity / 100.) def get_waterflow(self): """ waterflow: takes rain as an argument, uses elev, returns water flow distribution the precip percent parameter that reduces the amount of raindrops that have to be moved. Thereby inceases performance. f90waterflow takes as arguments: list of coordinates of land cells (2xN_land) elevation map in (height x width) rain_volume per cell map in (height x width) rain_volume and elevation must have same units: height per cell neighbourhood offsets height and width of map as integers, Number of land cells, N_land """ # convert precipitation from mm to meters # NOTE: I think, this should be 1e-3 # to convert from mm to meters though... # but 1e-5 is what they do in the original version. rain_volume = np.nan_to_num(self.spaciotemporal_precipitation * 1e-5) max_x, max_y = self.rows, self.columns err, self.flow, self.water = \ f90routines.f90waterflow(self.land_patches, self.elev, rain_volume, self.f90neighbourhood, max_x, max_y, self.number_of_land_patches) return self.water, self.flow def forest_evolve(self, npp): npp_mean = np.nanmean(npp) # Iterate over all cells repeatedly and regenerate or degenerate for repeat in range(4): for i in self.list_of_land_patches: if not np.isnan(self.elev[i]): # Forest regenerates faster [slower] (linearly), # if net primary productivity on the patch # is above [below] average. 
threshold = npp_mean / npp[i] # Degradation: # Decrement with probability 0.003 # if there is a settlement around, # degrade with higher probability probdec = self.natprobdec * (2 * self.pop_gradient[i] + 1) if np.random.random() <= probdec: if self.forest_state[i] == 3: self.forest_state[i] = 2 self.forest_memory[i] = self.state_change_s2 elif self.forest_state[i] == 2: self.forest_state[i] = 1 self.forest_memory[i] = 0 # Regeneration:" # recover if tree = 1 and memory > threshold 1 if (self.forest_state[i] == 1 and self.forest_memory[i] > self.state_change_s2 * threshold): self.forest_state[i] = 2 self.forest_memory[i] = self.state_change_s2 # recover if tree = 2 and memory > threshold 2 # and certain number of neighbours are # climax forest as well if (self.forest_state[i] == 2 and self.forest_memory[i] > self.state_change_s3 * threshold): state_3_neighbours = \ np.sum(self.forest_state[i[0] - 1:i[0] + 2, i[1] - 1:i[1] + 2] == 3) if state_3_neighbours > \ self.min_number_of_s3_neighbours: self.forest_state[i] = 3 # finally, increase memory by one self.forest_memory[i] += 1 # calculate cleared land neighbours for output: if self.veg_rainfall > 0: for i in self.list_of_land_patches: self.cleared_land_neighbours[i] = \ np.sum(self.forest_state[i[0] - 1:i[0] + 2, i[1] - 1:i[1] + 2] == 1) assert not np.any(self.forest_state[~np.isnan(self.elev)] < 1), \ 'forest state is smaller than 1 somewhere' return def net_primary_prod(self): """ net_primaty_prod is the minimum of a quantity derived from local temperature and rain Why is it rain and not 'surface water' according to the waterflow model? """ # EQUATION ############################################################ npp = 3000 \ * np.minimum(1 - np.exp(-6.64e-4 * self.spaciotemporal_precipitation), 1. / (1 + np.exp(1.315 - (0.119 * self.temp)))) # EQUATION ############################################################ return npp def get_ag(self, npp, wf): """ agricultural productivit is calculated via a linear additive model from net primary productivity, soil productivity, slope, waterflow and soil degradation of each patch. """ # EQUATION ############################################################ return self.a_npp * npp + self.a_sp * self.soilprod \ - self.a_s * self.slope - self.a_wf * wf - self.soil_deg # EQUATION ############################################################ def get_ecoserv(self, ag, wf): """ Ecosystem Services are calculated via a linear additive model from agricultural productivity (ag), waterflow through the cell (wf) and forest state on the cell (forest) \in [1,3], The recent version of mayasim limits value of ecosystem services to 1 < ecoserv < 250, it also proposes to include population density (pop_gradient) and precipitation (rain) """ # EQUATION ########################################################### if not self.better_ess: self.es_ag = self.e_ag * ag self.es_wf = self.e_wf * wf self.es_fs = self.e_f * (self.forest_state - 1.) self.es_sp = self.e_r * self.spaciotemporal_precipitation self.es_pg = self.e_deg * self.pop_gradient else: # change to use forest as proxy for income from agricultural # productivity. Multiply by 2 to get same per cell levels as # before self.es_ag = np.zeros(np.shape(ag)) self.es_wf = self.e_wf * wf self.es_fs = 2. * self.e_ag * (self.forest_state - 1.) 
* ag self.es_sp = self.e_r * self.spaciotemporal_precipitation self.es_pg = self.e_deg * self.pop_gradient return (self.es_ag + self.es_wf + self.es_fs + self.es_sp - self.es_pg) # EQUATION ########################################################### ###################################################################### # The Society ###################################################################### def benefit_cost(self, ag_in): # Benefit cost assessment return (self.max_yield * (1 - self.origin_shift * np.exp(-self.slope_yield * ag_in))) def get_cells_in_influence(self): """ creates a list of cells for each city that are under its influence. these are the cells that are closer than population^0.8/60 (which is not explained any further... change denominator to 80 and max value to 30 from eyeballing the results """ # EQUATION #################################################################### self.area_of_influence = [(x**0.8) / 60. for x in self.population] self.area_of_influence = [ value if value < 40. else 40. for value in self.area_of_influence ] # EQUATION #################################################################### for city in self.populated_cities: distance = np.sqrt((self.cell_width * (self.settlement_positions[0][city] - self.coordinates[0]))**2 + (self.cell_height * (self.settlement_positions[1][city] - self.coordinates[1]))**2) stencil = distance <= self.area_of_influence[city] self.cells_in_influence[city] = self.coordinates[:, stencil] self.number_cells_in_influence = [ len(x[0]) for x in self.cells_in_influence ] return def get_cropped_cells(self, bca): """ Updates the cropped cells for each city with positive population. Calculates the utility for each cell (depending on distance from the respective city) If population per cropped cell is lower then min_people_per_cropped_cell, cells are abandoned. Cells with negative utility are also abandoned. If population per cropped cell is higher than max_people_per_cropped_cell, new cells are cropped. Newly cropped cells are chosen such that they have highest utility """ abandoned = 0 sown = 0 # for each settlement: how many cells are currently cropped ? self.number_cropped_cells = np.array( [len(x[0]) for x in self.cropped_cells]) # agricultural population density (people per cropped land) # determines the number of cells that can be cropped. ag_pop_density = [ p / (self.number_cropped_cells[c] * self.area) if self.number_cropped_cells[c] > 0 else 0. for c, p in enumerate(self.population) ] # occupied_cells is a mask of all occupied cells calculated as the # unification of the cropped cells of all settlements. if len(self.cropped_cells) > 0: occup = np.concatenate(self.cropped_cells, axis=1).astype('int') if False: print('population of cities without agriculture:') print( np.array(self.population)[self.number_cropped_cells == 0]) print('pt. migration from cities without agriculture:') print(np.array(self.out_mig)[self.number_cropped_cells == 0]) print('out migration from cities without agriculture:') print(np.array(self.migrants)[self.number_cropped_cells == 0]) for index in range(len(occup[0])): self.occupied_cells[occup[0, index], occup[1, index]] = 1 # the age of settlements is increased here. self.age = [x + 1 for x in self.age] # for each settlement: which cells to crop ? # calculate utility first! This can be accelerated, if calculations # are only done in 40 km radius. 
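        # In the per-settlement loop below, the utility of cropping a cell is
        #   utility = bca[cell] - estab_cost
        #             - ag_travel_cost * distance(settlement, cell) / sqrt(population)
        # i.e. the benefit-cost value minus a fixed establishment cost and a
        # travel-cost penalty that is dampened for larger settlements.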
for city in self.populated_cities: cells = list( zip(self.cells_in_influence[city][0], self.cells_in_influence[city][1])) # EQUATION ######################################################## utility = [ bca[x, y] - self.estab_cost - (self.ag_travel_cost * np.sqrt( (self.cell_width * (self.settlement_positions[0][city] - self.coordinates[0][x, y]))**2 + (self.cell_height * (self.settlement_positions[1][city] - self.coordinates[1][x, y]))**2)) / np.sqrt(self.population[city]) for (x, y) in cells ] # EQUATION ######################################################## available = [ True if self.occupied_cells[x, y] == 0 else False for (x, y) in cells ] # jointly sort utilities, availability and cells such that cells # with highest utility are first. sorted_utility, sorted_available, sorted_cells = \ list(zip(*sorted(list(zip(utility, available, cells)), reverse=True))) # of these sorted lists, sort filter only available cells available_util = list( compress(list(sorted_utility), list(sorted_available))) available_cells = list( compress(list(sorted_cells), list(sorted_available))) # save local copy of all cropped cells cropped_cells = list(zip(*self.cropped_cells[city])) # select utilities for these cropped cells cropped_utils = [ utility[cells.index(cell)] if cell in cells else -1 for cell in cropped_cells ] # sort utilitites and cropped cells to lowest utilities first city_has_crops = True if len(cropped_cells) > 0 else False if city_has_crops: occupied_util, occupied_cells = \ zip(*sorted(list(zip(cropped_utils, cropped_cells)))) # 1.) include new cells if population exceeds a threshold # calculate number of new cells to crop number_of_new_cells = np.floor(ag_pop_density[city] / self.max_people_per_cropped_cell) \ .astype('int') # and crop them by selecting cells with positive utility from the # beginning of the list for n in range(min([number_of_new_cells, len(available_util)])): if available_util[n] > 0: self.occupied_cells[available_cells[n]] = 1 for dim in range(2): self.cropped_cells[city][dim] \ .append(available_cells[n][dim]) if city_has_crops: # 2.) abandon cells if population too low # after cities age > 5 years if (ag_pop_density[city] < self.min_people_per_cropped_cell and self.age[city] > 5): # There are some inconsistencies here. Cells are abandoned, # if the 'people per cropped land' is lower then a # threshold for 'people per cropped cells. Then the # number of cells to abandon is calculated as 30/people # per cropped land. Why?! (check the original version!) number_of_lost_cells = np.ceil( 30 / ag_pop_density[city]).astype('int') # TO DO: recycle utility and cell list to do this faster. # therefore, filter cropped cells from utility list # and delete last n cells. for n in range( min([number_of_lost_cells, len(occupied_cells)])): dropped_cell = occupied_cells[n] self.occupied_cells[dropped_cell] = 0 for dim in range(2): self.cropped_cells[city][dim] \ .remove(dropped_cell[dim]) abandoned += 1 # 3.) abandon cells with utility <= 0 # find cells that have negative utility and belong # to city under consideration, useless_cropped_cells = [ occupied_cells[i] for i in range(len(occupied_cells)) if occupied_util[i] < 0 and occupied_cells[i] in zip(*self.cropped_cells[city]) ] # and release them. 
for useless_cropped_cell in useless_cropped_cells: self.occupied_cells[useless_cropped_cell] = 0 for dim in range(2): try: self.cropped_cells[city][dim] \ .remove(useless_cropped_cell[dim]) except ValueError: print('ERROR: Useless cell gone already') abandoned += 1 # Finally, update list of lists containing cropped cells for each city # with positive population. self.number_cropped_cells = [ len(self.cropped_cells[city][0]) for city in range(len(self.population)) ] return abandoned, sown def get_pop_mig(self): # gives population and out-migration # print("number of settlements", len(self.population)) # death rate correlates inversely with real income per capita death_rate_diff = self.max_death_rate - self.min_death_rate self.death_rate = [ -death_rate_diff * self.real_income_pc[i] + self.max_death_rate for i in range(len(self.real_income_pc)) ] self.death_rate = list( np.clip(self.death_rate, self.min_death_rate, self.max_death_rate)) # if population control, # birth rate negatively correlates with population size if self.population_control: birth_rate_diff = self.max_birth_rate - self.min_birth_rate self.birth_rate = [ -birth_rate_diff / 10000. * value + self.shift if value > 5000 else self.birth_rate_parameter for value in self.population ] # population grows according to effective growth rate self.population = [ int((1. + self.birth_rate[i] - self.death_rate[i]) * value) for i, value in enumerate(self.population) ] self.population = [ value if value > 0 else 0 for value in self.population ] mig_rate_diffe = self.max_mig_rate - self.min_mig_rate # outmigration rate also correlates # inversely with real income per capita self.mig_rate = [ -mig_rate_diffe * self.real_income_pc[i] + self.max_mig_rate for i in range(len(self.real_income_pc)) ] self.mig_rate = list( np.clip(self.mig_rate, self.min_mig_rate, self.max_mig_rate)) self.out_mig = [ int(self.mig_rate[i] * self.population[i]) for i in range(len(self.population)) ] self.out_mig = [value if value > 0 else 0 for value in self.out_mig] return # impact of sociosphere on ecosphere def update_pop_gradient(self): # pop gradient quantifies the disturbance of the forest by population self.pop_gradient = np.zeros((self.rows, self.columns)) for city in self.populated_cities: distance = np.sqrt(self.area * ( (self.settlement_positions[0][city] - self.coordinates[0])**2 + (self.settlement_positions[1][city] - self.coordinates[1])**2)) # EQUATION ################################################################### self.pop_gradient[self.cells_in_influence[city][0], self.cells_in_influence[city][1]] += \ self.population[city] \ / (300 * (1 + distance[self.cells_in_influence[city][0], self.cells_in_influence[city][1]])) # EQUATION ################################################################### self.pop_gradient[self.pop_gradient > 15] = 15 def evolve_soil_deg(self): # soil degrades for cropped cells cropped = np.concatenate(self.cropped_cells, axis=1).astype('int') self.soil_deg[cropped[0], cropped[1]] += self.deg_rate self.soil_deg[self.forest_state == 3] -= self.reg_rate self.soil_deg[self.soil_deg < 0] = 0 def get_rank(self): # depending on population ranks are assigned # attention: ranks are reverted with respect to Netlogo MayaSim ! 
# 1 => 3 ; 2 => 2 ; 3 => 1 self.rank = [ 3 if value > self.thresh_rank_3 else 2 if value > self.thresh_rank_2 else 1 if value > self.thresh_rank_1 else 0 for index, value in enumerate(self.population) ] return @property def build_routes(self): adj = self.adjacency.copy() adj[adj == -1] = 0 built_links = 0 lost_links = 0 g = nx.from_numpy_matrix(adj, create_using=nx.DiGraph()) self.degree = g.out_degree() # cities with rank>0 are traders and establish links to neighbours for city in self.populated_cities: if self.degree[city] < self.rank[city]: distances = \ (np.sqrt(self.area * (+ (self.settlement_positions[0][city] - self.settlement_positions[0]) ** 2 + (self.settlement_positions[1][city] - self.settlement_positions[1]) ** 2 ))) if self.rank[city] == 3: treshold = 31. * ( self.thresh_rank_3 / self.thresh_rank_3 * 0.5 + 1.) elif self.rank[city] == 2: treshold = 31. * ( self.thresh_rank_2 / self.thresh_rank_3 * 0.5 + 1.) elif self.rank[city] == 1: treshold = 31. * ( self.thresh_rank_1 / self.thresh_rank_3 * 0.5 + 1.) else: treshold = 0 # don't chose yourself as nearest neighbor distances[city] = 2 * treshold # collect close enough neighbors and omit those that are # already connected. a = distances <= treshold b = self.adjacency[city] == 0 nearby = np.array(list(map(operator.and_, a, b))) # if there are traders nearby, # connect to the one with highest population if sum(nearby) != 0: try: new_partner = np.nanargmax(self.population * nearby) self.adjacency[city, new_partner] = 1 self.adjacency[new_partner, city] = -1 built_links += 1 except ValueError: print('ERROR in new partner') print(np.shape(self.population), np.shape(self.settlement_positions[0])) sys.exit(-1) # cities who cant maintain their trade links, loose them: elif self.degree[city] > self.rank[city]: # get neighbors of node neighbors = g.successors(city) # find smallest of neighbors smallest_neighbor = self.population.index( min([self.population[nb] for nb in neighbors])) # cut link with him self.adjacency[city, smallest_neighbor] = 0 self.adjacency[smallest_neighbor, city] = 0 lost_links += 1 return (built_links, lost_links) def get_comps(self): # convert adjacency matrix to compressed sparse row format adjacency_csr = sparse.csr_matrix(np.absolute(self.adjacency)) # extract data vector, row index vector and index pointer vector a = adjacency_csr.data # add one to make indexing compatible to fortran # (where indices start counting with 1) j_a = adjacency_csr.indices + 1 i_c = adjacency_csr.indptr + 1 # determine length of data vectors l_a = np.shape(a)[0] l_ic = np.shape(i_c)[0] # if data vector is not empty, pass data to fortran routine. # else, just fill the centrality vector with ones. if l_a > 0: tmp_comp_size, tmp_degree = \ f90routines.f90sparsecomponents(i_c, a, j_a, self.number_settlements, l_ic, l_a) self.comp_size, self.degree = list(tmp_comp_size), list(tmp_degree) elif l_a == 0: self.comp_size, self.degree = [0] * (l_ic - 1), [0] * (l_ic - 1) return def get_centrality(self): # convert adjacency matrix to compressed sparse row format adjacency_csr = sparse.csr_matrix(np.absolute(self.adjacency)) # extract data vector, row index vector and index pointer vector a = adjacency_csr.data # add one to make indexing compatible to fortran # (where indices start counting with 1) j_a = adjacency_csr.indices + 1 i_c = adjacency_csr.indptr + 1 # determine length of data vectors l_a = np.shape(a)[0] l_ic = np.shape(i_c)[0] # print('number of trade links:', sum(a) / 2) # if data vector is not empty, pass data to fortran routine. 
# else, just fill the centrality vector with ones. if l_a > 0: tmp_centrality = f90routines \ .f90sparsecentrality(i_c, a, j_a, self.number_settlements, l_ic, l_a) self.centrality = list(tmp_centrality) elif l_a == 0: self.centrality = [1] * (l_ic - 1) return def get_crop_income(self, bca): # agricultural benefit of cropping for city in self.populated_cities: crops = bca[self.cropped_cells[city][0], self. cropped_cells[city][1]] # EQUATION # if self.crop_income_mode == "mean": self.crop_yield[city] = self.r_bca_mean \ * np.nanmean(crops[crops > 0]) elif self.crop_income_mode == "sum": self.crop_yield[city] = self.r_bca_sum \ * np.nansum(crops[crops > 0]) self.crop_yield = [ 0 if np.isnan(self.crop_yield[index]) else self.crop_yield[index] for index in range(len(self.crop_yield)) ] return def get_eco_income(self, es): # benefit from ecosystem services of cells in influence # ##EQUATION################################################################### for city in self.populated_cities: if self.eco_income_mode == "mean": self.eco_benefit[city] = self.r_es_mean \ * np.nanmean(es[self.cells_in_influence[city]]) elif self.eco_income_mode == "sum": self.eco_benefit[city] = self.r_es_sum \ * np.nansum(es[self.cells_in_influence[city]]) self.s_es_ag[city] = self.r_es_sum \ * np.nansum(self.es_ag[self.cells_in_influence[city]]) self.s_es_wf[city] = self.r_es_sum \ * np.nansum(self.es_wf[self.cells_in_influence[city]]) self.s_es_fs[city] = self.r_es_sum \ * np.nansum(self.es_fs[self.cells_in_influence[city]]) self.s_es_sp[city] = self.r_es_sum \ * np.nansum(self.es_sp[self.cells_in_influence[city]]) self.s_es_pg[city] = self.r_es_sum \ * np.nansum(self.es_pg[self.cells_in_influence[city]]) try: self.eco_benefit[self.population == 0] = 0 except IndexError: self.print_variable_lengths() # ##EQUATION################################################################### return def get_trade_income(self): # ##EQUATION################################################################### self.trade_income = [ 1. / 30. 
* (1 + self.comp_size[i] / self.centrality[i])**0.9 for i in range(len(self.centrality)) ] self.trade_income = [ self.r_trade if value > 1 else 0 if (value < 0 or self.degree[index] == 0) else self.r_trade * value for index, value in enumerate(self.trade_income) ] # ##EQUATION################################################################### return def get_real_income_pc(self): # combine agricultural, ecosystem service and trade benefit # EQUATION # self.real_income_pc = [ (self.crop_yield[index] + self.eco_benefit[index] + self.trade_income[index]) / self.population[index] if value > 0 else 0 for index, value in enumerate(self.population) ] return def migration(self, es): # if outmigration rate exceeds threshold, found new settlement self.migrants = [0] * self.number_settlements new_settlements = 0 vacant_lands = np.isfinite(es) influenced_cells = np.concatenate(self.cells_in_influence, axis=1) vacant_lands[influenced_cells[0], influenced_cells[1]] = 0 vacant_lands = np.asarray(np.where(vacant_lands == 1)) for city in self.populated_cities: rd = np.random.rand() if (self.out_mig[city] > 400 and len(vacant_lands[0]) > 0 and np.random.rand() <= 0.5): mig_pop = self.out_mig[city] self.migrants[city] = mig_pop self.population[city] -= mig_pop self.pioneer_set = \ vacant_lands[:, np.random.choice(len(vacant_lands[0]), 75)] travel_cost = np.sqrt( self.area * ((self.settlement_positions[0][city] - self.coordinates[0]) **2 + (self.settlement_positions[1][city] - self.coordinates[1])**2)) utility = self.mig_ES_pref * es \ + self.mig_TC_pref * travel_cost utofpio = utility[self.pioneer_set[0], self.pioneer_set[1]] new_loc = self.pioneer_set[:, np.nanargmax(utofpio)] neighbours = \ (np.sqrt(self.area * ((new_loc[0] - self.settlement_positions[0]) ** 2 + (new_loc[1] - self.settlement_positions[1]) ** 2 ))) <= 7.5 summe = np.sum(neighbours) if summe == 0: self.spawn_city(new_loc[0], new_loc[1], mig_pop) index = (vacant_lands[0, :] == new_loc[0]) \ & (vacant_lands[1, :] == new_loc[1]) np.delete(vacant_lands, int(np.where(index)[0]), 1) new_settlements += 1 return new_settlements def kill_cities(self): # BUG: cities can be added twice, # if they have neither population nor cropped cells. # this might lead to unexpected consequences. see what happenes, # when after adding all cities, only unique ones are kept killed_cities = 0 # kill cities if they have either no crops or no inhabitants: dead_city_indices = [ i for i in range(len(self.population)) if self.population[i] <= self.min_city_size ] if self.kill_cities_without_crops: dead_city_indices += [ i for i in range(len(self.population)) if (len(self.cropped_cells[i][0]) <= 0) ] # the following expression only keeps the unique entries. # might solve the problem. 
dead_city_indices = list(set(dead_city_indices)) # remove entries from variables # simple lists that can be deleted elementwise for index in sorted(dead_city_indices, reverse=True): self.number_settlements -= 1 self.failed += 1 del self.age[index] del self.birth_rate[index] del self.death_rate[index] del self.population[index] del self.mig_rate[index] del self.out_mig[index] del self.number_cells_in_influence[index] del self.area_of_influence[index] del self.number_cropped_cells[index] del self.crop_yield[index] del self.eco_benefit[index] del self.rank[index] del self.degree[index] del self.comp_size[index] del self.centrality[index] del self.trade_income[index] del self.real_income_pc[index] del self.cells_in_influence[index] del self.cropped_cells[index] del self.s_es_ag[index] del self.s_es_wf[index] del self.s_es_fs[index] del self.s_es_sp[index] del self.s_es_pg[index] del self.migrants[index] killed_cities += 1 # special cases: self.settlement_positions = \ np.delete(self.settlement_positions, dead_city_indices, axis=1) self.adjacency = \ np.delete(np.delete(self.adjacency, dead_city_indices, axis=0), dead_city_indices, axis=1) # update list of indices for populated and dead cities # a) update list of populated cities self.populated_cities = [ index for index, value in enumerate(self.population) if value > 0 ] # b) update list of dead cities self.dead_cities = [ index for index, value in enumerate(self.population) if value == 0 ] return killed_cities def spawn_city(self, x, y, mig_pop): """ Spawn a new city at given location with given population and append it to all necessary lists. Parameters ---------- x: int x location of new city on map y: int y location of new city on map mig_pop: int initial population of new city """ # extend all variables to include new city self.number_settlements += 1 self.settlement_positions = np.append(self.settlement_positions, [[x], [y]], 1) self.cells_in_influence.append([[x], [y]]) self.cropped_cells.append([[x], [y]]) n = len(self.adjacency) self.adjacency = np.append(self.adjacency, [[0] * n], 0) self.adjacency = np.append(self.adjacency, [[0]] * (n + 1), 1) self.age.append(0) self.birth_rate.append(self.birth_rate_parameter) self.death_rate.append(0.1 + 0.05 * np.random.rand()) self.population.append(mig_pop) self.mig_rate.append(0) self.out_mig.append(0) self.number_cells_in_influence.append(0) self.area_of_influence.append(0) self.number_cropped_cells.append(1) self.crop_yield.append(0) self.eco_benefit.append(0) self.rank.append(0) self.degree.append(0) self.trade_income.append(0) self.real_income_pc.append(0) self.s_es_ag.append(0) self.s_es_wf.append(0) self.s_es_fs.append(0) self.s_es_sp.append(0) self.s_es_pg.append(0) self.migrants.append(0) def run(self, t_max=1): """ Run the model for a given number of steps. 
If no number of steps is given, the model is integrated for one step Parameters ---------- t_max: int number of steps to integrate the model """ # initialize time step t = 0 # print update about output state if self.debug: print('output of settlement and geodata is {} and {}'.format( self.output_settlement_data, self.output_geographic_data)) # initialize variables # net primary productivity npp = np.zeros((self.rows, self.columns)) # water flow if self.debug and t == 0: wf = np.zeros((self.rows, self.columns)) elif not self.debug: wf = np.zeros((self.rows, self.columns)) else: pass # agricultural productivity ag = np.zeros((self.rows, self.columns)) # ecosystem services es = np.zeros((self.rows, self.columns)) # benefit cost map for agriculture bca = np.zeros((self.rows, self.columns)) self.init_output() while t <= t_max: t += 1 if self.debug: print(f"time = {t}, population = {sum(self.population)}") # evolve subselfs # ecosystem self.update_precipitation(t) npp = self.net_primary_prod() self.forest_evolve(npp) # this is curious: only waterflow is used, # water level is abandoned. wf = self.get_waterflow()[1] ag = self.get_ag(npp, wf) es = self.get_ecoserv(ag, wf) bca = self.benefit_cost(ag) # society if len(self.population) > 0: self.get_cells_in_influence() abandoned, sown = self.get_cropped_cells(bca) self.get_crop_income(bca) self.get_eco_income(es) self.evolve_soil_deg() self.update_pop_gradient() self.get_rank() (built, lost) = self.build_routes self.get_comps() self.get_centrality() self.get_trade_income() self.get_real_income_pc() self.get_pop_mig() new_settlements = self.migration(es) killed_settlements = self.kill_cities() else: abandoned = sown = cl = 0 self.step_output(t, npp, wf, ag, es, bca, abandoned, sown, built, lost, new_settlements, killed_settlements) def init_output(self): """initializes data output for trajectory, settlements and geography depending on settings""" if self.output_trajectory: self.init_trajectory_output() self.init_traders_trajectory_output() if self.output_geographic_data or self.output_settlement_data: # If output data location is needed and does not exist, create it. if not os.path.exists(self.output_data_location): os.makedirs(self.output_data_location) if not self.output_data_location.endswith('/'): self.output_data_location += '/' if self.output_settlement_data: settlement_init_data = {'shape': (self.rows, self.columns)} with open(self.settlement_output_path(0), 'wb') as f: pkl.dump(settlement_init_data, f) if self.output_geographic_data: pass def step_output(self, t, npp, wf, ag, es, bca, abandoned, sown, built, lost, new_settlements, killed_settlements): """ call different data saving routines depending on settings. Parameters ---------- t: int Timestep number to append to save file path npp: numpy array Net Primary Productivity on cell basis wf: numpy array Water flow through cell ag: numpy array Agricultural productivity of cell es: numpy array Ecosystem services of cell (that are summed and weighted to calculate ecosystems service income) bca: numpy array Benefit cost analysis of agriculture on cell. 
abandoned: int Number of cells that was abandoned in the previous time step sown: int Number of cells that was newly cropped in the previous time step built : int number of trade links built in this timestep lost : int number of trade links lost in this timestep new_settlements : int number of new settlements that were spawned during the preceeding timestep killed_settlements : int number of settlements that were killed during the preceeding timestep """ # append stuff to trajectory if self.output_trajectory: self.update_trajectory_output(t, [npp, wf, ag, es, bca], built, lost, new_settlements, killed_settlements) self.update_traders_trajectory_output(t) # save maps of spatial data if self.output_geographic_data: self.save_geographic_output(t, npp, wf, ag, es, bca, abandoned, sown) # save data on settlement basis if self.output_settlement_data: self.save_settlement_output(t) def save_settlement_output(self, t): """ Organize settlement based data in Pandas Dataframe and save to file. Parameters ---------- t: int Timestep number to append to save file path """ colums = [ 'population', 'real income', 'ag income', 'es income', 'trade income', 'x position', 'y position', 'out migration', 'degree' ] data = [ self.population, self.real_income_pc, self.crop_yield, self.eco_benefit, self.trade_income, list(self.settlement_positions[0]), list(self.settlement_positions[1]), self.migrants, [self.degree[city] for city in self.populated_cities] ] data = list(map(list, zip(*data))) data_frame = pandas.DataFrame(columns=colums, data=data) with open(self.settlement_output_path(t), 'wb') as f: pkl.dump(data_frame, f) def save_geographic_output(self, t, npp, wf, ag, es, bca, abandoned, sown): """ Organize Geographic data in dictionary (for separate layers of data) and save to file. Parameters ---------- t: int Timestep number to append to save file path npp: numpy array Net Primary Productivity on cell basis wf: numpy array Water flow through cell ag: numpy array Agricultural productivity of cell es: numpy array Ecosystem services of cell (that are summed and weighted to calculate ecosystems service income) bca: numpy array Benefit cost analysis of agriculture on cell. 
abandoned: int Number of cells that was abandoned in the previous time step sown: int Number of cells that was newly cropped in the previous time step """ tmpforest = self.forest_state.copy() tmpforest[np.isnan(self.elev)] = 0 data = { 'forest': tmpforest, 'waterflow': wf, 'cells in influence': self.cells_in_influence, 'number of cells in influence': self.number_cells_in_influence, 'cropped cells': self.cropped_cells, 'number of cropped cells': self.number_cropped_cells, 'abandoned sown': np.array([abandoned, sown]), 'soil degradation': self.soil_deg, 'population gradient': self.pop_gradient, 'adjacency': self.adjacency, 'x positions': list(self.settlement_positions[0]), 'y positions': list(self.settlement_positions[1]), 'population': self.population, 'elev': self.elev, 'rank': self.rank } with open(self.geographic_output_path(t), 'wb') as f: pkl.dump(data, f) def init_trajectory_output(self): self.trajectory.append([ 'time', 'total_population', 'max_settlement_population', 'total_migrants', 'total_settlements', 'total_agriculture_cells', 'total_cells_in_influence', 'total_trade_links', 'mean_cluster_size', 'max_cluster_size', 'new_settlements', 'killed_settlements', 'built_trade_links', 'lost_trade_links', 'total_income_agriculture', 'total_income_ecosystem', 'total_income_trade', 'mean_soil_degradation', 'forest_state_3_cells', 'forest_state_2_cells', 'forest_state_1_cells', 'es_income_forest', 'es_income_waterflow', 'es_income_agricultural_productivity', 'es_income_precipitation', 'es_income_pop_density', 'MAP', 'max_npp', 'mean_waterflow', 'max_AG', 'max_ES', 'max_bca', 'max_soil_deg', 'max_pop_grad' ]) def init_traders_trajectory_output(self): self.traders_trajectory.append([ 'time', 'total_population', 'total_migrants', 'total_traders', 'total_settlements', 'total_agriculture_cells', 'total_cells_in_influence', 'total_trade_links', 'total_income_agriculture', 'total_income_ecosystem', 'total_income_trade', 'es_income_forest', 'es_income_waterflow', 'es_income_agricultural_productivity', 'es_income_precipitation', 'es_income_pop_density' ]) def update_trajectory_output(self, time, args, built, lost, new_settlements, killed_settlements): # args = [npp, wf, ag, es, bca] total_population = sum(self.population) try: max_population = np.nanmax(self.population) except: max_population = float('nan') total_migrangs = sum(self.migrants) total_settlements = len(self.population) total_trade_links = sum(self.degree) / 2 income_agriculture = sum(self.crop_yield) income_ecosystem = sum(self.eco_benefit) income_trade = sum(self.trade_income) number_of_components = float( sum([1 if value > 0 else 0 for value in self.comp_size])) mean_cluster_size = float(sum(self.comp_size)) / number_of_components \ if number_of_components > 0 else 0 try: max_cluster_size = max(self.comp_size) except: max_cluster_size = 0 self.max_cluster_size = max_cluster_size total_agriculture_cells = sum(self.number_cropped_cells) total_cells_in_influence = sum(self.number_cells_in_influence) self.trajectory.append([ time, total_population, max_population, total_migrangs, total_settlements, total_agriculture_cells, total_cells_in_influence, total_trade_links, mean_cluster_size, max_cluster_size, new_settlements, killed_settlements, built, lost, income_agriculture, income_ecosystem, income_trade, np.nanmean(self.soil_deg), np.sum(self.forest_state == 3), np.sum(self.forest_state == 2), np.sum(self.forest_state == 1), np.sum(self.s_es_fs), np.sum(self.s_es_wf), np.sum(self.s_es_ag), np.sum(self.s_es_sp), np.sum(self.s_es_pg), 
np.nanmean(self.spaciotemporal_precipitation), np.nanmax(args[0]), np.nanmean(args[1]), np.nanmax(args[2]), np.nanmax(args[3]), np.nanmax(args[4]), np.nanmax(self.soil_deg), np.nanmax(self.pop_gradient) ]) def update_traders_trajectory_output(self, time): traders = np.where(np.array(self.degree) > 0)[0] total_population = sum([self.population[c] for c in traders]) total_migrants = sum([self.migrants[c] for c in traders]) total_settlements = len(self.population) total_traders = len(traders) total_trade_links = sum(self.degree) / 2 income_agriculture = sum([self.crop_yield[c] for c in traders]) income_ecosystem = sum([self.eco_benefit[c] for c in traders]) income_trade = sum([self.trade_income[c] for c in traders]) income_es_fs = sum([self.s_es_fs[c] for c in traders]) income_es_wf = sum([self.s_es_wf[c] for c in traders]) income_es_ag = sum([self.s_es_ag[c] for c in traders]) income_es_sp = sum([self.s_es_sp[c] for c in traders]) income_es_pg = sum([self.s_es_pg[c] for c in traders]) number_of_components = float( sum([1 if value > 0 else 0 for value in self.comp_size])) mean_cluster_size = (float(sum(self.comp_size)) / number_of_components if number_of_components > 0 else 0) try: max_cluster_size = max(self.comp_size) except: max_cluster_size = 0 total_agriculture_cells = \ sum([self.number_cropped_cells[c] for c in traders]) total_cells_in_influence = \ sum([self.number_cells_in_influence[c] for c in traders]) self.traders_trajectory.append([ time, total_population, total_migrants, total_traders, total_settlements, total_agriculture_cells, total_cells_in_influence, total_trade_links, income_agriculture, income_ecosystem, income_trade, income_es_fs, income_es_wf, income_es_ag, income_es_sp, income_es_pg ]) def get_trajectory(self): try: trj = np.array(self.trajectory) columns = trj[0, :] df = pandas.DataFrame(trj[1:, :], columns=columns) except IOError: print('trajectory mode must be turned on') return df def get_traders_trajectory(self): try: trj = self.traders_trajectory columns = trj.pop(0) df = pandas.DataFrame(trj, columns=columns) except IOError: print('trajectory mode must be turned on') return df def run_test(self, timesteps=5): import shutil N = 50 # define saving location comment = "testing_version" now = datetime.datetime.now() location = "output_data/" \ + "Output_" + comment + '/' if os.path.exists(location): shutil.rmtree(location) os.makedirs(location) # initialize Model model = ModelCore(n=N, debug=True, output_trajectory=True, output_settlement_data=True, output_geographic_data=True, output_data_location=location) # run Model model.crop_income_mode = 'sum' model.r_es_sum = 0.0001 model.r_bca_sum = 0.1 model.population_control = 'False' model.run(timesteps) trj = model.get_trajectory() plot = trj.plot() return 1 def print_variable_lengths(self): for var in dir(self): if not var.startswith('__') and not callable(getattr(self, var)): try: if len(getattr(self, var)) != 432: print(var, len(getattr(self, var))) except: pass if __name__ == "__main__": import matplotlib.pyplot as plt import shutil N = 10 # define saving location comment = "testing_version" now = datetime.datetime.now() location = "output_data/" \ + "Output_" + comment + '/' if os.path.exists(location): shutil.rmtree(location) # os.makedirs(location) # initialize Model model = ModelCore(n=N, debug=True, output_trajectory=True, output_settlement_data=True, output_geographic_data=True, output_data_location=location) # run Model timesteps = 300 model.crop_income_mode = 'sum' model.r_es_sum = 0.0001 model.r_bca_sum = 
0.25 model.population_control = 'False' model.run(timesteps) trj = model.get_trajectory() plot = trj[[ 'total_population', 'total_settlements', 'total_migrants' ]].plot() plt.show() plt.savefig(location + 'plot')
gpl-3.0
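In get_pop_mig above, death and out-migration rates are linear, decreasing functions of real income per capita, clipped to fixed bounds. The following is a minimal sketch of that pattern; the bound values here are hypothetical placeholders, since in the model they are attributes of the ModelCore instance.

import numpy as np

# Hypothetical bounds; in ModelCore these are instance attributes
# (min/max_death_rate, min/max_mig_rate).
MIN_DEATH_RATE, MAX_DEATH_RATE = 0.005, 0.25
MIN_MIG_RATE, MAX_MIG_RATE = 0.0, 0.15

def demographic_rates(real_income_pc):
    """Rates decrease linearly with real income per capita and are clipped."""
    income = np.asarray(real_income_pc, dtype=float)
    death_rate = np.clip(
        MAX_DEATH_RATE - (MAX_DEATH_RATE - MIN_DEATH_RATE) * income,
        MIN_DEATH_RATE, MAX_DEATH_RATE)
    mig_rate = np.clip(
        MAX_MIG_RATE - (MAX_MIG_RATE - MIN_MIG_RATE) * income,
        MIN_MIG_RATE, MAX_MIG_RATE)
    return death_rate, mig_rate

# zero income gives the maximum rates, income >= 1 the minimum rates
print(demographic_rates([0.0, 0.5, 1.0]))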
tapomayukh/projects_in_python
rapid_categorization/haptic_map/outlier/hmm_crossvalidation_force.py
1
19066
# Hidden Markov Model Implementation import pylab as pyl import numpy as np import matplotlib.pyplot as pp #from enthought.mayavi import mlab import scipy as scp import scipy.ndimage as ni import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3') import rospy #import hrl_lib.mayavi2_util as mu import hrl_lib.viz as hv import hrl_lib.util as ut import hrl_lib.matplotlib_util as mpu import pickle import unittest import ghmm import ghmmwrapper import random import sys sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/Variable_length') from data_variable_length_force import Fmat_original if __name__ == '__main__' or __name__ != '__main__': print "Inside outlier HMM model training file" Fmat = Fmat_original # Getting mean / covariance i = 0 number_states = 10 feature_1_final_data = [0.0]*number_states state_1 = [0.0] while (i < 35): data_length = len(Fmat[i]) feature_length = data_length/1 sample_length = feature_length/number_states Feature_1 = Fmat[i][0:feature_length] if i == 0: j = 0 while (j < number_states): feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)] j=j+1 else: j = 0 while (j < number_states): state_1 = Feature_1[sample_length*j:sample_length*(j+1)] #print np.shape(state_1) #print np.shape(feature_1_final_data[j]) feature_1_final_data[j] = feature_1_final_data[j]+state_1 j=j+1 i = i+1 j = 0 mu_rf_force = np.zeros((number_states,1)) sigma_rf = np.zeros((number_states,1)) while (j < number_states): mu_rf_force[j] = np.mean(feature_1_final_data[j]) sigma_rf[j] = scp.std(feature_1_final_data[j]) j = j+1 i = 35 feature_1_final_data = [0.0]*number_states state_1 = [0.0] while (i < 70): data_length = len(Fmat[i]) feature_length = data_length/1 sample_length = feature_length/number_states Feature_1 = Fmat[i][0:feature_length] if i == 35: j = 0 while (j < number_states): feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)] j=j+1 else: j = 0 while (j < number_states): state_1 = Feature_1[sample_length*j:sample_length*(j+1)] feature_1_final_data[j] = feature_1_final_data[j]+state_1 j=j+1 i = i+1 j = 0 mu_rm_force = np.zeros((number_states,1)) sigma_rm = np.zeros((number_states,1)) while (j < number_states): mu_rm_force[j] = np.mean(feature_1_final_data[j]) sigma_rm[j] = scp.std(feature_1_final_data[j]) j = j+1 i = 70 feature_1_final_data = [0.0]*number_states state_1 = [0.0] while (i < 105): data_length = len(Fmat[i]) feature_length = data_length/1 sample_length = feature_length/number_states Feature_1 = Fmat[i][0:feature_length] if i == 70: j = 0 while (j < number_states): feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)] j=j+1 else: j = 0 while (j < number_states): state_1 = Feature_1[sample_length*j:sample_length*(j+1)] feature_1_final_data[j] = feature_1_final_data[j]+state_1 j=j+1 i = i+1 j = 0 mu_sf_force = np.zeros((number_states,1)) sigma_sf = np.zeros((number_states,1)) while (j < number_states): mu_sf_force[j] = np.mean(feature_1_final_data[j]) sigma_sf[j] = scp.std(feature_1_final_data[j]) j = j+1 i = 105 feature_1_final_data = [0.0]*number_states state_1 = [0.0] while (i < 140): data_length = len(Fmat[i]) feature_length = data_length/1 sample_length = feature_length/number_states Feature_1 = Fmat[i][0:feature_length] if i == 105: j = 0 while (j < number_states): feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)] j=j+1 else: j = 0 while (j < number_states): state_1 = Feature_1[sample_length*j:sample_length*(j+1)] feature_1_final_data[j] 
= feature_1_final_data[j]+state_1 j=j+1 i = i+1 j = 0 mu_sm_force = np.zeros((number_states,1)) sigma_sm = np.zeros((number_states,1)) while (j < number_states): mu_sm_force[j] = np.mean(feature_1_final_data[j]) sigma_sm[j] = scp.std(feature_1_final_data[j]) j = j+1 # HMM - Implementation: # 10 Hidden States # Max. Force(For now), Contact Area(Not now), and Contact Motion(Not Now) as Continuous Gaussian Observations from each hidden state # Four HMM-Models for Rigid-Fixed, Soft-Fixed, Rigid-Movable, Soft-Movable # Transition probabilities obtained as upper diagonal matrix (to be trained using Baum_Welch) # For new objects, it is classified according to which model it represenst the closest.. F = ghmm.Float() # emission domain of this model # A - Transition Matrix if number_states == 3: A = [[0.2, 0.5, 0.3], [0.0, 0.5, 0.5], [0.0, 0.0, 1.0]] elif number_states == 5: A = [[0.2, 0.35, 0.2, 0.15, 0.1], [0.0, 0.2, 0.45, 0.25, 0.1], [0.0, 0.0, 0.2, 0.55, 0.25], [0.0, 0.0, 0.0, 0.2, 0.8], [0.0, 0.0, 0.0, 0.0, 1.0]] elif number_states == 10: A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05], [0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05], [0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05], [0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.05, 0.05], [0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05], [0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]] elif number_states == 15: A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.15, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.30, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.30, 0.10, 0.05, 0.05, 0.05, 0.05], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.10, 0.05], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.15, 0.10], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.20, 0.10], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.20], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.50, 0.30], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.40, 0.60], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 1.00]] elif number_states == 20: A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.09, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.15, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 
0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.10, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.10, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.40, 0.20, 0.10, 0.04, 0.02, 0.02, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.03, 0.02], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.05], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.20, 0.40, 0.20, 0.10, 0.10], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.20, 0.40, 0.20, 0.20], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.30, 0.50, 0.20], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.40, 0.60], [0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]] # B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma) B_rf = [0.0]*number_states B_rm = [0.0]*number_states B_sf = [0.0]*number_states B_sm = [0.0]*number_states for num_states in range(number_states): B_rf[num_states] = [mu_rf_force[num_states][0],sigma_rf[num_states][0]] B_rm[num_states] = [mu_rm_force[num_states][0],sigma_rm[num_states][0]] B_sf[num_states] = [mu_sf_force[num_states][0],sigma_sf[num_states][0]] B_sm[num_states] = [mu_sm_force[num_states][0],sigma_sm[num_states][0]] #print B_sm #print mu_sm_motion # pi - initial probabilities per state if number_states == 3: pi = [1./3.] * 3 elif number_states == 5: pi = [0.2] * 5 elif number_states == 10: pi = [0.1] * 10 elif number_states == 15: pi = [1./15.] 
* 15 elif number_states == 20: pi = [0.05] * 20 # generate RF, RM, SF, SM models from parameters model_rf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf, pi) # Will be Trained model_rm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm, pi) # Will be Trained model_sf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf, pi) # Will be Trained model_sm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm, pi) # Will be Trained trial_number = 1 rf_final = np.matrix(np.zeros((28,1))) rm_final = np.matrix(np.zeros((28,1))) sf_final = np.matrix(np.zeros((28,1))) sm_final = np.matrix(np.zeros((28,1))) total_seq = Fmat for i in range(140): total_seq[i][:] = sum(total_seq[i][:],[]) while (trial_number < 6): # For Training if (trial_number == 1): j = 5 total_seq_rf = total_seq[1:5] total_seq_rm = total_seq[36:40] total_seq_sf = total_seq[71:75] total_seq_sm = total_seq[106:110] #print total_seq_rf while (j < 35): total_seq_rf = total_seq_rf+total_seq[j+1:j+5] total_seq_rm = total_seq_rm+total_seq[j+36:j+40] total_seq_sf = total_seq_sf+total_seq[j+71:j+75] total_seq_sm = total_seq_sm+total_seq[j+106:j+110] j = j+5 if (trial_number == 2): j = 5 total_seq_rf = [total_seq[0]]+total_seq[2:5] total_seq_rm = [total_seq[35]]+total_seq[37:40] total_seq_sf = [total_seq[70]]+total_seq[72:75] total_seq_sm = [total_seq[105]]+total_seq[107:110] #print total_seq_rf while (j < 35): total_seq_rf = total_seq_rf+[total_seq[j+0]]+total_seq[j+2:j+5] total_seq_rm = total_seq_rm+[total_seq[j+35]]+total_seq[j+37:j+40] total_seq_sf = total_seq_sf+[total_seq[j+70]]+total_seq[j+72:j+75] total_seq_sm = total_seq_sm+[total_seq[j+105]]+total_seq[j+107:j+110] j = j+5 if (trial_number == 3): j = 5 total_seq_rf = total_seq[0:2]+total_seq[3:5] total_seq_rm = total_seq[35:37]+total_seq[38:40] total_seq_sf = total_seq[70:72]+total_seq[73:75] total_seq_sm = total_seq[105:107]+total_seq[108:110] while (j < 35): total_seq_rf = total_seq_rf+total_seq[j+0:j+2]+total_seq[j+3:j+5] total_seq_rm = total_seq_rm+total_seq[j+35:j+37]+total_seq[j+38:j+40] total_seq_sf = total_seq_sf+total_seq[j+70:j+72]+total_seq[j+73:j+75] total_seq_sm = total_seq_sm+total_seq[j+105:j+107]+total_seq[j+108:j+110] j = j+5 if (trial_number == 4): j = 5 total_seq_rf = total_seq[0:3]+total_seq[4:5] total_seq_rm = total_seq[35:38]+total_seq[39:40] total_seq_sf = total_seq[70:73]+total_seq[74:75] total_seq_sm = total_seq[105:108]+total_seq[109:110] while (j < 35): total_seq_rf = total_seq_rf+total_seq[j+0:j+3]+total_seq[j+4:j+5] total_seq_rm = total_seq_rm+total_seq[j+35:j+38]+total_seq[j+39:j+40] total_seq_sf = total_seq_sf+total_seq[j+70:j+73]+total_seq[j+74:j+75] total_seq_sm = total_seq_sm+total_seq[j+105:j+108]+total_seq[j+109:j+110] j = j+5 if (trial_number == 5): j = 5 total_seq_rf = total_seq[0:4] total_seq_rm = total_seq[35:39] total_seq_sf = total_seq[70:74] total_seq_sm = total_seq[105:109] while (j < 35): total_seq_rf = total_seq_rf+total_seq[j+0:j+4] total_seq_rm = total_seq_rm+total_seq[j+35:j+39] total_seq_sf = total_seq_sf+total_seq[j+70:j+74] total_seq_sm = total_seq_sm+total_seq[j+105:j+109] j = j+5 train_seq_rf = total_seq_rf train_seq_rm = total_seq_rm train_seq_sf = total_seq_sf train_seq_sm = total_seq_sm #print train_seq_rf[27] final_ts_rf = ghmm.SequenceSet(F,train_seq_rf) final_ts_rm = ghmm.SequenceSet(F,train_seq_rm) final_ts_sf = ghmm.SequenceSet(F,train_seq_sf) final_ts_sm = ghmm.SequenceSet(F,train_seq_sm) model_rf.baumWelch(final_ts_rf) model_rm.baumWelch(final_ts_rm) 
model_sf.baumWelch(final_ts_sf) model_sm.baumWelch(final_ts_sm) # For Testing if (trial_number == 1): j = 5 total_seq_rf = [total_seq[0]] total_seq_rm = [total_seq[35]] total_seq_sf = [total_seq[70]] total_seq_sm = [total_seq[105]] #print np.shape(total_seq_rf) while (j < 35): total_seq_rf = total_seq_rf+[total_seq[j]] total_seq_rm = total_seq_rm+[total_seq[j+35]] total_seq_sf = total_seq_sf+[total_seq[j+70]] total_seq_sm = total_seq_sm+[total_seq[j+105]] j = j+5 if (trial_number == 2): j = 5 total_seq_rf = [total_seq[1]] total_seq_rm = [total_seq[36]] total_seq_sf = [total_seq[71]] total_seq_sm = [total_seq[106]] while (j < 35): total_seq_rf = total_seq_rf+[total_seq[j+1]] total_seq_rm = total_seq_rm+[total_seq[j+36]] total_seq_sf = total_seq_sf+[total_seq[j+71]] total_seq_sm = total_seq_sm+[total_seq[j+106]] j = j+5 if (trial_number == 3): j = 5 total_seq_rf = [total_seq[2]] total_seq_rm = [total_seq[37]] total_seq_sf = [total_seq[72]] total_seq_sm = [total_seq[107]] while (j < 35): total_seq_rf = total_seq_rf+[total_seq[j+2]] total_seq_rm = total_seq_rm+[total_seq[j+37]] total_seq_sf = total_seq_sf+[total_seq[j+72]] total_seq_sm = total_seq_sm+[total_seq[j+107]] j = j+5 if (trial_number == 4): j = 5 total_seq_rf = [total_seq[3]] total_seq_rm = [total_seq[38]] total_seq_sf = [total_seq[73]] total_seq_sm = [total_seq[108]] while (j < 35): total_seq_rf = total_seq_rf+[total_seq[j+3]] total_seq_rm = total_seq_rm+[total_seq[j+38]] total_seq_sf = total_seq_sf+[total_seq[j+73]] total_seq_sm = total_seq_sm+[total_seq[j+108]] j = j+5 if (trial_number == 5): j = 5 total_seq_rf = [total_seq[4]] total_seq_rm = [total_seq[39]] total_seq_sf = [total_seq[74]] total_seq_sm = [total_seq[109]] while (j < 35): total_seq_rf = total_seq_rf+[total_seq[j+4]] total_seq_rm = total_seq_rm+[total_seq[j+39]] total_seq_sf = total_seq_sf+[total_seq[j+74]] total_seq_sm = total_seq_sm+[total_seq[j+109]] j = j+5 trial_number = trial_number + 1 print "Outlier HMM model trained"
mit
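In the script above, the per-state Gaussian emission parameters are seeded by cutting every force sequence into number_states equal segments and pooling the samples of each segment across all training trials, then taking the mean and standard deviation per segment. Below is a condensed sketch of that pooling step; state_stats is a hypothetical helper name, not part of the original script.

import numpy as np

def state_stats(sequences, number_states=10):
    # pool the samples of the j-th segment of every sequence
    pooled = [[] for _ in range(number_states)]
    for seq in sequences:
        sample_length = len(seq) // number_states
        for j in range(number_states):
            pooled[j].extend(seq[sample_length * j:sample_length * (j + 1)])
    # per-state (mu, sigma) pairs, i.e. the rows of an emission matrix B
    mu = [np.mean(p) for p in pooled]
    sigma = [np.std(p) for p in pooled]
    return np.column_stack((mu, sigma))

# e.g. state_stats(Fmat[0:35]).tolist() would give the rigid-fixed emission
# parameters in the same (mu, sigma) layout, assuming Fmat holds flat force lists.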
dav-stott/phd-thesis
spectra_thesis_ais.py
1
70177
# -*- coding: utf-8 -*- """ Created on Fri Jul 25 08:48:28 2014 @author: david """ #*************** IMPORT DEPENDANCIES******************************************* import numpy as np #import spec_gdal4 as spg from osgeo import gdal import os import csv #import h5py import datetime import numpy.ma as ma #from StringIO import StringIO #import shapely #import r2py from osgeo import gdal_array from osgeo import gdalconst from osgeo.gdalconst import * from osgeo import ogr from osgeo import osr from scipy.spatial import ConvexHull from scipy.signal import find_peaks_cwt from scipy.signal import savgol_filter from scipy import interpolate import matplotlib.pyplot as plt #from shapely.geometry import LineString ################# Functions ################################################### '''These here are functions that are not part of any specific class- these are used by the data import classes for functions such as smoothing''' def smoothing(perc_out, block_start, block_end, kparam, weight, sparam): #D sm_spline_block = perc_out[block_start:block_end,:] sm_x = sm_spline_block[:,0] sm_y = sm_spline_block[:,1] sm_len = sm_x.shape sm_weights = np.zeros(sm_len)+weight sm_spline = interpolate.UnivariateSpline(sm_x, sm_y, k=kparam, w=sm_weights, s=sparam) spline = sm_spline(sm_x) spline = np.column_stack((sm_x,spline)) return spline def interpolate_gaps(array1, array2): array_end = array1.shape[0]-1 array1_endx = array1[array_end, 0] #get the start point of the second array array2_start = array2[0,0] #get the length of the area to be interpolated x_len = array2_start-array1_endx+1 #generate x values to use for the array xvals = np.linspace(array1_endx, array2_start, num=x_len) #y val for the start of the interpolated area yval_array1 = array1[array_end,1] # y val for the end of interpolated area yval_array2 = array2[0,1] #stack the values into a new array xin = np.append(array1_endx, array2_start) yin = np.append(yval_array1, yval_array2) #numpy.interp(x, xp, fp) gap_filling = np.interp(xvals, xin, yin) filled_x = np.column_stack((xvals, gap_filling)) print (filled_x.shape) return filled_x class absorption_feature(): '''this class is used for the characterisation of spectral absortion features, and their investigation using continuum removal''' def __init__(self, spectra, feat_start, feat_end, feat_centre): self.wl = spectra[:,0] self.values = spectra[:,1] print ('CALL TO ABSORPTION FEATURE') # start of absorption feature self.feat_start = feat_start # end of absorption feature self.feat_end = feat_end # approximate 'centre' of feature self.feat_centre = feat_centre #get the range of the data self.min_wl = self.wl[0] self.max_wl = self.wl[-1] print ('Absorption feature',self.feat_start,self.feat_end) #define feature name self.feat_name = str(self.feat_start)+'_'+str(self.feat_end) '''# if the feature is within the range of the sensor, do stuff if self.feat_start > self.min_wl and self.feat_end < self.max_wl: print 'can do stuff with this data' try: self.abs_feature() print ('Absorption feature analysis sussceful') except: print ('ERROR analysing absorption feature', self.feat_name) pass else: print ('Cannot define feature: Out of range')''' ########## Methods ################################################## def abs_feature(self): print ('Call to abs_feature made') # Meffod to calculate the end points of the absorption feature # Does this using the Qhull algorithim form scipy spatial #use the initial defintnion of the absorption feature as a staring point # get the indices for these 
cont_rem_stacked = None ft_def_stacked = None start_point = np.argmin(np.abs(self.wl-self.feat_start)) end_point = np.argmin(np.abs(self.wl-self.feat_end)) centre = np.argmin(np.abs(self.wl-self.feat_centre)) #find the index minima of reflectance minima = np.argmin(self.values[start_point:end_point])+start_point # if the minima = the start point then the start point is the minima if minima == start_point: left = minima #if not then the left side of the feature is the maixima on the left of the minima elif minima <= centre: left = start_point+np.argmax(self.values[start_point:centre]) else: left = start_point+np.argmax(self.values[start_point:minima]) #right is the maxima on the right of the absorption feature if minima == end_point: right = minima else: right = minima+np.argmax(self.values[minima:end_point]) # use left and right to create a 2D array of points hull_in = np.column_stack((self.wl[left:right],self.values[left:right])) #determine the minima of the points hull_min = minima-left if hull_min <= 0: hull_min=0 #find the wavelength at minima hull_min_wl = hull_in[hull_min,0] # define the wavelength ranges we'll use to select simplices ft_left_wl = hull_min_wl-((hull_min_wl-hull_in[0,0])/2) ft_right_wl = hull_min_wl+((hull_in[-1,0]-hull_min_wl)/2) #use scipy.spatial convex hull to determine the convex hull of the points hull = ConvexHull(hull_in) # get the simplex tuples from the convex hull simplexes = hull.simplices # create an empty list to store simplices potentially related to our feature feat_pos = [] #iterate through the simplices for simplex in simplexes: #extract vertices from simplices vertex1 = simplex[0] vertex2 = simplex[1] #print 'VERT!',hull_in[vertex1,0],hull_in[vertex2,0] ''' We're only interested in the upper hull. Qhull moves counter- clockwise. Therefore we're only interested in those points where vertex 1 is greater than vertex 2''' '''The above may be total bollocks''' if not vertex1 < vertex2: '''We then use the wavelength ranges to determine which simplices relate to our absorption feature''' if hull_in[vertex2,0] <= ft_left_wl and \ hull_in[vertex2,0] >= self.wl[left] and \ hull_in[vertex1,0] >= ft_right_wl and \ hull_in[vertex1,0] <= self.wl[right]: # append the vertices to the list print (hull_in[vertex2,0]) print (hull_in[vertex1,0]) feat_pos.append((vertex2,vertex1)) print ('feat_pos length:',len(feat_pos), type(feat_pos)) #print feat_pos[0],feat_pos[1] else: continue '''We only want one feature here. If there's more than one or less than one we're not interested as we're probably not dealing with vegetation''' # If there's less than one feature... if len(feat_pos) < 1: print ('Absorption feature cannot be defined:less than one feature') ft_def_stacked = None ft_def_hdr = None cont_rem_stacked = None elif len(feat_pos) == 1: feat_pos=feat_pos[0] print ('£££££',feat_pos, type(feat_pos)) else: #if theres more than one fid the widest one. this is not optimal. 
if len(feat_pos) >1: feat_width = [] for pair in feat_pos: feat_width.append(pair[1]-pair[0]) print ('feat width:', feat_width) #feat_width = np.asarray(feat_width) print (feat_width) f_max = feat_width.index(max(feat_width)) print (f_max) feat_pos = feat_pos[f_max] print (type(feat_pos)) if not feat_pos==None: feat_pos = feat_pos[0], feat_pos[1] print ('DOES MY FEAT_POS CONVERSION WORK?', feat_pos) print ('Analysing absorption feature') #slice feature = hull_in[feat_pos[0]:feat_pos[1],:] print ('Feature shape',feature.shape,'start:',feature[0,0],'end:',feature[-1,0]) #get the minima in the slice minima_pos = np.argmin(feature[:,1]) #continuum removal contrem = self.continuum_removal(feature,minima_pos) # set up single value outputs # start of feature refined_start = feature[0,0] # end of feature refined_end = feature[-1,0] # wavelength at minima minima_WL = feature[minima_pos,0] # reflectance at minima minima_R = feature[minima_pos,1] # area of absorption feature feat_area = contrem[4] # two band normalised index of minima and start of feature left_tbvi = (refined_start-minima_R)/(refined_start+minima_R) # two band normalised index of minima and right of feature right_tbvi = (refined_end-minima_R)/(refined_end+minima_R) # gradient of the continuum line cont_gradient = np.mean(np.gradient(contrem[0])) # area of continuum removed absorption feature cont_rem_area = contrem[3] # maxima of continuum removed absorption feature cont_rem_maxima = np.max(contrem[1]) # wavelength of maxima of continuum removed absorption feature cont_rem_maxima_wl = feature[np.argmax(contrem[1]),0] #area of left part of continuum removed feature cont_area_l = contrem[5] if cont_area_l == None: cont_area_l=0 #are aof right part of continuum removed feature cont_area_r = contrem[6] #stack these into a lovely array ft_def_stacked = np.column_stack((refined_start, refined_end, minima_WL, minima_R, feat_area, left_tbvi, right_tbvi, cont_gradient, cont_rem_area, cont_rem_maxima, cont_rem_maxima_wl, cont_area_l, cont_area_r)) ft_def_hdr = str('"Refined start",'+ '"Refined end",'+ '"Minima Wavelenght",'+ '"Minima Reflectance",'+ '"Feature Area",'+ '"Left TBVI",'+ '"Right TBVI",'+ '"Continuum Gradient",'+ '"Continuum Removed Area",'+ '"Continuum Removed Maxima",'+ '"Continuum Removed Maxima WL",'+ '"Continuum Removed Area Left",'+ '"Continuum Removed Area Right",') #print ft_def_stacked.shape #save the stacked outputs as hdf # stack the 2d continuum removed outputs cont_rem_stacked = np.column_stack((feature[:,0], feature[:,1], contrem[0], contrem[1], contrem[2])) print ('CREM', cont_rem_stacked.shape) return ft_def_stacked, ft_def_hdr, cont_rem_stacked def continuum_removal(self,feature,minima): #method to perform continuum r=<emoval #pull out endmenmbers end_memb = np.vstack((feature[0,:],feature[-1,:])) #interpolate between the endmembers using x intervals continuum_line = np.interp(feature[:,0], end_memb[:,0], end_memb[:,1]) #continuum removal continuum_removed = continuum_line/feature[:,1] #stack into coord pairs so we can measure the area of the feature ft_coords = np.vstack((feature, np.column_stack((feature[:,0],continuum_line)))) #get the area area = self.area(ft_coords) #get the area of the continuum removed feature cont_rem_2d = np.column_stack((feature[:,0],continuum_removed)) cont_r_area = self.area(cont_rem_2d) #band-normalised by area continuum removal cont_BNA = (1-(feature[:,1]/continuum_line))/area #continuum removed area on left of minima cont_area_left = self.area(cont_rem_2d[0:minima,:]) #continuum 
removed area on right of minima cont_area_right = self.area(cont_rem_2d[minima:,:]) return (continuum_line, continuum_removed, cont_BNA, cont_r_area, area, cont_area_left, cont_area_right) #define area of 2d polygon- using shoelace formula def area(self, coords2d): #setup counter total = 0.0 #get the number of coorsinate pairs N = coords2d.shape[0] #iterate through these for i in range(N): #define the first coordinate pair vertex1 = coords2d[i] #do the second vertex2 = coords2d[(i+1) % N] #append the first & second distance to the toatal total += vertex1[0]*vertex2[1] - vertex1[1]*vertex2[0] #return area return abs(total/2) class Indices(): #class that does vegetation indices def __init__(self,spectra): self.wl = spectra[:,0] self.values = spectra[:,1] self.range = (np.min(self.wl),np.max(self.wl)) '''So, the init method here checks the range of the sensor and runs the appropriate indices within that range, and saves them as hdf5. The indices are all defined as methods of this class''' def visnir(self): # Sensor range VIS-NIR if self.range[0] >= 350 and \ self.range[0] <= 500 and \ self.range[1] >= 900: vis_nir = np.column_stack((self.sr700_800(), self.ndvi694_760(), self.ndvi695_805(), self.ndvi700_800(), self.ndvi705_750(), self.rdvi(), self.savi(), self.msavi2(), self.msr(), self.msrvi(), self.mdvi(), self.tvi(), self.mtvi(), self.mtvi2(), self.vog1vi(), self.vog2(), self.prsi(), self.privi(), self.sipi(), self.mcari(), self.mcari1(), self.mcari2(), self.npci(), self.npqi(), self.cri1(), self.cri2(), self.ari1(), self.ari2(), self.wbi())) vis_nir_hdr=str('"sr700_800",'+ '"ndvi694_760",'+ '"ndvi695_805",'+ '"ndvi700_800",'+ '"ndvi705_750",'+ '"rdvi",'+ '"savi",'+ '"msavi2",'+ '"msr",'+ '"msrvi",'+ '"mdvi",'+ '"tvi",'+ '"mtvi",'+ '"mtvi2",'+ '"vog1vi",'+ '"vog2",'+ '"prsi"'+ '"privi",'+ '"sipi",'+ '"mcari",'+ '"mcari1",'+ '"mcari2",'+ '"npci",'+ '"npqi",'+ '"cri1",'+ '"cri2",'+ '"ari1",'+ '"ari2",'+ '"wbi"') else: vis_nir = None vis_nir_hdr = None return vis_nir,vis_nir_hdr #Range NIR-SWIR def nir_swir(self): if self.range[0] <= 900 and self.range[1] >=2000: nir_swir = np.column_stack((self.ndwi(), self.msi(), self.ndii())) nir_swir_hdr = str('"ndwi",'+ '"msi",'+ '"ndii"') else: #continue print ('not nir-swir') nir_swir=None nir_swir_hdr=None return nir_swir, nir_swir_hdr #range SWIR def swir(self): if self.range[1] >=2000: swir = np.column_stack((self.ndni(), self.ndli())) swir_hdr=str('"ndni",'+ '"ndli"') else: print ('swir-nir') swir = None swir_hdr = None #continue return swir,swir_hdr #||||||||||||||||||||| Methods ||||||||||||||||||||||||||||||||||||||||||||||| # function to run every permutation of the NDVI type index across the Red / IR # ...... VIS / NIR methods .... def multi_tbvi (self, red_start=650, red_end=750, ir_start=700, ir_end=850): # get the indicies of the regions we're going to use. 
# we've added default values here, but they can happily be overidden #start of red red_l =np.argmin(np.abs(self.wl-red_start)) #end of red red_r = np.argmin(np.abs(self.wl-red_end)) #start of ir ir_l = np.argmin(np.abs(self.wl-ir_start)) #end of ir ir_r = np.argmin(np.abs(self.wl-ir_end)) #slice left = self.values[red_l:red_r] right = self.values[ir_l:ir_r] #set up output values = np.empty(3) #set up counter l = 0 #loop throught the values in the red for lvalue in left: l_wl = self.wl[l+red_l] r = 0 l = l+1 #then calculate the index with each wl in the NIR for rvalue in right: value = (rvalue-lvalue)/(rvalue+lvalue) r_wl = self.wl[r+ir_l] out = np.column_stack((l_wl,r_wl,value)) values = np.vstack((values, out)) out = None r = r+1 return values[1:,:] def sr700_800 (self, x=700, y=800): index = self.values[np.argmin(np.abs(self.wl-x))]/self.values[np.argmin(np.abs(self.wl-y))] return index def ndvi705_750 (self, x=705, y=750): index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\ (self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))]) return index def ndvi700_800 (self, x=700, y=800): index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\ (self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))]) return index def ndvi694_760 (self, x=694, y=760): index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\ (self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))]) return index def ndvi695_805 (self, x=695, y=805): index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\ (self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))]) return index def npci (self, x=430, y=680): index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\ (self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))]) return index def npqi (self, x=415, y=435): index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\ (self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))]) return index #mSRvi #= (750-445)/(705+445) def msrvi (self): x = 750 y = 445 z = 705 x_val = self.values[np.argmin(np.abs(self.wl-x))] y_val = self.values[np.argmin(np.abs(self.wl-y))] z_val = self.values[np.argmin(np.abs(self.wl-z))] msrvi_val = (x_val-y_val)/(z_val+y_val) return msrvi_val #Vogelmann Red Edge 1 #740/720 def vog1vi (self): x = 740 y = 720 x_val = self.values[np.argmin(np.abs(self.wl-x))] y_val = self.values[np.argmin(np.abs(self.wl-y))] vog1vi_val = (x_val/y_val) return vog1vi_val #Vogelmann Red Edge 2 #= (734-747)/(715+726) def vog2 (self): v = 734 x = 747 y = 715 z = 726 v_val = self.values[np.argmin(np.abs(self.wl-v))] x_val = self.values[np.argmin(np.abs(self.wl-x))] y_val = self.values[np.argmin(np.abs(self.wl-y))] z_val = self.values[np.argmin(np.abs(self.wl-z))] vog2_val = (v_val-x_val)/(y_val+z_val) return vog2_val #PRI # (531-570)/(531+570) def privi (self): x = 531 y = 570 x_val = self.values[np.argmin(np.abs(self.wl-x))] y_val = self.values[np.argmin(np.abs(self.wl-y))] privi_val = (x_val-y_val)/(x_val+y_val) return privi_val #SIPI #(800-445)/(800-680) def sipi (self): x = 800 y = 445 z = 680 x_val = self.values[np.argmin(np.abs(self.wl-x))] y_val = self.values[np.argmin(np.abs(self.wl-y))] z_val = 
self.values[np.argmin(np.abs(self.wl-z))] sipi_val = (x_val-y_val)/(x_val+z_val) return sipi_val #Water band index # WBI = 900/700 def wbi (self): x = 900 y = 700 x_val = self.values[np.argmin(np.abs(self.wl-x))] y_val = self.values[np.argmin(np.abs(self.wl-y))] wbi_val = (x_val/y_val) return wbi_val #mNDVI #= (750-705)/((750+705)-(445)) def mdvi (self): x = 750 y = 705 z = 445 x_val = self.values[np.argmin(np.abs(self.wl-x))] y_val = self.values[np.argmin(np.abs(self.wl-y))] z_val = self.values[np.argmin(np.abs(self.wl-z))] mdvi_val = (x_val-y_val)/((x_val+y_val)-z_val) return mdvi_val #Carotenid Reflectance Index #CRI1 = (1/510)-(1/550) def cri1 (self): x = 510 y = 550 x_val = self.values[np.argmin(np.abs(self.wl-x))] y_val = self.values[np.argmin(np.abs(self.wl-y))] cri1_val = (1/x_val)-(1/y_val) return cri1_val #CRI2 = (1/510)-(1/700) def cri2 (self): x = 510 y = 700 x_val = self.values[np.argmin(np.abs(self.wl-x))] y_val = self.values[np.argmin(np.abs(self.wl-y))] cri2_val = (1/x_val)-(1/y_val) return cri2_val #Anthocyanin #ARI1 = (1/550)-(1/700) def ari1 (self): x = 550 y = 700 x_val = self.values[np.argmin(np.abs(self.wl-x))] y_val = self.values[np.argmin(np.abs(self.wl-y))] ari1_val = (1/x_val)-(1/y_val) return ari1_val #ARI2 = 800*((1/550)-(1/700)_)) def ari2 (self): x = 510 y = 700 x_val = self.values[np.argmin(np.abs(self.wl-x))] y_val = self.values[np.argmin(np.abs(self.wl-y))] ari2_val = 800*((1/x_val)-(1/y_val)) return ari2_val #MSR #=((800/670)-1)/SQRT(800+670) def msr (self): x = 800 y = 670 x_val = self.values[np.argmin(np.abs(self.wl-x))] y_val = self.values[np.argmin(np.abs(self.wl-y))] msr_val = ((x_val/y_val)-1)/(np.sqrt(x_val+y_val)) return msr_val #SAVI #= (1+l)(800-670)/(800+670+l) def savi (self, l=0.5): x = 800 y = 670 l = 0.5 x_val = self.values[np.argmin(np.abs(self.wl-x))] y_val = self.values[np.argmin(np.abs(self.wl-y))] savi_val = ((1+l)*(x_val-y_val))/(x_val+y_val+l) return savi_val #MSAVI #=1/2(sqrt(2*800)+1)-SQRT(((2*800+1)sqr)-8*(800-670) def msavi2 (self): x = 800 y = 670 x_val = self.values[np.argmin(np.abs(self.wl-x))] y_val = self.values[np.argmin(np.abs(self.wl-y))] msavi2_top1 = (2*x_val+1) msavi2_top2 = (np.sqrt(np.square(2*x_val+1)-(8*(x_val-y_val)))) msavi2_top = msavi2_top1-msavi2_top2 msavi2_val = msavi2_top/2 return msavi2_val #Modified clhoropyll absorption indec #MCARI = ((700-670)-0.2*(700-550))*(700/670) def mcari (self): x = 700 y = 670 z = 550 x_val = self.values[np.argmin(np.abs(self.wl-x))] y_val = self.values[np.argmin(np.abs(self.wl-y))] z_val = self.values[np.argmin(np.abs(self.wl-z))] mcari_val = (x_val-y_val)-(0.2*(x_val-z_val)*(x_val/y_val)) return mcari_val #Triangular vegetation index #TVI 0.5*(120*(750-550))-(200*(670-550)) def tvi (self): x = 750 y = 550 z = 670 x_val = self.values[np.argmin(np.abs(self.wl-x))] y_val = self.values[np.argmin(np.abs(self.wl-y))] z_val = self.values[np.argmin(np.abs(self.wl-z))] tvi_val = 0.5*((120*(x_val-y_val))-(200*(z_val+y_val))) return tvi_val #MCAsavRI1 = 1.2*(2.5*(800-67-)-(1.3*800-550) def mcari1 (self): x = 800 y = 670 z = 550 x_val = self.values[np.argmin(np.abs(self.wl-x))] y_val = self.values[np.argmin(np.abs(self.wl-y))] z_val = self.values[np.argmin(np.abs(self.wl-z))] mcari1_val = (1.2*((2.5*(x_val-y_val)))-(1.3*(x_val+z_val))) return mcari1_val #MTVI1 #=1.2*((1.2*(800-550))-(2.5(670-550))) def mtvi (self): x = 800 y = 550 z = 670 x_val = self.values[np.argmin(np.abs(self.wl-x))] y_val = self.values[np.argmin(np.abs(self.wl-y))] z_val = self.values[np.argmin(np.abs(self.wl-z))] 
        mtvi_val = 1.2*((1.2*(x_val-y_val))-(2.5*(z_val-y_val)))
        return mtvi_val

    def mcari2 (self):
        x = 800
        y = 670
        z = 550
        x_val = self.values[np.argmin(np.abs(self.wl-x))]
        y_val = self.values[np.argmin(np.abs(self.wl-y))]
        z_val = self.values[np.argmin(np.abs(self.wl-z))]
        mcari2_top = (1.5*(2.5*(x_val-y_val)))-(1.3*(x_val-z_val))
        mcari2_btm = np.sqrt(np.square((2*x_val)+1)-((6*x_val)-(5*(np.sqrt(y_val))))-0.5)
        mcari2_val = mcari2_top/mcari2_btm
        return mcari2_val

    #MTVI2 = (1.5*(2.5*(800-670)-2.5*(800-550)))/sqrt(((2*800+1)sq)-((6*800)-(5*sqrt(670)))-0.5)
    def mtvi2 (self):
        x = 800
        y = 670
        z = 550
        x_val = self.values[np.argmin(np.abs(self.wl-x))]
        y_val = self.values[np.argmin(np.abs(self.wl-y))]
        z_val = self.values[np.argmin(np.abs(self.wl-z))]
        mtvi2_top = (1.5*(2.5*(x_val-z_val)))-(1.3*(x_val-z_val))
        mtvi2_btm = np.sqrt(np.square((2*x_val)+1)-((6*x_val)-(5*(np.sqrt(y_val))))-0.5)
        mtvi2_val = mtvi2_top/mtvi2_btm
        return mtvi2_val

    #Renormalised DVI
    #RDVI = (800-670)/sqrt(800+670)
    def rdvi (self):
        x = 800
        y = 670
        x_val = self.values[np.argmin(np.abs(self.wl-x))]
        y_val = self.values[np.argmin(np.abs(self.wl-y))]
        rdvi_val = (x_val-y_val)/np.sqrt(x_val+y_val)
        return rdvi_val

    #Plant senescence reflectance index
    #PRSI = (680-500)/750
    def prsi (self):
        x = 680
        y = 500
        z = 750
        x_val = self.values[np.argmin(np.abs(self.wl-x))]
        y_val = self.values[np.argmin(np.abs(self.wl-y))]
        z_val = self.values[np.argmin(np.abs(self.wl-z))]
        prsi_val = (x_val-y_val)/z_val
        return prsi_val

    #||||||||||||||||||||||| SWIR methods ||||||||||||||||||||||||||||||||||||
    #Cellulose Absorption Index
    #CAI =0.5*(2000-2200)/2100
    def cai (self):
        x = 2000
        y = 2200
        z = 2100
        x_val = self.values[np.argmin(np.abs(self.wl-x))]
        y_val = self.values[np.argmin(np.abs(self.wl-y))]
        z_val = self.values[np.argmin(np.abs(self.wl-z))]
        cai_val = 0.5*(x_val-y_val)-z_val
        return cai_val

    #Normalized Lignin Difference
    #NDLI = (log(1/1754)-log(1/1680))/(log(1/1754)+log(1/1680))
    def ndli (self):
        x = 1754
        y = 1680
        x_val = self.values[np.argmin(np.abs(self.wl-x))]
        y_val = self.values[np.argmin(np.abs(self.wl-y))]
        ndli_val = (np.log(1/x_val)-np.log(1/y_val))/(np.log(1/x_val)+np.log(1/y_val))
        return ndli_val

    #Canopy N
    #NDNI = (log(1/1510)-log(1/1680))/(log(1/1510)+log(1/1680))
    def ndni (self):
        x = 1510
        y = 1680
        x_val = self.values[np.argmin(np.abs(self.wl-x))]
        y_val = self.values[np.argmin(np.abs(self.wl-y))]
        ndni_val = (np.log(1/x_val)-np.log(1/y_val))/(np.log(1/x_val)+np.log(1/y_val))
        return ndni_val

    #|||||||||||||||||||||| Full spectrum (VIS-SWIR) ||||||||||||||||||||||||||||
    #Normalised Difference IR index
    #NDII = (819-1649)/(819+1649)
    def ndii (self):
        x = 819
        y = 1649
        x_val = self.values[np.argmin(np.abs(self.wl-x))]
        y_val = self.values[np.argmin(np.abs(self.wl-y))]
        ndii_val = (x_val-y_val)/(x_val+y_val)
        return ndii_val

    #Moisture Stress Index
    #MSI = 1599/819
    def msi (self):
        x = 1599
        y = 819
        x_val = self.values[np.argmin(np.abs(self.wl-x))]
        y_val = self.values[np.argmin(np.abs(self.wl-y))]
        msi_val = (x_val/y_val)
        return msi_val

    #NDWI
    #(857-1241)/(857+1241)
    def ndwi (self):
        x = 857
        y = 1241
        x_val = self.values[np.argmin(np.abs(self.wl-x))]
        y_val = self.values[np.argmin(np.abs(self.wl-y))]
        ndwi_val = (x_val-y_val)/(x_val+y_val)
        return ndwi_val


class red_edge():
    '''Class to derive red edge position using a number of different methods'''

    def __init__(self, spectra):
        self.wl = spectra[:,0]
        self.values = spectra[:,1]
        self.range = (np.min(self.wl),np.max(self.wl))
        '''Again, the method that initialises 
this class uses the range of the sensor to check to see if it falls within the red-edge reigion. If so, it will derive the red edge using the differnet methods and save these as seprate hdf5 datasets in the appropriate group''' if self.range[0] <= 670 and self.range[1] >=750: self.redge_vals = np.column_stack((self.redge_linear(), self.redge_lagrange(), self.redge_linear_extrapolation())) print (self.redge_vals) print (self.redge_linear,self.redge_lagrange,self.redge_linear_extrapolation) self.redge_hdr = str('"linear",'+ '"lagrange",'+ '"extrapolated"') else: print ('red_edge out of range') self.redge_vals = None self.redge_hdr = None ##################### METHODS ######################################### #linear- defined by clevers et al 1994: def redge_linear(self): r670 = self.values[np.argmin(np.abs(self.wl-670))] r780 = self.values[np.argmin(np.abs(self.wl-780))] r700 = self.values[np.argmin(np.abs(self.wl-700))] r740 = self.values[np.argmin(np.abs(self.wl-740))] r_edge = (r670+r780)/2 lin_rep =700+40*((r_edge-r700)/(r740-r700)) print ('REDGE_LINEAR',lin_rep) return lin_rep #Lagrangian method, after Dawson & Curran 1998 def redge_lagrange(self): #select the red edge region of the first derviative and associate this #with wavelength x = 680 y = 730 first_diff = np.diff(self.values, 1) spec_in = np.column_stack((self.wl[1:], first_diff)) l680 = np.argmin(np.abs(spec_in[:,0]-x)) r680 = spec_in[l680,0] l730 = np.argmin(np.abs(spec_in[:,0]-y)) r730 = spec_in[l730,0] redge_region_sel = np.where(np.logical_and(spec_in[:,0]>r680-1, spec_in[:,0]<r730+1)) redge_region = spec_in[redge_region_sel] #find the maximum first derivative, return index dif_max = np.argmax(redge_region[:,1], axis=0) #find band with the max derivative -1, return index dif_max_less = (np.argmax(redge_region[:,1], axis=0))-1 #find band with the max derivative +1, return index dif_max_more = (np.argmax(redge_region[:,1], axis=0))+1 if dif_max_more >= redge_region.shape[0]: dif_max_more = redge_region.shape[0]-1 #use these indeces to slice the array rmax = redge_region[dif_max] rmax_less =redge_region[dif_max_less] rmax_more =redge_region[dif_max_more] #lagrangian interpolation with three points #this has been expanded to make the syntax easier a = rmax_less[1]/(rmax_less[0]-rmax[0])*(rmax_less[0]-rmax_more[0]) b = rmax[1]/(rmax[0]-rmax_less[0])*(rmax[0]-rmax_more[0]) c = rmax_more[1]/(rmax_more[0]-rmax_less[0])*(rmax_more[0]-rmax[0]) d = a*(rmax[0]+rmax_more[0]) e = b*(rmax_less[0]+rmax_more[0]) f = c*(rmax_less[0]+rmax[0]) lg_rep = (d+e+f)/(2*(a+b+c)) print ('Lagrangian', lg_rep) return lg_rep #Linear extrapolation- after Cho & Skidmore 2006, Cho et al 2007 def redge_linear_extrapolation(self): diff = np.diff(self.values) d680 = diff[np.argmin(np.abs(self.wl-680+1))] d694 = diff[np.argmin(np.abs(self.wl-694+1))] d724 = diff[np.argmin(np.abs(self.wl-724+1))] d760 = diff[np.argmin(np.abs(self.wl-760+1))] red_slope = ((d694-d680)/(694-680)) ir_slope = ((d760-d724)/(760-724)) red_inter = d680-(red_slope*680) ir_inter = d724-(ir_slope*724) wl = (ir_inter-red_inter)/(ir_slope-red_slope) print ('^!!!!!!!!! Linear:',wl) return np.abs(wl) class fluorescence(): '''this class is inteded to look for evidence of photosynthetic flourescence currently this is limited to simple reflectance indices. 
This should be expanded to take in other more complex methods to invesitgae fluorescence''' def __init__(self, spectra): self.wl = spectra[:,0] self.values = spectra[:,1] self.range = (np.min(self.wl),np.max(self.wl)) print ('call to fluor') '''The init method checks the range to establish if it overlaps with region of chlorophyll flourescence. If so it will will perform the analysis methods and output to hdf5''' def wl_selector(self, x): '''this method finds the index of the wavelength closest to that specified for reflectance''' value = self.values[np.argmin(np.abs(self.wl-x))] return value def d_wl_selector(self, x): '''this method finds the index of the wavelength closest to that specified for the first derivative''' diff = np.diff(self.values) value = diff[np.argmin(np.abs(self.wl-x))+1] return value def wl_max_d(self): '''method to extract wavelength of the maxima of the first derivative and return this''' start = np.argmin(np.abs(self.wl-650)) end = np.argmin(np.abs(self.wl-760)) diff = np.diff(self.values[start:end]) maxdiff = np.argmax(diff) maxdiffwl = self.wl[maxdiff+start+1] return maxdiffwl, diff[maxdiff] def simple_ratios(self): ''' This method runs flourescence indices ratios and returns them as a stacked numpy array''' #r680/r630 r680r630 = self.wl_selector(680)/self.wl_selector(630) print (r680r630) #r685/r630 r685r630 = self.wl_selector(685)/self.wl_selector(630) print (r685r630) #r685/r655 r685r655 = self.wl_selector(685)/self.wl_selector(655) print (r685r655) #r687/r630 r687r630 = self.wl_selector(687)/self.wl_selector(630) print (r687r630) #r690/r630 r690r630 = self.wl_selector(690)/self.wl_selector(630) print (r690r630) #r750/r800 r750r800 = self.wl_selector(750)/self.wl_selector(800) print (r750r800) #sq(r685)/(r675-r690) sqr685 = np.square(self.wl_selector(685))/(self.wl_selector(675)-self.wl_selector(690)) print (sqr685) #(r675-r690)/sq(r683) Zarco-Tejada 2000 r675r690divsq683 = (self.wl_selector(675)-self.wl_selector(690))/np.square(self.wl_selector(683)) print (r675r690divsq683) #d705/d722 d705d722 = self.d_wl_selector(705)/self.d_wl_selector(722) print (d705d722) #d730/d706 d730d706 = self.d_wl_selector(730)/self.d_wl_selector(706) print (d730d706) #(d688-d710)/sq(d697) d686d710sq697 = (self.d_wl_selector(688)-self.d_wl_selector(710))\ /np.square(self.d_wl_selector(697)) print (d686d710sq697) #wl at max d / d720 maxdd720 = self.wl_max_d()[1]/self.d_wl_selector(720) print (maxdd720) #wl at max d / d703 maxdd703 = self.wl_max_d()[1]/self.d_wl_selector(703) print (maxdd703) #wl at max d / d(max d+12) print (self.wl_max_d()[0]) maxd12 = self.wl_max_d()[1]/self.d_wl_selector(self.wl_max_d()[0]+12) print (maxd12) combined = np.vstack((r680r630, r685r630, r685r655, r687r630, r690r630, r750r800, sqr685, r675r690divsq683, d705d722, d730d706, d686d710sq697, maxdd720, maxdd703, maxd12)) fluo_hdr = str('"r680r630",'+ '"r685r630",'+ '"r685r655",'+ '"r687r630",'+ '"r690r630",'+ '"r750r800",'+ '"sqr685",'+ '"r675r690divsq683",'+ '"d705d722",'+ '"d730d706",'+ '"d686d710sq697",'+ '"maxdd720",'+ '"maxdd703",'+ '"maxd12"') return combined, fluo_hdr def dual_peak(self): '''This fuction loogs for a dual peak in the red-edge region. If it's there it measures the depth of the feature between the two peaks. 
UNTESTED''' start = self.wl_selector(640) end = self.wl_selector(740) d1_region = np.diff(self.values[start:end]) #d2_region = np.diff(self.values[start:end], n=2) peak_finder = find_peaks_cwt(d1_region, np.arange(3,10)) peak_wl = wavelengths[peak_finder] fluor_peaks = [] for peak in peak_finder: if peak_wl[peak] == self.wl[self.wl_selector(668)]: print ('found flourescence peak at 668nm') fluor_peaks.append(peak) elif peak_wl[peak] == self.wl[self.wl_selector(735)]: print ('found flourescence peak at 735nm') fluor_peaks.append[peak] else: print ('unknown peak') '''if len(fluor_peaks) == 2: something = 'something''' class load_asd(): def __init__(self, indir, output_dir): data_list = os.listdir(indir) print (data_list) #output_dir = os.path.join(indir,'output') if not os.path.exists(output_dir): os.mkdir(output_dirx) for directory in data_list: parent = os.path.join(indir, directory) spectra_dir = os.path.join(parent, 'raw_spectra') reading_info_dir = os.path.join(parent, 'reading_info') sensor_name = 'ASD FieldSpec Pro' sensor_type = 'SPR' sensor_units = 'nm' sensor_range = [350,2500] os.chdir(reading_info_dir) reading_info_file = open('reading_atributes.txt','rb') reading_info = csv.DictReader(reading_info_file) reading_info_array = np.empty(12) readings_list = [row for row in reading_info] for reading in readings_list[:]: reading_filename = str(reading['reading_id']+'.txt') reading_info_line = np.column_stack((reading['reading_id'], reading['dartField'], reading['transect'], reading['transectPosition'], reading['reading_type'], reading['reading_coord_osgb_x'], reading['reading_coord_osgb_y'], reading['dateOfAcquisition'], reading['timeOfAcquisition'], reading['instrument_number'], reading['dark_current'], reading['white_ref'])) #print reading_info_line if reading['reading_type']== 'REF': reading_info_array = np.vstack((reading_info_array,reading_info_line)) #print reading_info_array print ('*********** Loading File', reading_filename, '***********') os.chdir(spectra_dir) spec = np.genfromtxt(reading_filename, delimiter=', ', skiprows=30) spec = np.column_stack((spec[:,0],spec[:,1]*100)) nir_start = 0 nir_end = 990 nir_weight = 3.5 nir_k = 4.9 nir_s =45 swir1_start = 1080 swir1_end = 1438 swir1_weight = 8.5 swir1_k = 3.5 swir1_s = 35 swir2_start = 1622 swir2_end = 2149 swir2_weight = 1.2 swir2_s = 92 swir2_k = 2.8 #smoothing(perc_out, block_start, block_end, kparam, weight, sparam) nir_smoothed = smoothing(spec, nir_start, nir_end, nir_k, nir_weight, nir_s) swir1_smoothed = smoothing(spec, swir1_start, swir1_end, swir1_k, swir1_weight, swir1_s) swir2_smoothed = smoothing(spec, swir2_start, swir2_end, swir2_k, swir2_weight, swir2_s) print ('Smoothed array shape', nir_smoothed.shape,swir1_smoothed.shape,swir2_smoothed.shape) nir_swir_gap = interpolate_gaps(nir_smoothed,swir1_smoothed) swir2_gap = interpolate_gaps(swir1_smoothed,swir2_smoothed) spec_smoothed = np.vstack((nir_smoothed, nir_swir_gap, swir1_smoothed, swir2_gap, swir2_smoothed)) print ('Spec SHAPE:', spec.shape) survey_dir = os.path.join(output_dir, directory) if not os.path.exists(survey_dir): os.mkdir(survey_dir) os.chdir(survey_dir) try: abs470 = absorption_feature(spec_smoothed,400,518,484) print (abs470.abs_feature()[0]) abs470_ftdef = abs470.abs_feature()[0] print (abs470_ftdef) abs470_crem = abs470.abs_feature()[2] if not abs470_ftdef == None: np.savetxt(reading_filename[0:-4]+'_abs470_ftdef.txt', abs470_ftdef, header=abs470.abs_feature()[1], delimiter=',') np.savetxt(reading_filename[0:-4]+'_abs470_crem.txt', 
abs470_crem, delimiter=',') except: pass try: abs670 = absorption_feature(spec_smoothed,548,800,670) abs670_ftdef = abs670.abs_feature()[0] abs670_crem = abs670.abs_feature()[2] if not abs670_ftdef == None: np.savetxt(reading_filename[0:-4]+'_abs670_ftdef.txt', abs670_ftdef, header=abs670.abs_feature()[1], delimiter=',') np.savetxt(reading_filename[0:-4]+'_abs670_crem.txt', abs670_crem, delimiter=',') except: pass try: abs970 = absorption_feature(spec_smoothed,880,1115,970) abs970_ftdef = abs970.abs_feature()[0] abs970_crem = abs970.abs_feature()[2] if not abs970_ftdef == None: np.savetxt(reading_filename[0:-4]+'_abs970_ftdef.txt', abs970_ftdef, header=abs970.abs_feature()[1], delimiter=',') np.savetxt(reading_filename[0:-4]+'_abs970_crem.txt', abs970_crem, delimiter=',') except: pass try: abs1200 = absorption_feature(spec_smoothed,1080,1300,1190) abs1200_ftdef = abs1200.abs_feature()[0] abs1200_crem = abs1200.abs_feature()[2] if not abs1200_ftdef == None: np.savetxt(reading_filename[0:-4]+'_abs1200_ftdef.txt', abs1200_ftdef, header=abs1200.abs_feature()[1], delimiter=',') np.savetxt(reading_filename[0:-4]+'_abs1200_crem.txt', abs1200_crem, delimiter=',') except: pass try: abs1730 = absorption_feature(spec_smoothed,1630,1790,1708) abs1730_ftdef = abs1730.abs_feature()[0] abs1730_crem = abs1730.abs_feature()[2] if not abs1730_ftdef == None: np.savetxt(reading_filename[0:-4]+'_abs1730_ftdef.txt', abs1730_ftdef, header=abs1730.abs_feature()[1], delimiter=',') np.savetxt(reading_filename[0:-4]+'_abs1730_crem.txt', abs1730_crem, delimiter=',') except: pass print (spec_smoothed.shape) try: abs2100 = absorption_feature(spec_smoothed,2001,2196,2188) abs2100_ftdef = abs2100.abs_feature()[0] abs2100_crem = abs2100.abs_feature()[2] if not abs2100_ftdef == None: np.savetxt(reading_filename[0:-4]+'_abs2100_ftdef.txt', abs2100_ftdet, header=abs2100.abs_feature()[1], delimiter=',') np.savetxt(reading_filename[0:-4]+'_abs2100_crem.txt', abs2100_crem, delimiter=',') except: pass veg_indices = Indices(spec_smoothed) indices = np.column_stack((veg_indices.visnir()[0], veg_indices.nir_swir()[0], veg_indices.swir()[0])) print (veg_indices.visnir()[1],veg_indices.nir_swir()[1],veg_indices.swir()[1]) hdr = str(veg_indices.visnir()[1]+','+veg_indices.nir_swir()[1]+','+veg_indices.swir()[1]) np.savetxt(reading_filename[0:-4]+'_indices.txt', indices, header=hdr, delimiter=',') mtbvi = veg_indices.multi_tbvi() np.savetxt(reading_filename[0:-4]+'_mtbvi.txt', mtbvi, delimiter=',') redge = red_edge(spec_smoothed) print (redge.redge_vals.shape) print (redge.redge_vals) np.savetxt(reading_filename[0:-4]+'_redge.txt', redge.redge_vals, delimiter=',') fluo = fluorescence(spec_smoothed) np.savetxt(reading_filename[0:-4]+'_flou.txt', np.transpose(fluo.simple_ratios()[0]), header = fluo.simple_ratios()[1], delimiter=',') np.savetxt(reading_filename[0:-4]+'_spec.txt', spec_smoothed, delimiter=',') class load_image(): def __init__(self, wavlengths_dir,image_dir,out_dir): os.chdir(wavelengths_dir) wavelengths = np.genfromtxt('wavelengths.txt') print ('wavelengths array', wavelengths) os.chdir(image_dir) image_list = os.listdir(image_dir) for image in image_list: import_image = self.get_image(image) image_name = image[:-4] print ('IMAGE NAME:', image_name) row = 1 img_array = import_image[0] print ('Image_array', img_array) projection = import_image[1] print ('Projection',projection) x_size = import_image[2] print ('Xdim',x_size) y_size = import_image[3] print ('Ydim', y_size) spatial = import_image[4] print (spatial) x_top_left 
= spatial[0] ew_pix_size = spatial[1] rotation_ew = spatial[2] y_top_left = spatial[3] rotation_y = spatial[4] ns_pixel_size = spatial[5] print ('Spatial', x_top_left,ew_pix_size,rotation_ew,y_top_left,rotation_y,ns_pixel_size) print ('IMAGE ARRAY SHAPE',img_array.shape) img_dims = img_array.shape print (img_dims[0],'/',img_dims[1]) #indices+29 indices_out = np.zeros((img_dims[0],img_dims[1],29), dtype=np.float32) #print indices_out #redge=3 redge_out = np.zeros((img_dims[0],img_dims[1]),dtype=np.float32) #fluo=14 fluo_out=np.zeros((img_dims[0],img_dims[1],14), dtype=np.float32) print ('fluo out', fluo_out.shape) ft470_out = np.zeros((img_dims[0],img_dims[1],13), dtype=np.float32) ft670_out = np.zeros((img_dims[0],img_dims[1],13), dtype=np.float32) ft970_out = np.zeros((img_dims[0],img_dims[1],13), dtype=np.float32) x470 = np.argmin(np.abs(wavelengths-400)) y470 = np.argmin(np.abs(wavelengths-518)) len470 = y470-x470 cr470_out = np.zeros((img_dims[0],img_dims[1],len470), dtype=np.float32) x670 = np.argmin(np.abs(wavelengths-548)) y670 = np.argmin(np.abs(wavelengths-800)) len670 = y670-x670 cr670_out = np.zeros((img_dims[0],img_dims[1],len670), dtype=np.float32) print (cr670_out) x970 = np.argmin(np.abs(wavelengths-880)) y970 = np.argmin(np.abs(wavelengths-1000)) len970 = y970-x970 cr970_out = np.zeros((img_dims[0],img_dims[1],len970), dtype=np.float32) #print cr970_out print (wavelengths) row = 0 print ('***', row, img_dims[0]) for i in range(0,img_dims[0]): print (i) column = 0 #print 'COL',column for j in range(0,img_dims[1]): print ('COLUMN',column) #print 'Pixel',pixel name = '%s_pix-%s_%s' % (image_name,row,column) print ('NAME',name) pixel = img_array[row,column,:] #smoothed = savgol_filter(pixel,5,2) #spec_smoothed = np.column_stack((wavelengths,smoothed)) spec_smoothed = np.column_stack((wavelengths,pixel)) print (spec_smoothed) veg_indices = Indices(spec_smoothed) indices = veg_indices.visnir()[0] print ('(*&)(*)(*&&^)^)^)*&^)*^)*&', indices) indices_out[row,column,:]=indices fluo = fluorescence(spec_smoothed) fluo_out[row,column,:]=np.transpose(fluo.simple_ratios()[0]) redge = red_edge(spec_smoothed) print (redge.redge_vals.shape) redge_out[row,column]= redge.redge_vals[0,2] try: abs470 = absorption_feature(spec_smoothed,400,518,484) abs470_ftdef = abs470.abs_feature()[0] abs470_crem = abs470.abs_feature()[2] abs470_crem = np.column_stack((abs470_crem[:,0],abs470_crem[:,4])) print ('!*!*!*!*!&!*!*', abs470_crem) crem470_fill = self.crem_fill(x470,y470,abs470_crem,wavelengths) ft470_out[row,column,:]=abs470_ftdef cr470_out[row,column,:]=crem470_fill except: pass try: abs670 = absorption_feature(spec_smoothed,548,800,670) abs670_ftdef = abs670.abs_feature()[0] abs670_crem = abs670.abs_feature()[2] abs670_crem = np.column_stack((abs670_crem[:,0],abs670_crem[:,4])) ft670_out[row,column,:]=abs670_ftdef crem670_fill = self.crem_fill(x670,y670,abs670_crem,wavelengths) cr670_out[row,column,:]=crem670_fill except: pass try: abs970 = absorption_feature(spec_smoothed,880,1000,970) abs970_ftdef = abs970.abs_feature()[0] abs970_crem = abs970.abs_feature()[2] abs970_crem = np.column_stack((abs970_crem[:,0],abs970_crem[:,4])) crem970_fill = self.crem_fill(x970,y970,abs970_crem,wavelengths) ft970_out[row,column,:]=abs970_ftdef cr970_out[row,column,:]=crem970_fill except: pass column = column+1 print (pixel.shape) row = row+1 self.writeimage(out_dir,image+'_indices.tif',indices_out,spatial) self.writeimage(out_dir,image+'_fluo.tif',fluo_out,spatial) 
self.writeimage(out_dir,image+'_redge.tif',redge_out,spatial) self.writeimage(out_dir,image+'_ft470.tif',ft470_out,spatial) self.writeimage(out_dir,image+'_cr470.tif',cr470_out,spatial) self.writeimage(out_dir,image+'_ft670.tif',ft670_out,spatial) self.writeimage(out_dir,image+'_cr670.tif',cr670_out,spatial) self.writeimage(out_dir,image+'_ft970.tif',ft970_out,spatial) self.writeimage(out_dir,image+'_cr970.tif',cr970_out,spatial) def crem_fill(self,xwl,ywl,bna,wavelengths): bna_out=np.zeros((ywl-xwl)) bna_wvl = bna[:,0] bna_refl= bna[:,1] full_wl = wavelengths[xwl:ywl] index = np.argmin(np.abs(wavelengths-bna_wvl[0])) bna_out[index:]=bna_refl return bna_out def get_image(self, image): print ('call to get_image') # open the dataset dataset = gdal.Open(image, GA_ReadOnly) print ('Dataset',dataset) # if there's nothign there print error if dataset is None: print ('BORK: Could not load file: %s' %(image)) # otherwise do stuff else: #get the format driver = dataset.GetDriver().ShortName #get the x dimension xsize = dataset.RasterXSize #get the y dimension ysize = dataset.RasterYSize #get the projection proj = dataset.GetProjection() #get the number of bands bands = dataset.RasterCount #get the geotransform Returns a list object. This is standard GDAL ordering: #spatial[0] = top left x #spatial[1] = w-e pixel size #spatial[2] = rotation (should be 0) #spatial[3] = top left y #spatial[4] = rotation (should be 0) #spatial[5] = n-s pixel size spatial = dataset.GetGeoTransform() #print some stuff to console to show we're paying attention print ('Found raster in %s format. Raster has %s bands' %(driver,bands)) print ('Projected as %s' %(proj)) print ('Dimensions: %s x %s' %(xsize,ysize)) #instantiate a counter count = 1 #OK. This is the bit that catually loads the bands in in a while loop # Loop through bands as long as count is equal to or less than total while (count<=bands): #show that your computer's fans are whining for a reason print ('Loading band: %s of %s' %(count,bands)) #get the band band = dataset.GetRasterBand(count) # load this as a numpy array data_array = band.ReadAsArray() '''data_array = ma.masked_where(data_array == 0, data_array) data_array = data_array.filled(-999)''' data_array = data_array.astype(np.float32, copy=False) # close the band object band = None #this bit stacks the bands into a combined numpy array #if it's the first band copy the array directly to the combined one if count == 1: stacked = data_array #else combine these else: stacked = np.dstack((stacked,data_array)) #stacked = stacked.filled(-999) #just to check it's working #print stacked.shape # increment the counter count = count+1 #stacked = stacked.astype(np.float32, copy=False) return stacked,proj,xsize,ysize,spatial def writeimage(self, outpath, outname, image, spatial): data_out = image print ('ROWS,COLS',image.shape) print ('Call to write image') os.chdir(outpath) print ('OUTPATH',outpath) print ('OUTNAME',outname) #load the driver for the format of choice driver = gdal.GetDriverByName("Gtiff") #create an empty output file #get the number of bands we'll need try: bands = image.shape[2] except: bands=1 print ('BANDS OUT', bands) #file name, x columns, y columns, bands, dtype out = driver.Create(outname, image.shape[1], image.shape[0], bands, gdal.GDT_Float32) #define the location using coords of top-left corner # minimum x, e-w pixel size, rotation, maximum y, n-s pixel size, rotation out.SetGeoTransform(spatial) srs = osr.SpatialReference() #get the coodrinate system using the ESPG code 
        srs.SetWellKnownGeogCS("EPSG:27700")
        #set projection of output file
        out.SetProjection(srs.ExportToWkt())
        band = 1
        if bands == 1:
            out.GetRasterBand(band).WriteArray(data_out)
            #set the no data value
            out.GetRasterBand(band).SetNoDataValue(-999)
            #append the statistics to dataset
            out.GetRasterBand(band).GetStatistics(0,1)
            print ('Saving %s/%s' % (band,bands))
        else:
            while (band<=bands):
                data = data_out[:,:,band-1]
                #write values to empty array
                out.GetRasterBand(band).WriteArray( data )
                #set the no data value
                out.GetRasterBand(band).SetNoDataValue(-999)
                #append the statistics to dataset
                out.GetRasterBand(band).GetStatistics(0,1)
                print ('Saving %s/%s' % (band,bands))
                band = band+1
        out = None
        print ('Processing of %s complete' % (outname))
        return outname


if __name__ == "__main__":
    #dir_path = os.path.dirname(os.path.abspath('...'))
    #data_root = os.path.join(dir_path, 'data')
    data_root = '/home/dav/data/temp/test/test_spec'
    for folder in os.listdir(data_root):
        input_dir = os.path.join(data_root,folder)
        print (input_dir)
        surveys_list = os.listdir(input_dir)
        print (surveys_list)
        for survey_dir in surveys_list:
            print (survey_dir)
            site_dir = os.path.join(input_dir,survey_dir)
            print (site_dir)
            image_path = os.path.join(site_dir, 'image')
            print (image_path)
            wavelengths_dir = os.path.join(site_dir, 'wavelengths')
            print (wavelengths_dir)
            out_dir = os.path.join(site_dir,'output')
            if not os.path.exists(out_dir):
                os.mkdir(out_dir)
            load_image(wavelengths_dir,image_path,out_dir)
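For reference, a minimal numpy-only sketch of the continuum-removal and shoelace-area logic that the absorption_feature / continuum_removal / area methods above implement; the wavelength grid and the Gaussian dip are synthetic, illustrative values and not part of the original script:

import numpy as np

# synthetic absorption feature: flat background with a Gaussian dip centred near 670 nm
wl = np.arange(548.0, 800.0)
refl = 0.5 - 0.3 * np.exp(-0.5 * ((wl - 670.0) / 25.0) ** 2)

# continuum: straight line joining the two shoulders (the endpoints of the slice)
continuum = np.interp(wl, [wl[0], wl[-1]], [refl[0], refl[-1]])

# continuum removal in the same orientation as the script (continuum divided by reflectance)
continuum_removed = continuum / refl

# shoelace formula for the area enclosed between the spectrum and the continuum line
poly = np.vstack((np.column_stack((wl, refl)),
                  np.column_stack((wl, continuum))[::-1]))
x, y = poly[:, 0], poly[:, 1]
area = 0.5 * np.abs(np.dot(x, np.roll(y, -1)) - np.dot(y, np.roll(x, -1)))

print(continuum_removed.max(), area)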
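And a worked example of the linear red-edge position computed by red_edge.redge_linear above (interpolation between 700 and 740 nm); the four reflectance values are made up for illustration:

# toy reflectances at the four wavelengths the linear method needs
r670, r700, r740, r780 = 0.05, 0.12, 0.35, 0.45

# reflectance halfway up the red edge, then linear interpolation between 700 and 740 nm
r_edge = (r670 + r780) / 2.0
rep = 700 + 40 * ((r_edge - r700) / (r740 - r700))
print(rep)  # ~722.6 nm for these illustrative numbers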
mit
tmhm/scikit-learn
examples/plot_kernel_approximation.py
262
8004
""" ================================================== Explicit feature map approximation for RBF kernels ================================================== An example illustrating the approximation of the feature map of an RBF kernel. .. currentmodule:: sklearn.kernel_approximation It shows how to use :class:`RBFSampler` and :class:`Nystroem` to approximate the feature map of an RBF kernel for classification with an SVM on the digits dataset. Results using a linear SVM in the original space, a linear SVM using the approximate mappings and using a kernelized SVM are compared. Timings and accuracy for varying amounts of Monte Carlo samplings (in the case of :class:`RBFSampler`, which uses random Fourier features) and different sized subsets of the training set (for :class:`Nystroem`) for the approximate mapping are shown. Please note that the dataset here is not large enough to show the benefits of kernel approximation, as the exact SVM is still reasonably fast. Sampling more dimensions clearly leads to better classification results, but comes at a greater cost. This means there is a tradeoff between runtime and accuracy, given by the parameter n_components. Note that solving the Linear SVM and also the approximate kernel SVM could be greatly accelerated by using stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`. This is not easily possible for the case of the kernelized SVM. The second plot visualized the decision surfaces of the RBF kernel SVM and the linear SVM with approximate kernel maps. The plot shows decision surfaces of the classifiers projected onto the first two principal components of the data. This visualization should be taken with a grain of salt since it is just an interesting slice through the decision surface in 64 dimensions. In particular note that a datapoint (represented as a dot) does not necessarily be classified into the region it is lying in, since it will not lie on the plane that the first two principal components span. The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail in :ref:`kernel_approximation`. """ print(__doc__) # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org> # Andreas Mueller <amueller@ais.uni-bonn.de> # License: BSD 3 clause # Standard scientific Python imports import matplotlib.pyplot as plt import numpy as np from time import time # Import datasets, classifiers and performance metrics from sklearn import datasets, svm, pipeline from sklearn.kernel_approximation import (RBFSampler, Nystroem) from sklearn.decomposition import PCA # The digits dataset digits = datasets.load_digits(n_class=9) # To apply an classifier on this data, we need to flatten the image, to # turn the data in a (samples, feature) matrix: n_samples = len(digits.data) data = digits.data / 16. 
data -= data.mean(axis=0) # We learn the digits on the first half of the digits data_train, targets_train = data[:n_samples / 2], digits.target[:n_samples / 2] # Now predict the value of the digit on the second half: data_test, targets_test = data[n_samples / 2:], digits.target[n_samples / 2:] #data_test = scaler.transform(data_test) # Create a classifier: a support vector classifier kernel_svm = svm.SVC(gamma=.2) linear_svm = svm.LinearSVC() # create pipeline from kernel approximation # and linear svm feature_map_fourier = RBFSampler(gamma=.2, random_state=1) feature_map_nystroem = Nystroem(gamma=.2, random_state=1) fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier), ("svm", svm.LinearSVC())]) nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem), ("svm", svm.LinearSVC())]) # fit and predict using linear and kernel svm: kernel_svm_time = time() kernel_svm.fit(data_train, targets_train) kernel_svm_score = kernel_svm.score(data_test, targets_test) kernel_svm_time = time() - kernel_svm_time linear_svm_time = time() linear_svm.fit(data_train, targets_train) linear_svm_score = linear_svm.score(data_test, targets_test) linear_svm_time = time() - linear_svm_time sample_sizes = 30 * np.arange(1, 10) fourier_scores = [] nystroem_scores = [] fourier_times = [] nystroem_times = [] for D in sample_sizes: fourier_approx_svm.set_params(feature_map__n_components=D) nystroem_approx_svm.set_params(feature_map__n_components=D) start = time() nystroem_approx_svm.fit(data_train, targets_train) nystroem_times.append(time() - start) start = time() fourier_approx_svm.fit(data_train, targets_train) fourier_times.append(time() - start) fourier_score = fourier_approx_svm.score(data_test, targets_test) nystroem_score = nystroem_approx_svm.score(data_test, targets_test) nystroem_scores.append(nystroem_score) fourier_scores.append(fourier_score) # plot the results: plt.figure(figsize=(8, 8)) accuracy = plt.subplot(211) # second y axis for timeings timescale = plt.subplot(212) accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel") timescale.plot(sample_sizes, nystroem_times, '--', label='Nystroem approx. kernel') accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel") timescale.plot(sample_sizes, fourier_times, '--', label='Fourier approx. 
kernel') # horizontal lines for exact rbf and linear kernels: accuracy.plot([sample_sizes[0], sample_sizes[-1]], [linear_svm_score, linear_svm_score], label="linear svm") timescale.plot([sample_sizes[0], sample_sizes[-1]], [linear_svm_time, linear_svm_time], '--', label='linear svm') accuracy.plot([sample_sizes[0], sample_sizes[-1]], [kernel_svm_score, kernel_svm_score], label="rbf svm") timescale.plot([sample_sizes[0], sample_sizes[-1]], [kernel_svm_time, kernel_svm_time], '--', label='rbf svm') # vertical line for dataset dimensionality = 64 accuracy.plot([64, 64], [0.7, 1], label="n_features") # legends and labels accuracy.set_title("Classification accuracy") timescale.set_title("Training times") accuracy.set_xlim(sample_sizes[0], sample_sizes[-1]) accuracy.set_xticks(()) accuracy.set_ylim(np.min(fourier_scores), 1) timescale.set_xlabel("Sampling steps = transformed feature dimension") accuracy.set_ylabel("Classification accuracy") timescale.set_ylabel("Training time in seconds") accuracy.legend(loc='best') timescale.legend(loc='best') # visualize the decision surface, projected down to the first # two principal components of the dataset pca = PCA(n_components=8).fit(data_train) X = pca.transform(data_train) # Gemerate grid along first two principal components multiples = np.arange(-2, 2, 0.1) # steps along first component first = multiples[:, np.newaxis] * pca.components_[0, :] # steps along second component second = multiples[:, np.newaxis] * pca.components_[1, :] # combine grid = first[np.newaxis, :, :] + second[:, np.newaxis, :] flat_grid = grid.reshape(-1, data.shape[1]) # title for the plots titles = ['SVC with rbf kernel', 'SVC (linear kernel)\n with Fourier rbf feature map\n' 'n_components=100', 'SVC (linear kernel)\n with Nystroem rbf feature map\n' 'n_components=100'] plt.tight_layout() plt.figure(figsize=(12, 5)) # predict and plot for i, clf in enumerate((kernel_svm, nystroem_approx_svm, fourier_approx_svm)): # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. plt.subplot(1, 3, i + 1) Z = clf.predict(flat_grid) # Put the result into a color plot Z = Z.reshape(grid.shape[:-1]) plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired) plt.axis('off') # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired) plt.title(titles[i]) plt.tight_layout() plt.show()
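A compact sketch of the same idea on the digits data, assuming a reasonably recent scikit-learn; load_digits(return_X_y=True) and make_pipeline are conveniences used by this sketch, not by the example above:

from sklearn.datasets import load_digits
from sklearn.kernel_approximation import Nystroem
from sklearn.pipeline import make_pipeline
from sklearn.svm import LinearSVC

X, y = load_digits(return_X_y=True)
# approximate the RBF feature map with 100 landmark points, then fit a fast linear SVM
clf = make_pipeline(Nystroem(gamma=0.2, n_components=100, random_state=0),
                    LinearSVC())
clf.fit(X[:900], y[:900])
print(clf.score(X[900:], y[900:]))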
bsd-3-clause
ahaberlie/MetPy
examples/plots/Hodograph_Inset.py
8
2367
# Copyright (c) 2016 MetPy Developers. # Distributed under the terms of the BSD 3-Clause License. # SPDX-License-Identifier: BSD-3-Clause """ Hodograph Inset =============== Layout a Skew-T plot with a hodograph inset into the plot. """ import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1.inset_locator import inset_axes import pandas as pd import metpy.calc as mpcalc from metpy.cbook import get_test_data from metpy.plots import add_metpy_logo, Hodograph, SkewT from metpy.units import units ########################################### # Upper air data can be obtained using the siphon package, but for this example we will use # some of MetPy's sample data. col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed'] df = pd.read_fwf(get_test_data('may4_sounding.txt', as_file_obj=False), skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names) # Drop any rows with all NaN values for T, Td, winds df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed' ), how='all').reset_index(drop=True) ########################################### # We will pull the data out of the example dataset into individual variables and # assign units. hght = df['height'].values * units.hPa p = df['pressure'].values * units.hPa T = df['temperature'].values * units.degC Td = df['dewpoint'].values * units.degC wind_speed = df['speed'].values * units.knots wind_dir = df['direction'].values * units.degrees u, v = mpcalc.wind_components(wind_speed, wind_dir) ########################################### # Create a new figure. The dimensions here give a good aspect ratio fig = plt.figure(figsize=(9, 9)) add_metpy_logo(fig, 115, 100) # Grid for plots skew = SkewT(fig, rotation=45) # Plot the data using normal plotting functions, in this case using # log scaling in Y, as dictated by the typical meteorological plot skew.plot(p, T, 'r') skew.plot(p, Td, 'g') skew.plot_barbs(p, u, v) skew.ax.set_ylim(1000, 100) # Add the relevant special lines skew.plot_dry_adiabats() skew.plot_moist_adiabats() skew.plot_mixing_lines() # Good bounds for aspect ratio skew.ax.set_xlim(-50, 60) # Create a hodograph ax_hod = inset_axes(skew.ax, '40%', '40%', loc=1) h = Hodograph(ax_hod, component_range=80.) h.add_grid(increment=20) h.plot_colormapped(u, v, hght) # Show the plot plt.show()
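mpcalc.wind_components follows the standard meteorological convention (direction is where the wind blows from); a quick numpy check with illustrative numbers:

import numpy as np

# wind blowing FROM 240 degrees at 30 knots (illustrative values)
speed, direction = 30.0, 240.0
u = -speed * np.sin(np.deg2rad(direction))
v = -speed * np.cos(np.deg2rad(direction))
print(u, v)  # roughly +26 and +15: a south-westerly gives positive u and v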
bsd-3-clause
shahankhatch/scikit-learn
examples/cluster/plot_agglomerative_clustering.py
343
2931
""" Agglomerative clustering with and without structure =================================================== This example shows the effect of imposing a connectivity graph to capture local structure in the data. The graph is simply the graph of 20 nearest neighbors. Two consequences of imposing a connectivity can be seen. First clustering with a connectivity matrix is much faster. Second, when using a connectivity matrix, average and complete linkage are unstable and tend to create a few clusters that grow very quickly. Indeed, average and complete linkage fight this percolation behavior by considering all the distances between two clusters when merging them. The connectivity graph breaks this mechanism. This effect is more pronounced for very sparse graphs (try decreasing the number of neighbors in kneighbors_graph) and with complete linkage. In particular, having a very small number of neighbors in the graph, imposes a geometry that is close to that of single linkage, which is well known to have this percolation instability. """ # Authors: Gael Varoquaux, Nelle Varoquaux # License: BSD 3 clause import time import matplotlib.pyplot as plt import numpy as np from sklearn.cluster import AgglomerativeClustering from sklearn.neighbors import kneighbors_graph # Generate sample data n_samples = 1500 np.random.seed(0) t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples)) x = t * np.cos(t) y = t * np.sin(t) X = np.concatenate((x, y)) X += .7 * np.random.randn(2, n_samples) X = X.T # Create a graph capturing local connectivity. Larger number of neighbors # will give more homogeneous clusters to the cost of computation # time. A very large number of neighbors gives more evenly distributed # cluster sizes, but may not impose the local manifold structure of # the data knn_graph = kneighbors_graph(X, 30, include_self=False) for connectivity in (None, knn_graph): for n_clusters in (30, 3): plt.figure(figsize=(10, 4)) for index, linkage in enumerate(('average', 'complete', 'ward')): plt.subplot(1, 3, index + 1) model = AgglomerativeClustering(linkage=linkage, connectivity=connectivity, n_clusters=n_clusters) t0 = time.time() model.fit(X) elapsed_time = time.time() - t0 plt.scatter(X[:, 0], X[:, 1], c=model.labels_, cmap=plt.cm.spectral) plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time), fontdict=dict(verticalalignment='top')) plt.axis('equal') plt.axis('off') plt.subplots_adjust(bottom=0, top=.89, wspace=0, left=0, right=1) plt.suptitle('n_cluster=%i, connectivity=%r' % (n_clusters, connectivity is not None), size=17) plt.show()
bsd-3-clause
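Aside: a minimal usage sketch of the connectivity-constrained clustering demonstrated in the example above (toy random data; the neighbour count and cluster count are chosen arbitrarily):

import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph

rng = np.random.RandomState(0)
X = rng.rand(200, 2)
# local connectivity graph: each point is linked only to its 10 nearest neighbours
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
labels = AgglomerativeClustering(n_clusters=3, linkage='ward',
                                 connectivity=connectivity).fit_predict(X)
print(np.bincount(labels))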
MartinSavc/scikit-learn
examples/decomposition/plot_pca_3d.py
354
2432
#!/usr/bin/python # -*- coding: utf-8 -*- """ ========================================================= Principal components analysis (PCA) ========================================================= These figures aid in illustrating how a point cloud can be very flat in one direction--which is where PCA comes in to choose a direction that is not flat. """ print(__doc__) # Authors: Gael Varoquaux # Jaques Grobler # Kevin Hughes # License: BSD 3 clause from sklearn.decomposition import PCA from mpl_toolkits.mplot3d import Axes3D import numpy as np import matplotlib.pyplot as plt from scipy import stats ############################################################################### # Create the data e = np.exp(1) np.random.seed(4) def pdf(x): return 0.5 * (stats.norm(scale=0.25 / e).pdf(x) + stats.norm(scale=4 / e).pdf(x)) y = np.random.normal(scale=0.5, size=(30000)) x = np.random.normal(scale=0.5, size=(30000)) z = np.random.normal(scale=0.1, size=len(x)) density = pdf(x) * pdf(y) pdf_z = pdf(5 * z) density *= pdf_z a = x + y b = 2 * y c = a - b + z norm = np.sqrt(a.var() + b.var()) a /= norm b /= norm ############################################################################### # Plot the figures def plot_figs(fig_num, elev, azim): fig = plt.figure(fig_num, figsize=(4, 3)) plt.clf() ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim) ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4) Y = np.c_[a, b, c] # Using SciPy's SVD, this would be: # _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False) pca = PCA(n_components=3) pca.fit(Y) pca_score = pca.explained_variance_ratio_ V = pca.components_ x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min() x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]] y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]] z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]] x_pca_plane.shape = (2, 2) y_pca_plane.shape = (2, 2) z_pca_plane.shape = (2, 2) ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane) ax.w_xaxis.set_ticklabels([]) ax.w_yaxis.set_ticklabels([]) ax.w_zaxis.set_ticklabels([]) elev = -40 azim = -80 plot_figs(1, elev, azim) elev = 30 azim = 20 plot_figs(2, elev, azim) plt.show()
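A short sketch of the PCA/SVD relationship the example's comment alludes to, on synthetic data (the anisotropic scaling is arbitrary):

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
Y = rng.randn(500, 3) @ np.diag([3.0, 1.0, 0.1])

pca = PCA(n_components=3).fit(Y)
_, S, Vt = np.linalg.svd(Y - Y.mean(axis=0), full_matrices=False)

# principal axes agree up to sign; explained variance is S**2 / (n - 1)
print(np.allclose(np.abs(pca.components_), np.abs(Vt)))
print(np.allclose(pca.explained_variance_, S**2 / (len(Y) - 1)))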
bsd-3-clause
jblackburne/scikit-learn
doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py
104
3139
"""Build a sentiment analysis / polarity model Sentiment analysis can be casted as a binary text classification problem, that is fitting a linear classifier on features extracted from the text of the user messages so as to guess wether the opinion of the author is positive or negative. In this examples we will use a movie review dataset. """ # Author: Olivier Grisel <olivier.grisel@ensta.org> # License: Simplified BSD import sys from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.svm import LinearSVC from sklearn.pipeline import Pipeline from sklearn.model_selection import GridSearchCV from sklearn.datasets import load_files from sklearn.model_selection import train_test_split from sklearn import metrics if __name__ == "__main__": # NOTE: we put the following in a 'if __name__ == "__main__"' protected # block to be able to use a multi-core grid search that also works under # Windows, see: http://docs.python.org/library/multiprocessing.html#windows # The multiprocessing module is used as the backend of joblib.Parallel # that is used when n_jobs != 1 in GridSearchCV # the training data folder must be passed as first argument movie_reviews_data_folder = sys.argv[1] dataset = load_files(movie_reviews_data_folder, shuffle=False) print("n_samples: %d" % len(dataset.data)) # split the dataset in training and test set: docs_train, docs_test, y_train, y_test = train_test_split( dataset.data, dataset.target, test_size=0.25, random_state=None) # TASK: Build a vectorizer / classifier pipeline that filters out tokens # that are too rare or too frequent pipeline = Pipeline([ ('vect', TfidfVectorizer(min_df=3, max_df=0.95)), ('clf', LinearSVC(C=1000)), ]) # TASK: Build a grid search to find out whether unigrams or bigrams are # more useful. # Fit the pipeline on the training set using grid search for the parameters parameters = { 'vect__ngram_range': [(1, 1), (1, 2)], } grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1) grid_search.fit(docs_train, y_train) # TASK: print the mean and std for each candidate along with the parameter # settings for all the candidates explored by grid search. n_candidates = len(grid_search.cv_results_['params']) for i in range(n_candidates): print(i, 'params - %s; mean - %0.2f; std - %0.2f' % (grid_search.cv_results_['params'][i], grid_search.cv_results_['mean_test_score'][i], grid_search.cv_results_['std_test_score'][i])) # TASK: Predict the outcome on the testing set and store it in a variable # named y_predicted y_predicted = grid_search.predict(docs_test) # Print the classification report print(metrics.classification_report(y_test, y_predicted, target_names=dataset.target_names)) # Print and plot the confusion matrix cm = metrics.confusion_matrix(y_test, y_predicted) print(cm) # import matplotlib.pyplot as plt # plt.matshow(cm) # plt.show()
bsd-3-clause
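A toy illustration of the min_df / max_df filtering used in the pipeline above (corpus and thresholds are made up):

from sklearn.feature_extraction.text import TfidfVectorizer

corpus = ["the movie was great", "the movie was dull",
          "the plot was great", "the acting was great"]
vect = TfidfVectorizer(min_df=2, max_df=0.9)
vect.fit(corpus)
# 'the'/'was' appear in every document and are dropped by max_df;
# one-off words ('dull', 'plot', 'acting') are dropped by min_df
print(sorted(vect.vocabulary_))  # ['great', 'movie']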
petosegan/scikit-learn
examples/ensemble/plot_adaboost_multiclass.py
354
4124
""" ===================================== Multi-class AdaBoosted Decision Trees ===================================== This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can improve prediction accuracy on a multi-class problem. The classification dataset is constructed by taking a ten-dimensional standard normal distribution and defining three classes separated by nested concentric ten-dimensional spheres such that roughly equal numbers of samples are in each class (quantiles of the :math:`\chi^2` distribution). The performance of the SAMME and SAMME.R [1] algorithms are compared. SAMME.R uses the probability estimates to update the additive model, while SAMME uses the classifications only. As the example illustrates, the SAMME.R algorithm typically converges faster than SAMME, achieving a lower test error with fewer boosting iterations. The error of each algorithm on the test set after each boosting iteration is shown on the left, the classification error on the test set of each tree is shown in the middle, and the boost weight of each tree is shown on the right. All trees have a weight of one in the SAMME.R algorithm and therefore are not shown. .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009. """ print(__doc__) # Author: Noel Dawe <noel.dawe@gmail.com> # # License: BSD 3 clause from sklearn.externals.six.moves import zip import matplotlib.pyplot as plt from sklearn.datasets import make_gaussian_quantiles from sklearn.ensemble import AdaBoostClassifier from sklearn.metrics import accuracy_score from sklearn.tree import DecisionTreeClassifier X, y = make_gaussian_quantiles(n_samples=13000, n_features=10, n_classes=3, random_state=1) n_split = 3000 X_train, X_test = X[:n_split], X[n_split:] y_train, y_test = y[:n_split], y[n_split:] bdt_real = AdaBoostClassifier( DecisionTreeClassifier(max_depth=2), n_estimators=600, learning_rate=1) bdt_discrete = AdaBoostClassifier( DecisionTreeClassifier(max_depth=2), n_estimators=600, learning_rate=1.5, algorithm="SAMME") bdt_real.fit(X_train, y_train) bdt_discrete.fit(X_train, y_train) real_test_errors = [] discrete_test_errors = [] for real_test_predict, discrete_train_predict in zip( bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)): real_test_errors.append( 1. - accuracy_score(real_test_predict, y_test)) discrete_test_errors.append( 1. - accuracy_score(discrete_train_predict, y_test)) n_trees_discrete = len(bdt_discrete) n_trees_real = len(bdt_real) # Boosting might terminate early, but the following arrays are always # n_estimators long. 
We crop them to the actual number of trees here: discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete] real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real] discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete] plt.figure(figsize=(15, 5)) plt.subplot(131) plt.plot(range(1, n_trees_discrete + 1), discrete_test_errors, c='black', label='SAMME') plt.plot(range(1, n_trees_real + 1), real_test_errors, c='black', linestyle='dashed', label='SAMME.R') plt.legend() plt.ylim(0.18, 0.62) plt.ylabel('Test Error') plt.xlabel('Number of Trees') plt.subplot(132) plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors, "b", label='SAMME', alpha=.5) plt.plot(range(1, n_trees_real + 1), real_estimator_errors, "r", label='SAMME.R', alpha=.5) plt.legend() plt.ylabel('Error') plt.xlabel('Number of Trees') plt.ylim((.2, max(real_estimator_errors.max(), discrete_estimator_errors.max()) * 1.2)) plt.xlim((-20, len(bdt_discrete) + 20)) plt.subplot(133) plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights, "b", label='SAMME') plt.legend() plt.ylabel('Weight') plt.xlabel('Number of Trees') plt.ylim((0, discrete_estimator_weights.max() * 1.2)) plt.xlim((-20, n_trees_discrete + 20)) # prevent overlapping y-axis labels plt.subplots_adjust(wspace=0.25) plt.show()
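A minimal sketch of the staged evaluation pattern used above, on synthetic data (dataset sizes and tree depth are arbitrary):

from sklearn.datasets import make_classification
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples=2000, n_informative=10, n_classes=3,
                           random_state=1)
clf = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2),
                         n_estimators=50).fit(X[:1500], y[:1500])
# staged_predict yields predictions after each boosting iteration
errors = [1.0 - accuracy_score(y[1500:], pred)
          for pred in clf.staged_predict(X[1500:])]
print(errors[0], errors[-1])  # test error after the first and the last iteration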
bsd-3-clause
akrherz/iem
htdocs/plotting/auto/scripts100/p153.py
1
6880
"""Highest hourly values""" from collections import OrderedDict import datetime import pandas as pd from pandas.io.sql import read_sql from matplotlib.font_manager import FontProperties from pyiem.util import get_autoplot_context, get_dbconn from pyiem.plot.use_agg import plt from pyiem.exceptions import NoDataFound PDICT = OrderedDict( [ ("max_dwpf", "Highest Dew Point Temperature"), ("min_dwpf", "Lowest Dew Point Temperature"), ("max_tmpf", "Highest Air Temperature"), ("min_tmpf", "Lowest Air Temperature"), ("max_feel", "Highest Feels Like Temperature"), ("min_feel", "Lowest Feels Like Temperature"), ("max_mslp", "Maximum Sea Level Pressure"), ("min_mslp", "Minimum Sea Level Pressure"), ("max_alti", "Maximum Pressure Altimeter"), ("min_alti", "Minimum Pressure Altimeter"), ] ) UNITS = { "max_dwpf": "F", "max_tmpf": "F", "min_dwpf": "F", "min_tmpf": "F", "min_feel": "F", "max_feel": "F", "max_mslp": "mb", "min_mslp": "mb", "max_alti": "in", "min_alti": "in", } MDICT = OrderedDict( [ ("all", "No Month Limit"), ("spring", "Spring (MAM)"), ("fall", "Fall (SON)"), ("winter", "Winter (DJF)"), ("summer", "Summer (JJA)"), ("gs", "1 May to 30 Sep"), ("jan", "January"), ("feb", "February"), ("mar", "March"), ("apr", "April"), ("may", "May"), ("jun", "June"), ("jul", "July"), ("aug", "August"), ("sep", "September"), ("oct", "October"), ("nov", "November"), ("dec", "December"), ] ) def get_description(): """ Return a dict describing how to call this plotter """ desc = dict() desc["data"] = True desc[ "description" ] = """This table presents the extreme hourly value of some variable of your choice based on available observations maintained by the IEM. Sadly, this app will likely point out some bad data points as such points tend to be obvious at extremes. If you contact us to point out troubles, we'll certainly attempt to fix the archive to remove the bad data points. Observations are arbitrarly bumped 10 minutes into the future to place the near to top of the hour obs on that hour. For example, a 9:53 AM observation becomes the ob for 10 AM. 
""" desc["arguments"] = [ dict( type="zstation", name="zstation", default="AMW", network="IA_ASOS", label="Select Station:", ), dict( type="select", name="month", default="all", options=MDICT, label="Select Month/Season/All", ), dict( type="select", name="var", options=PDICT, default="max_dwpf", label="Which Variable to Plot", ), ] return desc def plotter(fdict): """ Go """ font0 = FontProperties() font0.set_family("monospace") font0.set_size(16) font1 = FontProperties() font1.set_size(16) pgconn = get_dbconn("asos") ctx = get_autoplot_context(fdict, get_description()) varname = ctx["var"] varname2 = varname.split("_")[1] if varname2 in ["dwpf", "tmpf", "feel"]: varname2 = "i" + varname2 month = ctx["month"] station = ctx["zstation"] if month == "all": months = range(1, 13) elif month == "fall": months = [9, 10, 11] elif month == "winter": months = [12, 1, 2] elif month == "spring": months = [3, 4, 5] elif month == "summer": months = [6, 7, 8] elif month == "gs": months = [5, 6, 7, 8, 9] else: ts = datetime.datetime.strptime("2000-" + month + "-01", "%Y-%b-%d") # make sure it is length two for the trick below in SQL months = [ts.month] df = read_sql( f""" WITH obs as ( SELECT (valid + '10 minutes'::interval) at time zone %s as ts, tmpf::int as itmpf, dwpf::int as idwpf, feel::int as ifeel, mslp, alti from alldata where station = %s and extract(month from valid at time zone %s) in %s), agg1 as ( SELECT extract(hour from ts) as hr, max(idwpf) as max_dwpf, max(itmpf) as max_tmpf, min(idwpf) as min_dwpf, min(itmpf) as min_tmpf, min(ifeel) as min_feel, max(ifeel) as max_feel, max(alti) as max_alti, min(alti) as min_alti, max(mslp) as max_mslp, min(mslp) as min_mslp from obs GROUP by hr) SELECT o.ts, a.hr::int as hr, a.{varname} from agg1 a JOIN obs o on (a.hr = extract(hour from o.ts) and a.{varname} = o.{varname2}) ORDER by a.hr ASC, o.ts DESC """, pgconn, params=( ctx["_nt"].sts[station]["tzname"], station, ctx["_nt"].sts[station]["tzname"], tuple(months), ), index_col=None, ) if df.empty: raise NoDataFound("No Data was found.") y0 = 0.1 yheight = 0.8 dy = yheight / 24.0 (fig, ax) = plt.subplots(1, 1, figsize=(8, 8)) ax.set_position([0.12, y0, 0.57, yheight]) ax.barh(df["hr"], df[varname], align="center") ax.set_ylim(-0.5, 23.5) ax.set_yticks([0, 4, 8, 12, 16, 20]) ax.set_yticklabels(["Mid", "4 AM", "8 AM", "Noon", "4 PM", "8 PM"]) ax.grid(True) ax.set_xlim([df[varname].min() - 5, df[varname].max() + 5]) ax.set_ylabel( "Local Time %s" % (ctx["_nt"].sts[station]["tzname"],), fontproperties=font1, ) ab = ctx["_nt"].sts[station]["archive_begin"] if ab is None: raise NoDataFound("Unknown station metadata") fig.text( 0.5, 0.93, ("%s [%s] %s-%s\n" "%s [%s]") % ( ctx["_nt"].sts[station]["name"], station, ab.year, datetime.date.today().year, PDICT[varname], MDICT[month], ), ha="center", fontproperties=font1, ) ypos = y0 + (dy / 2.0) for hr in range(24): sdf = df[df["hr"] == hr] if sdf.empty: continue row = sdf.iloc[0] fig.text( 0.7, ypos, "%3.0f: %s%s" % ( row[varname], pd.Timestamp(row["ts"]).strftime("%d %b %Y"), ("*" if len(sdf.index) > 1 else ""), ), fontproperties=font0, va="center", ) ypos += dy ax.set_xlabel( "%s %s, * denotes ties" % (PDICT[varname], UNITS[varname]), fontproperties=font1, ) return plt.gcf(), df if __name__ == "__main__": plotter(dict())
mit
ComputoCienciasUniandes/MetodosComputacionalesLaboratorio
2017-1/lab8_EJ3/lab8SOL_eJ3/spring_mass.py
1
1084
import numpy as np
import matplotlib.pyplot as plt

N = 5000     # number of steps to take
xo = 0.2     # initial position in m
vo = 0.0     # initial velocity
tau = 4.0    # total time for the simulation in s
dt = tau / float(N)  # time step
k = 42.0     # spring constant in N/m
m = 0.25     # mass in kg
g = 9.8      # in m/s^2
mu = 0.15    # friction coefficient

y = np.zeros([N, 2])  # y is the vector of positions and velocities
y[0, 0] = xo  # initial position
y[0, 1] = vo  # initial velocity

# This function defines the derivatives of the system.
def SpringMass(state, time):
    g0 = state[1]
    if g0 > 0:
        g1 = -k / m * state[0] - g * mu
    else:
        g1 = -k / m * state[0] + g * mu
    return np.array([g0, g1])

# This is the basic step in the Euler method for solving ODEs.
def euler(y, time, dt, derivs):
    k0 = dt * derivs(y, time)
    ynext = y + k0
    return ynext

for j in range(N - 1):
    y[j + 1] = euler(y[j], 0, dt, SpringMass)

# Just to plot
time = np.linspace(0, tau, N)
plt.plot(time, y[:, 0], 'b', label="position")
plt.xlabel("time")
plt.ylabel("position")
plt.savefig('spring_mass.png')
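A hedged sketch, not part of the original lab solution: a midpoint (second-order Runge-Kutta) step has the same call signature as euler() above and is usually more accurate for the same time step. Friction is dropped here to keep the example short; k, m, xo and tau match the values used above.

import numpy as np

def midpoint(y, time, dt, derivs):
    # One RK2 step: evaluate the derivative again at the half step.
    k0 = dt * derivs(y, time)
    k1 = dt * derivs(y + 0.5 * k0, time + 0.5 * dt)
    return y + k1

k_over_m = 42.0 / 0.25  # same spring constant and mass as above

def undamped(state, time):
    # state = [position, velocity] for an undamped spring-mass system
    return np.array([state[1], -k_over_m * state[0]])

state = np.array([0.2, 0.0])
dt = 4.0 / 5000.0
for _ in range(5000):
    state = midpoint(state, 0.0, dt, undamped)
print("position after 4 s:", state[0])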
mit
hughdbrown/QSTK-nohist
src/qstkfeat/featutil.py
1
18051
''' (c) 2011, 2012 Georgia Tech Research Corporation This source code is released under the New BSD license. Please see http://wiki.quantsoftware.org/index.php?title=QSTK_License for license details. Created on Nov 7, 2011 @author: John Cornwell @contact: JohnWCornwellV@gmail.com @summary: Contains utility functions to interact with feature functions in features.py ''' ''' Python imports ''' import math import pickle import datetime as dt from dateutil.relativedelta import relativedelta ''' 3rd Party Imports ''' import numpy as np import matplotlib.pyplot as plt ''' Our Imports ''' import qstklearn.kdtknn as kdt from qstkutil import DataAccess as da from qstkutil import qsdateutil as du from qstkutil import tsutil as tsu from qstkfeat.features import * from qstkfeat.classes import class_fut_ret def getMarketRel(dData, sRel='$SPX'): ''' @summary: Calculates market relative data. @param dData - Dictionary containing data to be used, requires specific naming: open/high/low/close/volume @param sRel - Stock ticker to make the data relative to, $SPX is default. @return: Dictionary of market relative values ''' if sRel not in dData['close'].columns: raise KeyError('Market relative stock %s not found in getMR()' % sRel) dRet = {} ''' Make all data market relative, except for volume ''' for sKey in dData.keys(): ''' Don't calculate market relative volume, but still copy it over ''' if sKey == 'volume': dRet['volume'] = dData['volume'] continue dfAbsolute = dData[sKey] dfRelative = pand.DataFrame(index=dfAbsolute.index, columns=dfAbsolute.columns, data=np.zeros(dfAbsolute.shape)) ''' Get returns and strip off the market returns ''' naRets = dfAbsolute.values.copy() tsu.returnize0(naRets) naMarkRets = naRets[:, list(dfAbsolute.columns).index(sRel)] for i, sStock in enumerate(dfAbsolute.columns): ''' Don't change the 'market' stock ''' if sStock == sRel: dfRelative.values[:, i] = dfAbsolute.values[:, i] continue naMarkRel = (naRets[:, i] - naMarkRets) + 1.0 ''' Find the first non-nan value and start the price at 100 ''' for j in range(0, dfAbsolute.values.shape[0]): if pand.isnull(dfAbsolute.values[j][i]): dfRelative.values[j][i] = float('nan') continue dfRelative.values[j][i] = 100 break ''' Now fill prices out using market relative returns ''' for j in range(j + 1, dfAbsolute.values.shape[0]): dfRelative.values[j][i] = dfRelative.values[j - 1][i] * naMarkRel[j] ''' Add dataFrame to dictionary to return, move to next key ''' dRet[sKey] = dfRelative return dRet def applyFeatures(dData, lfcFeatures, ldArgs, sMarketRel=None, sLog=None): ''' @summary: Calculates the feature values using a list of feature functions and arguments. 
@param dData - Dictionary containing data to be used, requires specific naming: open/high/low/close/volume @param lfcFeatures: List of feature functions, most likely coming from features.py @param ldArgs: List of dictionaries containing arguments, passed as **kwargs There is a special argument 'MR', if it exists, the data will be made market relative @param sMarketRel: If not none, the data will all be made relative to the symbol provided @param sLog: If not None, will be filename to log all of the features to @return: list of dataframes containing values ''' ldfRet = [] ''' Calculate market relative data ''' if sMarketRel is not None: dDataRelative = getMarketRel(dData, sRel=sMarketRel) ''' Loop though feature functions, pass each data dictionary and arguments ''' for i, fcFeature in enumerate(lfcFeatures): ''' Check for special arguments ''' if 'MR' in ldArgs[i]: if not ldArgs[i]['MR']: print 'Warning, setting MR to false will still be Market Relative',\ 'simply do not include MR key in args' if sMarketRel is None: raise AssertionError('Functions require market relative stock but sMarketRel=None') del ldArgs[i]['MR'] ldfRet.append(fcFeature(dDataRelative, **ldArgs[i])) else: ldfRet.append(fcFeature(dData, **ldArgs[i])) if not sLog is None: with open(sLog, 'wb') as fFile: pickle.dump(ldfRet, fFile, -1) return ldfRet def loadFeatures(sLog): ''' @summary: Loads cached features. @param sLog: Filename of features. @return: Numpy array containing values ''' ldfRet = [] if not sLog is None: with open(sLog, 'rb') as fFile: ldfRet = pickle.load(fFile) return ldfRet def stackSyms(ldfFeatures, dtStart=None, dtEnd=None, lsSym=None, sDelNan='ALL', bShowRemoved=False): ''' @summary: Remove symbols from the dataframes, effectively stacking all stocks on top of each other. @param ldfFeatures: List of data frames of features. @param dtStart: Start time, if None, uses all @param dtEnd: End time, if None uses all @param lsSym: List of symbols to use, if None, all are used. @param sDelNan: Optional, default is ALL: delete any rows with a NaN in it FEAT: Delete if any of the feature points are NaN, allow NaN classification None: Do not delete any NaN rows @return: Numpy array containing all features as columns and all ''' if dtStart is None: dtStart = ldfFeatures[0].index[0] if dtEnd is None: dtEnd = ldfFeatures[0].index[-1] naRet = None ''' Stack stocks vertically ''' for sStock in ldfFeatures[0].columns: if lsSym is not None and sStock not in lsSym: continue naStkData = None ''' Loop through all features, stacking columns horizontally ''' for dfFeat in ldfFeatures: dfFeat = dfFeat.ix[dtStart:dtEnd] if naStkData is None: naStkData = np.array(dfFeat[sStock].values.reshape(-1, 1)) else: naStkData = np.hstack((naStkData, dfFeat[sStock].values.reshape(-1, 1))) ''' Remove nan rows possibly''' if 'ALL' == sDelNan or 'FEAT' == sDelNan: llValidRows = [] for i in range(naStkData.shape[0]): if 'ALL' == sDelNan and not math.isnan(np.sum(naStkData[i, :])) or \ 'FEAT' == sDelNan and not math.isnan(np.sum(naStkData[i, :-1])): llValidRows.append(i) elif bShowRemoved: print 'Removed', sStock, naStkData[i, :] naStkData = naStkData[llValidRows, :] ''' Now stack each block of stock data vertically ''' if naRet is None: naRet = naStkData else: naRet = np.vstack((naRet, naStkData)) return naRet def normFeatures(naFeatures, fMin, fMax, bAbsolute, bIgnoreLast=True): ''' @summary: Normalizes the featurespace. @param naFeatures: Numpy array of features, @param fMin: Data frame containing the price information for all of the stocks. 
@param fMax: List of feature functions, most likely coming from features.py @param bAbsolute: If true, min value will be scaled to fMin, max to fMax, if false, +-1 standard deviations will be scaled to fit between fMin and fMax, i.e. ~69% of the values @param bIgnoreLast: If true, last column is ignored (assumed to be classification) @return: list of (weights, shifts) to be used to normalize the query points ''' fNewRange = fMax - fMin lUseCols = naFeatures.shape[1] if bIgnoreLast: lUseCols -= 1 ltRet = [] ''' Loop through all features ''' for i in range(lUseCols): ''' If absolutely scaled use exact min and max ''' if bAbsolute: fFeatMin = np.min(naFeatures[:, i]) fFeatMax = np.max(naFeatures[:, i]) else: ''' Otherwise use mean +-1 std deviations for min/max (~94% of data) ''' fMean = np.average(naFeatures[:, i]) fStd = np.std(naFeatures[:, i]) fFeatMin = fMean - fStd fFeatMax = fMean + fStd ''' Calculate multiplier and shift variable so that new data fits in specified range ''' fRange = fFeatMax - fFeatMin fMult = fNewRange / fRange fShift = fMin - (fFeatMin * fMult) ''' scale and shift, save in return array ''' naFeatures[:, i] *= fMult naFeatures[:, i] += fShift ltRet.append((fMult, fShift)) return ltRet def normQuery(naQueries, ltWeightShift): ''' @summary: Normalizes the queries using the given normalization parameters generated from training data. @param naQueries: Numpy array of queries @param ltWeightShift: List of weights and shift amounts to be applied to each query. @return: None, modifies naQueries ''' assert naQueries.shape[1] == len(ltWeightShift) for i in range(naQueries.shape[1]): ''' scale and shift, save in return array ''' naQueries[:, i] *= ltWeightShift[i][0] naQueries[:, i] += ltWeightShift[i][1] def createKnnLearner(naFeatures, lKnn=30, leafsize=10, method='mean'): ''' @summary: Creates a quick KNN learner @param naFeatures: Numpy array of features, @param fMin: Data frame containing the price information for all of the stocks. @param fMax: List of feature functions, most likely coming from features.py @param bAbsolute: If true, min value will be scaled to fMin, max to fMax, if false, +-1 standard deviations will be scaled to fit between fMin and fMax, i.e. ~69% of the values @param bIgnoreLast: If true, last column is ignored (assumed to be classification) @return: None, data is modified in place ''' cLearner = kdt.kdtknn(k=lKnn, method=method, leafsize=leafsize) cLearner.addEvidence(naFeatures) return cLearner def log500(sLog): ''' @summary: Loads cached features. @param sLog: Filename of features. 
@return: Nothing, logs features to desired location ''' lsSym = ['A', 'AA', 'AAPL', 'ABC', 'ABT', 'ACE', 'ACN', 'ADBE', 'ADI', 'ADM', 'ADP', 'ADSK', 'AEE', 'AEP', 'AES', 'AET', 'AFL', 'AGN', 'AIG', 'AIV', 'AIZ', 'AKAM', 'AKS', 'ALL', 'ALTR', 'AMAT', 'AMD', 'AMGN', 'AMP', 'AMT', 'AMZN', 'AN', 'ANF', 'ANR', 'AON', 'APA', 'APC', 'APD', 'APH', 'APOL', 'ARG', 'ATI', 'AVB', 'AVP', 'AVY', 'AXP', 'AZO', 'BA', 'BAC', 'BAX', 'BBBY', 'BBT', 'BBY', 'BCR', 'BDX', 'BEN', 'BF.B', 'BHI', 'BIG', 'BIIB', 'BK', 'BLK', 'BLL', 'BMC', 'BMS', 'BMY', 'BRCM', 'BRK.B', 'BSX', 'BTU', 'BXP', 'C', 'CA', 'CAG', 'CAH', 'CAM', 'CAT', 'CB', 'CBG', 'CBS', 'CCE', 'CCL', 'CEG', 'CELG', 'CERN', 'CF', 'CFN', 'CHK', 'CHRW', 'CI', 'CINF', 'CL', 'CLF', 'CLX', 'CMA', 'CMCSA', 'CME', 'CMG', 'CMI', 'CMS', 'CNP', 'CNX', 'COF', 'COG', 'COH', 'COL', 'COP', 'COST', 'COV', 'CPB', 'CPWR', 'CRM', 'CSC', 'CSCO', 'CSX', 'CTAS', 'CTL', 'CTSH', 'CTXS', 'CVC', 'CVH', 'CVS', 'CVX', 'D', 'DD', 'DE', 'DELL', 'DF', 'DFS', 'DGX', 'DHI', 'DHR', 'DIS', 'DISCA', 'DNB', 'DNR', 'DO', 'DOV', 'DOW', 'DPS', 'DRI', 'DTE', 'DTV', 'DUK', 'DV', 'DVA', 'DVN', 'EBAY', 'ECL', 'ED', 'EFX', 'EIX', 'EL', 'EMC', 'EMN', 'EMR', 'EOG', 'EP', 'EQR', 'EQT', 'ERTS', 'ESRX', 'ETFC', 'ETN', 'ETR', 'EW', 'EXC', 'EXPD', 'EXPE', 'F', 'FAST', 'FCX', 'FDO', 'FDX', 'FE', 'FFIV', 'FHN', 'FII', 'FIS', 'FISV', 'FITB', 'FLIR', 'FLR', 'FLS', 'FMC', 'FO', 'FRX', 'FSLR', 'FTI', 'FTR', 'GAS', 'GCI', 'GD', 'GE', 'GILD', 'GIS', 'GLW', 'GME', 'GNW', 'GOOG', 'GPC', 'GPS', 'GR', 'GS', 'GT', 'GWW', 'HAL', 'HAR', 'HAS', 'HBAN', 'HCBK', 'HCN', 'HCP', 'HD', 'HES', 'HIG', 'HNZ', 'HOG', 'HON', 'HOT', 'HP', 'HPQ', 'HRB', 'HRL', 'HRS', 'HSP', 'HST', 'HSY', 'HUM', 'IBM', 'ICE', 'IFF', 'IGT', 'INTC', 'INTU', 'IP', 'IPG', 'IR', 'IRM', 'ISRG', 'ITT', 'ITW', 'IVZ', 'JBL', 'JCI', 'JCP', 'JDSU', 'JEC', 'JNJ', 'JNPR', 'JNS', 'JOYG', 'JPM', 'JWN', 'K', 'KEY', 'KFT', 'KIM', 'KLAC', 'KMB', 'KMX', 'KO', 'KR', 'KSS', 'L', 'LEG', 'LEN', 'LH', 'LIFE', 'LLL', 'LLTC', 'LLY', 'LM', 'LMT', 'LNC', 'LO', 'LOW', 'LSI', 'LTD', 'LUK', 'LUV', 'LXK', 'M', 'MA', 'MAR', 'MAS', 'MAT', 'MCD', 'MCHP', 'MCK', 'MCO', 'MDT', 'MET', 'MHP', 'MHS', 'MJN', 'MKC', 'MMC', 'MMI', 'MMM', 'MO', 'MOLX', 'MON', 'MOS', 'MPC', 'MRK', 'MRO', 'MS', 'MSFT', 'MSI', 'MTB', 'MU', 'MUR', 'MWV', 'MWW', 'MYL', 'NBL', 'NBR', 'NDAQ', 'NE', 'NEE', 'NEM', 'NFLX', 'NFX', 'NI', 'NKE', 'NOC', 'NOV', 'NRG', 'NSC', 'NTAP', 'NTRS', 'NU', 'NUE', 'NVDA', 'NVLS', 'NWL', 'NWSA', 'NYX', 'OI', 'OKE', 'OMC', 'ORCL', 'ORLY', 'OXY', 'PAYX', 'PBCT', 'PBI', 'PCAR', 'PCG', 'PCL', 'PCLN', 'PCP', 'PCS', 'PDCO', 'PEG', 'PEP', 'PFE', 'PFG', 'PG', 'PGN', 'PGR', 'PH', 'PHM', 'PKI', 'PLD', 'PLL', 'PM', 'PNC', 'PNW', 'POM', 'PPG', 'PPL', 'PRU', 'PSA', 'PWR', 'PX', 'PXD', 'QCOM', 'QEP', 'R', 'RAI', 'RDC', 'RF', 'RHI', 'RHT', 'RL', 'ROK', 'ROP', 'ROST', 'RRC', 'RRD', 'RSG', 'RTN', 'S', 'SAI', 'SBUX', 'SCG', 'SCHW', 'SE', 'SEE', 'SHLD', 'SHW', 'SIAL', 'SJM', 'SLB', 'SLE', 'SLM', 'SNA', 'SNDK', 'SNI', 'SO', 'SPG', 'SPLS', 'SRCL', 'SRE', 'STI', 'STJ', 'STT', 'STZ', 'SUN', 'SVU', 'SWK', 'SWN', 'SWY', 'SYK', 'SYMC', 'SYY', 'T', 'TAP', 'TDC', 'TE', 'TEG', 'TEL', 'TER', 'TGT', 'THC', 'TIE', 'TIF', 'TJX', 'TLAB', 'TMK', 'TMO', 'TROW', 'TRV', 'TSN', 'TSO', 'TSS', 'TWC', 'TWX', 'TXN', 'TXT', 'TYC', 'UNH', 'UNM', 'UNP', 'UPS', 'URBN', 'USB', 'UTX', 'V', 'VAR', 'VFC', 'VIA.B', 'VLO', 'VMC', 'VNO', 'VRSN', 'VTR', 'VZ', 'WAG', 'WAT', 'WDC', 'WEC', 'WFC', 'WFM', 'WFR', 'WHR', 'WIN', 'WLP', 'WM', 'WMB', 'WMT', 'WPI', 'WPO', 'WU', 'WY', 'WYN', 'WYNN', 'X', 'XEL', 'XL', 'XLNX', 'XOM', 'XRAY', 'XRX', 
'YHOO', 'YUM', 'ZION', 'ZMH'] lsSym.append('$SPX') lsSym.sort() ''' Max lookback is 6 months ''' dtEnd = dt.datetime.now() dtEnd = dtEnd.replace(hour=16, minute=0, second=0, microsecond=0) dtStart = dtEnd - relativedelta(months=6) ''' Pull in current data ''' norObj = da.DataAccess('Norgate') ''' Get 2 extra months for moving averages and future returns ''' ldtTimestamps = du.getNYSEdays(dtStart - relativedelta(months=2), dtEnd + relativedelta(months=2), dt.timedelta(hours=16)) dfPrice = norObj.get_data(ldtTimestamps, lsSym, 'close') dfVolume = norObj.get_data(ldtTimestamps, lsSym, 'volume') ''' Imported functions from qstkfeat.features, NOTE: last function is classification ''' lfcFeatures, ldArgs, lsNames = getFeatureFuncs() ''' Generate a list of DataFrames, one for each feature, with the same index/column structure as price data ''' applyFeatures(dfPrice, dfVolume, lfcFeatures, ldArgs, sLog=sLog) def getFeatureFuncs(): ''' @summary: Gets feature functions supported by the website. @return: Tuple containing (list of functions, list of arguments, list of names) ''' lfcFeatures = [featMA, featMA, featRSI, featDrawDown, featRunUp, featVolumeDelta, featAroon, featAroon, featStochastic, featBeta, featBollinger, featCorrelation, featPrice, class_fut_ret] lsNames = ['MovingAverage', 'RelativeMovingAverage', 'RSI', 'DrawDown', 'RunUp', 'VolumeDelta', 'AroonUp', 'AroonLow', 'Stochastic', 'Beta', 'Bollinger', 'Correlation', 'Price', 'FutureReturn'] ''' Custom Arguments ''' ldArgs = [ {'lLookback':30, 'bRel':False}, {'lLookback':30, 'bRel':True}, {'lLookback':14}, {'lLookback':30}, {'lLookback':30}, {'lLookback':30}, {'bDown':False, 'lLookback':25}, {'bDown':True, 'lLookback':25}, {'lLookback':14}, {'lLookback':14, 'sMarket':'SPY'}, {'lLookback':20}, {'lLookback':20, 'sRel':'SPY'}, {}, {'lLookforward':5, 'sRel':None, 'bUseOpen':False} ] return lfcFeatures, ldArgs, lsNames def testFeature(fcFeature, dArgs): ''' @summary: Quick function to run a feature on some data and plot it to see if it works. 
@param fcFeature: Feature function to test @param dArgs: Arguments to pass into feature function @return: Void ''' ''' Get Train data for 2009-2010 ''' dtStart = dt.datetime(2009, 1, 1) dtEnd = dt.datetime(2009, 5, 1) ''' Pull in current training data and test data ''' norObj = da.DataAccess('Norgate') ''' Get 2 extra months for moving averages and future returns ''' ldtTimestamps = du.getNYSEdays(dtStart, dtEnd, dt.timedelta(hours=16)) lsSym = ['GOOG'] lsSym.append('WMT') lsSym.append('$SPX') lsSym.append('$VIX') lsSym.sort() lsKeys = ['open', 'high', 'low', 'close', 'volume'] ldfData = norObj.get_data(ldtTimestamps, lsSym, lsKeys) dData = dict(zip(lsKeys, ldfData)) dfPrice = dData['close'] #print dfPrice.values ''' Generate a list of DataFrames, one for each feature, with the same index/column structure as price data ''' dtStart = dt.datetime.now() ldfFeatures = applyFeatures(dData, [fcFeature], [dArgs], sMarketRel='$SPX') print 'Runtime:', dt.datetime.now() - dtStart ''' Use last 3 months of index, to avoid lookback nans ''' dfPrint = ldfFeatures[0]['GOOG'] print 'GOOG values:', dfPrint.values print 'GOOG Sum:', dfPrint.ix[dfPrint.notnull()].sum() for sSym in lsSym: plt.subplot(211) plt.plot(ldfFeatures[0].index[-60:], dfPrice[sSym].values[-60:]) plt.plot(ldfFeatures[0].index[-60:], dfPrice['$SPX'].values[-60:] * dfPrice[sSym].values[-60] / dfPrice['$SPX'].values[-60]) plt.legend((sSym, '$SPX')) plt.title(sSym) plt.subplot(212) plt.plot(ldfFeatures[0].index[-60:], ldfFeatures[0][sSym].values[-60:]) plt.title('%s-%s' % (fcFeature.__name__, str(dArgs))) plt.show() if __name__ == '__main__': pass
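A plain-numpy sketch of the scale-and-shift idea behind normFeatures/normQuery documented above. It covers only the absolute min/max branch, ignores the classification-column handling, and does not import the QSTK modules.

import numpy as np

def norm_features(na_features, f_min=-1.0, f_max=1.0):
    # Scale every column into [f_min, f_max] in place and record the
    # per-column (multiplier, shift) so queries can be transformed later.
    lt_ret = []
    for i in range(na_features.shape[1]):
        col_min, col_max = na_features[:, i].min(), na_features[:, i].max()
        f_mult = (f_max - f_min) / (col_max - col_min)
        f_shift = f_min - col_min * f_mult
        na_features[:, i] = na_features[:, i] * f_mult + f_shift
        lt_ret.append((f_mult, f_shift))
    return lt_ret

def norm_query(na_queries, lt_weight_shift):
    # Apply the training-set scaling to query points, in place.
    for i, (f_mult, f_shift) in enumerate(lt_weight_shift):
        na_queries[:, i] = na_queries[:, i] * f_mult + f_shift

train = np.random.RandomState(0).randn(100, 3) * np.array([5.0, 0.1, 50.0])
query = train[:5].copy()
scaling = norm_features(train)
norm_query(query, scaling)
print(train.min(axis=0), train.max(axis=0))  # every column now spans [-1, 1]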
bsd-3-clause
atsao72/sympy
sympy/physics/quantum/tensorproduct.py
64
13572
"""Abstract tensor product.""" from __future__ import print_function, division from sympy import Expr, Add, Mul, Matrix, Pow, sympify from sympy.core.compatibility import u, range from sympy.core.trace import Tr from sympy.printing.pretty.stringpict import prettyForm from sympy.physics.quantum.qexpr import QuantumError from sympy.physics.quantum.dagger import Dagger from sympy.physics.quantum.commutator import Commutator from sympy.physics.quantum.anticommutator import AntiCommutator from sympy.physics.quantum.state import Ket, Bra from sympy.physics.quantum.matrixutils import ( numpy_ndarray, scipy_sparse_matrix, matrix_tensor_product ) __all__ = [ 'TensorProduct', 'tensor_product_simp' ] #----------------------------------------------------------------------------- # Tensor product #----------------------------------------------------------------------------- _combined_printing = False def combined_tensor_printing(combined): """Set flag controlling whether tensor products of states should be printed as a combined bra/ket or as an explicit tensor product of different bra/kets. This is a global setting for all TensorProduct class instances. Parameters ---------- combine : bool When true, tensor product states are combined into one ket/bra, and when false explicit tensor product notation is used between each ket/bra. """ global _combined_printing _combined_printing = combined class TensorProduct(Expr): """The tensor product of two or more arguments. For matrices, this uses ``matrix_tensor_product`` to compute the Kronecker or tensor product matrix. For other objects a symbolic ``TensorProduct`` instance is returned. The tensor product is a non-commutative multiplication that is used primarily with operators and states in quantum mechanics. Currently, the tensor product distinguishes between commutative and non- commutative arguments. Commutative arguments are assumed to be scalars and are pulled out in front of the ``TensorProduct``. Non-commutative arguments remain in the resulting ``TensorProduct``. Parameters ========== args : tuple A sequence of the objects to take the tensor product of. 
Examples ======== Start with a simple tensor product of sympy matrices:: >>> from sympy import I, Matrix, symbols >>> from sympy.physics.quantum import TensorProduct >>> m1 = Matrix([[1,2],[3,4]]) >>> m2 = Matrix([[1,0],[0,1]]) >>> TensorProduct(m1, m2) Matrix([ [1, 0, 2, 0], [0, 1, 0, 2], [3, 0, 4, 0], [0, 3, 0, 4]]) >>> TensorProduct(m2, m1) Matrix([ [1, 2, 0, 0], [3, 4, 0, 0], [0, 0, 1, 2], [0, 0, 3, 4]]) We can also construct tensor products of non-commutative symbols: >>> from sympy import Symbol >>> A = Symbol('A',commutative=False) >>> B = Symbol('B',commutative=False) >>> tp = TensorProduct(A, B) >>> tp AxB We can take the dagger of a tensor product (note the order does NOT reverse like the dagger of a normal product): >>> from sympy.physics.quantum import Dagger >>> Dagger(tp) Dagger(A)xDagger(B) Expand can be used to distribute a tensor product across addition: >>> C = Symbol('C',commutative=False) >>> tp = TensorProduct(A+B,C) >>> tp (A + B)xC >>> tp.expand(tensorproduct=True) AxC + BxC """ is_commutative = False def __new__(cls, *args): if isinstance(args[0], (Matrix, numpy_ndarray, scipy_sparse_matrix)): return matrix_tensor_product(*args) c_part, new_args = cls.flatten(sympify(args)) c_part = Mul(*c_part) if len(new_args) == 0: return c_part elif len(new_args) == 1: return c_part * new_args[0] else: tp = Expr.__new__(cls, *new_args) return c_part * tp @classmethod def flatten(cls, args): # TODO: disallow nested TensorProducts. c_part = [] nc_parts = [] for arg in args: cp, ncp = arg.args_cnc() c_part.extend(list(cp)) nc_parts.append(Mul._from_args(ncp)) return c_part, nc_parts def _eval_adjoint(self): return TensorProduct(*[Dagger(i) for i in self.args]) def _eval_rewrite(self, pattern, rule, **hints): sargs = self.args terms = [t._eval_rewrite(pattern, rule, **hints) for t in sargs] return TensorProduct(*terms).expand(tensorproduct=True) def _sympystr(self, printer, *args): from sympy.printing.str import sstr length = len(self.args) s = '' for i in range(length): if isinstance(self.args[i], (Add, Pow, Mul)): s = s + '(' s = s + sstr(self.args[i]) if isinstance(self.args[i], (Add, Pow, Mul)): s = s + ')' if i != length - 1: s = s + 'x' return s def _pretty(self, printer, *args): if (_combined_printing and (all([isinstance(arg, Ket) for arg in self.args]) or all([isinstance(arg, Bra) for arg in self.args]))): length = len(self.args) pform = printer._print('', *args) for i in range(length): next_pform = printer._print('', *args) length_i = len(self.args[i].args) for j in range(length_i): part_pform = printer._print(self.args[i].args[j], *args) next_pform = prettyForm(*next_pform.right(part_pform)) if j != length_i - 1: next_pform = prettyForm(*next_pform.right(', ')) if len(self.args[i].args) > 1: next_pform = prettyForm( *next_pform.parens(left='{', right='}')) pform = prettyForm(*pform.right(next_pform)) if i != length - 1: pform = prettyForm(*pform.right(',' + ' ')) pform = prettyForm(*pform.left(self.args[0].lbracket)) pform = prettyForm(*pform.right(self.args[0].rbracket)) return pform length = len(self.args) pform = printer._print('', *args) for i in range(length): next_pform = printer._print(self.args[i], *args) if isinstance(self.args[i], (Add, Mul)): next_pform = prettyForm( *next_pform.parens(left='(', right=')') ) pform = prettyForm(*pform.right(next_pform)) if i != length - 1: if printer._use_unicode: pform = prettyForm(*pform.right(u('\N{N-ARY CIRCLED TIMES OPERATOR}') + u(' '))) else: pform = prettyForm(*pform.right('x' + ' ')) return pform def _latex(self, 
printer, *args): if (_combined_printing and (all([isinstance(arg, Ket) for arg in self.args]) or all([isinstance(arg, Bra) for arg in self.args]))): def _label_wrap(label, nlabels): return label if nlabels == 1 else r"\left\{%s\right\}" % label s = r", ".join([_label_wrap(arg._print_label_latex(printer, *args), len(arg.args)) for arg in self.args]) return r"{%s%s%s}" % (self.args[0].lbracket_latex, s, self.args[0].rbracket_latex) length = len(self.args) s = '' for i in range(length): if isinstance(self.args[i], (Add, Mul)): s = s + '\\left(' # The extra {} brackets are needed to get matplotlib's latex # rendered to render this properly. s = s + '{' + printer._print(self.args[i], *args) + '}' if isinstance(self.args[i], (Add, Mul)): s = s + '\\right)' if i != length - 1: s = s + '\\otimes ' return s def doit(self, **hints): return TensorProduct(*[item.doit(**hints) for item in self.args]) def _eval_expand_tensorproduct(self, **hints): """Distribute TensorProducts across addition.""" args = self.args add_args = [] stop = False for i in range(len(args)): if isinstance(args[i], Add): for aa in args[i].args: tp = TensorProduct(*args[:i] + (aa,) + args[i + 1:]) if isinstance(tp, TensorProduct): tp = tp._eval_expand_tensorproduct() add_args.append(tp) break if add_args: return Add(*add_args) else: return self def _eval_trace(self, **kwargs): indices = kwargs.get('indices', None) exp = tensor_product_simp(self) if indices is None or len(indices) == 0: return Mul(*[Tr(arg).doit() for arg in exp.args]) else: return Mul(*[Tr(value).doit() if idx in indices else value for idx, value in enumerate(exp.args)]) def tensor_product_simp_Mul(e): """Simplify a Mul with TensorProducts. Current the main use of this is to simplify a ``Mul`` of ``TensorProduct``s to a ``TensorProduct`` of ``Muls``. It currently only works for relatively simple cases where the initial ``Mul`` only has scalars and raw ``TensorProduct``s, not ``Add``, ``Pow``, ``Commutator``s of ``TensorProduct``s. Parameters ========== e : Expr A ``Mul`` of ``TensorProduct``s to be simplified. Returns ======= e : Expr A ``TensorProduct`` of ``Mul``s. Examples ======== This is an example of the type of simplification that this function performs:: >>> from sympy.physics.quantum.tensorproduct import \ tensor_product_simp_Mul, TensorProduct >>> from sympy import Symbol >>> A = Symbol('A',commutative=False) >>> B = Symbol('B',commutative=False) >>> C = Symbol('C',commutative=False) >>> D = Symbol('D',commutative=False) >>> e = TensorProduct(A,B)*TensorProduct(C,D) >>> e AxB*CxD >>> tensor_product_simp_Mul(e) (A*C)x(B*D) """ # TODO: This won't work with Muls that have other composites of # TensorProducts, like an Add, Pow, Commutator, etc. # TODO: This only works for the equivalent of single Qbit gates. if not isinstance(e, Mul): return e c_part, nc_part = e.args_cnc() n_nc = len(nc_part) if n_nc == 0 or n_nc == 1: return e elif e.has(TensorProduct): current = nc_part[0] if not isinstance(current, TensorProduct): raise TypeError('TensorProduct expected, got: %r' % current) n_terms = len(current.args) new_args = list(current.args) for next in nc_part[1:]: # TODO: check the hilbert spaces of next and current here. 
if isinstance(next, TensorProduct): if n_terms != len(next.args): raise QuantumError( 'TensorProducts of different lengths: %r and %r' % (current, next) ) for i in range(len(new_args)): new_args[i] = new_args[i] * next.args[i] else: # this won't quite work as we don't want next in the # TensorProduct for i in range(len(new_args)): new_args[i] = new_args[i] * next current = next return Mul(*c_part) * TensorProduct(*new_args) else: return e def tensor_product_simp(e, **hints): """Try to simplify and combine TensorProducts. In general this will try to pull expressions inside of ``TensorProducts``. It currently only works for relatively simple cases where the products have only scalars, raw ``TensorProducts``, not ``Add``, ``Pow``, ``Commutators`` of ``TensorProducts``. It is best to see what it does by showing examples. Examples ======== >>> from sympy.physics.quantum import tensor_product_simp >>> from sympy.physics.quantum import TensorProduct >>> from sympy import Symbol >>> A = Symbol('A',commutative=False) >>> B = Symbol('B',commutative=False) >>> C = Symbol('C',commutative=False) >>> D = Symbol('D',commutative=False) First see what happens to products of tensor products: >>> e = TensorProduct(A,B)*TensorProduct(C,D) >>> e AxB*CxD >>> tensor_product_simp(e) (A*C)x(B*D) This is the core logic of this function, and it works inside, powers, sums, commutators and anticommutators as well: >>> tensor_product_simp(e**2) (A*C)x(B*D)**2 """ if isinstance(e, Add): return Add(*[tensor_product_simp(arg) for arg in e.args]) elif isinstance(e, Pow): return tensor_product_simp(e.base) ** e.exp elif isinstance(e, Mul): return tensor_product_simp_Mul(e) elif isinstance(e, Commutator): return Commutator(*[tensor_product_simp(arg) for arg in e.args]) elif isinstance(e, AntiCommutator): return AntiCommutator(*[tensor_product_simp(arg) for arg in e.args]) else: return e
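A short usage sketch that only exercises behaviour already documented in the docstrings above: non-commutative symbols, Dagger not reversing the factor order, and tensor_product_simp pulling Muls inside the tensor product.

from sympy import Symbol
from sympy.physics.quantum import Dagger, TensorProduct, tensor_product_simp

A, B, C, D = [Symbol(s, commutative=False) for s in 'ABCD']

e = TensorProduct(A, B) * TensorProduct(C, D)
print(tensor_product_simp(e))       # (A*C)x(B*D)
print(tensor_product_simp(e ** 2))  # (A*C)x(B*D)**2
print(Dagger(TensorProduct(A, B)))  # Dagger(A)xDagger(B)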
bsd-3-clause
panda4life/idpserver
mysite/idp/plotting.py
1
3702
# -*- coding: utf-8 -*- """ Created on Wed Apr 30 16:43:00 2014 @author: jahad """ import matplotlib.pyplot as plt from matplotlib.font_manager import FontProperties import os def phasePlot(fp,fm,seqname,saveAs): if(os.path.exists(saveAs)): os.remove(saveAs) for x,y,label in zip(fp,fm,seqname): plt.scatter(x,y,marker='.',color='Black') plt.annotate(label,xy=(x+.01,y+.01)) reg1, = plt.fill([0,0,.25],[0,.25,0],color = 'Chartreuse',alpha=.75) reg2, = plt.fill([0,0,.35,.25],[.25,.35,0,0],color = 'MediumSeaGreen',alpha=.75) reg3, = plt.fill([0,.35,.65,.35],[.35,.65,.35,0],color = 'DarkGreen',alpha=.75) reg4, = plt.fill([0,0,.35],[.35,1,.65],color = 'Red',alpha=.75) reg5, = plt.fill([.35,.65,1],[0,.35,0],color = 'Blue',alpha=.75) plt.ylim([0,1]) plt.xlim([0,1]) plt.xlabel('f+') plt.ylabel('f-') plt.title('Phase Diagram') fontP = FontProperties() fontP.set_size('x-small') plt.legend([reg1,reg2,reg3,reg4,reg5], ['Weak Polyampholytes & Polyelectrolytes:\nGlobules & Tadpoles', 'Boundary Region', 'Strong Polyampholytes:\nCoils, Hairpins, Chimeras', 'Negatively Charged Strong Polyelectrolytes:\nSwollen Coils', 'Positively Charged Strong Polyelectrolytes:\nSwollen Coils'], prop = fontP) plt.savefig(saveAs,dpi=200) plt.close() return plt def testPhasePlot(): graph = phasePlot([.65,.32,.15],[.34,.21,.42],['derp1','harro','nyan'],'C:\\Users\\James Ahad\\Documents\\GitHub\\idpserver\\mysite\\output\\test.png') def testPhasePlotNull(): graph = phasePlot([],[],[],'/work/jahad/IDP_patterning/idpserver/mysite/output/test.png') import computation as comp def NCPRPlot(sequence, bloblen, saveAs): if(not sequence is None): data = sequence.NCPRdist(bloblen) plt.plot(data[0,:], data[1,:]) else: plt.plot([],[]) plt.xlim([0,50]) plt.title('NCPR Distribution') plt.xlabel('Blob Index') plt.ylabel('NCPR') plt.ylim([-1.1,1.1]) plt.savefig(saveAs, dpi=200) plt.close() return plt def testNCPRPlot(): graph = NCPRPlot(comp.Sequence('EEEEEEKKKKEKEKEKEKEKEEEEEEEKKKKKKEKEKEKEKEKEKEKGGGGGGKEKEKE'),5, 'C:\\Users\\James Ahad\\Documents\\GitHub\\idpserver\\mysite\\output\\testNCPR.png') def SigmaPlot(sequence, bloblen, saveAs): if(not sequence is None): data = sequence.Sigmadist(bloblen) plt.plot(data[0,:], data[1,:]) else: plt.plot([],[]) plt.xlim([0,50]) plt.title('Sigma Distribution') plt.xlabel('Blob Index') plt.ylabel('Sigma') plt.ylim([-.1,1.1]) plt.savefig(saveAs, dpi=200) plt.close() return plt def testSigmaPlot(): graph = SigmaPlot(comp.Sequence('EEEEEEKKKKEKEKEKEKEKEEEEEEEKKKKKKEKEKEKEKEKEKEKGGGGGGKEKEKE'),5, 'C:\\Users\\James Ahad\\Documents\\GitHub\\idpserver\\mysite\\output\\testSigma.png') def HydroPlot(sequence, bloblen, saveAs): if(not sequence is None): data = sequence.Hydrodist(bloblen) plt.plot(data[0,:], data[1,:]) else: plt.plot([],[]) plt.xlim([0,50]) plt.title('Hydropathy Distribution') plt.xlabel('Blob Index') plt.ylabel('Hydropathy') plt.savefig(saveAs, dpi=200) plt.close() return plt def testHydroPlot(): graph = HydroPlot(comp.Sequence('EEEEEEKKKKEKEKEKEKEKEEEEEEEKKKKKKEKEKEKEKEKEKEKGGGGGGKEKEKE'),5, 'C:\\Users\\James Ahad\\Documents\\GitHub\\idpserver\\mysite\\output\\testHydro.png') testNCPRPlot() testSigmaPlot() testHydroPlot()
gpl-3.0
olologin/scikit-learn
examples/ensemble/plot_adaboost_twoclass.py
347
3268
""" ================== Two-class AdaBoost ================== This example fits an AdaBoosted decision stump on a non-linearly separable classification dataset composed of two "Gaussian quantiles" clusters (see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision boundary and decision scores. The distributions of decision scores are shown separately for samples of class A and B. The predicted class label for each sample is determined by the sign of the decision score. Samples with decision scores greater than zero are classified as B, and are otherwise classified as A. The magnitude of a decision score determines the degree of likeness with the predicted class label. Additionally, a new dataset could be constructed containing a desired purity of class B, for example, by only selecting samples with a decision score above some value. """ print(__doc__) # Author: Noel Dawe <noel.dawe@gmail.com> # # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.ensemble import AdaBoostClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.datasets import make_gaussian_quantiles # Construct dataset X1, y1 = make_gaussian_quantiles(cov=2., n_samples=200, n_features=2, n_classes=2, random_state=1) X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5, n_samples=300, n_features=2, n_classes=2, random_state=1) X = np.concatenate((X1, X2)) y = np.concatenate((y1, - y2 + 1)) # Create and fit an AdaBoosted decision tree bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1), algorithm="SAMME", n_estimators=200) bdt.fit(X, y) plot_colors = "br" plot_step = 0.02 class_names = "AB" plt.figure(figsize=(10, 5)) # Plot the decision boundaries plt.subplot(121) x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step), np.arange(y_min, y_max, plot_step)) Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired) plt.axis("tight") # Plot the training points for i, n, c in zip(range(2), class_names, plot_colors): idx = np.where(y == i) plt.scatter(X[idx, 0], X[idx, 1], c=c, cmap=plt.cm.Paired, label="Class %s" % n) plt.xlim(x_min, x_max) plt.ylim(y_min, y_max) plt.legend(loc='upper right') plt.xlabel('x') plt.ylabel('y') plt.title('Decision Boundary') # Plot the two-class decision scores twoclass_output = bdt.decision_function(X) plot_range = (twoclass_output.min(), twoclass_output.max()) plt.subplot(122) for i, n, c in zip(range(2), class_names, plot_colors): plt.hist(twoclass_output[y == i], bins=10, range=plot_range, facecolor=c, label='Class %s' % n, alpha=.5) x1, x2, y1, y2 = plt.axis() plt.axis((x1, x2, y1, y2 * 1.2)) plt.legend(loc='upper right') plt.ylabel('Samples') plt.xlabel('Score') plt.title('Decision Scores') plt.tight_layout() plt.subplots_adjust(wspace=0.35) plt.show()
bsd-3-clause
amchagas/python-neo
examples/generated_data.py
7
4873
# -*- coding: utf-8 -*- """ This is an example for creating simple plots from various Neo structures. It includes a function that generates toy data. """ from __future__ import division # Use same division in Python 2 and 3 import numpy as np import quantities as pq from matplotlib import pyplot as plt import neo def generate_block(n_segments=3, n_channels=8, n_units=3, data_samples=1000, feature_samples=100): """ Generate a block with a single recording channel group and a number of segments, recording channels and units with associated analog signals and spike trains. """ feature_len = feature_samples / data_samples # Create container and grouping objects segments = [neo.Segment(index=i) for i in range(n_segments)] rcg = neo.RecordingChannelGroup(name='T0') for i in range(n_channels): rc = neo.RecordingChannel(name='C%d' % i, index=i) rc.recordingchannelgroups = [rcg] rcg.recordingchannels.append(rc) units = [neo.Unit('U%d' % i) for i in range(n_units)] rcg.units = units block = neo.Block() block.segments = segments block.recordingchannelgroups = [rcg] # Create synthetic data for seg in segments: feature_pos = np.random.randint(0, data_samples - feature_samples) # Analog signals: Noise with a single sinewave feature wave = 3 * np.sin(np.linspace(0, 2 * np.pi, feature_samples)) for rc in rcg.recordingchannels: sig = np.random.randn(data_samples) sig[feature_pos:feature_pos + feature_samples] += wave signal = neo.AnalogSignal(sig * pq.mV, sampling_rate=1 * pq.kHz) seg.analogsignals.append(signal) rc.analogsignals.append(signal) # Spike trains: Random spike times with elevated rate in short period feature_time = feature_pos / data_samples for u in units: random_spikes = np.random.rand(20) feature_spikes = np.random.rand(5) * feature_len + feature_time spikes = np.hstack([random_spikes, feature_spikes]) train = neo.SpikeTrain(spikes * pq.s, 1 * pq.s) seg.spiketrains.append(train) u.spiketrains.append(train) block.create_many_to_one_relationship() return block block = generate_block() # In this example, we treat each segment in turn, averaging over the channels # in each: for seg in block.segments: print("Analysing segment %d" % seg.index) siglist = seg.analogsignals time_points = siglist[0].times avg = np.mean(siglist, axis=0) # Average over signals of Segment plt.figure() plt.plot(time_points, avg) plt.title("Peak response in segment %d: %f" % (seg.index, avg.max())) # The second alternative is spatial traversal of the data (by channel), with # averaging over trials. For example, perhaps you wish to see which physical # location produces the strongest response, and each stimulus was the same: # We assume that our block has only 1 RecordingChannelGroup and each # RecordingChannel only has 1 AnalogSignal. rcg = block.recordingchannelgroups[0] for rc in rcg.recordingchannels: print("Analysing channel %d: %s" % (rc.index, rc.name)) siglist = rc.analogsignals time_points = siglist[0].times avg = np.mean(siglist, axis=0) # Average over signals of RecordingChannel plt.figure() plt.plot(time_points, avg) plt.title("Average response on channel %d" % rc.index) # There are three ways to access the spike train data: by Segment, # by RecordingChannel or by Unit. # By Segment. 
In this example, each Segment represents data from one trial, # and we want a peristimulus time histogram (PSTH) for each trial from all # Units combined: for seg in block.segments: print("Analysing segment %d" % seg.index) stlist = [st - st.t_start for st in seg.spiketrains] count, bins = np.histogram(np.hstack(stlist)) plt.figure() plt.bar(bins[:-1], count, width=bins[1] - bins[0]) plt.title("PSTH in segment %d" % seg.index) # By Unit. Now we can calculate the PSTH averaged over trials for each Unit: for unit in block.list_units: stlist = [st - st.t_start for st in unit.spiketrains] count, bins = np.histogram(np.hstack(stlist)) plt.figure() plt.bar(bins[:-1], count, width=bins[1] - bins[0]) plt.title("PSTH of unit %s" % unit.name) # By RecordingChannelGroup. Here we calculate a PSTH averaged over trials by # channel location, blending all Units: for rcg in block.recordingchannelgroups: stlist = [] for unit in rcg.units: stlist.extend([st - st.t_start for st in unit.spiketrains]) count, bins = np.histogram(np.hstack(stlist)) plt.figure() plt.bar(bins[:-1], count, width=bins[1] - bins[0]) plt.title("PSTH blend of recording channel group %s" % rcg.name) plt.show()
bsd-3-clause
rafaellehmkuhl/OpenCV-Python-GUI
CvPyGui/PlotContainer.py
1
2407
import pandas as pd from PyQt5.QtCore import Qt from PyQt5.QtWidgets import (QWidget, QLabel, QHBoxLayout, QVBoxLayout, QPushButton, QSlider, QComboBox) from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas from matplotlib.figure import Figure from .FilterCvQtContainer import Filter import random class SinglePlotContainer(QWidget): num_plots = 0 def __init__(self, parent=None): super().__init__() self.num_plots += 1 self.variable_df = pd.DataFrame() self.figure = Figure() # don't use matplotlib.pyplot at all! self.canvas = FigureCanvas(self.figure) self.hLayout = QHBoxLayout(self) self.dataConfigColumn = QVBoxLayout() self.filtersColumn = QVBoxLayout() self.hLayout.addLayout(self.dataConfigColumn) self.hLayout.addWidget(self.canvas) self.hLayout.addLayout(self.filtersColumn) self.comboLoadVariable = QComboBox() self.dataConfigColumn.addWidget(self.comboLoadVariable) self.filter1 = Filter('Moving Average', 3, 30, 5, 1) self.filtersColumn.addWidget(self.filter1) # drawEvent = self.figure.canvas.mpl_connect('draw', self.updatePlot) self.plotRandom() def connectButtons(self): self.comboLoadVariable.activated[str].connect(self.loadVariable) def loadVariable(self, variable): self.variable_df = self.parent().parent().original_df[variable] self.plot() def plot(self): if self.num_plots != 0: self.axes = self.figure.add_subplot(111, sharex=self.parent().parent().plots[0].axes) else: self.axes = self.figure.add_subplot(111) self.axes.clear() self.axes.plot(self.variable_df, '-') self.canvas.draw() def updatePlot(self): ymax,ymin = self.axes.get_ylim() self.axes.clear() self.axes.set_ylim(ymax,ymin) self.axes.plot(self.variable_df, '-') self.canvas.draw() def plotRandom(self): ''' plot some random stuff ''' data = [random.random() for i in range(10)] self.axes = self.figure.add_subplot(111) self.axes.clear() self.axes.plot(data, '-') self.canvas.draw()
mit
qifeigit/scikit-learn
examples/text/document_classification_20newsgroups.py
222
10500
""" ====================================================== Classification of text documents using sparse features ====================================================== This is an example showing how scikit-learn can be used to classify documents by topics using a bag-of-words approach. This example uses a scipy.sparse matrix to store the features and demonstrates various classifiers that can efficiently handle sparse matrices. The dataset used in this example is the 20 newsgroups dataset. It will be automatically downloaded, then cached. The bar plot indicates the accuracy, training time (normalized) and test time (normalized) of each classifier. """ # Author: Peter Prettenhofer <peter.prettenhofer@gmail.com> # Olivier Grisel <olivier.grisel@ensta.org> # Mathieu Blondel <mathieu@mblondel.org> # Lars Buitinck <L.J.Buitinck@uva.nl> # License: BSD 3 clause from __future__ import print_function import logging import numpy as np from optparse import OptionParser import sys from time import time import matplotlib.pyplot as plt from sklearn.datasets import fetch_20newsgroups from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.feature_extraction.text import HashingVectorizer from sklearn.feature_selection import SelectKBest, chi2 from sklearn.linear_model import RidgeClassifier from sklearn.pipeline import Pipeline from sklearn.svm import LinearSVC from sklearn.linear_model import SGDClassifier from sklearn.linear_model import Perceptron from sklearn.linear_model import PassiveAggressiveClassifier from sklearn.naive_bayes import BernoulliNB, MultinomialNB from sklearn.neighbors import KNeighborsClassifier from sklearn.neighbors import NearestCentroid from sklearn.ensemble import RandomForestClassifier from sklearn.utils.extmath import density from sklearn import metrics # Display progress logs on stdout logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s') # parse commandline arguments op = OptionParser() op.add_option("--report", action="store_true", dest="print_report", help="Print a detailed classification report.") op.add_option("--chi2_select", action="store", type="int", dest="select_chi2", help="Select some number of features using a chi-squared test") op.add_option("--confusion_matrix", action="store_true", dest="print_cm", help="Print the confusion matrix.") op.add_option("--top10", action="store_true", dest="print_top10", help="Print ten most discriminative terms per class" " for every classifier.") op.add_option("--all_categories", action="store_true", dest="all_categories", help="Whether to use all categories or not.") op.add_option("--use_hashing", action="store_true", help="Use a hashing vectorizer.") op.add_option("--n_features", action="store", type=int, default=2 ** 16, help="n_features when using the hashing vectorizer.") op.add_option("--filtered", action="store_true", help="Remove newsgroup information that is easily overfit: " "headers, signatures, and quoting.") (opts, args) = op.parse_args() if len(args) > 0: op.error("this script takes no arguments.") sys.exit(1) print(__doc__) op.print_help() print() ############################################################################### # Load some categories from the training set if opts.all_categories: categories = None else: categories = [ 'alt.atheism', 'talk.religion.misc', 'comp.graphics', 'sci.space', ] if opts.filtered: remove = ('headers', 'footers', 'quotes') else: remove = () print("Loading 20 newsgroups dataset for categories:") print(categories if categories else 
"all") data_train = fetch_20newsgroups(subset='train', categories=categories, shuffle=True, random_state=42, remove=remove) data_test = fetch_20newsgroups(subset='test', categories=categories, shuffle=True, random_state=42, remove=remove) print('data loaded') categories = data_train.target_names # for case categories == None def size_mb(docs): return sum(len(s.encode('utf-8')) for s in docs) / 1e6 data_train_size_mb = size_mb(data_train.data) data_test_size_mb = size_mb(data_test.data) print("%d documents - %0.3fMB (training set)" % ( len(data_train.data), data_train_size_mb)) print("%d documents - %0.3fMB (test set)" % ( len(data_test.data), data_test_size_mb)) print("%d categories" % len(categories)) print() # split a training set and a test set y_train, y_test = data_train.target, data_test.target print("Extracting features from the training data using a sparse vectorizer") t0 = time() if opts.use_hashing: vectorizer = HashingVectorizer(stop_words='english', non_negative=True, n_features=opts.n_features) X_train = vectorizer.transform(data_train.data) else: vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english') X_train = vectorizer.fit_transform(data_train.data) duration = time() - t0 print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration)) print("n_samples: %d, n_features: %d" % X_train.shape) print() print("Extracting features from the test data using the same vectorizer") t0 = time() X_test = vectorizer.transform(data_test.data) duration = time() - t0 print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration)) print("n_samples: %d, n_features: %d" % X_test.shape) print() # mapping from integer feature name to original token string if opts.use_hashing: feature_names = None else: feature_names = vectorizer.get_feature_names() if opts.select_chi2: print("Extracting %d best features by a chi-squared test" % opts.select_chi2) t0 = time() ch2 = SelectKBest(chi2, k=opts.select_chi2) X_train = ch2.fit_transform(X_train, y_train) X_test = ch2.transform(X_test) if feature_names: # keep selected feature names feature_names = [feature_names[i] for i in ch2.get_support(indices=True)] print("done in %fs" % (time() - t0)) print() if feature_names: feature_names = np.asarray(feature_names) def trim(s): """Trim string to fit on terminal (assuming 80-column display)""" return s if len(s) <= 80 else s[:77] + "..." 
############################################################################### # Benchmark classifiers def benchmark(clf): print('_' * 80) print("Training: ") print(clf) t0 = time() clf.fit(X_train, y_train) train_time = time() - t0 print("train time: %0.3fs" % train_time) t0 = time() pred = clf.predict(X_test) test_time = time() - t0 print("test time: %0.3fs" % test_time) score = metrics.accuracy_score(y_test, pred) print("accuracy: %0.3f" % score) if hasattr(clf, 'coef_'): print("dimensionality: %d" % clf.coef_.shape[1]) print("density: %f" % density(clf.coef_)) if opts.print_top10 and feature_names is not None: print("top 10 keywords per class:") for i, category in enumerate(categories): top10 = np.argsort(clf.coef_[i])[-10:] print(trim("%s: %s" % (category, " ".join(feature_names[top10])))) print() if opts.print_report: print("classification report:") print(metrics.classification_report(y_test, pred, target_names=categories)) if opts.print_cm: print("confusion matrix:") print(metrics.confusion_matrix(y_test, pred)) print() clf_descr = str(clf).split('(')[0] return clf_descr, score, train_time, test_time results = [] for clf, name in ( (RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"), (Perceptron(n_iter=50), "Perceptron"), (PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"), (KNeighborsClassifier(n_neighbors=10), "kNN"), (RandomForestClassifier(n_estimators=100), "Random forest")): print('=' * 80) print(name) results.append(benchmark(clf)) for penalty in ["l2", "l1"]: print('=' * 80) print("%s penalty" % penalty.upper()) # Train Liblinear model results.append(benchmark(LinearSVC(loss='l2', penalty=penalty, dual=False, tol=1e-3))) # Train SGD model results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50, penalty=penalty))) # Train SGD with Elastic Net penalty print('=' * 80) print("Elastic-Net penalty") results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50, penalty="elasticnet"))) # Train NearestCentroid without threshold print('=' * 80) print("NearestCentroid (aka Rocchio classifier)") results.append(benchmark(NearestCentroid())) # Train sparse Naive Bayes classifiers print('=' * 80) print("Naive Bayes") results.append(benchmark(MultinomialNB(alpha=.01))) results.append(benchmark(BernoulliNB(alpha=.01))) print('=' * 80) print("LinearSVC with L1-based feature selection") # The smaller C, the stronger the regularization. # The more regularization, the more sparsity. results.append(benchmark(Pipeline([ ('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)), ('classification', LinearSVC()) ]))) # make some plots indices = np.arange(len(results)) results = [[x[i] for x in results] for i in range(4)] clf_names, score, training_time, test_time = results training_time = np.array(training_time) / np.max(training_time) test_time = np.array(test_time) / np.max(test_time) plt.figure(figsize=(12, 8)) plt.title("Score") plt.barh(indices, score, .2, label="score", color='r') plt.barh(indices + .3, training_time, .2, label="training time", color='g') plt.barh(indices + .6, test_time, .2, label="test time", color='b') plt.yticks(()) plt.legend(loc='best') plt.subplots_adjust(left=.25) plt.subplots_adjust(top=.95) plt.subplots_adjust(bottom=.05) for i, c in zip(indices, clf_names): plt.text(-.3, i, c) plt.show()
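A condensed sketch of the pipeline the script benchmarks (two categories, one classifier, default options); it is illustrative only and downloads the 20 newsgroups data just as the full script does.

from sklearn import metrics
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC

categories = ['comp.graphics', 'sci.space']
train = fetch_20newsgroups(subset='train', categories=categories,
                           shuffle=True, random_state=42)
test = fetch_20newsgroups(subset='test', categories=categories,
                          shuffle=True, random_state=42)

vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english')
X_train = vectorizer.fit_transform(train.data)
X_test = vectorizer.transform(test.data)

clf = LinearSVC().fit(X_train, train.target)
pred = clf.predict(X_test)
print("accuracy: %0.3f" % metrics.accuracy_score(test.target, pred))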
bsd-3-clause
mattilyra/scikit-learn
benchmarks/bench_plot_omp_lars.py
28
4471
"""Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle regression (:ref:`least_angle_regression`) The input data is mostly low rank but is a fat infinite tail. """ from __future__ import print_function import gc import sys from time import time import numpy as np from sklearn.linear_model import lars_path, orthogonal_mp from sklearn.datasets.samples_generator import make_sparse_coded_signal def compute_bench(samples_range, features_range): it = 0 results = dict() lars = np.empty((len(features_range), len(samples_range))) lars_gram = lars.copy() omp = lars.copy() omp_gram = lars.copy() max_it = len(samples_range) * len(features_range) for i_s, n_samples in enumerate(samples_range): for i_f, n_features in enumerate(features_range): it += 1 n_informative = n_features / 10 print('====================') print('Iteration %03d of %03d' % (it, max_it)) print('====================') # dataset_kwargs = { # 'n_train_samples': n_samples, # 'n_test_samples': 2, # 'n_features': n_features, # 'n_informative': n_informative, # 'effective_rank': min(n_samples, n_features) / 10, # #'effective_rank': None, # 'bias': 0.0, # } dataset_kwargs = { 'n_samples': 1, 'n_components': n_features, 'n_features': n_samples, 'n_nonzero_coefs': n_informative, 'random_state': 0 } print("n_samples: %d" % n_samples) print("n_features: %d" % n_features) y, X, _ = make_sparse_coded_signal(**dataset_kwargs) X = np.asfortranarray(X) gc.collect() print("benchmarking lars_path (with Gram):", end='') sys.stdout.flush() tstart = time() G = np.dot(X.T, X) # precomputed Gram matrix Xy = np.dot(X.T, y) lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative) delta = time() - tstart print("%0.3fs" % delta) lars_gram[i_f, i_s] = delta gc.collect() print("benchmarking lars_path (without Gram):", end='') sys.stdout.flush() tstart = time() lars_path(X, y, Gram=None, max_iter=n_informative) delta = time() - tstart print("%0.3fs" % delta) lars[i_f, i_s] = delta gc.collect() print("benchmarking orthogonal_mp (with Gram):", end='') sys.stdout.flush() tstart = time() orthogonal_mp(X, y, precompute=True, n_nonzero_coefs=n_informative) delta = time() - tstart print("%0.3fs" % delta) omp_gram[i_f, i_s] = delta gc.collect() print("benchmarking orthogonal_mp (without Gram):", end='') sys.stdout.flush() tstart = time() orthogonal_mp(X, y, precompute=False, n_nonzero_coefs=n_informative) delta = time() - tstart print("%0.3fs" % delta) omp[i_f, i_s] = delta results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram) results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp) return results if __name__ == '__main__': samples_range = np.linspace(1000, 5000, 5).astype(np.int) features_range = np.linspace(1000, 5000, 5).astype(np.int) results = compute_bench(samples_range, features_range) max_time = max(np.max(t) for t in results.values()) import matplotlib.pyplot as plt fig = plt.figure('scikit-learn OMP vs. LARS benchmark results') for i, (label, timings) in enumerate(sorted(results.iteritems())): ax = fig.add_subplot(1, 2, i+1) vmax = max(1 - timings.min(), -1 + timings.max()) plt.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax) ax.set_xticklabels([''] + map(str, samples_range)) ax.set_yticklabels([''] + map(str, features_range)) plt.xlabel('n_samples') plt.ylabel('n_features') plt.title(label) plt.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63) ax = plt.axes([0.1, 0.08, 0.8, 0.06]) plt.colorbar(cax=ax, orientation='horizontal') plt.show()
bsd-3-clause
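A minimal, self-contained sketch of the two solvers the benchmark above times (lars_path and orthogonal_mp) on a small random design; the problem size and the data generation below are illustrative only, are not part of the benchmark file, and assume a reasonably recent scikit-learn and NumPy.

import numpy as np
from time import time
from sklearn.linear_model import lars_path, orthogonal_mp

rng = np.random.RandomState(0)
X = np.asfortranarray(rng.randn(200, 500))               # n_samples x n_features design
w = np.zeros(500)
w[rng.choice(500, 10, replace=False)] = rng.randn(10)    # 10 informative coefficients
y = np.dot(X, w)

t0 = time()
alphas, active, coefs = lars_path(X, y, max_iter=10)
print("lars_path:     %.4fs, %d active features" % (time() - t0, len(active)))

t0 = time()
coef_omp = orthogonal_mp(X, y, n_nonzero_coefs=10)
print("orthogonal_mp: %.4fs, %d nonzero coefficients"
      % (time() - t0, int((coef_omp != 0).sum())))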
Vettejeep/Boulder_County_Home_Prices
value_vs_price.py
1
4101
# Simply uses the assessors estimate to predict price, so we can see how much better the machine learning models are.
# requires data from Assemble_Data.py

# Copyright (C) 2017 Kevin Maher
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

# Data for this project may be the property of the Boulder County Assessor's office,
# they gave me free access as a student but were not clear about any restrictions regarding
# sharing the URL from which the data was downloaded.
# The data has been pre-processed from xlsx to csv files because OpenOffice had
# problems with the xlsx files.

# Data was pre-processed by a data setup script, Assemble_Data.py which produced the
# file '$working_data_5c.csv'

import pandas as pd
import numpy as np

from math import sqrt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor, AdaBoostRegressor
from sklearn.linear_model import LinearRegression


# https://stats.stackexchange.com/questions/58391/mean-absolute-percentage-error-mape-in-scikit-learn
def mean_absolute_percentage_error(y_true, y_pred):
    return np.mean(np.abs((y_true - y_pred) / y_true)) * 100


working_df = pd.read_csv('Data\\$working_data_5c.csv')

# eliminate some outliers, homes above an estimated value of $2 million are especially difficult to model
# with the available data
working_df = working_df[working_df['Age_Yrs'] > 0]
working_df = working_df[working_df['totalActualVal'] <= 2000000]

y = working_df['price']
columns = working_df.columns[2:]
X = working_df.drop(columns, axis=1)  # , 'totalActualVal'
X = X.drop(labels=['price'], axis=1)

# 70/30 split of data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=245)

# determine metrics
gradient, intercept, r_value, p_value, std_err = stats.linregress(X_test['totalActualVal'], y_test)
print 'Gradient: %.4f' % gradient
print 'R Value: %.4f' % r_value
print 'R-Squared: %.4f' % r_value ** 2

# adjusted R-squared - https://www.easycalculation.com/statistics/learn-adjustedr2.php
r_sq_adj = 1 - ((1 - r_value ** 2) * (len(y_test) - 1) / (len(y_test) - X_train.shape[1] - 1))
print 'R-Squared Adjusted: %.4f' % r_sq_adj

mape = mean_absolute_percentage_error(y_test, X_test['totalActualVal'])
print 'MAPE: %.4f' % mape

# plot with regression lines, one for actual data, one to represent ideal answer
z = np.polyfit(X_test['totalActualVal'], y_test, 1)
print 'z'
print z
y_poly = [z[0] * x + z[1] for x in range(int(intercept), 3100000 + int(intercept), 100000)]
x_poly = [x for x in range(0, 3100000, 100000)]
y_perfect = [x for x in range(0, 3100000, 100000)]

plt.figure(0)
plt.plot(X_test, y_test, ".")
plt.plot(x_poly, y_poly, "-")
plt.plot(x_poly, y_perfect, "-")
plt.xlim(0, 4000000)
plt.ylim(0, 4000000)
plt.xlabel("Est Price")
plt.ylabel("Actual Price")
plt.title("Estimated vs. Actual Sales Price")
plt.show()
plt.close()

# delta_price = pd.Series((X_test['totalActualVal'] / y_test * 100.0) - 100.0)
# delta_price.to_csv('Data\\delta_price_basic.csv', index=False)

print 'min price, actual: %.2f' % np.min(y_test)
print 'min price, assessor estimate: %.2f' % np.min(X_test['totalActualVal'])
gpl-3.0
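A small, self-contained sketch of the two metrics the script above reports, MAPE and adjusted R-squared; the toy price arrays below are invented for illustration, and only the formulas mirror the script.

import numpy as np
from scipy import stats

# invented toy prices: actual sale prices and assessor-style estimates
y_true = np.array([300000., 450000., 520000., 610000., 750000.])
y_est = np.array([310000., 430000., 540000., 600000., 790000.])

# mean absolute percentage error, as in the script's helper function
mape = np.mean(np.abs((y_true - y_est) / y_true)) * 100
print('MAPE: %.2f%%' % mape)

# R-squared from a simple regression of actual on estimated price,
# then the adjusted version with n observations and k predictors
slope, intercept, r_value, p_value, std_err = stats.linregress(y_est, y_true)
n, k = len(y_true), 1
r_sq_adj = 1 - ((1 - r_value ** 2) * (n - 1) / (n - k - 1))
print('R-Squared: %.4f, adjusted: %.4f' % (r_value ** 2, r_sq_adj))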
wkfwkf/statsmodels
statsmodels/examples/ex_kernel_semilinear_dgp.py
33
4969
# -*- coding: utf-8 -*-
"""

Created on Sun Jan 06 09:50:54 2013

Author: Josef Perktold
"""
from __future__ import print_function

if __name__ == '__main__':

    import numpy as np
    import matplotlib.pyplot as plt
    #from statsmodels.nonparametric.api import KernelReg
    import statsmodels.sandbox.nonparametric.kernel_extras as smke
    import statsmodels.sandbox.nonparametric.dgp_examples as dgp

    class UnivariateFunc1a(dgp.UnivariateFunc1):

        def het_scale(self, x):
            return 0.5

    seed = np.random.randint(999999)
    #seed = 430973
    #seed = 47829
    seed = 648456  #good seed for het_scale = 0.5
    print(seed)
    np.random.seed(seed)

    nobs, k_vars = 300, 3
    x = np.random.uniform(-2, 2, size=(nobs, k_vars))
    xb = x.sum(1) / 3  #beta = [1,1,1]

    k_vars_lin = 2
    x2 = np.random.uniform(-2, 2, size=(nobs, k_vars_lin))

    funcs = [#dgp.UnivariateFanGijbels1(),
             #dgp.UnivariateFanGijbels2(),
             #dgp.UnivariateFanGijbels1EU(),
             #dgp.UnivariateFanGijbels2(distr_x=stats.uniform(-2, 4))
             UnivariateFunc1a(x=xb)
             ]

    res = []
    fig = plt.figure()
    for i, func in enumerate(funcs):
        #f = func()
        f = func
        y = f.y + x2.sum(1)
        model = smke.SemiLinear(y, x2, x, 'ccc', k_vars_lin)
        mean, mfx = model.fit()
        ax = fig.add_subplot(1, 1, i + 1)
        f.plot(ax=ax)
        xb_est = np.dot(model.exog, model.b)
        sortidx = np.argsort(xb_est)  #f.x)
        ax.plot(f.x[sortidx], mean[sortidx], 'o', color='r', lw=2, label='est. mean')
        # ax.plot(f.x, mean0, color='g', lw=2, label='est. mean')
        ax.legend(loc='upper left')
        res.append((model, mean, mfx))

    print('beta', model.b)
    print('scale - est', (y - (xb_est + mean)).std())
    print('scale - dgp realised, true', (y - (f.y_true + x2.sum(1))).std(), \
          2 * f.het_scale(1))
    fittedvalues = xb_est + mean
    resid = np.squeeze(model.endog) - fittedvalues
    print('corrcoef(fittedvalues, resid)', np.corrcoef(fittedvalues, resid)[0, 1])
    print('variance of components, var and as fraction of var(y)')
    print('fitted values', fittedvalues.var(), fittedvalues.var() / y.var())
    print('linear ', xb_est.var(), xb_est.var() / y.var())
    print('nonparametric', mean.var(), mean.var() / y.var())
    print('residual ', resid.var(), resid.var() / y.var())
    print('\ncovariance decomposition fraction of var(y)')
    print(np.cov(fittedvalues, resid) / model.endog.var(ddof=1))
    print('sum', (np.cov(fittedvalues, resid) / model.endog.var(ddof=1)).sum())
    print('\ncovariance decomposition, xb, m, resid as fraction of var(y)')
    print(np.cov(np.column_stack((xb_est, mean, resid)), rowvar=False) / model.endog.var(ddof=1))

    fig.suptitle('Kernel Regression')
    fig.show()

    alpha = 0.7
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(f.x[sortidx], f.y[sortidx], 'o', color='b', lw=2, alpha=alpha, label='observed')
    ax.plot(f.x[sortidx], f.y_true[sortidx], 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
    ax.plot(f.x[sortidx], mean[sortidx], 'o', color='r', lw=2, alpha=alpha, label='est. mean')
    ax.legend(loc='upper left')

    sortidx = np.argsort(xb_est + mean)
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(f.x[sortidx], y[sortidx], 'o', color='b', lw=2, alpha=alpha, label='observed')
    ax.plot(f.x[sortidx], f.y_true[sortidx], 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
    ax.plot(f.x[sortidx], (xb_est + mean)[sortidx], 'o', color='r', lw=2, alpha=alpha, label='est. mean')
    ax.legend(loc='upper left')
    ax.set_title('Semilinear Model - observed and total fitted')

    fig = plt.figure()
    # ax = fig.add_subplot(1, 2, 1)
    # ax.plot(f.x, f.y, 'o', color='b', lw=2, alpha=alpha, label='observed')
    # ax.plot(f.x, f.y_true, 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
    # ax.plot(f.x, mean, 'o', color='r', lw=2, alpha=alpha, label='est. mean')
    # ax.legend(loc='upper left')
    sortidx0 = np.argsort(xb)
    ax = fig.add_subplot(1, 2, 1)
    ax.plot(f.y[sortidx0], 'o', color='b', lw=2, alpha=alpha, label='observed')
    ax.plot(f.y_true[sortidx0], 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
    ax.plot(mean[sortidx0], 'o', color='r', lw=2, alpha=alpha, label='est. mean')
    ax.legend(loc='upper left')
    ax.set_title('Single Index Model (sorted by true xb)')
    ax = fig.add_subplot(1, 2, 2)
    ax.plot(y - xb_est, 'o', color='b', lw=2, alpha=alpha, label='observed')
    ax.plot(f.y_true, 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
    ax.plot(mean, 'o', color='r', lw=2, alpha=alpha, label='est. mean')
    ax.legend(loc='upper left')
    ax.set_title('Single Index Model (nonparametric)')

    plt.figure()
    plt.plot(y, xb_est + mean, '.')
    plt.title('observed versus fitted values')

    plt.show()
bsd-3-clause
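A condensed sketch of the semilinear setup used in the example above, y = x2 @ b + m(x) + e, mirroring its calls into the statsmodels sandbox; the reduced sample size and the plain UnivariateFunc1 data-generating process are simplifications for illustration and require the sandbox modules to be importable.

import numpy as np
import statsmodels.sandbox.nonparametric.kernel_extras as smke
import statsmodels.sandbox.nonparametric.dgp_examples as dgp

np.random.seed(648456)
nobs, k_vars, k_vars_lin = 100, 3, 2
x = np.random.uniform(-2, 2, size=(nobs, k_vars))        # enters nonparametrically
x2 = np.random.uniform(-2, 2, size=(nobs, k_vars_lin))   # enters linearly
f = dgp.UnivariateFunc1(x=x.sum(1) / 3)                  # nonparametric component m(.)
y = f.y + x2.sum(1)                                      # true linear coefficients are all 1

model = smke.SemiLinear(y, x2, x, 'ccc', k_vars_lin)
mean, mfx = model.fit()
print('estimated linear coefficients:', model.b)
print('residual std:', (y - (np.dot(model.exog, model.b) + mean)).std())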
mayblue9/scikit-learn
examples/ensemble/plot_forest_importances_faces.py
403
1519
""" ================================================= Pixel importances with a parallel forest of trees ================================================= This example shows the use of forests of trees to evaluate the importance of the pixels in an image classification task (faces). The hotter the pixel, the more important. The code below also illustrates how the construction and the computation of the predictions can be parallelized within multiple jobs. """ print(__doc__) from time import time import matplotlib.pyplot as plt from sklearn.datasets import fetch_olivetti_faces from sklearn.ensemble import ExtraTreesClassifier # Number of cores to use to perform parallel fitting of the forest model n_jobs = 1 # Load the faces dataset data = fetch_olivetti_faces() X = data.images.reshape((len(data.images), -1)) y = data.target mask = y < 5 # Limit to 5 classes X = X[mask] y = y[mask] # Build a forest and compute the pixel importances print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs) t0 = time() forest = ExtraTreesClassifier(n_estimators=1000, max_features=128, n_jobs=n_jobs, random_state=0) forest.fit(X, y) print("done in %0.3fs" % (time() - t0)) importances = forest.feature_importances_ importances = importances.reshape(data.images[0].shape) # Plot pixel importances plt.matshow(importances, cmap=plt.cm.hot) plt.title("Pixel importances with forests of trees") plt.show()
bsd-3-clause
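A quick sketch of the same importance computation on synthetic data rather than the Olivetti faces; the dataset shape and forest parameters below are arbitrary illustrative choices.

from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier

# synthetic classification problem standing in for the image data
X, y = make_classification(n_samples=500, n_features=20, n_informative=5,
                           random_state=0)

forest = ExtraTreesClassifier(n_estimators=100, random_state=0)
forest.fit(X, y)

importances = forest.feature_importances_   # one value per feature, sums to 1
top = importances.argsort()[::-1][:5]
print("top-5 features:", top)
print("their importances:", importances[top])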
mmottahedi/nilmtk
nilmtk/metergroup.py
4
70748
from __future__ import print_function, division import networkx as nx import pandas as pd import numpy as np import matplotlib.pyplot as plt from matplotlib.ticker import FuncFormatter from datetime import timedelta from warnings import warn from sys import stdout from collections import Counter from copy import copy, deepcopy import gc from collections import namedtuple # NILMTK imports from .elecmeter import ElecMeter, ElecMeterID from .appliance import Appliance from .datastore.datastore import join_key from .utils import (tree_root, nodes_adjacent_to_root, simplest_type_for, flatten_2d_list, convert_to_timestamp, normalise_timestamp, print_on_line, convert_to_list, append_or_extend_list, most_common, capitalise_first_letter) from .plots import plot_series from .measurement import (select_best_ac_type, AC_TYPES, LEVEL_NAMES, PHYSICAL_QUANTITIES_TO_AVERAGE) from nilmtk.exceptions import MeasurementError from .electric import Electric from .timeframe import TimeFrame, split_timeframes from .preprocessing import Apply from .datastore import MAX_MEM_ALLOWANCE_IN_BYTES from nilmtk.timeframegroup import TimeFrameGroup # MeterGroupID.meters is a tuple of ElecMeterIDs. Order doesn't matter. # (we can't use a set because sets aren't hashable so we can't use # a set as a dict key or a DataFrame column name.) MeterGroupID = namedtuple('MeterGroupID', ['meters']) class MeterGroup(Electric): """A group of ElecMeter objects. Can contain nested MeterGroup objects. Implements many of the same methods as ElecMeter. Attributes ---------- meters : list of ElecMeters or nested MeterGroups disabled_meters : list of ElecMeters or nested MeterGroups name : only set by functions like 'groupby' and 'select_top_k' """ def __init__(self, meters=None, disabled_meters=None): self.meters = convert_to_list(meters) self.disabled_meters = convert_to_list(disabled_meters) self.name = "" def import_metadata(self, store, elec_meters, appliances, building_id): """ Parameters ---------- store : nilmtk.DataStore elec_meters : dict of dicts metadata for each ElecMeter appliances : list of dicts metadata for each Appliance building_id : BuildingID """ # Sanity checking assert isinstance(elec_meters, dict) assert isinstance(appliances, list) assert isinstance(building_id, tuple) if not elec_meters: warn("Building {} has an empty 'elec_meters' object." .format(building_id.instance), RuntimeWarning) if not appliances: warn("Building {} has an empty 'appliances' list." 
.format(building_id.instance), RuntimeWarning) # Load static Meter Devices ElecMeter.load_meter_devices(store) # Load each meter for meter_i, meter_metadata_dict in elec_meters.iteritems(): meter_id = ElecMeterID(instance=meter_i, building=building_id.instance, dataset=building_id.dataset) meter = ElecMeter(store, meter_metadata_dict, meter_id) self.meters.append(meter) # Load each appliance for appliance_md in appliances: appliance_md['dataset'] = building_id.dataset appliance_md['building'] = building_id.instance appliance = Appliance(appliance_md) meter_ids = [ElecMeterID(instance=meter_instance, building=building_id.instance, dataset=building_id.dataset) for meter_instance in appliance.metadata['meters']] if appliance.n_meters == 1: # Attach this appliance to just a single meter meter = self[meter_ids[0]] if isinstance(meter, MeterGroup): # MeterGroup of site_meters metergroup = meter for meter in metergroup.meters: meter.appliances.append(appliance) else: meter.appliances.append(appliance) else: # DualSupply or 3-phase appliance so need a meter group metergroup = MeterGroup() metergroup.meters = [self[meter_id] for meter_id in meter_ids] for meter in metergroup.meters: # We assume that any meters used for measuring # dual-supply or 3-phase appliances are not also used # for measuring single-supply appliances. self.meters.remove(meter) meter.appliances.append(appliance) self.meters.append(metergroup) # disable disabled meters meters_to_disable = [m for m in self.meters if isinstance(m, ElecMeter) and m.metadata.get('disabled')] for meter in meters_to_disable: self.meters.remove(meter) self.disabled_meters.append(meter) def union(self, other): """ Returns ------- new MeterGroup where its set of `meters` is the union of `self.meters` and `other.meters`. """ if not isinstance(other, MeterGroup): raise TypeError() return MeterGroup(set(self.meters).union(other.meters)) def dominant_appliance(self): dominant_appliances = [meter.dominant_appliance() for meter in self.meters] dominant_appliances = list(set(dominant_appliances)) n_dominant_appliances = len(dominant_appliances) if n_dominant_appliances == 0: return elif n_dominant_appliances == 1: return dominant_appliances[0] else: raise RuntimeError( "More than one dominant appliance in MeterGroup!" " (The dominant appliance per meter should be manually" " specified in the metadata. If it isn't and if there are" " multiple appliances for a meter then NILMTK assumes" " all appliances on that meter are dominant. NILMTK" " can't automatically distinguish between multiple" " appliances on the same meter (at least," " not without using NILM!))") def nested_metergroups(self): return [m for m in self.meters if isinstance(m, MeterGroup)] def __getitem__(self, key): """Get a single meter using appliance type and instance unless ElecMeterID is supplied. These formats for `key` are accepted: Retrieve a meter using details of the meter: * `1` - retrieves meter instance 1, raises Exception if there are more than one meter with this instance, raises KeyError if none are found. If meter instance 1 is in a nested MeterGroup then retrieve the ElecMeter, not the MeterGroup. * `ElecMeterID(1, 1, 'REDD')` - retrieves meter with specified meter ID * `MeterGroupID(meters=(ElecMeterID(1, 1, 'REDD')))` - retrieves existing nested MeterGroup containing exactly meter instances 1 and 2. * `[ElecMeterID(1, 1, 'REDD'), ElecMeterID(2, 1, 'REDD')]` - retrieves existing nested MeterGroup containing exactly meter instances 1 and 2. 
* `ElecMeterID(0, 1, 'REDD')` - instance `0` means `mains`. This returns a new MeterGroup of all site_meters in building 1 in REDD. * `ElecMeterID((1,2), 1, 'REDD')` - retrieve existing MeterGroup which contains exactly meters 1 & 2. * `(1, 2, 'REDD')` - converts to ElecMeterID and treats as an ElecMeterID. Items must be in the order expected for an ElecMeterID. Retrieve a meter using details of appliances attached to the meter: * `'toaster'` - retrieves meter or group upstream of toaster instance 1 * `'toaster', 2` - retrieves meter or group upstream of toaster instance 2 * `{'dataset': 'redd', 'building': 3, 'type': 'toaster', 'instance': 2}` - specify an appliance Returns ------- ElecMeter or MeterGroup """ if isinstance(key, str): # default to get first meter return self[(key, 1)] elif isinstance(key, ElecMeterID): if isinstance(key.instance, tuple): # find meter group from a key of the form # ElecMeterID(instance=(1,2), building=1, dataset='REDD') for group in self.nested_metergroups(): if (set(group.instance()) == set(key.instance) and group.building() == key.building and group.dataset() == key.dataset): return group # Else try to find an ElecMeter with instance=(1,2) for meter in self.meters: if meter.identifier == key: return meter elif key.instance == 0: metergroup_of_building = self.select( building=key.building, dataset=key.dataset) return metergroup_of_building.mains() else: for meter in self.meters: if meter.identifier == key: return meter raise KeyError(key) elif isinstance(key, MeterGroupID): key_meters = set(key.meters) for group in self.nested_metergroups(): if (set(group.identifier.meters) == key_meters): return group raise KeyError(key) # find MeterGroup from list of ElecMeterIDs elif isinstance(key, list): if not all([isinstance(item, tuple) for item in key]): raise TypeError("requires a list of ElecMeterID objects.") for meter in self.meters: # TODO: write unit tests for this # list of ElecMeterIDs. Return existing MeterGroup if isinstance(meter, MeterGroup): metergroup = meter meter_ids = set(metergroup.identifier.meters) if meter_ids == set(key): return metergroup raise KeyError(key) elif isinstance(key, tuple): if len(key) == 2: if isinstance(key[0], str): return self[{'type': key[0], 'instance': key[1]}] else: # Assume we're dealing with a request for 2 ElecMeters return MeterGroup([self[i] for i in key]) elif len(key) == 3: return self[ElecMeterID(*key)] else: raise TypeError() elif isinstance(key, dict): meters = [] for meter in self.meters: if meter.matches_appliances(key): meters.append(meter) if len(meters) == 1: return meters[0] elif len(meters) > 1: raise Exception('search terms match {} appliances' .format(len(meters))) else: raise KeyError(key) elif isinstance(key, int) and not isinstance(key, bool): meters_found = [] for meter in self.meters: if isinstance(meter.instance(), int): if meter.instance() == key: meters_found.append(meter) elif isinstance(meter.instance(), (tuple, list)): if key in meter.instance(): if isinstance(meter, MeterGroup): print("Meter", key, "is in a nested meter group." 
" Retrieving just the ElecMeter.") meters_found.append(meter[key]) else: meters_found.append(meter) n_meters_found = len(meters_found) if n_meters_found > 1: raise Exception('{} meters found with instance == {}: {}' .format(n_meters_found, key, meters_found)) elif n_meters_found == 0: raise KeyError( 'No meters found with instance == {}'.format(key)) else: return meters_found[0] else: raise TypeError() def matches(self, key): for meter in self.meters: if meter.matches(key): return True return False def select(self, **kwargs): """Select a group of meters based on meter metadata. e.g. * select(building=1, sample_period=6) * select(room='bathroom') If multiple criteria are supplied then these are ANDed together. Returns ------- new MeterGroup of selected meters. Ideas for the future (not implemented yet!) ------------------------------------------- * select(category=['ict', 'lighting']) * select([(fridge, 1), (tv, 1)]) # get specifically fridge 1 and tv 1 * select(name=['fridge', 'tv']) # get all fridges and tvs * select(category='lighting', except={'room'=['kitchen lights']}) * select('all', except=[('tv', 1)]) Also: see if we can do select(category='lighting' | name='tree lights') or select(energy > 100)?? Perhaps using: * Python's eval function something like this: >>> s = pd.Series(np.random.randn(5)) >>> eval('(x > 0) | (index > 2)', {'x':s, 'index':s.index}) Hmm, yes, maybe we should just implement this! e.g. select("(category == 'lighting') | (category == 'ict')") But what about: * select('total_energy > 100') * select('mean(hours_on_per_day) > 3') * select('max(hours_on_per_day) > 5') * select('max(power) > 2000') * select('energy_per_day > 2') * select('rank_by_energy > 5') # top_k(5) * select('rank_by_proportion > 0.2') Maybe don't bother. That's easy enough to get with itemised_energy(). Although these are quite nice and shouldn't be too hard. Would need to only calculate these stats if necessary though (e.g. by checking if 'total_energy' is in the query string before running `eval`) * or numexpr: https://github.com/pydata/numexpr * see Pandas.eval(): * http://pandas.pydata.org/pandas-docs/stable/indexing.html#the-query-method-experimental * https://github.com/pydata/pandas/blob/master/pandas/computation/eval.py#L119 """ selected_meters = [] func = kwargs.pop('func', 'matches') def get(_kwargs): exception_raised_every_time = True exception = None no_match = True for meter in self.meters: try: match = getattr(meter, func)(_kwargs) except KeyError as e: exception = e else: exception_raised_every_time = False if match: selected_meters.append(meter) no_match = False if no_match: raise KeyError("'No match for {}'".format(_kwargs)) if exception_raised_every_time and exception is not None: raise exception if len(kwargs) == 1 and isinstance(kwargs.values()[0], list): attribute = kwargs.keys()[0] list_of_values = kwargs.values()[0] for value in list_of_values: get({attribute: value}) else: get(kwargs) return MeterGroup(selected_meters) def select_using_appliances(self, **kwargs): """Select a group of meters based on appliance metadata. e.g. * select_using_appliances(category='lighting') * select_using_appliances(type='fridge') * select_using_appliances(type=['fridge', 'kettle', 'toaster']) * select_using_appliances(building=1, category='lighting') * select_using_appliances(room='bathroom') If multiple criteria are supplied then these are ANDed together. Returns ------- new MeterGroup of selected meters. 
""" return self.select(func='matches_appliances', **kwargs) def from_list(self, meter_ids): """ Parameters ---------- meter_ids : list or tuple Each element is an ElecMeterID or a MeterGroupID. Returns ------- MeterGroup """ meter_ids = list(meter_ids) meter_ids = list(set(meter_ids)) # make unique meters = [] def append_meter_group(meter_id): try: # see if there is an existing MeterGroup metergroup = self[meter_id] except KeyError: # there is no existing MeterGroup so assemble one metergroup = self.from_list(meter_id.meters) meters.append(metergroup) for meter_id in meter_ids: if isinstance(meter_id, ElecMeterID): meters.append(self[meter_id]) elif isinstance(meter_id, MeterGroupID): append_meter_group(meter_id) elif isinstance(meter_id, tuple): meter_id = MeterGroupID(meters=meter_id) append_meter_group(meter_id) else: raise TypeError() return MeterGroup(meters) @classmethod def from_other_metergroup(cls, other, dataset): """Assemble a new meter group using the same meter IDs and nested MeterGroups as `other`. This is useful for preparing a ground truth metergroup from a meter group of NILM predictions. Parameters ---------- other : MeterGroup dataset : string The `name` of the dataset for the ground truth. e.g. 'REDD' Returns ------- MeterGroup """ other_identifiers = other.identifier.meters new_identifiers = [] for other_id in other_identifiers: new_id = other_id._replace(dataset=dataset) if isinstance(new_id.instance, tuple): nested = [] for instance in new_id.instance: new_nested_id = new_id._replace(instance=instance) nested.append(new_nested_id) new_identifiers.append(tuple(nested)) else: new_identifiers.append(new_id) metergroup = MeterGroup() metergroup.from_list(new_identifiers) return metergroup def __eq__(self, other): if isinstance(other, MeterGroup): return set(other.meters) == set(self.meters) else: return False def __ne__(self, other): return not self.__eq__(other) @property def appliances(self): appliances = set() for meter in self.meters: appliances.update(meter.appliances) return list(appliances) def dominant_appliances(self): appliances = set() for meter in self.meters: appliances.add(meter.dominant_appliance()) return list(appliances) def values_for_appliance_metadata_key(self, key, only_consider_dominant_appliance=True): """ Parameters ---------- key : str e.g. 'type' or 'categories' or 'room' Returns ------- list """ values = [] if only_consider_dominant_appliance: appliances = self.dominant_appliances() else: appliances = self.appliances for appliance in appliances: value = appliance.metadata.get(key) append_or_extend_list(values, value) value = appliance.type.get(key) append_or_extend_list(values, value) return list(set(values)) def get_labels(self, meter_ids, pretty=True): """Create human-readable meter labels. Parameters ---------- meter_ids : list of ElecMeterIDs (or 3-tuples in same order as ElecMeterID) Returns ------- list of strings describing the appliances. 
""" meters = [self[meter_id] for meter_id in meter_ids] labels = [meter.label(pretty=pretty) for meter in meters] return labels def __repr__(self): s = "{:s}(meters=\n".format(self.__class__.__name__) for meter in self.meters: s += " " + str(meter).replace("\n", "\n ") + "\n" s += ")" return s @property def identifier(self): """Returns a MeterGroupID.""" return MeterGroupID(meters=tuple([meter.identifier for meter in self.meters])) def instance(self): """Returns tuple of integers where each int is a meter instance.""" return tuple([meter.instance() for meter in self.meters]) def building(self): """Returns building instance integer(s).""" buildings = set([meter.building() for meter in self.meters]) return simplest_type_for(buildings) def contains_meters_from_multiple_buildings(self): """Returns True if this MeterGroup contains meters from more than one building.""" building = self.building() try: n = len(building) except TypeError: return False else: return n > 1 def dataset(self): """Returns dataset string(s).""" datasets = set([meter.dataset() for meter in self.meters]) return simplest_type_for(datasets) def sample_period(self): """Returns max of all meter sample periods.""" return max([meter.sample_period() for meter in self.meters]) def wiring_graph(self): """Returns a networkx.DiGraph of connections between meters.""" wiring_graph = nx.DiGraph() def _build_wiring_graph(meters): for meter in meters: if isinstance(meter, MeterGroup): metergroup = meter _build_wiring_graph(metergroup.meters) else: upstream_meter = meter.upstream_meter(raise_warning=False) # Need to ensure we use the same object # if upstream meter already exists. if upstream_meter is not None: for node in wiring_graph.nodes(): if upstream_meter == node: upstream_meter = node break wiring_graph.add_edge(upstream_meter, meter) _build_wiring_graph(self.meters) return wiring_graph def draw_wiring_graph(self, show_meter_labels=True): graph = self.wiring_graph() meter_labels = {meter: meter.instance() for meter in graph.nodes()} pos = nx.graphviz_layout(graph, prog='dot') nx.draw(graph, pos, labels=meter_labels, arrows=False) if show_meter_labels: meter_labels = {meter: meter.label() for meter in graph.nodes()} for meter, name in meter_labels.iteritems(): x, y = pos[meter] if meter.is_site_meter(): delta_y = 5 else: delta_y = -5 plt.text(x, y+delta_y, s=name, bbox=dict(facecolor='red', alpha=0.5), horizontalalignment='center') ax = plt.gca() return graph, ax def load(self, **kwargs): """Returns a generator of DataFrames loaded from the DataStore. By default, `load` will load all available columns from the DataStore. Specific columns can be selected in one or two mutually exclusive ways: 1. specify a list of column names using the `cols` parameter. 2. specify a `physical_quantity` and/or an `ac_type` parameter to ask `load` to automatically select columns. Each meter in the MeterGroup will first be resampled before being added. The returned DataFrame will include NaNs at timestamps where no meter had a sample (after resampling the meter). Parameters ---------- sample_period : int or float, optional Number of seconds to use as sample period when reindexing meters. If not specified then will use the max of all meters' sample_periods. resample_kwargs : dict of key word arguments (other than 'rule') to `pass to pd.DataFrame.resample()` chunksize : int, optional the maximum number of rows per chunk. Note that each chunk is guaranteed to be of length <= chunksize. Each chunk is *not* guaranteed to be exactly of length == chunksize. 
**kwargs : any other key word arguments to pass to `self.store.load()` including: physical_quantity : string or list of strings e.g. 'power' or 'voltage' or 'energy' or ['power', 'energy']. If a single string then load columns only for that physical quantity. If a list of strings then load columns for all those physical quantities. ac_type : string or list of strings, defaults to None Where 'ac_type' is short for 'alternating current type'. e.g. 'reactive' or 'active' or 'apparent'. If set to None then will load all AC types per physical quantity. If set to 'best' then load the single best AC type per physical quantity. If set to a single AC type then load just that single AC type per physical quantity, else raise an Exception. If set to a list of AC type strings then will load all those AC types and will raise an Exception if any cannot be found. cols : list of tuples, using NILMTK's vocabulary for measurements. e.g. [('power', 'active'), ('voltage', ''), ('energy', 'reactive')] `cols` can't be used if `ac_type` and/or `physical_quantity` are set. preprocessing : list of Node subclass instances e.g. [Clip()] Returns --------- Always return a generator of DataFrames (even if it only has a single column). .. note:: Different AC types will be treated separately. """ # Handle kwargs sample_period = kwargs.setdefault('sample_period', self.sample_period()) sections = kwargs.pop('sections', [self.get_timeframe()]) chunksize = kwargs.pop('chunksize', MAX_MEM_ALLOWANCE_IN_BYTES) duration_threshold = sample_period * chunksize columns = pd.MultiIndex.from_tuples( self._convert_physical_quantity_and_ac_type_to_cols(**kwargs)['cols'], names=LEVEL_NAMES) freq = '{:d}S'.format(int(sample_period)) verbose = kwargs.get('verbose') # Check for empty sections sections = [section for section in sections if section] if not sections: print("No sections to load.") yield pd.DataFrame(columns=columns) return # Loop through each section to load for section in split_timeframes(sections, duration_threshold): kwargs['sections'] = [section] start = normalise_timestamp(section.start, freq) tz = None if start.tz is None else start.tz.zone index = pd.date_range( start.tz_localize(None), section.end.tz_localize(None), tz=tz, closed='left', freq=freq) chunk = combine_chunks_from_generators( index, columns, self.meters, kwargs) yield chunk def _convert_physical_quantity_and_ac_type_to_cols(self, **kwargs): all_columns = set() kwargs = deepcopy(kwargs) for meter in self.meters: kwargs_copy = deepcopy(kwargs) new_kwargs = meter._convert_physical_quantity_and_ac_type_to_cols(**kwargs_copy) cols = new_kwargs.get('cols', []) for col in cols: all_columns.add(col) kwargs['cols'] = list(all_columns) return kwargs def _meter_generators(self, **kwargs): """Returns (list of identifiers, list of generators).""" generators = [] identifiers = [] for meter in self.meters: kwargs_copy = deepcopy(kwargs) generator = meter.load(**kwargs_copy) generators.append(generator) identifiers.append(meter.identifier) return identifiers, generators def simultaneous_switches(self, threshold=40): """ Parameters ---------- threshold : number, threshold in Watts Returns ------- sim_switches : pd.Series of type {timestamp: number of simultaneous switches} Notes ----- This function assumes that the submeters in this MeterGroup are all aligned. If they are not then you should align the meters, e.g. by using an `Apply` node with `resample`. 
""" submeters = self.submeters().meters count = Counter() for meter in submeters: switch_time_meter = meter.switch_times(threshold) for timestamp in switch_time_meter: count[timestamp] += 1 sim_switches = pd.Series(count) # Should be 2 or more appliances changing state at the same time sim_switches = sim_switches[sim_switches >= 2] return sim_switches def mains(self): """ Returns ------- ElecMeter or MeterGroup or None """ if self.contains_meters_from_multiple_buildings(): msg = ("This MeterGroup contains meters from buildings '{}'." " It only makes sense to get `mains` if the MeterGroup" " contains meters from a single building." .format(self.building())) raise RuntimeError(msg) site_meters = [meter for meter in self.meters if meter.is_site_meter()] n_site_meters = len(site_meters) if n_site_meters == 0: return elif n_site_meters == 1: return site_meters[0] else: return MeterGroup(meters=site_meters) def use_alternative_mains(self): """Swap present mains meter(s) for mains meter(s) in `disabled_meters`. This is useful if the dataset has multiple, redundant mains meters (e.g. in UK-DALE buildings 1, 2 and 5). """ present_mains = [m for m in self.meters if m.is_site_meter()] alternative_mains = [m for m in self.disabled_meters if m.is_site_meter()] if not alternative_mains: raise RuntimeError("No site meters found in `self.disabled_meters`") for meter in present_mains: self.meters.remove(meter) self.disabled_meters.append(meter) for meter in alternative_mains: self.meters.append(meter) self.disabled_meters.remove(meter) def upstream_meter(self): """Returns single upstream meter. Raises RuntimeError if more than 1 upstream meter. """ upstream_meters = [] for meter in self.meters: upstream_meters.append(meter.upstream_meter()) unique_upstream_meters = list(set(upstream_meters)) if len(unique_upstream_meters) > 1: raise RuntimeError("{:d} upstream meters found for meter group." " Should be 1.".format(len(unique_upstream_meters))) return unique_upstream_meters[0] def meters_directly_downstream_of_mains(self): """Returns new MeterGroup.""" meters = nodes_adjacent_to_root(self.wiring_graph()) assert isinstance(meters, list) return MeterGroup(meters) def submeters(self): """Returns new MeterGroup of all meters except site_meters""" submeters = [meter for meter in self.meters if not meter.is_site_meter()] return MeterGroup(submeters) def is_site_meter(self): """Returns True if any meters are site meters""" return any([meter.is_site_meter() for meter in self.meters]) def total_energy(self, **load_kwargs): """Sums together total meter_energy for each meter. Note that this function does *not* return the total aggregate energy for a building. Instead this function adds up the total energy for all the meters contained in this MeterGroup. If you want the total aggregate energy then please use `MeterGroup.mains().total_energy()`. Parameters ---------- full_results : bool, default=False **loader_kwargs : key word arguments for DataStore.load() Returns ------- if `full_results` is True then return TotalEnergyResults object else return a pd.Series with a row for each AC type. 
""" self._check_kwargs_for_full_results_and_sections(load_kwargs) full_results = load_kwargs.pop('full_results', False) meter_energies = self._collect_stats_on_all_meters( load_kwargs, 'total_energy', full_results) if meter_energies: total_energy_results = meter_energies[0] for meter_energy in meter_energies[1:]: if full_results: total_energy_results.unify(meter_energy) else: total_energy_results += meter_energy return total_energy_results def _collect_stats_on_all_meters(self, load_kwargs, func, full_results): collected_stats = [] for meter in self.meters: print_on_line("\rCalculating", func, "for", meter.identifier, "... ") single_stat = getattr(meter, func)(full_results=full_results, **load_kwargs) collected_stats.append(single_stat) if (full_results and len(self.meters) > 1 and not meter.store.all_sections_smaller_than_chunksize): warn("at least one section requested from '{}' required" " multiple chunks to be loaded into memory. This may cause" " a failure when we try to unify results from multiple" " meters.".format(meter)) return collected_stats def dropout_rate(self, **load_kwargs): """Sums together total energy for each meter. Parameters ---------- full_results : bool, default=False **loader_kwargs : key word arguments for DataStore.load() Returns ------- if `full_results` is True then return TotalEnergyResults object else return either a single number of, if there are multiple AC types, then return a pd.Series with a row for each AC type. """ self._check_kwargs_for_full_results_and_sections(load_kwargs) full_results = load_kwargs.pop('full_results', False) dropout_rates = self._collect_stats_on_all_meters( load_kwargs, 'dropout_rate', full_results) if full_results and dropout_rates: dropout_rate_results = dropout_rates[0] for dr in dropout_rates[1:]: dropout_rate_results.unify(dr) return dropout_rate_results else: return np.mean(dropout_rates) def _check_kwargs_for_full_results_and_sections(self, load_kwargs): if (load_kwargs.get('full_results') and 'sections' not in load_kwargs and len(self.meters) > 1): raise RuntimeError("MeterGroup stats can only return full results" " objects if you specify 'sections' to load. If" " you do not specify periods then the results" " from individual meters are likely to be for" " different periods and hence" " cannot be unified.") def good_sections(self, **kwargs): """Returns good sections for just the first meter. TODO: combine good sections from every meter. """ if self.meters: if len(self.meters) > 1: warn("As a quick implementation we only get Good Sections from" " the first meter in the meter group. We should really" " return the intersection of the good sections for all" " meters. This will be fixed...") return self.meters[0].good_sections(**kwargs) else: return [] def dataframe_of_meters(self, **kwargs): """ Parameters ---------- sample_period : int or float, optional Number of seconds to use as sample period when reindexing meters. If not specified then will use the max of all meters' sample_periods. resample : bool, defaults to True If True then resample to `sample_period`. **kwargs : any other key word arguments to pass to `self.store.load()` including: ac_type : string, defaults to 'best' physical_quantity: string, defaults to 'power' Returns ------- DataFrame Each column is a meter. 
""" kwargs.setdefault('sample_period', self.sample_period()) kwargs.setdefault('ac_type', 'best') kwargs.setdefault('physical_quantity', 'power') identifiers, generators = self._meter_generators(**kwargs) segments = [] while True: chunks = [] ids = [] for meter_id, generator in zip(identifiers, generators): try: chunk_from_next_meter = next(generator) except StopIteration: continue if not chunk_from_next_meter.empty: ids.append(meter_id) chunks.append(chunk_from_next_meter.sum(axis=1)) if chunks: df = pd.concat(chunks, axis=1) df.columns = ids segments.append(df) else: break if segments: return pd.concat(segments) else: return pd.DataFrame(columns=self.identifier.meters) def entropy_per_meter(self): """Finds the entropy of each meter in this MeterGroup. Returns ------- pd.Series of entropy """ return self.call_method_on_all_meters('entropy') def call_method_on_all_meters(self, method): """Calls `method` on each element in `self.meters`. Parameters ---------- method : str Name of a stats method in `ElecMeter`. e.g. 'correlation'. Returns ------- pd.Series of result of `method` called on each element in `self.meters`. """ meter_identifiers = list(self.identifier.meters) result = pd.Series(index=meter_identifiers) for meter in self.meters: id_meter = meter.identifier result[id_meter] = getattr(meter, method)() return result def pairwise(self, method): """ Calls `method` on all pairs in `self.meters`. Assumes `method` is symmetrical. Parameters ---------- method : str Name of a stats method in `ElecMeter`. e.g. 'correlation'. Returns ------- pd.DataFrame of the result of `method` called on each pair in `self.meters`. """ meter_identifiers = list(self.identifier.meters) result = pd.DataFrame(index=meter_identifiers, columns=meter_identifiers) for i, m_i in enumerate(self.meters): for j, m_j in enumerate(self.meters): id_i = m_i.identifier id_j = m_j.identifier if i > j: result[id_i][id_j] = result[id_j][id_i] else: result[id_i][id_j] = getattr(m_i, method)(m_j) return result def pairwise_mutual_information(self): """ Finds the pairwise mutual information among different meters in a MeterGroup. Returns ------- pd.DataFrame of mutual information between pair of ElecMeters. """ return self.pairwise('mutual_information') def pairwise_correlation(self): """ Finds the pairwise correlation among different meters in a MeterGroup. Returns ------- pd.DataFrame of correlation between pair of ElecMeters. """ return self.pairwise('correlation') def proportion_of_energy_submetered(self, **loader_kwargs): """ Returns ------- float [0,1] or NaN if mains total_energy == 0 """ print("Running MeterGroup.proportion_of_energy_submetered...") mains = self.mains() downstream_meters = self.meters_directly_downstream_of_mains() proportion = 0.0 verbose = loader_kwargs.get('verbose') all_nan = True for m in downstream_meters.meters: if verbose: print("Calculating proportion for", m) prop = m.proportion_of_energy(mains, **loader_kwargs) if not np.isnan(prop): proportion += prop all_nan = False if verbose: print(" {:.2%}".format(prop)) if all_nan: proportion = np.NaN return proportion def available_ac_types(self, physical_quantity): """Returns set of all available alternating current types for a specific physical quantity. Parameters ---------- physical_quantity : str or list of strings Returns ------- list of strings e.g. 
['apparent', 'active'] """ all_ac_types = [meter.available_ac_types(physical_quantity) for meter in self.meters] return list(set(flatten_2d_list(all_ac_types))) def available_physical_quantities(self): """ Returns ------- list of strings e.g. ['power', 'energy'] """ all_physical_quants = [meter.available_physical_quantities() for meter in self.meters] return list(set(flatten_2d_list(all_physical_quants))) def energy_per_meter(self, per_period=None, mains=None, use_meter_labels=False, **load_kwargs): """Returns pd.DataFrame where columns is meter.identifier and each value is total energy. Index is AC types. Does not care about wiring hierarchy. Does not attempt to ensure all channels share the same time sections. Parameters ---------- per_period : None or offset alias If None then returns absolute energy used per meter. If a Pandas offset alias (e.g. 'D' for 'daily') then will return the average energy per period. ac_type : None or str e.g. 'active' or 'best'. Defaults to 'best'. use_meter_labels : bool If True then columns will be human-friendly meter labels. If False then columns will be ElecMeterIDs or MeterGroupIDs mains : None or MeterGroup or ElecMeter If None then will return DataFrame without remainder. If not None then will return a Series including a 'remainder' row which will be `mains.total_energy() - energy_per_meter.sum()` and an attempt will be made to use the correct AC_TYPE. Returns ------- pd.DataFrame if mains is None else a pd.Series """ meter_identifiers = list(self.identifier.meters) energy_per_meter = pd.DataFrame(columns=meter_identifiers, index=AC_TYPES) n_meters = len(self.meters) load_kwargs.setdefault('ac_type', 'best') for i, meter in enumerate(self.meters): print('\r{:d}/{:d} {}'.format(i+1, n_meters, meter), end='') stdout.flush() if per_period is None: meter_energy = meter.total_energy(**load_kwargs) else: load_kwargs.setdefault('use_uptime', False) meter_energy = meter.average_energy_per_period( offset_alias=per_period, **load_kwargs) energy_per_meter[meter.identifier] = meter_energy energy_per_meters = energy_per_meter.dropna(how='all') if use_meter_labels: energy_per_meter.columns = self.get_labels(energy_per_meter.columns) if mains is not None: energy_per_meter = self._energy_per_meter_with_remainder( energy_per_meter, mains, per_period, **load_kwargs) return energy_per_meter def _energy_per_meter_with_remainder(self, energy_per_meter, mains, per_period, **kwargs): ac_types = energy_per_meter.keys() energy_per_meter = energy_per_meter.sum() # Collapse AC_TYPEs into Series # Find most common ac_type in energy_per_meter: most_common_ac_type = most_common(ac_types) mains_ac_types = mains.available_ac_types( ['power', 'energy', 'cumulative energy']) if most_common_ac_type in mains_ac_types: mains_ac_type = most_common_ac_type else: mains_ac_type = 'best' # Get mains energy_per_meter kwargs['ac_type'] = mains_ac_type if per_period is None: mains_energy = mains.total_energy(**kwargs) else: mains_energy = mains.average_energy_per_period( offset_alias=per_period, **kwargs) mains_energy = mains_energy[mains_energy.keys()[0]] # Calculate remainder energy_per_meter['Remainder'] = mains_energy - energy_per_meter.sum() energy_per_meter.sort(ascending=False) return energy_per_meter def fraction_per_meter(self, **load_kwargs): """Fraction of energy per meter. Return pd.Series. Index is meter.instance. Each value is a float in the range [0,1]. 
""" energy_per_meter = self.energy_per_meter(**load_kwargs).max() total_energy = energy_per_meter.sum() return energy_per_meter / total_energy def proportion_of_upstream_total_per_meter(self, **load_kwargs): prop_per_meter = pd.Series(index=self.identifier.meters) n_meters = len(self.meters) for i, meter in enumerate(self.meters): proportion = meter.proportion_of_upstream(**load_kwargs) print('\r{:d}/{:d} {} = {:.3f}' .format(i+1, n_meters, meter, proportion), end='') stdout.flush() prop_per_meter[meter.identifier] = proportion prop_per_meter.sort(ascending=False) return prop_per_meter def train_test_split(self, train_fraction=0.5): """ Parameters ---------- train_fraction Returns ------- split_time: pd.Timestamp where split should happen """ assert( 0 < train_fraction < 1), "`train_fraction` should be between 0 and 1" # TODO: currently just works with the first mains meter, assuming # both to be simultaneosly sampled mains = self.mains() good_sections = self.mains().good_sections() sample_period = mains.device['sample_period'] appx_num_records_in_each_good_section = [ int((ts.end - ts.start).total_seconds() / sample_period) for ts in good_sections] appx_total_records = sum(appx_num_records_in_each_good_section) records_in_train = appx_total_records * train_fraction seconds_in_train = int(records_in_train * sample_period) if len(good_sections) == 1: # all data is contained in one good section split_point = good_sections[ 0].start + timedelta(seconds=seconds_in_train) return split_point else: # data is split across multiple time deltas records_remaining = records_in_train while records_remaining: for i, records_in_section in enumerate(appx_num_records_in_each_good_section): if records_remaining > records_in_section: records_remaining -= records_in_section elif records_remaining == records_in_section: # Next TimeFrame is the split point!! split_point = good_sections[i + 1].start return split_point else: # Need to split this timeframe split_point = good_sections[ i].start + timedelta(seconds=sample_period * records_remaining) return split_point ################## FUNCTIONS NOT YET IMPLEMENTED ################### # def init_new_dataset(self): # self.infer_and_set_meter_connections() # self.infer_and_set_dual_supply_appliances() # def infer_and_set_meter_connections(self): # """ # Arguments # --------- # meters : list of Meter objects # """ # Maybe this should be a stand-alone function which # takes a list of meters??? # raise NotImplementedError # def infer_and_set_dual_supply_appliances(self): # raise NotImplementedError # def total_on_duration(self): # """Return timedelta""" # raise NotImplementedError # def on_durations(self): # self.get_unique_upstream_meters() # for each meter, get the on time, # assuming the on-power-threshold for the # smallest appliance connected to that meter??? # raise NotImplementedError # def activity_distribution(self, bin_size, timespan): # raise NotImplementedError # def on_off_events(self, minimum_state_duration): # raise NotImplementedError def select_top_k(self, k=5, by="energy", asc=False, group_remainder=False, **kwargs): """Only select the top K meters, according to energy. Functions on the entire MeterGroup. So if you mean to select the top K from only the submeters, please do something like this: elec.submeters().select_top_k() Parameters ---------- k : int, optional, defaults to 5 by: string, optional, defaults to energy Can select top k by: * energy * entropy asc: bool, optional, defaults to False By default top_k is in descending order. 
To select top_k by ascending order, use asc=True group_remainder : bool, optional, defaults to False If True then place all remaining meters into a nested metergroup. **kwargs : key word arguments to pass to load() Returns ------- MeterGroup """ function_map = {'energy': self.fraction_per_meter, 'entropy': self.entropy_per_meter} top_k_series = function_map[by](**kwargs) top_k_series.sort(ascending=asc) top_k_elec_meter_ids = top_k_series[:k].index top_k_metergroup = self.from_list(top_k_elec_meter_ids) if group_remainder: remainder_ids = top_k_series[k:].index remainder_metergroup = self.from_list(remainder_ids) remainder_metergroup.name = 'others' top_k_metergroup.meters.append(remainder_metergroup) return top_k_metergroup def groupby(self, key, use_appliance_metadata=True, **kwargs): """ e.g. groupby('category') Returns ------- MeterGroup of nested MeterGroups: one per group """ if not use_appliance_metadata: raise NotImplementedError() values = self.values_for_appliance_metadata_key(key) groups = [] for value in values: group = self.select_using_appliances(**{key: value}) group.name = value groups.append(group) return MeterGroup(groups) def get_timeframe(self): """ Returns ------- nilmtk.TimeFrame representing the timeframe which is the union of all meters in self.meters. """ timeframe = None for meter in self.meters: if timeframe is None: timeframe = meter.get_timeframe() elif meter.get_timeframe().empty: pass else: timeframe = timeframe.union(meter.get_timeframe()) return timeframe def plot(self, kind='separate lines', **kwargs): """ Parameters ---------- width : int, optional Number of points on the x axis required ax : matplotlib.axes, optional plot_legend : boolean, optional Defaults to True. Set to False to not plot legend. kind : {'separate lines', 'sum', 'area', 'snakey', 'energy bar'} timeframe : nilmtk.TimeFrame, optional Defaults to self.get_timeframe() """ # Load data and plot each meter function_map = { 'separate lines': self._plot_separate_lines, 'sum': super(MeterGroup, self).plot, 'area': self._plot_area, 'sankey': self._plot_sankey, 'energy bar': self._plot_energy_bar } try: ax = function_map[kind](**kwargs) except KeyError: raise ValueError("'{}' not a valid setting for 'kind' parameter." 
.format(kind)) return ax def _plot_separate_lines(self, ax=None, plot_legend=True, **kwargs): for meter in self.meters: if isinstance(meter, MeterGroup): ax = meter.plot(ax=ax, plot_legend=False, kind='sum', **kwargs) else: ax = meter.plot(ax=ax, plot_legend=False, **kwargs) if plot_legend: plt.legend() return ax def _plot_sankey(self): graph = self.wiring_graph() meter_labels = {meter: meter.instance() for meter in graph.nodes()} pos = nx.graphviz_layout(graph, prog='dot') #nx.draw(graph, pos, labels=meter_labels, arrows=False) meter_labels = {meter: meter.label() for meter in graph.nodes()} for meter, name in meter_labels.iteritems(): x, y = pos[meter] if meter.is_site_meter(): delta_y = 5 else: delta_y = -5 plt.text(x, y+delta_y, s=name, bbox=dict(facecolor='red', alpha=0.5), horizontalalignment='center') if not meter.is_site_meter(): upstream_meter = meter.upstream_meter() proportion_of_upstream = meter.proportion_of_upstream() print(meter.instance(), upstream_meter.instance(), proportion_of_upstream) graph[upstream_meter][meter]["weight"] = proportion_of_upstream*10 graph[upstream_meter][meter]["color"] = "blue" nx.draw(graph, pos, labels=meter_labels, arrows=False) def _plot_area(self, ax=None, timeframe=None, pretty_labels=True, unit='W', label_kwargs=None, plot_kwargs=None, threshold=None, **load_kwargs): """ Parameters ---------- plot_kwargs : dict of key word arguments for DataFrame.plot() unit : {kW or W} threshold : float or None if set to a float then any measured value under this threshold will be set to 0. Returns ------- ax, dataframe """ # Get start and end times for the plot timeframe = self.get_timeframe() if timeframe is None else timeframe if not timeframe: return ax load_kwargs['sections'] = [timeframe] load_kwargs = self._set_sample_period(timeframe, **load_kwargs) df = self.dataframe_of_meters(**load_kwargs) if threshold is not None: df[df <= threshold] = 0 if unit == 'kW': df /= 1000 if plot_kwargs is None: plot_kwargs = {} df.columns = self.get_labels(df.columns, pretty=pretty_labels) # Set a tiny linewidth otherwise we get lines even if power is zero # and this looks ugly when drawn above other lines. plot_kwargs.setdefault('linewidth', 0.0001) ax = df.plot(kind='area', **plot_kwargs) ax.set_ylabel("Power ({:s})".format(unit)) return ax, df def plot_when_on(self, **load_kwargs): meter_identifiers = list(self.identifier.meters) fig, ax = plt.subplots() for i, meter in enumerate(self.meters): id_meter = meter.identifier for chunk_when_on in meter.when_on(**load_kwargs): series_to_plot = chunk_when_on[chunk_when_on==True] if len(series_to_plot.index): (series_to_plot+i-1).plot(ax=ax, style='k.') labels = self.get_labels(meter_identifiers) plt.yticks(range(len(self.meters)), labels) plt.ylim((-0.5, len(self.meters)+0.5)) return ax def plot_good_sections(self, ax=None, label_func='instance', include_disabled_meters=True, load_kwargs=None, **plot_kwargs): """ Parameters ---------- label_func : str or None e.g. 'instance' (default) or 'label' if None then no labels will be produced. 
include_disabled_meters : bool """ if ax is None: ax = plt.gca() if load_kwargs is None: load_kwargs = {} # Prepare list of meters if include_disabled_meters: meters = self.all_meters() else: meters = self.meters meters = copy(meters) meters.sort(key=meter_sorting_key, reverse=True) n = len(meters) labels = [] for i, meter in enumerate(meters): good_sections = meter.good_sections(**load_kwargs) ax = good_sections.plot(ax=ax, y=i, **plot_kwargs) del good_sections if label_func: labels.append(getattr(meter, label_func)()) # Just end numbers if label_func is None: labels = [n] + ([''] * (n-1)) # Y tick formatting ax.set_yticks(np.arange(0, n) + 0.5) def y_formatter(y, pos): try: label = labels[int(y)] except IndexError: label = '' return label ax.yaxis.set_major_formatter(FuncFormatter(y_formatter)) ax.set_ylim([0, n]) return ax def _plot_energy_bar(self, ax=None, mains=None): """Plot a stacked bar of the energy per meter, in order. Parameters ---------- ax : matplotlib axes mains : MeterGroup or ElecMeter, optional Used to calculate Remainder. Returns ------- ax """ energy = self.energy_per_meter(mains=mains, per_period='D', use_meter_labels=True) energy.sort(ascending=False) # Plot ax = pd.DataFrame(energy).T.plot(kind='bar', stacked=True, grid=True, edgecolor="none", legend=False, width=2) ax.set_xticks([]) ax.set_ylabel('kWh\nper\nday', rotation=0, ha='center', va='center', labelpad=15) cumsum = energy.cumsum() text_ys = cumsum - (cumsum.diff().fillna(energy['Remainder']) / 2) for kwh, (label, y) in zip(energy.values, text_ys.iteritems()): label += " ({:.2f})".format(kwh) ax.annotate(label, (0, y), color='white', size=8, horizontalalignment='center', verticalalignment='center') return ax def plot_multiple(self, axes, meter_keys, plot_func, kwargs_per_meter=None, pretty_label=True, **kwargs): """Create multiple subplots. Parameters ----------- axes : list of matplotlib axes objects. e.g. created using `fix, axes = plt.subplots()` meter_keys : list of keys for identifying ElecMeters or MeterGroups. e.g. ['fridge', 'kettle', 4, MeterGroupID, ElecMeterID]. Each element is anything that MeterGroup.__getitem__() accepts. plot_func : string Name of function from ElecMeter or Electric or MeterGroup e.g. `plot_power_histogram` kwargs_per_meter : dict Provide key word arguments for the plot_func for each meter. each key is a parameter name for plot_func each value is a list (same length as `meters`) for specifying a value for this parameter for each meter. e.g. {'range': [(0,100), (0,200)]} pretty_label : bool **kwargs : any key word arguments to pass the same values to the plot func for every meter. Returns ------- axes (flattened into a 1D list) """ axes = flatten_2d_list(axes) if len(axes) != len(meter_keys): raise ValueError("`axes` and `meters` must be of equal length.") if kwargs_per_meter is None: kwargs_per_meter = {} meters = [self[meter_key] for meter_key in meter_keys] for i, (ax, meter) in enumerate(zip(axes, meters)): kwargs_copy = deepcopy(kwargs) for parameter, arguments in kwargs_per_meter.iteritems(): kwargs_copy[parameter] = arguments[i] getattr(meter, plot_func)(ax=ax, **kwargs_copy) ax.set_title(meter.label(pretty=pretty_label)) return axes def sort_meters(self): """Sorts meters by instance.""" self.meters.sort(key=meter_sorting_key) def label(self, **kwargs): """ Returns ------- string : A label listing all the appliance types. 
""" if self.name: label = self.name if kwargs.get('pretty'): label = capitalise_first_letter(label) return label return ", ".join(set([meter.label(**kwargs) for meter in self.meters])) def clear_cache(self): """Clear cache on all meters in this MeterGroup.""" for meter in self.meters: meter.clear_cache() def correlation_of_sum_of_submeters_with_mains(self, **load_kwargs): print("Running MeterGroup.correlation_of_sum_of_submeters_with_mains...") submeters = self.meters_directly_downstream_of_mains() return self.mains().correlation(submeters, **load_kwargs) def all_meters(self): """Returns a list of self.meters + self.disabled_meters.""" return self.meters + self.disabled_meters def describe(self, compute_expensive_stats=True, **kwargs): """Returns pd.Series describing this MeterGroup.""" series = pd.Series() all_meters = self.all_meters() series['total_n_meters'] = len(all_meters) site_meters = [m for m in all_meters if m.is_site_meter()] series['total_n_site_meters'] = len(site_meters) if compute_expensive_stats: series['correlation_of_sum_of_submeters_with_mains'] = ( self.correlation_of_sum_of_submeters_with_mains(**kwargs)) series['proportion_of_energy_submetered'] = ( self.proportion_of_energy_submetered(**kwargs)) dropout_rates = self._collect_stats_on_all_meters( kwargs, 'dropout_rate', False) dropout_rates = np.array(dropout_rates) series['dropout_rates_ignoring_gaps'] = ( "min={}, mean={}, max={}".format( dropout_rates.min(), dropout_rates.mean(), dropout_rates.max())) series['mains_sample_period'] = self.mains().sample_period() series['submeter_sample_period'] = self.submeters().sample_period() timeframe = self.get_timeframe() series['timeframe'] = "start={}, end={}".format(timeframe.start, timeframe.end) series['total_duration'] = str(timeframe.timedelta) mains_uptime = self.mains().uptime(**kwargs) series['mains_uptime'] = str(mains_uptime) try: series['proportion_uptime'] = (mains_uptime.total_seconds() / timeframe.timedelta.total_seconds()) except ZeroDivisionError: series['proportion_uptime'] = np.NaN series['average_mains_energy_per_day'] = self.mains().average_energy_per_period() return series def replace_dataset(identifier, dataset): """ Parameters ---------- identifier : ElecMeterID or MeterGroupID Returns ------- ElecMeterID or MeterGroupID with dataset replaced with `dataset` """ if isinstance(identifier, MeterGroupID): new_meter_ids = [replace_dataset(id, dataset) for id in identifier.meters] new_id = MeterGroupID(meters=tuple(new_meter_ids)) elif isinstance(identifier, ElecMeterID): new_id = identifier._replace(dataset=dataset) else: raise TypeError() return new_id def iterate_through_submeters_of_two_metergroups(master, slave): """ Parameters ---------- master, slave : MeterGroup Returns ------- list of 2-tuples of the form (`master_meter`, `slave_meter`) """ zipped = [] for master_meter in master.submeters().meters: slave_identifier = replace_dataset(master_meter.identifier, slave.dataset()) slave_meter = slave[slave_identifier] zipped.append((master_meter, slave_meter)) return zipped def combine_chunks_from_generators(index, columns, meters, kwargs): """Combines chunks into a single DataFrame. Adds or averages columns, depending on whether each column is in PHYSICAL_QUANTITIES_TO_AVERAGE. Returns ------- DataFrame """ # Regarding columns (e.g. 
voltage) that we need to average: # The approach is that we first add everything together # in the first for-loop, whilst also keeping a # `columns_to_average_counter` DataFrame # which tells us what to divide by in order to compute the # mean for PHYSICAL_QUANTITIES_TO_AVERAGE. # Regarding doing an in-place addition: # We convert out cumulator dataframe to a numpy matrix. # This allows us to use np.add to do an in-place add. # If we didn't do this then we'd get horrible memory fragmentation. # See http://stackoverflow.com/a/27526721/732596 DTYPE = np.float32 cumulator = pd.DataFrame(np.NaN, index=index, columns=columns, dtype=DTYPE) cumulator_arr = cumulator.as_matrix() columns_to_average_counter = pd.DataFrame(dtype=np.uint16) timeframe = None # Go through each generator to try sum values together for meter in meters: print_on_line("\rLoading data for meter", meter.identifier, " ") kwargs_copy = deepcopy(kwargs) generator = meter.load(**kwargs_copy) try: chunk_from_next_meter = generator.next() except StopIteration: continue del generator del kwargs_copy gc.collect() if chunk_from_next_meter.empty or not chunk_from_next_meter.timeframe: continue if timeframe is None: timeframe = chunk_from_next_meter.timeframe else: timeframe = timeframe.union(chunk_from_next_meter.timeframe) # Add (in-place) for i, column_name in enumerate(columns): try: column = chunk_from_next_meter[column_name] except KeyError: continue aligned = column.reindex(index, copy=False).values del column cumulator_col = cumulator_arr[:,i] where_both_are_nan = np.isnan(cumulator_col) & np.isnan(aligned) np.nansum([cumulator_col, aligned], axis=0, out=cumulator_col, dtype=DTYPE) cumulator_col[where_both_are_nan] = np.NaN del aligned del where_both_are_nan gc.collect() # Update columns_to_average_counter - this is necessary so we do not # add up columns like 'voltage' which should be averaged. physical_quantities = chunk_from_next_meter.columns.get_level_values('physical_quantity') columns_to_average = (set(PHYSICAL_QUANTITIES_TO_AVERAGE) .intersection(physical_quantities)) if columns_to_average: counter_increment = pd.DataFrame(1, columns=columns_to_average, dtype=np.uint16, index=chunk_from_next_meter.index) columns_to_average_counter = columns_to_average_counter.add( counter_increment, fill_value=0) del counter_increment del chunk_from_next_meter gc.collect() del cumulator_arr gc.collect() # Create mean values by dividing any columns which need dividing for column in columns_to_average_counter: cumulator[column] /= columns_to_average_counter[column] del columns_to_average_counter gc.collect() print() print("Done loading data all meters for this chunk.") cumulator.timeframe = timeframe return cumulator meter_sorting_key = lambda meter: meter.instance()
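# Illustrative sketch (not part of nilmtk): combine_chunks_from_generators()
# above adds each chunk into the pre-allocated cumulator with
# np.nansum(..., out=...) and then restores NaN wherever *both* inputs were
# NaN, so data gaps survive the summation.  The same trick in isolation:
def _nansum_accumulate_demo():
    cumulator = np.array([1.0, np.NaN, np.NaN, 4.0], dtype=np.float32)
    chunk = np.array([2.0, 3.0, np.NaN, np.NaN], dtype=np.float32)
    both_nan = np.isnan(cumulator) & np.isnan(chunk)
    np.nansum([cumulator, chunk], axis=0, out=cumulator, dtype=np.float32)
    cumulator[both_nan] = np.NaN
    return cumulator  # array([3., 3., nan, 4.], dtype=float32)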
apache-2.0
akrherz/iem
htdocs/plotting/auto/scripts100/p120.py
1
5184
"""last spring temp""" import datetime from pandas.io.sql import read_sql import pandas as pd import matplotlib.dates as mdates from pyiem.plot import figure_axes from pyiem.util import get_autoplot_context, get_dbconn from pyiem.exceptions import NoDataFound def get_description(): """ Return a dict describing how to call this plotter """ desc = dict() desc["data"] = True desc["report"] = True desc[ "description" ] = """This chart presents the accumulated frequency of having the last spring temperature at or below a given threshold.""" desc["arguments"] = [ dict( type="station", name="station", default="IATDSM", label="Select Station", network="IACLIMATE", ), dict(type="int", name="t1", default=32, label="First Threshold (F)"), dict(type="int", name="t2", default=28, label="Second Threshold (F)"), dict(type="int", name="t3", default=26, label="Third Threshold (F)"), dict(type="int", name="t4", default=22, label="Fourth Threshold (F)"), dict( type="year", name="syear", min=1880, label="Potential (if data exists) minimum year", default=1880, ), dict( type="year", name="eyear", min=1880, label="Potential (if data exists) exclusive maximum year", default=datetime.date.today().year, ), ] return desc def plotter(fdict): """ Go """ pgconn = get_dbconn("coop") ctx = get_autoplot_context(fdict, get_description()) station = ctx["station"] thresholds = [ctx["t1"], ctx["t2"], ctx["t3"], ctx["t4"]] table = "alldata_%s" % (station[:2],) # Load up dict of dates.. df = pd.DataFrame( { "dates": pd.date_range("2000/01/29", "2000/06/30"), "%scnts" % (thresholds[0],): 0, "%scnts" % (thresholds[1],): 0, "%scnts" % (thresholds[2],): 0, "%scnts" % (thresholds[3],): 0, }, index=range(29, 183), ) df.index.name = "doy" for base in thresholds: # Query Last doy for each year in archive df2 = read_sql( f""" select year, max(case when low <= %s then extract(doy from day) else 0 end) as doy from {table} WHERE month < 7 and station = %s and year > %s and year < %s GROUP by year """, pgconn, params=(base, station, ctx["syear"], ctx["eyear"]), index_col=None, ) for _, row in df2.iterrows(): if row["doy"] == 0: continue df.loc[0 : row["doy"], "%scnts" % (base,)] += 1 df["%sfreq" % (base,)] = ( df["%scnts" % (base,)] / len(df2.index) * 100.0 ) bs = ctx["_nt"].sts[station]["archive_begin"] if bs is None: raise NoDataFound("No metadata found.") res = """\ # IEM Climodat https://mesonet.agron.iastate.edu/climodat/ # Report Generated: %s # Climate Record: %s -> %s # Site Information: [%s] %s # Contact Information: Daryl Herzmann akrherz@iastate.edu 515.294.5978 # Low Temperature exceedence probabilities # (On a certain date, what is the chance a temperature below a certain # threshold would be observed again that spring season) DOY Date <%s <%s <%s <%s """ % ( datetime.date.today().strftime("%d %b %Y"), max([bs.date(), datetime.date(ctx["syear"], 1, 1)]), min([datetime.date.today(), datetime.date(ctx["eyear"] - 1, 12, 31)]), station, ctx["_nt"].sts[station]["name"], thresholds[0] + 1, thresholds[1] + 1, thresholds[2] + 1, thresholds[3] + 1, ) fcols = ["%sfreq" % (s,) for s in thresholds] mindate = None for doy, row in df.iterrows(): if doy % 2 != 0: continue if row[fcols[3]] < 100 and mindate is None: mindate = row["dates"] - datetime.timedelta(days=5) res += (" %3s %s %3i %3i %3i %3i\n") % ( row["dates"].strftime("%-j"), row["dates"].strftime("%b %d"), row[fcols[0]], row[fcols[1]], row[fcols[2]], row[fcols[3]], ) title = "Frequency of Last Spring Temperature" subtitle = "%s %s (%s-%s)" % ( station, 
ctx["_nt"].sts[station]["name"], max([bs.date(), datetime.date(ctx["syear"], 1, 1)]), min([datetime.date.today(), datetime.date(ctx["eyear"] - 1, 12, 31)]), ) (fig, ax) = figure_axes(title=title, subtitle=subtitle) for base in thresholds: ax.plot( df["dates"].values, df["%sfreq" % (base,)], label="%s" % (base,), lw=2, ) ax.legend(loc="best") ax.set_xlim(mindate) ax.xaxis.set_major_locator(mdates.DayLocator([1, 7, 14, 21])) ax.xaxis.set_major_formatter(mdates.DateFormatter("%-d\n%b")) ax.grid(True) df.reset_index(inplace=True) return fig, df, res if __name__ == "__main__": plotter(dict())
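# Hedged, database-free sketch of the core calculation in plotter() above:
# given one hypothetical "last day-of-year at or below the threshold" per
# year, count how many years are still waiting for that last cold day on
# each date, then express the counts as a percentage frequency.
def _frequency_sketch():
    last_doy_by_year = pd.Series({1990 + i: 80 + (i * 7) % 60 for i in range(30)})
    counts = pd.Series(0, index=pd.Index(range(29, 183), name="doy"))
    for doy in last_doy_by_year:
        counts.loc[:doy] += 1
    return counts / len(last_doy_by_year) * 100.0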
mit
meduz/scikit-learn
examples/linear_model/plot_ols_3d.py
350
2040
#!/usr/bin/python # -*- coding: utf-8 -*- """ ========================================================= Sparsity Example: Fitting only features 1 and 2 ========================================================= Features 1 and 2 of the diabetes-dataset are fitted and plotted below. It illustrates that although feature 2 has a strong coefficient on the full model, it does not give us much regarding `y` when compared to just feature 1 """ print(__doc__) # Code source: Gaël Varoquaux # Modified for documentation by Jaques Grobler # License: BSD 3 clause import matplotlib.pyplot as plt import numpy as np from mpl_toolkits.mplot3d import Axes3D from sklearn import datasets, linear_model diabetes = datasets.load_diabetes() indices = (0, 1) X_train = diabetes.data[:-20, indices] X_test = diabetes.data[-20:, indices] y_train = diabetes.target[:-20] y_test = diabetes.target[-20:] ols = linear_model.LinearRegression() ols.fit(X_train, y_train) ############################################################################### # Plot the figure def plot_figs(fig_num, elev, azim, X_train, clf): fig = plt.figure(fig_num, figsize=(4, 3)) plt.clf() ax = Axes3D(fig, elev=elev, azim=azim) ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+') ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]), np.array([[-.1, .15], [-.1, .15]]), clf.predict(np.array([[-.1, -.1, .15, .15], [-.1, .15, -.1, .15]]).T ).reshape((2, 2)), alpha=.5) ax.set_xlabel('X_1') ax.set_ylabel('X_2') ax.set_zlabel('Y') ax.w_xaxis.set_ticklabels([]) ax.w_yaxis.set_ticklabels([]) ax.w_zaxis.set_ticklabels([]) #Generate the three different figures from different views elev = 43.5 azim = -110 plot_figs(1, elev, azim, X_train, ols) elev = -.5 azim = 0 plot_figs(2, elev, azim, X_train, ols) elev = -.5 azim = 90 plot_figs(3, elev, azim, X_train, ols) plt.show()
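# A short, hedged follow-up (not part of the original example): the same
# sparsity point can be made numerically by comparing held-out R^2 for each
# feature on its own against the two-feature fit.
from sklearn.metrics import r2_score

for cols, name in [([0], "feature 1 only"),
                   ([1], "feature 2 only"),
                   ([0, 1], "features 1 and 2")]:
    model = linear_model.LinearRegression()
    model.fit(diabetes.data[:-20, cols], y_train)
    pred = model.predict(diabetes.data[-20:, cols])
    print("%s: R^2 = %.3f" % (name, r2_score(y_test, pred)))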
bsd-3-clause
sysid/kg
quora/Ensemble_CNN_TD_Quora.py
1
12948
# coding: utf-8 # In[1]: import pandas as pd import numpy as np import nltk from nltk.corpus import stopwords from nltk.stem import SnowballStemmer import re from sklearn.metrics import accuracy_score import matplotlib.pyplot as plt # In[2]: train = pd.read_csv("../input/train.csv") test = pd.read_csv("../input/test.csv") # In[3]: train.head() # In[4]: test.head() # In[5]: print(train.shape) print(test.shape) # In[6]: print(train.isnull().sum()) print(test.isnull().sum()) # In[7]: train = train.fillna('empty') test = test.fillna('empty') # In[8]: print(train.isnull().sum()) print(test.isnull().sum()) # In[9]: test.head() # In[10]: for i in range(6): print(train.question1[i]) print(train.question2[i]) print() # In[17]: def text_to_wordlist(text, remove_stopwords=False, stem_words=False): # Clean the text, with the option to remove stopwords and to stem words. # Convert words to lower case and split them text = text.lower().split() # Optionally remove stop words (true by default) if remove_stopwords: stops = set(stopwords.words("english")) text = [w for w in text if not w in stops] text = " ".join(text) # Clean the text text = re.sub(r"[^A-Za-z0-9^,!.\'+-=]", " ", text) text = re.sub(r"\'s", " 's ", text) text = re.sub(r"\'ve", " have ", text) text = re.sub(r"can't", " cannot ", text) text = re.sub(r"n't", " not ", text) text = re.sub(r"\'re", " are ", text) text = re.sub(r"\'d", " would ", text) text = re.sub(r"\'ll", " will ", text) text = re.sub(r",", " ", text) text = re.sub(r"\.", " ", text) text = re.sub(r"!", " ! ", text) text = re.sub(r"\^", " ^ ", text) text = re.sub(r"\+", " + ", text) text = re.sub(r"\-", " - ", text) text = re.sub(r"\=", " = ", text) text = re.sub(r"\s{2,}", " ", text) # Shorten words to their stems if stem_words: text = text.split() stemmer = SnowballStemmer('english') stemmed_words = [stemmer.stem(word) for word in text] text = " ".join(stemmed_words) # Return a list of words return(text) # In[18]: def process_questions(question_list, questions, question_list_name, dataframe): # function to transform questions and display progress for question in questions: question_list.append(text_to_wordlist(question)) if len(question_list) % 100000 == 0: progress = len(question_list)/len(dataframe) * 100 print("{} is {}% complete.".format(question_list_name, round(progress, 1))) # In[19]: train_question1 = [] process_questions(train_question1, train.question1, 'train_question1', train) # In[35]: train_question2 = [] process_questions(train_question2, train.question2, 'train_question2', train) # In[36]: test_question1 = [] process_questions(test_question1, test.question1, 'test_question1', test) # In[37]: test_question2 = [] process_questions(test_question2, test.question2, 'test_question2', test) # # Using Keras # In[38]: from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences import datetime, time, json from keras.models import Sequential from keras.layers import Embedding, Dense, Dropout, Reshape, Merge, BatchNormalization, TimeDistributed, Lambda, Activation, LSTM, Flatten, Bidirectional, Convolution1D, GRU, MaxPooling1D, Convolution2D from keras.regularizers import l2 from keras.callbacks import Callback, ModelCheckpoint, EarlyStopping from keras import backend as K from sklearn.model_selection import train_test_split from keras.optimizers import SGD from collections import defaultdict # In[39]: # Count the number of different words in the reviews word_count = defaultdict(int) for question in train_question1: 
word_count[question] += 1 print("train_question1 is complete.") for question in train_question2: word_count[question] += 1 print("train_question2 is complete") for question in test_question1: word_count[question] += 1 print("test_question1 is complete.") for question in test_question2: word_count[question] += 1 print("test_question2 is complete") print("Total number of unique words:", len(word_count)) # In[40]: # Find the length of questions lengths = [] for question in train_question1: lengths.append(len(question.split())) for question in train_question2: lengths.append(len(question.split())) # Create a dataframe so that the values can be inspected lengths = pd.DataFrame(lengths, columns=['counts']) # In[41]: lengths.counts.describe() # In[42]: np.percentile(lengths.counts, 99.5) # In[43]: num_words = 200000 train_questions = train_question1 + train_question2 tokenizer = Tokenizer(nb_words = num_words) tokenizer.fit_on_texts(train_questions) print("Fitting is compelte.") train_question1_word_sequences = tokenizer.texts_to_sequences(train_question1) print("train_question1 is complete.") train_question2_word_sequences = tokenizer.texts_to_sequences(train_question2) print("train_question2 is complete") # In[44]: test_question1_word_sequences = tokenizer.texts_to_sequences(test_question1) print("test_question1 is complete.") test_question2_word_sequences = tokenizer.texts_to_sequences(test_question2) print("test_question2 is complete.") # In[45]: word_index = tokenizer.word_index print("Words in index: %d" % len(word_index)) # In[46]: # Pad the questions so that they all have the same length. max_question_len = 37 train_q1 = pad_sequences(train_question1_word_sequences, maxlen = max_question_len, padding = 'post', truncating = 'post') print("train_q1 is complete.") train_q2 = pad_sequences(train_question2_word_sequences, maxlen = max_question_len, padding = 'post', truncating = 'post') print("train_q2 is complete.") # In[47]: test_q1 = pad_sequences(test_question1_word_sequences, maxlen = max_question_len, padding = 'post', truncating = 'post') print("test_q1 is complete.") test_q2 = pad_sequences(test_question2_word_sequences, maxlen = max_question_len, padding = 'post', truncating = 'post') print("test_q2 is complete.") # In[48]: y_train = train.is_duplicate # In[49]: # Load GloVe to use pretrained vectors # From this link: https://nlp.stanford.edu/projects/glove/ embeddings_index = {} with open('glove.840B.300d.txt', encoding='utf-8') as f: for line in f: values = line.split(' ') word = values[0] embedding = np.asarray(values[1:], dtype='float32') embeddings_index[word] = embedding print('Word embeddings:', len(embeddings_index)) # In[50]: # Need to use 300 for embedding dimensions to match GloVe vectors. embedding_dim = 300 nb_words = len(word_index) word_embedding_matrix = np.zeros((nb_words + 1, embedding_dim)) for word, i in word_index.items(): embedding_vector = embeddings_index.get(word) if embedding_vector is not None: # words not found in embedding index will be all-zeros. 
word_embedding_matrix[i] = embedding_vector print('Null word embeddings: %d' % np.sum(np.sum(word_embedding_matrix, axis=1) == 0)) # In[66]: units = 150 dropout = 0.25 nb_filter = 32 filter_length = 3 embedding_dim = 300 model1 = Sequential() model1.add(Embedding(nb_words + 1, embedding_dim, weights = [word_embedding_matrix], input_length = max_question_len, trainable = False)) model1.add(Convolution1D(nb_filter = nb_filter, filter_length = filter_length, border_mode = 'same')) model1.add(BatchNormalization()) model1.add(Activation('relu')) model1.add(Dropout(dropout)) model1.add(Convolution1D(nb_filter = nb_filter, filter_length = filter_length, border_mode = 'same')) model1.add(BatchNormalization()) model1.add(Activation('relu')) model1.add(Dropout(dropout)) model1.add(Flatten()) model2 = Sequential() model2.add(Embedding(nb_words + 1, embedding_dim, weights = [word_embedding_matrix], input_length = max_question_len, trainable = False)) model2.add(Convolution1D(nb_filter = nb_filter, filter_length = filter_length, border_mode = 'same')) model2.add(BatchNormalization()) model2.add(Activation('relu')) model2.add(Dropout(dropout)) model2.add(Convolution1D(nb_filter = nb_filter, filter_length = filter_length, border_mode = 'same')) model2.add(BatchNormalization()) model2.add(Activation('relu')) model2.add(Dropout(dropout)) model2.add(Flatten()) model3 = Sequential() model3.add(Embedding(nb_words + 1, embedding_dim, weights = [word_embedding_matrix], input_length = max_question_len, trainable = False)) model3.add(TimeDistributed(Dense(embedding_dim))) model3.add(BatchNormalization()) model3.add(Activation('relu')) model3.add(Dropout(dropout)) model3.add(Lambda(lambda x: K.max(x, axis=1), output_shape=(embedding_dim, ))) model4 = Sequential() model4.add(Embedding(nb_words + 1, embedding_dim, weights = [word_embedding_matrix], input_length = max_question_len, trainable = False)) model4.add(TimeDistributed(Dense(embedding_dim))) model4.add(BatchNormalization()) model4.add(Activation('relu')) model4.add(Dropout(dropout)) model4.add(Lambda(lambda x: K.max(x, axis=1), output_shape=(embedding_dim, ))) modela = Sequential() modela.add(Merge([model1, model2], mode='concat')) modela.add(Dense(units)) modela.add(BatchNormalization()) modela.add(Activation('relu')) modela.add(Dropout(dropout)) modela.add(Dense(units)) modela.add(BatchNormalization()) modela.add(Activation('relu')) modela.add(Dropout(dropout)) modelb = Sequential() modelb.add(Merge([model3, model4], mode='concat')) modelb.add(Dense(units)) modelb.add(BatchNormalization()) modelb.add(Activation('relu')) modelb.add(Dropout(dropout)) modelb.add(Dense(units)) modelb.add(BatchNormalization()) modelb.add(Activation('relu')) modelb.add(Dropout(dropout)) model = Sequential() model.add(Merge([modela, modelb], mode='concat')) model.add(Dense(units)) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Dropout(dropout)) model.add(Dense(units)) model.add(BatchNormalization()) model.add(Activation('relu')) model.add(Dropout(dropout)) model.add(Dense(1)) model.add(BatchNormalization()) model.add(Activation('sigmoid')) #sgd = SGD(lr=0.01, decay=5e-6, momentum=0.9, nesterov=True) model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) # In[67]: save_best_weights = 'question_pairs_weights.h5' t0 = time.time() callbacks = [ModelCheckpoint(save_best_weights, monitor='val_loss', save_best_only=True), EarlyStopping(monitor='val_loss', patience=5, verbose=1, mode='auto')] history = model.fit([train_q1, train_q2], 
y_train, batch_size=200, nb_epoch=100, validation_split=0.1, verbose=True, shuffle=True, callbacks=callbacks) t1 = time.time() print("Minutes elapsed: %f" % ((t1 - t0) / 60.)) # In[68]: summary_stats = pd.DataFrame({'epoch': [ i + 1 for i in history.epoch ], 'train_acc': history.history['acc'], 'valid_acc': history.history['val_acc'], 'train_loss': history.history['loss'], 'valid_loss': history.history['val_loss']}) # In[69]: summary_stats # In[70]: plt.plot(summary_stats.train_loss) plt.plot(summary_stats.valid_loss) plt.show() # In[71]: min_loss, idx = min((loss, idx) for (idx, loss) in enumerate(history.history['val_loss'])) print('Minimum loss at epoch', '{:d}'.format(idx+1), '=', '{:.4f}'.format(min_loss)) min_loss = round(min_loss, 4) # In[72]: model.load_weights(save_best_weights) predictions = model.predict([test_q1, test_q2], verbose = True) # In[73]: #Create submission submission = pd.DataFrame(predictions, columns=['is_duplicate']) submission.insert(0, 'test_id', test.test_id) file_name = 'submission_{}.csv'.format(min_loss) submission.to_csv(file_name, index=False) # In[74]: submission.head(10)
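# In[75]:

# Hedged sanity check (not part of the original notebook): apply the
# text_to_wordlist() cleaner defined near the top of this script to a raw
# question, with and without stemming, to see what the models actually
# receive.  (Stemming uses NLTK's SnowballStemmer, already imported above.)
sample = "What's the best way to learn deep-learning? Can't I just read papers?"
print(text_to_wordlist(sample))
print(text_to_wordlist(sample, stem_words=True))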
mit
CVL-dev/cvl-fabric-launcher
pyinstaller-2.1/PyInstaller/loader/rthooks/pyi_rth_mplconfig.py
10
1430
#----------------------------------------------------------------------------- # Copyright (c) 2013, PyInstaller Development Team. # # Distributed under the terms of the GNU General Public License with exception # for distributing bootloader. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- # matplotlib will create $HOME/.matplotlib folder in user's home directory. # In this directory there is fontList.cache file which lists paths # to matplotlib fonts. # # When you run your onefile exe for the first time it's extracted to for example # "_MEIxxxxx" temp directory and fontList.cache file is created with fonts paths # pointing to this directory. # # Second time you run your exe new directory is created "_MEIyyyyy" but # fontList.cache file still points to previous directory which was deleted. # And then you will get error like: # # RuntimeError: Could not open facefile # # We need to force matplotlib to recreate config directory every time you run # your app. import atexit import os import shutil import tempfile # Put matplot config dir to temp directory. configdir = tempfile.mkdtemp() os.environ['MPLCONFIGDIR'] = configdir try: # Remove temp directory at application exit and ignore any errors. atexit.register(shutil.rmtree, configdir, ignore_errors=True) except OSError: pass
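# Illustrative check only (not part of the runtime hook itself): once this
# module has run, matplotlib should report the fresh temporary directory as
# its config dir, so the fontList cache is rebuilt on every run of the
# frozen application.
if __name__ == '__main__':
    import matplotlib
    print('MPLCONFIGDIR =', os.environ['MPLCONFIGDIR'])
    print('matplotlib.get_configdir() =', matplotlib.get_configdir())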
gpl-3.0
Hiyorimi/scikit-image
skimage/future/graph/rag.py
5
19594
import networkx as nx import numpy as np from numpy.lib.stride_tricks import as_strided from scipy import ndimage as ndi from scipy import sparse import math from ... import measure, segmentation, util, color from matplotlib import colors, cm from matplotlib import pyplot as plt from matplotlib.collections import LineCollection def _edge_generator_from_csr(csr_matrix): """Yield weighted edge triples for use by NetworkX from a CSR matrix. This function is a straight rewrite of `networkx.convert_matrix._csr_gen_triples`. Since that is a private function, it is safer to include our own here. Parameters ---------- csr_matrix : scipy.sparse.csr_matrix The input matrix. An edge (i, j, w) will be yielded if there is a data value for coordinates (i, j) in the matrix, even if that value is 0. Yields ------ i, j, w : (int, int, float) tuples Each value `w` in the matrix along with its coordinates (i, j). Examples -------- >>> dense = np.eye(2, dtype=np.float) >>> csr = sparse.csr_matrix(dense) >>> edges = _edge_generator_from_csr(csr) >>> list(edges) [(0, 0, 1.0), (1, 1, 1.0)] """ nrows = csr_matrix.shape[0] values = csr_matrix.data indptr = csr_matrix.indptr col_indices = csr_matrix.indices for i in range(nrows): for j in range(indptr[i], indptr[i + 1]): yield i, col_indices[j], values[j] def min_weight(graph, src, dst, n): """Callback to handle merging nodes by choosing minimum weight. Returns a dictionary with `"weight"` set as either the weight between (`src`, `n`) or (`dst`, `n`) in `graph` or the minimum of the two when both exist. Parameters ---------- graph : RAG The graph under consideration. src, dst : int The verices in `graph` to be merged. n : int A neighbor of `src` or `dst` or both. Returns ------- data : dict A dict with the `"weight"` attribute set the weight between (`src`, `n`) or (`dst`, `n`) in `graph` or the minimum of the two when both exist. """ # cover the cases where n only has edge to either `src` or `dst` default = {'weight': np.inf} w1 = graph[n].get(src, default)['weight'] w2 = graph[n].get(dst, default)['weight'] return {'weight': min(w1, w2)} def _add_edge_filter(values, graph): """Create edge in `graph` between central element of `values` and the rest. Add an edge between the middle element in `values` and all other elements of `values` into `graph`. ``values[len(values) // 2]`` is expected to be the central value of the footprint used. Parameters ---------- values : array The array to process. graph : RAG The graph to add edges in. Returns ------- 0 : float Always returns 0. The return value is required so that `generic_filter` can put it in the output array, but it is ignored by this filter. """ values = values.astype(int) center = values[len(values) // 2] for value in values: if value != center and not graph.has_edge(center, value): graph.add_edge(center, value) return 0. class RAG(nx.Graph): """ The Region Adjacency Graph (RAG) of an image, subclasses `networx.Graph <http://networkx.github.io/documentation/latest/reference/classes.graph.html>`_ Parameters ---------- label_image : array of int An initial segmentation, with each region labeled as a different integer. Every unique value in ``label_image`` will correspond to a node in the graph. connectivity : int in {1, ..., ``label_image.ndim``}, optional The connectivity between pixels in ``label_image``. For a 2D image, a connectivity of 1 corresponds to immediate neighbors up, down, left, and right, while a connectivity of 2 also includes diagonal neighbors. See `scipy.ndimage.generate_binary_structure`. 
data : networkx Graph specification, optional Initial or additional edges to pass to the NetworkX Graph constructor. See `networkx.Graph`. Valid edge specifications include edge list (list of tuples), NumPy arrays, and SciPy sparse matrices. **attr : keyword arguments, optional Additional attributes to add to the graph. """ def __init__(self, label_image=None, connectivity=1, data=None, **attr): super(RAG, self).__init__(data, **attr) if self.number_of_nodes() == 0: self.max_id = 0 else: self.max_id = max(self.nodes_iter()) if label_image is not None: fp = ndi.generate_binary_structure(label_image.ndim, connectivity) # In the next ``ndi.generic_filter`` function, the kwarg # ``output`` is used to provide a strided array with a single # 64-bit floating point number, to which the function repeatedly # writes. This is done because even if we don't care about the # output, without this, a float array of the same shape as the # input image will be created and that could be expensive in # memory consumption. ndi.generic_filter( label_image, function=_add_edge_filter, footprint=fp, mode='nearest', output=as_strided(np.empty((1,), dtype=np.float_), shape=label_image.shape, strides=((0,) * label_image.ndim)), extra_arguments=(self,)) def merge_nodes(self, src, dst, weight_func=min_weight, in_place=True, extra_arguments=[], extra_keywords={}): """Merge node `src` and `dst`. The new combined node is adjacent to all the neighbors of `src` and `dst`. `weight_func` is called to decide the weight of edges incident on the new node. Parameters ---------- src, dst : int Nodes to be merged. weight_func : callable, optional Function to decide the attributes of edges incident on the new node. For each neighbor `n` for `src and `dst`, `weight_func` will be called as follows: `weight_func(src, dst, n, *extra_arguments, **extra_keywords)`. `src`, `dst` and `n` are IDs of vertices in the RAG object which is in turn a subclass of `networkx.Graph`. It is expected to return a dict of attributes of the resulting edge. in_place : bool, optional If set to `True`, the merged node has the id `dst`, else merged node has a new id which is returned. extra_arguments : sequence, optional The sequence of extra positional arguments passed to `weight_func`. extra_keywords : dictionary, optional The dict of keyword arguments passed to the `weight_func`. Returns ------- id : int The id of the new node. Notes ----- If `in_place` is `False` the resulting node has a new id, rather than `dst`. """ src_nbrs = set(self.neighbors(src)) dst_nbrs = set(self.neighbors(dst)) neighbors = (src_nbrs | dst_nbrs) - set([src, dst]) if in_place: new = dst else: new = self.next_id() self.add_node(new) for neighbor in neighbors: data = weight_func(self, src, new, neighbor, *extra_arguments, **extra_keywords) self.add_edge(neighbor, new, attr_dict=data) self.node[new]['labels'] = (self.node[src]['labels'] + self.node[dst]['labels']) self.remove_node(src) if not in_place: self.remove_node(dst) return new def add_node(self, n, attr_dict=None, **attr): """Add node `n` while updating the maximum node id. .. seealso:: :func:`networkx.Graph.add_node`.""" super(RAG, self).add_node(n, attr_dict, **attr) self.max_id = max(n, self.max_id) def add_edge(self, u, v, attr_dict=None, **attr): """Add an edge between `u` and `v` while updating max node id. .. seealso:: :func:`networkx.Graph.add_edge`.""" super(RAG, self).add_edge(u, v, attr_dict, **attr) self.max_id = max(u, v, self.max_id) def copy(self): """Copy the graph with its max node id. .. 
seealso:: :func:`networkx.Graph.copy`.""" g = super(RAG, self).copy() g.max_id = self.max_id return g def next_id(self): """Returns the `id` for the new node to be inserted. The current implementation returns one more than the maximum `id`. Returns ------- id : int The `id` of the new node to be inserted. """ return self.max_id + 1 def _add_node_silent(self, n): """Add node `n` without updating the maximum node id. This is a convenience method used internally. .. seealso:: :func:`networkx.Graph.add_node`.""" super(RAG, self).add_node(n) def rag_mean_color(image, labels, connectivity=2, mode='distance', sigma=255.0): """Compute the Region Adjacency Graph using mean colors. Given an image and its initial segmentation, this method constructs the corresponding Region Adjacency Graph (RAG). Each node in the RAG represents a set of pixels within `image` with the same label in `labels`. The weight between two adjacent regions represents how similar or dissimilar two regions are depending on the `mode` parameter. Parameters ---------- image : ndarray, shape(M, N, [..., P,] 3) Input image. labels : ndarray, shape(M, N, [..., P,]) The labelled image. This should have one dimension less than `image`. If `image` has dimensions `(M, N, 3)` `labels` should have dimensions `(M, N)`. connectivity : int, optional Pixels with a squared distance less than `connectivity` from each other are considered adjacent. It can range from 1 to `labels.ndim`. Its behavior is the same as `connectivity` parameter in `scipy.ndimage.generate_binary_structure`. mode : {'distance', 'similarity'}, optional The strategy to assign edge weights. 'distance' : The weight between two adjacent regions is the :math:`|c_1 - c_2|`, where :math:`c_1` and :math:`c_2` are the mean colors of the two regions. It represents the Euclidean distance in their average color. 'similarity' : The weight between two adjacent is :math:`e^{-d^2/sigma}` where :math:`d=|c_1 - c_2|`, where :math:`c_1` and :math:`c_2` are the mean colors of the two regions. It represents how similar two regions are. sigma : float, optional Used for computation when `mode` is "similarity". It governs how close to each other two colors should be, for their corresponding edge weight to be significant. A very large value of `sigma` could make any two colors behave as though they were similar. Returns ------- out : RAG The region adjacency graph. Examples -------- >>> from skimage import data, segmentation >>> from skimage.future import graph >>> img = data.astronaut() >>> labels = segmentation.slic(img) >>> rag = graph.rag_mean_color(img, labels) References ---------- .. 
[1] Alain Tremeau and Philippe Colantoni "Regions Adjacency Graph Applied To Color Image Segmentation" http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.11.5274 """ graph = RAG(labels, connectivity=connectivity) for n in graph: graph.node[n].update({'labels': [n], 'pixel count': 0, 'total color': np.array([0, 0, 0], dtype=np.double)}) for index in np.ndindex(labels.shape): current = labels[index] graph.node[current]['pixel count'] += 1 graph.node[current]['total color'] += image[index] for n in graph: graph.node[n]['mean color'] = (graph.node[n]['total color'] / graph.node[n]['pixel count']) for x, y, d in graph.edges_iter(data=True): diff = graph.node[x]['mean color'] - graph.node[y]['mean color'] diff = np.linalg.norm(diff) if mode == 'similarity': d['weight'] = math.e ** (-(diff ** 2) / sigma) elif mode == 'distance': d['weight'] = diff else: raise ValueError("The mode '%s' is not recognised" % mode) return graph def rag_boundary(labels, edge_map, connectivity=2): """ Comouter RAG based on region boundaries Given an image's initial segmentation and its edge map this method constructs the corresponding Region Adjacency Graph (RAG). Each node in the RAG represents a set of pixels within the image with the same label in `labels`. The weight between two adjacent regions is the average value in `edge_map` along their boundary. labels : ndarray The labelled image. edge_map : ndarray This should have the same shape as that of `labels`. For all pixels along the boundary between 2 adjacent regions, the average value of the corresponding pixels in `edge_map` is the edge weight between them. connectivity : int, optional Pixels with a squared distance less than `connectivity` from each other are considered adjacent. It can range from 1 to `labels.ndim`. Its behavior is the same as `connectivity` parameter in `scipy.ndimage.filters.generate_binary_structure`. Examples -------- >>> from skimage import data, segmentation, filters, color >>> from skimage.future import graph >>> img = data.chelsea() >>> labels = segmentation.slic(img) >>> edge_map = filters.sobel(color.rgb2gray(img)) >>> rag = graph.rag_boundary(labels, edge_map) """ conn = ndi.generate_binary_structure(labels.ndim, connectivity) eroded = ndi.grey_erosion(labels, footprint=conn) dilated = ndi.grey_dilation(labels, footprint=conn) boundaries0 = (eroded != labels) boundaries1 = (dilated != labels) labels_small = np.concatenate((eroded[boundaries0], labels[boundaries1])) labels_large = np.concatenate((labels[boundaries0], dilated[boundaries1])) n = np.max(labels_large) + 1 # use a dummy broadcast array as data for RAG ones = as_strided(np.ones((1,), dtype=np.float), shape=labels_small.shape, strides=(0,)) count_matrix = sparse.coo_matrix((ones, (labels_small, labels_large)), dtype=np.int_, shape=(n, n)).tocsr() data = np.concatenate((edge_map[boundaries0], edge_map[boundaries1])) data_coo = sparse.coo_matrix((data, (labels_small, labels_large))) graph_matrix = data_coo.tocsr() graph_matrix.data /= count_matrix.data rag = RAG() rag.add_weighted_edges_from(_edge_generator_from_csr(graph_matrix), weight='weight') rag.add_weighted_edges_from(_edge_generator_from_csr(count_matrix), weight='count') for n in rag.nodes(): rag.node[n].update({'labels': [n]}) return rag def show_rag(labels, rag, img, border_color='black', edge_width=1.5, edge_cmap='magma', img_cmap='bone', in_place=True, ax=None): """Draw a Region Adjacency Graph on an image. 
Given a labelled image and its corresponding RAG, draw the nodes and edges of the RAG on the image with the specified colors. Edges are drawn between the centroid of the 2 adjacent regions in the image. Parameters ---------- labels : ndarray, shape (M, N) The labelled image. rag : RAG The Region Adjacency Graph. img : ndarray, shape (M, N[, 3]) Input image. If `colormap` is `None`, the image should be in RGB format. border_color : color spec, optional Color with which the borders between regions are drawn. edge_width : float, optional The thickness with which the RAG edges are drawn. edge_cmap : :py:class:`matplotlib.colors.Colormap`, optional Any matplotlib colormap with which the edges are drawn. img_cmap : :py:class:`matplotlib.colors.Colormap`, optional Any matplotlib colormap with which the image is draw. If set to `None` the image is drawn as it is. in_place : bool, optional If set, the RAG is modified in place. For each node `n` the function will set a new attribute ``rag.node[n]['centroid']``. ax : :py:class:`matplotlib.axes.Axes`, optional The axes to draw on. If not specified, new axes are created and drawn on. Returns ------- lc : :py:class:`matplotlib.collections.LineCollection` A colection of lines that represent the edges of the graph. It can be passed to the :meth:`matplotlib.figure.Figure.colorbar` function. Examples -------- >>> from skimage import data, segmentation >>> from skimage.future import graph >>> img = data.coffee() >>> labels = segmentation.slic(img) >>> g = graph.rag_mean_color(img, labels) >>> lc = graph.show_rag(labels, g, img) >>> cbar = plt.colorbar(lc) """ if not in_place: rag = rag.copy() if ax is None: fig, ax = plt.subplots() out = util.img_as_float(img, force_copy=True) if img_cmap is None: if img.ndim < 3 or img.shape[2] not in [3, 4]: msg = 'If colormap is `None`, an RGB or RGBA image should be given' raise ValueError(msg) # Ignore the alpha channel out = img[:, :, :3] else: img_cmap = cm.get_cmap(img_cmap) out = color.rgb2gray(img) # Ignore the alpha channel out = img_cmap(out)[:, :, :3] edge_cmap = cm.get_cmap(edge_cmap) # Handling the case where one node has multiple labels # offset is 1 so that regionprops does not ignore 0 offset = 1 map_array = np.arange(labels.max() + 1) for n, d in rag.nodes_iter(data=True): for label in d['labels']: map_array[label] = offset offset += 1 rag_labels = map_array[labels] regions = measure.regionprops(rag_labels) for (n, data), region in zip(rag.nodes_iter(data=True), regions): data['centroid'] = tuple(map(int, region['centroid'])) cc = colors.ColorConverter() if border_color is not None: border_color = cc.to_rgb(border_color) out = segmentation.mark_boundaries(out, rag_labels, color=border_color) ax.imshow(out) # Defining the end points of the edges # The tuple[::-1] syntax reverses a tuple as matplotlib uses (x,y) # convention while skimage uses (row, column) lines = [[rag.node[n1]['centroid'][::-1], rag.node[n2]['centroid'][::-1]] for (n1, n2) in rag.edges_iter()] lc = LineCollection(lines, linewidths=edge_width, cmap=edge_cmap) edge_weights = [d['weight'] for x, y, d in rag.edges_iter(data=True)] lc.set_array(np.array(edge_weights)) ax.add_collection(lc) return lc
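# Hedged end-to-end sketch, stitched together from the docstring examples
# above (requires a display; data.coffee() is just a convenient test image):
if __name__ == '__main__':
    from skimage import data
    image = data.coffee()
    labels = segmentation.slic(image)
    g = rag_mean_color(image, labels)
    lc = show_rag(labels, g, image)
    plt.colorbar(lc)
    plt.show()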
bsd-3-clause
BhallaLab/moose-examples
traub_2005/py/test_singlecomp.py
2
7203
# test_singlecomp.py --- # # Filename: test_singlecomp.py # Description: # Author: Subhasis Ray # Maintainer: # Created: Tue Jul 17 21:01:14 2012 (+0530) # Version: # Last-Updated: Sun Jun 25 15:37:21 2017 (-0400) # By: subha # Update #: 320 # URL: # Keywords: # Compatibility: # # # Commentary: # # Test the ion channels with a single compartment. # # # Change log: # # 2012-07-17 22:22:23 (+0530) Tested NaF2 and NaPF_SS against neuron # test case. # # # Code: import os os.environ['NUMPTHREADS'] = '1' import uuid import unittest from datetime import datetime import sys sys.path.append('../../../python') import numpy as np from matplotlib import pyplot as plt import moose from testutils import * from nachans import * from kchans import * from archan import * from cachans import * from capool import * simdt = 0.25e-4 plotdt = 0.25e-4 simtime = 350e-3 erev = { 'K': -100e-3, 'Na': 50e-3, 'Ca': 125e-3, 'AR': -40e-3 } channel_density = { 'NaF2': 1500.0, 'NaPF_SS': 1.5, 'KDR_FS': 1000.0, 'KC_FAST': 100.0, 'KA': 300.0, 'KM': 37.5, 'K2': 1.0, 'KAHP_SLOWER': 1.0, 'CaL': 5.0, 'CaT_A': 1.0, 'AR': 2.5 } compartment_propeties = { 'length': 20e-6, 'diameter': 2e-6 * 7.5, 'initVm': -65e-3, 'Em': -65e-3, 'Rm': 5.0, 'Cm': 9e-3, 'Ra': 1.0, 'specific': True} stimulus = [[100e-3, 50e-3, 3e-10], # delay[0], width[0], level[0] [1e9, 0, 0]] def create_compartment(path, length, diameter, initVm, Em, Rm, Cm, Ra, specific=False): comp = moose.Compartment(path) comp.length = length comp.diameter = diameter comp.initVm = initVm comp.Em = Em if not specific: comp.Rm = Rm comp.Cm = Cm comp.Ra = Ra else: sarea = np.pi * length * diameter comp.Rm = Rm / sarea comp.Cm = Cm * sarea comp.Ra = 4.0 * Ra * length / (np.pi * diameter * diameter) return comp def insert_channel(compartment, channeclass, gbar, density=False): channel = moose.copy(channeclass.prototype, compartment)[0] if not density: channel.Gbar = gbar else: channel.Gbar = gbar * np.pi * compartment.length * compartment.diameter moose.connect(channel, 'channel', compartment, 'channel') return channel def insert_ca(compartment, phi, tau): ca = moose.copy(CaPool.prototype, compartment)[0] ca.B = phi / (np.pi * compartment.length * compartment.diameter) ca.tau = tau print( ca.path, ca.B, ca.tau) for chan in moose.wildcardFind('%s/#[TYPE=HHChannel]' % (compartment.path)): if chan.name.startswith('KC') or chan.name.startswith('KAHP'): moose.connect(ca, 'concOut', chan, 'concen') elif chan.name.startswith('CaL'): moose.connect(chan, 'IkOut', ca, 'current') else: continue moose.showfield(chan) return ca class TestSingleComp(unittest.TestCase): def setUp(self): self.testId = uuid.uuid4().int self.container = moose.Neutral('test%d' % (self.testId)) self.model = moose.Neutral('%s/model' % (self.container.path)) self.data = moose.Neutral('%s/data' % (self.container.path)) self.soma = create_compartment('%s/soma' % (self.model.path), **compartment_propeties) self.tables = {} tab = moose.Table('%s/Vm' % (self.data.path)) self.tables['Vm'] = tab moose.connect(tab, 'requestOut', self.soma, 'getVm') for channelname, conductance in list(channel_density.items()): chanclass = eval(channelname) channel = insert_channel(self.soma, chanclass, conductance, density=True) if issubclass(chanclass, KChannel): channel.Ek = erev['K'] elif issubclass(chanclass, NaChannel): channel.Ek = erev['Na'] elif issubclass(chanclass, CaChannel): channel.Ek = erev['Ca'] elif issubclass(chanclass, AR): channel.Ek = erev['AR'] tab = moose.Table('%s/%s' % (self.data.path, channelname)) moose.connect(tab, 
'requestOut', channel, 'getGk') self.tables['Gk_'+channel.name] = tab archan = moose.HHChannel(self.soma.path + '/AR') archan.X = 0.0 ca = insert_ca(self.soma, 2.6e7, 50e-3) tab = moose.Table('%s/Ca' % (self.data.path)) self.tables['Ca'] = tab moose.connect(tab, 'requestOut', ca, 'getCa') self.pulsegen = moose.PulseGen('%s/inject' % (self.model.path)) moose.connect(self.pulsegen, 'output', self.soma, 'injectMsg') tab = moose.Table('%s/injection' % (self.data.path)) moose.connect(tab, 'requestOut', self.pulsegen, 'getOutputValue') self.tables['pulsegen'] = tab self.pulsegen.count = len(stimulus) for ii in range(len(stimulus)): self.pulsegen.delay[ii] = stimulus[ii][0] self.pulsegen.width[ii] = stimulus[ii][1] self.pulsegen.level[ii] = stimulus[ii][2] setup_clocks(simdt, plotdt) assign_clocks(self.model, self.data) moose.reinit() start = datetime.now() moose.start(simtime) end = datetime.now() delta = end - start print( 'Simulation of %g s finished in %g s' % (simtime, delta.seconds + delta.microseconds*1e-6)) def testDefault(self): vm_axis = plt.subplot(2,1,1) ca_axis = plt.subplot(2,1,2) try: fname = os.path.join(config.mydir, 'nrn', 'data', 'singlecomp_Vm.dat') nrndata = np.loadtxt(fname) vm_axis.plot(nrndata[:,0], nrndata[:,1], label='Vm (mV) - nrn') ca_axis.plot(nrndata[:,0], nrndata[:,2], label='Ca (mM) - nrn') except IOError as e: print(e) tseries = np.linspace(0, simtime, len(self.tables['Vm'].vector)) * 1e3 # plotcount = len(channel_density) + 1 # rows = int(np.sqrt(plotcount) + 0.5) # columns = int(plotcount * 1.0/rows + 0.5) # print plotcount, rows, columns # plt.subplot(rows, columns, 1) vm_axis.plot(tseries, self.tables['Vm'].vector * 1e3, label='Vm (mV) - moose') vm_axis.plot(tseries, self.tables['pulsegen'].vector * 1e12, label='inject (pA)') ca_axis.plot(tseries, self.tables['Ca'].vector, label='Ca (mM) - moose') vm_axis.legend() ca_axis.legend() # ii = 2 # for key, value in self.tables.items(): # if key.startswith('Gk'): # plt.subplot(rows, columns, ii) # plt.plot(tseries, value.vector, label=key) # ii += 1 # plt.legend() plt.show() data = np.vstack((tseries*1e-3, self.tables['Vm'].vector, self.tables['Ca'].vector)) np.savetxt(os.path.join(config.data_dir, 'singlecomp_Vm.dat'), np.transpose(data)) if __name__ == '__main__': unittest.main() # # test_singlecomp.py ends here
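# Hedged helper (not used by the test above; MOOSE not required): recomputes
# the specific-to-absolute passive-property conversion that
# create_compartment() applies, so the expected Rm, Cm and Ra of the soma can
# be checked by hand, e.g. _absolute_passive_properties(**compartment_propeties).
def _absolute_passive_properties(length, diameter, Rm, Cm, Ra, **_ignored):
    sarea = np.pi * length * diameter
    return {'Rm': Rm / sarea,
            'Cm': Cm * sarea,
            'Ra': 4.0 * Ra * length / (np.pi * diameter * diameter)}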
gpl-2.0
mantidproject/mantid
qt/python/mantidqt/gui_helper.py
3
5994
# Mantid Repository : https://github.com/mantidproject/mantid # # Copyright &copy; 2018 ISIS Rutherford Appleton Laboratory UKRI, # NScD Oak Ridge National Laboratory, European Spallation Source, # Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS # SPDX - License - Identifier: GPL - 3.0 + from qtpy.QtWidgets import (QApplication) # noqa from qtpy import QtCore, QtGui import matplotlib import sys import os try: from mantid import __version__ as __mtd_version from mantid import _bindir as __mtd_bin_dir # convert to major.minor __mtd_version = '.'.join(__mtd_version.split(".")[:2]) except ImportError: # mantid not found __mtd_version = '' __mtd_bin_dir='' def set_matplotlib_backend(): '''MUST be called before anything tries to use matplotlib This will set the backend if it hasn't been already. It also returns the name of the backend to be the name to be used for importing the correct matplotlib widgets.''' backend = matplotlib.get_backend() if backend.startswith('module://'): if backend.endswith('qt4agg'): backend = 'Qt4Agg' elif backend.endswith('workbench') or backend.endswith('qt5agg'): backend = 'Qt5Agg' else: from qtpy import PYQT4, PYQT5 # noqa if PYQT5: backend = 'Qt5Agg' elif PYQT4: backend = 'Qt4Agg' else: raise RuntimeError('Do not know which matplotlib backend to set') matplotlib.use(backend) return backend def get_qapplication(): ''' Example usage: app, within_mantid = get_qapplication() reducer = eventFilterGUI.MainWindow() # the main ui class in this file reducer.show() if not within_mantid: sys.exit(app.exec_())''' app = QApplication.instance() if app: return app, app.applicationName().lower().startswith('mantid') else: return QApplication(sys.argv), False def __to_external_url(interface_name: str, section: str, external_url: str) -> QtCore.QUrl: if not external_url: template = 'http://docs.mantidproject.org/nightly/interfaces/{}/{}.html' external_url = template.format(section, interface_name) return QtCore.QUrl(external_url) def __to_qthelp_url(interface_name: str, section: str, qt_url: str) -> str: if qt_url: return qt_url else: template = 'qthelp://org.sphinx.mantidproject.{}/doc/interfaces/{}/{}.html' return template.format(__mtd_version, section, interface_name) def __get_collection_file(collection_file: str) -> str: if not collection_file: if not __mtd_bin_dir: return 'HELP COLLECTION FILE NOT FOUND' else: collection_file = os.path.join(__mtd_bin_dir, '../docs/qthelp/MantidProject.qhc') return os.path.abspath(collection_file) def show_interface_help(mantidplot_name, assistant_process, area: str='', collection_file: str='', qt_url: str='', external_url: str=""): ''' Shows the help page for a custom interface @param mantidplot_name: used by showCustomInterfaceHelp @param assistant_process: needs to be started/closed from outside (see example below) @param collection_file: qth file containing the help in format used by qtassistant. The default is ``mantid._bindir + '../docs/qthelp/MantidProject.qhc'`` @param qt_url: location of the help in the qth file. The default value is ``qthelp://org.sphinx.mantidproject.{mtdversion}/doc/interfaces/{mantidplot_name}.html``. @param external_url: location of external page to be displayed in the default browser. 
The default value is ``http://docs.mantidproject.org/nightly/interfaces/framework/{mantidplot_name}.html`` Example using defaults: #in the __init__ function of the GUI add: self.assistant_process = QtCore.QProcess(self) self.mantidplot_name='DGS Planner' #add a help function in the GUI def help(self): show_interface_help(self.mantidplot_name, self.assistant_process) #make sure you close the qtassistant when the GUI is closed def closeEvent(self, event): self.assistant_process.close() self.assistant_process.waitForFinished() event.accept() ''' try: # try using built-in help in mantid import mantidqt mantidqt.interfacemanager.InterfaceManager().showCustomInterfaceHelp(mantidplot_name, area) except: #(ImportError, ModuleNotFoundError) raises the wrong type of error # built-in help failed, try external qtassistant then give up and launch a browser # cleanup previous version assistant_process.close() assistant_process.waitForFinished() # where to expect qtassistant helpapp = QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.BinariesPath) + QtCore.QDir.separator() helpapp += 'assistant' collection_file = __get_collection_file(collection_file) if os.path.isfile(helpapp) and os.path.isfile(collection_file): # try to find the collection file and launch qtassistant args = ['-enableRemoteControl', '-collectionFile', collection_file, '-showUrl', __to_qthelp_url(mantidplot_name, area, qt_url)] assistant_process.close() assistant_process.waitForFinished() assistant_process.start(helpapp, args) else: # give up and upen a URL in default browser openUrl=QtGui.QDesktopServices.openUrl sysenv=QtCore.QProcessEnvironment.systemEnvironment() ldp=sysenv.value('LD_PRELOAD') if ldp: del os.environ['LD_PRELOAD'] # create a url to the help in the default location openUrl(__to_external_url(mantidplot_name, area, external_url)) if ldp: os.environ['LD_PRELOAD']=ldp
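if __name__ == '__main__':
    # Hedged usage sketch (not part of the module): open a bare QWidget with
    # the helpers above; outside of MantidWorkbench this starts its own event
    # loop, inside Workbench the existing QApplication is reused.
    from qtpy.QtWidgets import QWidget
    set_matplotlib_backend()
    app, within_mantid = get_qapplication()
    window = QWidget()
    window.setWindowTitle('gui_helper demo')
    window.show()
    if not within_mantid:
        sys.exit(app.exec_())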
gpl-3.0
andre-richter/pcie-lat
all_in_one.py
1
6054
#!/usr/bin/python import sys import os import numpy as np import matplotlib import matplotlib.mlab as mlab import matplotlib.pyplot as plt import subprocess import traceback pci_dev ={ "name" : "", "loc" : "", "class" : "", "vender" : "", "device" : "", "vd" : "", "isBridge" : 1, "driver" : "" } def is_root(): return os.geteuid() == 0 def get_pci_list(): out = subprocess.Popen(['lspci', '-nm'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) stdout, stderr = out.communicate() lspci_str = stdout.decode('ascii') pci_list = [] pcis = lspci_str.split('\n') for each_pci in pcis: pci = {} __ = each_pci.split(" ") if len(__) < 4: continue pci["loc"] = __[0].replace('"', '') pci["vender"] = __[2].replace('"', '') pci["device"] = __[3].replace('"', '') pci["vd"] = ":".join([pci["vender"], pci["device"]]) out = subprocess.Popen(['lspci', '-s', '{}'.format(pci["loc"]), "-mvk"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) stdout, stderr = out.communicate() ss = stdout.decode('ascii') for line in ss.split("\n"): if ': ' in line: k, v = line.split(": ") if k.strip() == "Class": pci['class'] = v.strip().replace('"', '') elif k.strip() == "Vendor": pci['vender'] = v.strip().replace('"', '') elif k.strip() == "Device" and ss.split("\n").index(line) > 0: pci['device'] = v.strip().replace('"', '') elif k.strip() == "Driver": pci['driver'] = v.strip().replace('"', '') else: pass else: continue pci_list.append(pci) return pci_list def print_mach_info(tsc_freq, tsc_overhead, loops): print("-------------------------------") print(" tsc_freq : {}".format(tsc_freq)) print(" tsc_overhead : {} clocks".format(tsc_overhead)) print(" loops : {}".format(loops)) print("-------------------------------") def clock2ns(clocks, tsc_freq): return int(clocks*1000000000/tsc_freq) def plot_y(y, fname): num_width = 10 ymin = int(min(y))-1 ymax = int(max(y))+1 print("Max. and Min. latencies are {}ns {}ns".format(ymax, ymin)) margin = max(num_width, 5) bins = [ii for ii in range(ymin-margin, ymax+margin, num_width)] plt.yscale('log') n, bins, patches = plt.hist(y, bins, range=(min(y), max(y)), width=10, color='blue') plt.xlabel('nanoseconds') plt.ylabel('Probability') plt.title('Histogram of PCIe latencies (%s samples)' % len(y)) plt.savefig(fname, dpi=200, format='png') def main(): loops = 0 if len(sys.argv) < 2: print("Usage: {} [0000]:XX:XX.X [loops]".format(sys.argv[0])) exit(-1) else: pci_test = sys.argv[1] if pci_test.startswith('0000:'): pci_test = sys.argv[0][5:] if len(sys.argv) == 3: loops = int(sys.argv[2]) else: loops = 100000 ### must be root to run the script if not is_root(): print("Need root privillege! 
run as root!") exit(-1) ### get all devices in this computer pcis = get_pci_list() if pci_test not in [pp['loc'] for pp in pcis]: print("existing PCI devices:") for __ in pcis: print(__) print("{} not found!".format(pci_test)) exit(-1) for p in pcis: if p['loc'] == pci_test: pci_test = p unbind_file = "/sys/bus/pci/devices/0000\:{}/driver/unbind" unbind_file = unbind_file.format(pci_test['loc'].replace(':', '\:')) if os.path.exists(unbind_file): print("Unbind file {} not found!".format(unbind_file)) exit(-1) unbind_ss = 'echo -n "0000:{}" > {}'.format(pci_test['loc'], unbind_file) os.system(unbind_ss) # insert module os.system("make") print("finished compiling the pcie-lat, insmod..."); ins_command = "sudo insmod ./pcie-lat.ko ids={}".format(pci_test['vd']) print(ins_command) os.system(ins_command) # couting try: sys_path_head = "/sys/bus/pci/devices/0000:{}/pcie-lat/{}/pcielat_" sys_path_head = sys_path_head.format(pci_test['loc'], pci_test['loc']) tsc_freq = 0 tsc_overhead = 0 with open(sys_path_head+'tsc_freq', 'r') as __: tsc_freq = int(float(__.read())) with open(sys_path_head+'tsc_overhead', 'r') as __: tsc_overhead = int(float(__.read())) with open(sys_path_head+'loops', 'w') as __: __.write(str(loops)) with open(sys_path_head+'target_bar', 'w') as __: __.write('0') print_mach_info(tsc_freq, tsc_overhead, loops) with open(sys_path_head+'measure', 'w') as __: __.write('0') with open('/dev/pcie-lat/{}'.format(pci_test['loc']), 'rb') as __: y = [] cc = __.read(16) while cc: acc = 0 acc2 = 0 for ii in range(8): acc = acc*256 + int(cc[7-ii]) acc2 = acc2*256 + int(cc[15-ii]) y.append(clock2ns(acc2, tsc_freq)) # read next cc = __.read(16) fname = "pcie_lat_loops{}_{}.png" fname = fname.format(loops, pci_test['loc'].replace(':', '..')) print("plot the graph") plot_y(y, fname) except Exception: traceback.print_exc() print("Removing module : sudo rmmod pcie-lat.ko") os.system("sudo rmmod pcie-lat.ko") exit(-1) # remove module print("Removing module : sudo rmmod pcie-lat.ko") os.system("sudo rmmod pcie-lat.ko") if __name__ == "__main__": main()
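# Hedged, hardware-free sketch (no PCIe device or kernel module needed):
# exercises the clock2ns() conversion and plot_y() histogram defined above
# with synthetic TSC deltas; the 2.4 GHz frequency is only an example value.
def _demo_histogram(tsc_freq=2.4e9, n=10000):
    np.random.seed(0)
    clocks = np.random.normal(loc=2000, scale=50, size=n)
    latencies_ns = [clock2ns(c, tsc_freq) for c in clocks]
    plot_y(latencies_ns, "pcie_lat_demo.png")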
gpl-2.0
bzero/statsmodels
statsmodels/sandbox/tsa/varma.py
33
5032
'''VAR and VARMA process

this doesn't actually do much, trying out a version for a time loop

alternative representation:
* textbook, different blocks in matrices
* Kalman filter
* VAR, VARX and ARX could be calculated with signal.lfilter
  only tried some examples, not implemented

TODO: try minimizing sum of squares of (Y-Yhat)

Note: filter has smallest lag at end of array and largest lag at beginning,
    be careful for asymmetric lags coefficients
    check this again if it is consistently used

changes
2009-09-08 : separated from movstat.py

Author : josefpkt
License : BSD
'''

from __future__ import print_function
import numpy as np
from scipy import signal
#import matplotlib.pylab as plt
from numpy.testing import assert_array_equal, assert_array_almost_equal


#NOTE: this just returns the predicted values given the
#B matrix in polynomial form.
#TODO: make sure VAR class returns B/params in this form.
def VAR(x, B, const=0):
    ''' multivariate linear filter

    Parameters
    ----------
    x: (TxK) array
        columns are variables, rows are observations for time period
    B: (PxKxK) array
        b_t-1 is bottom "row", b_t-P is top "row" when printing
        B(:,:,0) is lag polynomial matrix for variable 1
        B(:,:,k) is lag polynomial matrix for variable k
        B(p,:,k) is pth lag for variable k
        B[p,:,:].T corresponds to A_p in Wikipedia
    const: float or array (not tested)
        constant added to autoregression

    Returns
    -------
    xhat: (TxK) array
        filtered, predicted values of x array

    Notes
    -----
    xhat(t,i) = sum{_p}sum{_k} { x(t-P:t,:) .* B(:,:,i) }  for all i = 0,K-1,
    for all t=p..T

    xhat does not include the forecasting observation, xhat(T+1),
    xhat is 1 row shorter than signal.correlate

    References
    ----------
    http://en.wikipedia.org/wiki/Vector_Autoregression
    http://en.wikipedia.org/wiki/General_matrix_notation_of_a_VAR(p)
    '''
    p = B.shape[0]
    T = x.shape[0]
    xhat = np.zeros(x.shape)
    for t in range(p, T):  #[p+2]:#
##        print(p,T)
##        print(x[t-p:t,:,np.newaxis].shape)
##        print(B.shape)
        #print(x[t-p:t,:,np.newaxis])
        xhat[t,:] = const + (x[t-p:t,:,np.newaxis]*B).sum(axis=1).sum(axis=0)
    return xhat


def VARMA(x, B, C, const=0):
    ''' multivariate linear filter

    x (TxK)
    B (PxKxK)

    xhat(t,i) = sum{_p}sum{_k} { x(t-P:t,:) .* B(:,:,i) } +
                sum{_q}sum{_k} { e(t-Q:t,:) .* C(:,:,i) }  for all i = 0,K-1

    '''
    P = B.shape[0]
    Q = C.shape[0]
    T = x.shape[0]
    xhat = np.zeros(x.shape)
    e = np.zeros(x.shape)
    start = max(P, Q)
    for t in range(start, T):  #[p+2]:#
##        print(p,T)
##        print(x[t-p:t,:,np.newaxis].shape)
##        print(B.shape)
        #print(x[t-p:t,:,np.newaxis])
        xhat[t,:] = const + (x[t-P:t,:,np.newaxis]*B).sum(axis=1).sum(axis=0) + \
                    (e[t-Q:t,:,np.newaxis]*C).sum(axis=1).sum(axis=0)
        e[t,:] = x[t,:] - xhat[t,:]
    return xhat, e


if __name__ == '__main__':
    T = 20
    K = 2
    P = 3
    #x = np.arange(10).reshape(5,2)
    x = np.column_stack([np.arange(T)]*K)
    B = np.ones((P,K,K))
    #B[:,:,1] = 2
    B[:,:,1] = [[0,0],[0,0],[0,1]]
    xhat = VAR(x,B)
    print(np.all(xhat[P:,0]==np.correlate(x[:-1,0],np.ones(P))*2))
    #print(xhat)

    T = 20
    K = 2
    Q = 2
    P = 3
    const = 1
    #x = np.arange(10).reshape(5,2)
    x = np.column_stack([np.arange(T)]*K)
    B = np.ones((P,K,K))
    #B[:,:,1] = 2
    B[:,:,1] = [[0,0],[0,0],[0,1]]
    C = np.zeros((Q,K,K))
    xhat1 = VAR(x,B, const=const)
    xhat2, err2 = VARMA(x,B,C, const=const)
    print(np.all(xhat2 == xhat1))
    print(np.all(xhat2[P:,0] == np.correlate(x[:-1,0],np.ones(P))*2+const))

    C[1,1,1] = 0.5
    xhat3, err3 = VARMA(x,B,C)

    x = np.r_[np.zeros((P,K)),x]  #prepend initial conditions
    xhat4, err4 = VARMA(x,B,C)

    C[1,1,1] = 1
    B[:,:,1] = [[0,0],[0,0],[0,1]]
    xhat5, err5 = VARMA(x,B,C)
    #print(err5)

    #in differences
    #VARMA(np.diff(x,axis=0),B,C)

    #Note:
    # * signal correlate applies same filter to all columns if kernel.shape[1]<K
    #   e.g. signal.correlate(x0,np.ones((3,1)),'valid')
    # * if kernel.shape[1]==K, then `valid` produces a single column
    #   -> possible to run signal.correlate K times with different filters,
    #      see the following example, which replicates VAR filter
    x0 = np.column_stack([np.arange(T), 2*np.arange(T)])
    B[:,:,0] = np.ones((P,K))
    B[:,:,1] = np.ones((P,K))
    B[1,1,1] = 0
    xhat0 = VAR(x0,B)
    xcorr00 = signal.correlate(x0,B[:,:,0])#[:,0]
    xcorr01 = signal.correlate(x0,B[:,:,1])
    print(np.all(signal.correlate(x0,B[:,:,0],'valid')[:-1,0]==xhat0[P:,0]))
    print(np.all(signal.correlate(x0,B[:,:,1],'valid')[:-1,0]==xhat0[P:,1]))

    #import error
    #from movstat import acovf, acf
    from statsmodels.tsa.stattools import acovf, acf
    aav = acovf(x[:,0])
    print(aav[0] == np.var(x[:,0]))
    aac = acf(x[:,0])
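A minimal usage sketch of the VAR filter above; the data and coefficient values are made up, and the import assumes this module is available at the path listed for the file:

import numpy as np
from statsmodels.sandbox.tsa.varma import VAR

# Hypothetical VAR(1) with K=2 variables: B[0, j, i] weights x[t-1, j] in the
# prediction of variable i, matching the summation in VAR's docstring.
T, K, P = 50, 2, 1
x = np.random.randn(T, K)
B = np.zeros((P, K, K))
B[0] = [[0.5, 0.1],
        [0.0, 0.3]]
xhat = VAR(x, B)   # rows 0..P-1 stay zero; no forecast row is appended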
bsd-3-clause
to266/hyperspy
hyperspy/drawing/widget.py
2
36785
# -*- coding: utf-8 -*- # Copyright 2007-2016 The HyperSpy developers # # This file is part of HyperSpy. # # HyperSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # HyperSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with HyperSpy. If not, see <http://www.gnu.org/licenses/>. from __future__ import division import matplotlib.pyplot as plt from matplotlib.backend_bases import MouseEvent import numpy as np from hyperspy.drawing.utils import on_figure_window_close from hyperspy.events import Events, Event class WidgetBase(object): """Base class for interactive widgets/patches. A widget creates and maintains one or more matplotlib patches, and manages the interaction code so that the user can maniuplate it on the fly. This base class implements functionality witch is common to all such widgets, mainly the code that manages the patch, axes management, and sets up common events ('changed' and 'closed'). Any inherting subclasses must implement the following methods: _set_patch(self) _on_navigate(obj, name, old, new) # Only for widgets that can navigate It should also make sure to fill the 'axes' attribute as early as possible (but after the base class init), so that it is available when needed. """ def __init__(self, axes_manager=None, **kwargs): self.axes_manager = axes_manager self._axes = list() self.ax = None self.picked = False self.selected = False self._selected_artist = None self._size = 1. self.color = 'red' self.__is_on = True self.background = None self.patch = [] self.cids = list() self.blit = True self.events = Events() self.events.changed = Event(doc=""" Event that triggers when the widget has a significant change. The event triggers after the internal state of the widget has been updated. Arguments: ---------- widget: The widget that changed """, arguments=['obj']) self.events.closed = Event(doc=""" Event that triggers when the widget closed. The event triggers after the widget has already been closed. Arguments: ---------- widget: The widget that closed """, arguments=['obj']) self._navigating = False super(WidgetBase, self).__init__(**kwargs) def _get_axes(self): return self._axes def _set_axes(self, axes): if axes is None: self._axes = list() else: self._axes = axes axes = property(lambda s: s._get_axes(), lambda s, v: s._set_axes(v)) def is_on(self): """Determines if the widget is set to draw if valid (turned on). """ return self.__is_on def set_on(self, value): """Change the on state of the widget. If turning off, all patches will be removed from the matplotlib axes and the widget will disconnect from all events. If turning on, the patch(es) will be added to the matplotlib axes, and the widget will connect to its default events. 
""" did_something = False if value is not self.is_on() and self.ax is not None: did_something = True if value is True: self._add_patch_to(self.ax) self.connect(self.ax) elif value is False: for container in [ self.ax.patches, self.ax.lines, self.ax.artists, self.ax.texts]: for p in self.patch: if p in container: container.remove(p) self.disconnect() if hasattr(super(WidgetBase, self), 'set_on'): super(WidgetBase, self).set_on(value) if did_something: self.draw_patch() if value is False: self.ax = None self.__is_on = value def _set_patch(self): """Create the matplotlib patch(es), and store it in self.patch """ if hasattr(super(WidgetBase, self), '_set_patch'): super(WidgetBase, self)._set_patch() # Must be provided by the subclass def _add_patch_to(self, ax): """Create and add the matplotlib patches to 'ax' """ self._set_patch() for p in self.patch: ax.add_artist(p) p.set_animated(hasattr(ax, 'hspy_fig')) if hasattr(super(WidgetBase, self), '_add_patch_to'): super(WidgetBase, self)._add_patch_to(ax) def set_mpl_ax(self, ax): """Set the matplotlib Axes that the widget will draw to. If the widget on state is True, it will also add the patch to the Axes, and connect to its default events. """ if ax is self.ax: return # Do nothing # Disconnect from previous axes if set if self.ax is not None and self.is_on(): self.disconnect() self.ax = ax canvas = ax.figure.canvas if self.is_on() is True: self._add_patch_to(ax) self.connect(ax) canvas.draw() self.select() def select(self): """ Cause this widget to be the selected widget in its MPL axes. This assumes that the widget has its patch added to the MPL axes. """ if not self.patch or not self.is_on() or not self.ax: return canvas = self.ax.figure.canvas # Simulate a pick event x, y = self.patch[0].get_transform().transform_point((0, 0)) mouseevent = MouseEvent('pick_event', canvas, x, y) canvas.pick_event(mouseevent, self.patch[0]) self.picked = False def connect(self, ax): """Connect to the matplotlib Axes' events. """ on_figure_window_close(ax.figure, self.close) if self._navigating: self.connect_navigate() def connect_navigate(self): """Connect to the axes_manager such that changes in the widget or in the axes_manager are reflected in the other. """ if self._navigating: self.disconnect_navigate() self.axes_manager.events.indices_changed.connect( self._on_navigate, {'obj': 'axes_manager'}) self._on_navigate(self.axes_manager) # Update our position self._navigating = True def disconnect_navigate(self): """Disconnect a previous naivgation connection. """ self.axes_manager.events.indices_changed.disconnect(self._on_navigate) self._navigating = False def _on_navigate(self, axes_manager): """Callback for axes_manager's change notification. """ pass # Implement in subclass! def disconnect(self): """Disconnect from all events (both matplotlib and navigation). """ for cid in self.cids: try: self.ax.figure.canvas.mpl_disconnect(cid) except: pass if self._navigating: self.disconnect_navigate() def close(self, window=None): """Set the on state to off (removes patch and disconnects), and trigger events.closed. """ self.set_on(False) self.events.closed.trigger(obj=self) def draw_patch(self, *args): """Update the patch drawing. 
""" try: if hasattr(self.ax, 'hspy_fig'): self.ax.hspy_fig._draw_animated() elif self.ax.figure is not None: self.ax.figure.canvas.draw_idle() except AttributeError: pass # When figure is None, typically when closing def _v2i(self, axis, v): """Wrapped version of DataAxis.value2index, which bounds the index inbetween axis.low_index and axis.high_index+1, and does not raise a ValueError. """ try: return axis.value2index(v) except ValueError: if v > axis.high_value: return axis.high_index + 1 elif v < axis.low_value: return axis.low_index else: raise def _i2v(self, axis, i): """Wrapped version of DataAxis.index2value, which bounds the value inbetween axis.low_value and axis.high_value+axis.scale, and does not raise a ValueError. """ try: return axis.index2value(i) except ValueError: if i > axis.high_index: return axis.high_value + axis.scale elif i < axis.low_index: return axis.low_value else: raise class DraggableWidgetBase(WidgetBase): """Adds the `position` and `indices` properties, and adds a framework for letting the user drag the patch around. Also adds the `moved` event. The default behavior is that `position` snaps to the values corresponding to the values of the axes grid (i.e. no subpixel values). This behavior can be controlled by the property `snap_position`. Any inheritors must override these methods: _onmousemove(self, event) _update_patch_position(self) _set_patch(self) """ def __init__(self, axes_manager, **kwargs): super(DraggableWidgetBase, self).__init__(axes_manager, **kwargs) self.events.moved = Event(doc=""" Event that triggers when the widget was moved. The event triggers after the internal state of the widget has been updated. This event does not differentiate on how the position of the widget was changed, so it is the responsibility of the user to suppress events as neccessary to avoid closed loops etc. Arguments: ---------- obj: The widget that was moved. """, arguments=['obj']) self._snap_position = True # Set default axes if self.axes_manager is not None: if self.axes_manager.navigation_dimension > 0: self.axes = self.axes_manager.navigation_axes[0:1] else: self.axes = self.axes_manager.signal_axes[0:1] else: self._pos = np.array([0.]) def _set_axes(self, axes): super(DraggableWidgetBase, self)._set_axes(axes) if self.axes: self._pos = np.array([ax.low_value for ax in self.axes]) def _get_indices(self): """Returns a tuple with the position (indices). """ idx = [] for i in range(len(self.axes)): idx.append(self.axes[i].value2index(self._pos[i])) return tuple(idx) def _set_indices(self, value): """Sets the position of the widget (by indices). The dimensions should correspond to that of the 'axes' attribute. Calls _pos_changed if the value has changed, which is then responsible for triggering any relevant events. """ if np.ndim(value) == 0 and len(self.axes) == 1: self.position = [self.axes[0].index2value(value)] elif len(self.axes) != len(value): raise ValueError() else: p = [] for i in range(len(self.axes)): p.append(self.axes[i].index2value(value[i])) self.position = p indices = property(lambda s: s._get_indices(), lambda s, v: s._set_indices(v)) def _pos_changed(self): """Call when the position of the widget has changed. It triggers the relevant events, and updates the patch position. 
""" if self._navigating: with self.axes_manager.events.indices_changed.suppress_callback( self._on_navigate): for i in range(len(self.axes)): self.axes[i].value = self._pos[i] self.events.moved.trigger(self) self.events.changed.trigger(self) self._update_patch_position() def _validate_pos(self, pos): """Validates the passed position. Depending on the position and the implementation, this can either fire a ValueError, or return a modified position that has valid values. Or simply return the unmodified position if everything is ok. This default implementation bounds the position within the axes limits. """ if len(pos) != len(self.axes): raise ValueError() pos = np.maximum(pos, [ax.low_value for ax in self.axes]) pos = np.minimum(pos, [ax.high_value for ax in self.axes]) if self.snap_position: pos = self._do_snap_position(pos) return pos def _get_position(self): """Providies the position of the widget (by values) in a tuple. """ return tuple( self._pos.tolist()) # Don't pass reference, and make it clear def _set_position(self, position): """Sets the position of the widget (by values). The dimensions should correspond to that of the 'axes' attribute. Calls _pos_changed if the value has changed, which is then responsible for triggering any relevant events. """ position = self._validate_pos(position) if np.any(self._pos != position): self._pos = np.array(position) self._pos_changed() position = property(lambda s: s._get_position(), lambda s, v: s._set_position(v)) def _do_snap_position(self, value=None): """Snaps position to axes grid. Returns snapped value. If value is passed as an argument, the internal state is left untouched, if not the position attribute is updated to the snapped value. """ value = np.array(value) if value is not None else self._pos for i, ax in enumerate(self.axes): value[i] = ax.index2value(ax.value2index(value[i])) return value def _set_snap_position(self, value): self._snap_position = value if value: snap_value = self._do_snap_position(self._pos) if np.any(self._pos != snap_value): self._pos = snap_value self._pos_changed() snap_position = property(lambda s: s._snap_position, lambda s, v: s._set_snap_position(v)) def connect(self, ax): super(DraggableWidgetBase, self).connect(ax) canvas = ax.figure.canvas self.cids.append( canvas.mpl_connect('motion_notify_event', self._onmousemove)) self.cids.append(canvas.mpl_connect('pick_event', self.onpick)) self.cids.append(canvas.mpl_connect( 'button_release_event', self.button_release)) def _on_navigate(self, axes_manager): if axes_manager is self.axes_manager: p = self._pos.tolist() for i, a in enumerate(self.axes): p[i] = a.value self.position = p # Use property to trigger events def onpick(self, event): # Callback for MPL pick event self.picked = (event.artist in self.patch) self._selected_artist = event.artist if hasattr(super(DraggableWidgetBase, self), 'onpick'): super(DraggableWidgetBase, self).onpick(event) self.selected = self.picked def _onmousemove(self, event): """Callback for mouse movement. For dragging, the implementor would normally check that the widget is picked, and that the event.inaxes Axes equals self.ax. """ # This method must be provided by the subclass pass def _update_patch_position(self): """Updates the position of the patch on the plot. """ # This method must be provided by the subclass pass def _update_patch_geometry(self): """Updates all geometry of the patch on the plot. 
""" self._update_patch_position() def button_release(self, event): """whenever a mouse button is released""" if event.button != 1: return if self.picked is True: self.picked = False class Widget1DBase(DraggableWidgetBase): """A base class for 1D widgets. It sets the right dimensions for size and position, adds the 'border_thickness' attribute and initalizes the 'axes' attribute to the first two navigation axes if possible, if not, the two first signal_axes are used. Other than that it mainly supplies common utility functions for inheritors, and implements required functions for ResizableDraggableWidgetBase. The implementation for ResizableDraggableWidgetBase methods all assume that a Rectangle patch will be used, centered on position. If not, the inheriting class will have to override those as applicable. """ def _set_position(self, position): try: len(position) except TypeError: position = (position,) super(Widget1DBase, self)._set_position(position) def _validate_pos(self, pos): pos = np.maximum(pos, self.axes[0].low_value) pos = np.minimum(pos, self.axes[0].high_value) return super(Widget1DBase, self)._validate_pos(pos) class ResizableDraggableWidgetBase(DraggableWidgetBase): """Adds the `size` property and get_size_in_axes method, and adds a framework for letting the user resize the patch, including resizing by key strokes ('+', '-'). Also adds the 'resized' event. Utility functions for resizing are implemented by `increase_size` and `decrease_size`, which will in-/decrement the size by 1. Other utility functions include `get_centre` and `get_centre_indices` which returns the center position, and the internal _apply_changes which helps make sure that only one 'changed' event is fired for a combined move and resize. Any inheritors must override these methods: _update_patch_position(self) _update_patch_size(self) _update_patch_geometry(self) _set_patch(self) """ def __init__(self, axes_manager, **kwargs): super(ResizableDraggableWidgetBase, self).__init__( axes_manager, **kwargs) if not self.axes: self._size = np.array([1]) self.size_step = 1 # = one step in index space self._snap_size = True self.events.resized = Event(doc=""" Event that triggers when the widget was resized. The event triggers after the internal state of the widget has been updated. This event does not differentiate on how the size of the widget was changed, so it is the responsibility of the user to suppress events as neccessary to avoid closed loops etc. Arguments: ---------- obj: The widget that was resized. """, arguments=['obj']) self.no_events_while_dragging = False self._drag_store = None def _set_axes(self, axes): super(ResizableDraggableWidgetBase, self)._set_axes(axes) if self.axes: self._size = np.array([ax.scale for ax in self.axes]) def _get_size(self): """Getter for 'size' property. Returns the size as a tuple (to prevent unintended in-place changes). """ return tuple(self._size.tolist()) def _set_size(self, value): """Setter for the 'size' property. Calls _size_changed to handle size change, if the value has changed. 
""" value = np.minimum(value, [ax.size * ax.scale for ax in self.axes]) value = np.maximum(value, self.size_step * [ax.scale for ax in self.axes]) if self.snap_size: value = self._do_snap_size(value) if np.any(self._size != value): self._size = value self._size_changed() size = property(lambda s: s._get_size(), lambda s, v: s._set_size(v)) def _do_snap_size(self, value=None): value = np.array(value) if value is not None else self._size for i, ax in enumerate(self.axes): value[i] = round(value[i] / ax.scale) * ax.scale return value def _set_snap_size(self, value): self._snap_size = value if value: snap_value = self._do_snap_size(self._size) if np.any(self._size != snap_value): self._size = snap_value self._size_changed() snap_size = property(lambda s: s._snap_size, lambda s, v: s._set_snap_size(v)) def _set_snap_all(self, value): # Snap position first, as snapped size can depend on position. self.snap_position = value self.snap_size = value snap_all = property(lambda s: s.snap_size and s.snap_position, lambda s, v: s._set_snap_all(v)) def increase_size(self): """Increment all sizes by 1. Applied via 'size' property. """ self.size = np.array(self.size) + \ self.size_step * np.array([a.scale for a in self.axes]) def decrease_size(self): """Decrement all sizes by 1. Applied via 'size' property. """ self.size = np.array(self.size) - \ self.size_step * np.array([a.scale for a in self.axes]) def _size_changed(self): """Triggers resize and changed events, and updates the patch. """ self.events.resized.trigger(self) self.events.changed.trigger(self) self._update_patch_size() def get_size_in_indices(self): """Gets the size property converted to the index space (via 'axes' attribute). """ s = list() for i in range(len(self.axes)): s.append(int(round(self._size[i] / self.axes[i].scale))) return np.array(s) def set_size_in_indices(self, value): """Sets the size property converted to the index space (via 'axes' attribute). """ s = list() for i in range(len(self.axes)): s.append(int(round(value[i] * self.axes[i].scale))) self.size = s # Use property to get full processing def get_centre(self): """Get's the center indices. The default implementation is simply the position + half the size in axes space, which should work for any symmetric widget, but more advanced widgets will need to decide whether to return the center of gravity or the geometrical center of the bounds. """ return self._pos + self._size() / 2.0 def get_centre_index(self): """Get's the center position (in index space). The default implementation is simply the indices + half the size, which should work for any symmetric widget, but more advanced widgets will need to decide whether to return the center of gravity or the geometrical center of the bounds. """ return self.indices + self.get_size_in_indices() / 2.0 def _update_patch_size(self): """Updates the size of the patch on the plot. """ # This method must be provided by the subclass pass def _update_patch_geometry(self): """Updates all geometry of the patch on the plot. 
""" # This method must be provided by the subclass pass def on_key_press(self, event): if event.key == "+": self.increase_size() if event.key == "-": self.decrease_size() def connect(self, ax): super(ResizableDraggableWidgetBase, self).connect(ax) canvas = ax.figure.canvas self.cids.append(canvas.mpl_connect('key_press_event', self.on_key_press)) def onpick(self, event): if hasattr(super(ResizableDraggableWidgetBase, self), 'onpick'): super(ResizableDraggableWidgetBase, self).onpick(event) if self.picked: self._drag_store = (self.position, self.size) def _apply_changes(self, old_size, old_position): """Evalutes whether the widget has been moved/resized, and triggers the correct events and updates the patch geometry. This function has the advantage that the geometry is updated only once, preventing flickering, and the 'changed' event only fires once. """ moved = self.position != old_position resized = self.size != old_size if moved: if self._navigating: e = self.axes_manager.events.indices_changed with e.suppress_callback(self._on_navigate): for i in range(len(self.axes)): self.axes[i].index = self.indices[i] if moved or resized: # Update patch first if moved and resized: self._update_patch_geometry() elif moved: self._update_patch_position() else: self._update_patch_size() # Then fire events if not self.no_events_while_dragging or not self.picked: if moved: self.events.moved.trigger(self) if resized: self.events.resized.trigger(self) self.events.changed.trigger(self) def button_release(self, event): """whenever a mouse button is released""" picked = self.picked super(ResizableDraggableWidgetBase, self).button_release(event) if event.button != 1: return if picked and self.picked is False: if self.no_events_while_dragging and self._drag_store: self._apply_changes(*self._drag_store) class Widget2DBase(ResizableDraggableWidgetBase): """A base class for 2D widgets. It sets the right dimensions for size and position, adds the 'border_thickness' attribute and initalizes the 'axes' attribute to the first two navigation axes if possible, if not, the two first signal_axes are used. Other than that it mainly supplies common utility functions for inheritors, and implements required functions for ResizableDraggableWidgetBase. The implementation for ResizableDraggableWidgetBase methods all assume that a Rectangle patch will be used, centered on position. If not, the inheriting class will have to override those as applicable. """ def __init__(self, axes_manager, **kwargs): super(Widget2DBase, self).__init__(axes_manager, **kwargs) self.border_thickness = 2 # Set default axes if self.axes_manager is not None: if self.axes_manager.navigation_dimension > 1: self.axes = self.axes_manager.navigation_axes[0:2] elif self.axes_manager.signal_dimension > 1: self.axes = self.axes_manager.signal_axes[0:2] elif len(self.axes_manager.shape) > 1: self.axes = (self.axes_manager.signal_axes + self.axes_manager.navigation_axes) else: raise ValueError("2D widget needs at least two axes!") else: self._pos = np.array([0, 0]) self._size = np.array([1, 1]) def _get_patch_xy(self): """Returns the xy position of the widget. In this default implementation, the widget is centered on the position. """ return self._pos - self._size / 2. def _get_patch_bounds(self): """Returns the bounds of the patch in the form of a tuple in the order left, top, width, height. In matplotlib, 'bottom' is used instead of 'top' as the naming assumes an upwards pointing y-axis, meaning the lowest value corresponds to bottom. 
However, our widgets will normally only go on images (which has an inverted y-axis in MPL by default), so we define the lowest value to be termed 'top'. """ xy = self._get_patch_xy() xs, ys = self.size return (xy[0], xy[1], xs, ys) # x,y,w,h def _update_patch_position(self): if self.is_on() and self.patch: self.patch[0].set_xy(self._get_patch_xy()) self.draw_patch() def _update_patch_size(self): self._update_patch_geometry() def _update_patch_geometry(self): if self.is_on() and self.patch: self.patch[0].set_bounds(*self._get_patch_bounds()) self.draw_patch() class ResizersMixin(object): """ Widget mix-in for adding resizing manipulation handles. The default handles are green boxes displayed on the outside corners of the boundaries. By default, the handles are only displayed when the widget is selected (`picked` in matplotlib terminology). Attributes: ----------- resizers : {bool} Property that determines whether the resizer handles should be used resize_color : {matplotlib color} The color of the resize handles. resize_pixel_size : {tuple | None} Size of the resize handles in screen pixels. If None, it is set equal to the size of one 'data-pixel' (image pixel size). resizer_picked : {False | int} Inidcates which, if any, resizer was selected the last time the widget was picked. `False` if another patch was picked, or the index of the resizer handle that was picked. """ def __init__(self, resizers=True, **kwargs): super(ResizersMixin, self).__init__(**kwargs) self.resizer_picked = False self.pick_offset = (0, 0) self.resize_color = 'lime' self.resize_pixel_size = (5, 5) # Set to None to make one data pixel self._resizers = resizers self._resizer_handles = [] self._resizers_on = False # The `_resizers_on` attribute reflects whether handles are actually on # as compared to `_resizers` which is whether the user wants them on. # The difference is e.g. for turning on and off handles when the # widget is selected/deselected. @property def resizers(self): return self._resizers @resizers.setter def resizers(self, value): if self._resizers != value: self._resizers = value self._set_resizers(value, self.ax) def _update_resizers(self): """Update resizer handles' patch geometry. """ pos = self._get_resizer_pos() rsize = self._get_resizer_size() for i, r in enumerate(self._resizer_handles): r.set_xy(pos[i]) r.set_width(rsize[0]) r.set_height(rsize[1]) def _set_resizers(self, value, ax): """Turns the resizers on/off, in much the same way that _set_patch works. """ if ax is not None: if value: for r in self._resizer_handles: ax.add_artist(r) r.set_animated(hasattr(ax, 'hspy_fig')) else: for container in [ ax.patches, ax.lines, ax.artists, ax.texts]: for r in self._resizer_handles: if r in container: container.remove(r) self._resizers_on = value self.draw_patch() def _get_resizer_size(self): """Gets the size of the resizer handles in axes coordinates. If 'resize_pixel_size' is None, a size of one pixel will be used. """ invtrans = self.ax.transData.inverted() if self.resize_pixel_size is None: rsize = [ax.scale for ax in self.axes] else: rsize = np.abs(invtrans.transform(self.resize_pixel_size) - invtrans.transform((0, 0))) return rsize def _get_resizer_offset(self): """Utility for getting the distance from the boundary box to the center of the resize handles. 
""" invtrans = self.ax.transData.inverted() border = self.border_thickness # Transform the border thickness into data values dl = np.abs(invtrans.transform((border, border)) - invtrans.transform((0, 0))) / 2 rsize = self._get_resizer_size() return rsize / 2 + dl def _get_resizer_pos(self): """Get the positions of the resizer handles. """ invtrans = self.ax.transData.inverted() border = self.border_thickness # Transform the border thickness into data values dl = np.abs(invtrans.transform((border, border)) - invtrans.transform((0, 0))) / 2 rsize = self._get_resizer_size() xs, ys = self._size positions = [] rp = np.array(self._get_patch_xy()) p = rp - rsize + dl # Top left positions.append(p) p = rp + (xs - dl[0], -rsize[1] + dl[1]) # Top right positions.append(p) p = rp + (-rsize[0] + dl[0], ys - dl[1]) # Bottom left positions.append(p) p = rp + (xs - dl[0], ys - dl[1]) # Bottom right positions.append(p) return positions def _set_patch(self): """Creates the resizer handles, irregardless of whether they will be used or not. """ if hasattr(super(ResizersMixin, self), '_set_patch'): super(ResizersMixin, self)._set_patch() if self._resizer_handles: self._set_resizers(False, self.ax) self._resizer_handles = [] rsize = self._get_resizer_size() pos = self._get_resizer_pos() for i in range(len(pos)): r = plt.Rectangle(pos[i], rsize[0], rsize[1], animated=self.blit, fill=True, lw=0, fc=self.resize_color, picker=True,) self._resizer_handles.append(r) def set_on(self, value): """Turns on/off resizers whet widget is turned on/off. """ if self.resizers and value != self._resizers_on: self._set_resizers(value, self.ax) if hasattr(super(ResizersMixin, self), 'set_on'): super(ResizersMixin, self).set_on(value) def onpick(self, event): """Picking of main patch is same as for widget base, but this also handles picking of the resize handles. If a resize handles is picked, `picked` is set to `True`, and `resizer_picked` is set to an integer indicating which handle was picked (0-3 for top left, top right, bottom left, bottom right). It is set to `False` if another widget was picked. If the main patch is picked, the offset from the picked pixel to the `position` is stored in `pick_offset`. This can be used in e.g. `_onmousemove` to ease dragging code (prevent widget center/corner snapping to mouse). """ if event.artist in self._resizer_handles: corner = self._resizer_handles.index(event.artist) self.resizer_picked = corner self.picked = True elif self.picked: if self.resizers and not self._resizers_on: self._set_resizers(True, self.ax) x = event.mouseevent.xdata y = event.mouseevent.ydata self.pick_offset = (x - self._pos[0], y - self._pos[1]) self.resizer_picked = False else: self._set_resizers(False, self.ax) if hasattr(super(ResizersMixin, self), 'onpick'): super(ResizersMixin, self).onpick(event) def _add_patch_to(self, ax): """Same as widget base, but also adds resizers if 'resizers' property is True. """ if self.resizers: self._set_resizers(True, ax) if hasattr(super(ResizersMixin, self), '_add_patch_to'): super(ResizersMixin, self)._add_patch_to(ax)
gpl-3.0
jm-begon/scikit-learn
examples/linear_model/plot_bayesian_ridge.py
248
2588
""" ========================= Bayesian Ridge Regression ========================= Computes a Bayesian Ridge Regression on a synthetic dataset. See :ref:`bayesian_ridge_regression` for more information on the regressor. Compared to the OLS (ordinary least squares) estimator, the coefficient weights are slightly shifted toward zeros, which stabilises them. As the prior on the weights is a Gaussian prior, the histogram of the estimated weights is Gaussian. The estimation of the model is done by iteratively maximizing the marginal log-likelihood of the observations. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from scipy import stats from sklearn.linear_model import BayesianRidge, LinearRegression ############################################################################### # Generating simulated data with Gaussian weigthts np.random.seed(0) n_samples, n_features = 100, 100 X = np.random.randn(n_samples, n_features) # Create Gaussian data # Create weigts with a precision lambda_ of 4. lambda_ = 4. w = np.zeros(n_features) # Only keep 10 weights of interest relevant_features = np.random.randint(0, n_features, 10) for i in relevant_features: w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_)) # Create noise with a precision alpha of 50. alpha_ = 50. noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples) # Create the target y = np.dot(X, w) + noise ############################################################################### # Fit the Bayesian Ridge Regression and an OLS for comparison clf = BayesianRidge(compute_score=True) clf.fit(X, y) ols = LinearRegression() ols.fit(X, y) ############################################################################### # Plot true weights, estimated weights and histogram of the weights plt.figure(figsize=(6, 5)) plt.title("Weights of the model") plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate") plt.plot(w, 'g-', label="Ground truth") plt.plot(ols.coef_, 'r--', label="OLS estimate") plt.xlabel("Features") plt.ylabel("Values of the weights") plt.legend(loc="best", prop=dict(size=12)) plt.figure(figsize=(6, 5)) plt.title("Histogram of the weights") plt.hist(clf.coef_, bins=n_features, log=True) plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)), 'ro', label="Relevant features") plt.ylabel("Features") plt.xlabel("Values of the weights") plt.legend(loc="lower left") plt.figure(figsize=(6, 5)) plt.title("Marginal log-likelihood") plt.plot(clf.scores_) plt.ylabel("Score") plt.xlabel("Iterations") plt.show()
bsd-3-clause
sergiohzlz/complejos
JdelC/jdelc.py
1
2211
#!/usr/bin/python
import numpy as np
import numpy.random as rnd
import sys
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot as plt
from numpy import pi

poligono_p = lambda n, rot: [(1, i*2*np.pi/n + rot) for i in range(1, n+1)]
pol2cart = lambda ro, te: (ro*np.cos(te), ro*np.sin(te))
poligono_c = lambda L: [pol2cart(x[0], x[1]) for x in L]
genera_coords = lambda L, p: dict(zip(L, p))
pmedio = lambda x, y: (0.5*(x[0]+y[0]), 0.5*(x[1]+y[1]))


class JdelC(object):
    def __init__(self):
        pass


def juego(n, m=100000, rot=pi/2):
    C = genera_coords(range(n), poligono_c(poligono_p(n, rot)))
    P = [C[rnd.choice(range(n))]]
    for i in range(m):
        up = P[-1]
        vz = C[rnd.choice(range(n))]
        P.append(pmedio(up, vz))
    return np.array(P), C


def juego_sec(V, S, m=100000, rot=pi/4):
    n = len(V)
    C = genera_coords(V, poligono_c(poligono_p(n, rot)))
    P = [C[S[0]]]
    cont = 0
    for i in range(1, m):
        up = P[-1]
        vz = C[S[i]]
        P.append(pmedio(up, vz))
    return np.array(P), C


def secciones_nucleotidos(f, m):
    cont = 0
    for r in f:
        l = r.strip()
        if (l[0] == '>'):
            continue
        acum = m - cont
        sec = ''.join([s for s in l[:acum] if s != 'N'])
        cont += len(sec)
        if (cont <= m):
            yield sec


def secciones(f, m):
    cont = 0
    for r in f:
        l = r.strip()
        try:
            if (l[0] == '>'):
                continue
        except:
            continue
        acum = m - cont
        sec = ''.join([s for s in l[:acum]])
        cont += len(sec)
        if (cont <= m):
            yield sec


def grafica(R):
    plt.scatter(R[:, 0], R[:, 1], s=0.1, c='k')


def grafcoords(*D):
    R, C = D
    plt.scatter(R[:, 0], R[:, 1], s=0.1, c='k')
    for c in C:
        plt.annotate(c, C[c])


if __name__ == '__main__':
    n = int(sys.argv[1])  # first command-line argument (sys.argv[0] is the script name)

    # Example
    # In [150]: G = open('Saccharomyces_cerevisiae_aa.fasta','r')
    #
    # In [151]: secs = jdelc.secciones(G,1000)
    #
    # In [152]: secuencia = ''
    #
    # In [153]: for sec in secs:
    #      ...:     secuencia += sec
    #      ...:
    #
    # In [154]: R,C = jdelc.juego_sec(aminos,secuencia, len(secuencia),pi/4); jdelc.grafcoords(R,C); show()
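A short usage sketch of the chaos game defined above, assuming the file is importable as jdelc (as the in-file example does); with three vertices the midpoint game traces the familiar Sierpinski pattern:

import jdelc
from matplotlib import pyplot as plt

R, C = jdelc.juego(3, m=50000)   # three vertices, 50,000 midpoint jumps
jdelc.grafcoords(R, C)
plt.show()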
gpl-2.0
letsgoexploring/economicData
usConvergenceData/stateIncomeData.py
1
5246
# coding: utf-8 # In[1]: from __future__ import division,unicode_literals # get_ipython().magic('matplotlib inline') import numpy as np import pandas as pd import json import runProcs from urllib.request import urlopen import matplotlib.pyplot as plt # In[2]: # 0. State abbreviations # 0.1 dictionary: stateAbbr = { u'Alabama':u'AL', u'Alaska':u'AK', u'Arizona':u'AZ', u'Arkansas':u'AR', u'California':u'CA', u'Colorado':u'CO', u'Connecticut':u'CT', u'Delaware':u'DE', u'District of Columbia':u'DC', u'Florida':u'FL', u'Georgia':u'GA', u'Hawaii':u'HI', u'Idaho':u'ID', u'Illinois':u'IL', u'Indiana':u'IN', u'Iowa':u'IA', u'Kansas':u'KS', u'Kentucky':u'KY', u'Louisiana':u'LA', u'Maine':u'ME', u'Maryland':u'MD', u'Massachusetts':u'MA', u'Michigan':u'MI', u'Minnesota':u'MN', u'Mississippi':u'MS', u'Missouri':u'MO', u'Montana':u'MT', u'Nebraska':u'NE', u'Nevada':u'NV', u'New Hampshire':u'NH', u'New Jersey':u'NJ', u'New Mexico':u'NM', u'New York':u'NY', u'North Carolina':u'NC', u'North Dakota':u'ND', u'Ohio':u'OH', u'Oklahoma':u'OK', u'Oregon':u'OR', u'Pennsylvania':u'PA', u'Rhode Island':u'RI', u'South Carolina':u'SC', u'South Dakota':u'SD', u'Tennessee':u'TN', u'Texas':u'TX', u'Utah':u'UT', u'Vermont':u'VT', u'Virginia':u'VA', u'Washington':u'WA', u'West Virginia':u'WV', u'Wisconsin':u'WI', u'Wyoming':u'WY' } # 0.2 List of states in the US stateList = [s for s in stateAbbr] # In[3]: # 1. Construct series for price deflator # 1.1 Obtain data from BEA gdpDeflator = urlopen('http://bea.gov/api/data/?UserID=3EDEAA66-4B2B-4926-83C9-FD2089747A5B&method=GetData&datasetname=NIPA&TableID=13&Frequency=A&Year=X&ResultFormat=JSON&') # result = gdpDeflator.readall().decode('utf-8') result = gdpDeflator.read().decode('utf-8') jsonResponse = json.loads(result) # In[4]: # 1.2 Construct the data frame for the deflator series values = [] years = [] for element in jsonResponse['BEAAPI']['Results']['Data']: # if element['LineDescription'] == 'Personal consumption expenditures': if element['LineDescription'] == 'Gross domestic product': years.append(element['TimePeriod']) values.append(float(element['DataValue'])/100) values = np.array([values]).T dataP = pd.DataFrame(values,index = years,columns = ['price level']) # 1.3 Display the data print(dataP) # In[5]: # 2. 
Construct series for per capita income by state, region, and the entire us # 2.1 Obtain data from BEA stateYpc = urlopen('http://bea.gov/api/data/?UserID=3EDEAA66-4B2B-4926-83C9-FD2089747A5B&method=GetData&datasetname=RegionalData&KeyCode=PCPI_SI&Year=ALL&GeoFips=STATE&ResultFormat=JSON&') # result = stateYpc.readall().decode('utf-8') result = stateYpc.read().decode('utf-8') jsonResponse = json.loads(result) # jsonResponse['BEAAPI']['Results']['Data'][0]['GeoName'] # In[6]: # 2.2 Construct the data frame for the per capita income series # 2.2.1 Initialize the dataframe regions = [] years = [] for element in jsonResponse['BEAAPI']['Results']['Data']: if element['GeoName'] not in regions: regions.append(element['GeoName']) if element['TimePeriod'] not in years: years.append(element['TimePeriod']) df = np.zeros([len(years),len(regions)]) dataY = pd.DataFrame(df,index = years,columns = regions) # 2.2.2 Populate the dataframe with values for element in jsonResponse['BEAAPI']['Results']['Data']: try: dataY[element['GeoName']][element['TimePeriod']] = np.round(float(element[u'DataValue'])/float(dataP.loc[element['TimePeriod']]),2)# real except: dataY[element['GeoName']][element['TimePeriod']] = np.nan # 2.2.3 Replace the state names in the index with abbreviations columns=[] for r in regions: if r in stateList: columns.append(stateAbbr[r]) else: columns.append(r) dataY.columns=columns # 2.2.4 Display the data obtained from the BEA dataY # In[7]: # 3. State income data for 1840, 1880, and 1900 # 3.1.1 Import Easterlin's income data easterlin_data = pd.read_csv('Historical Statistics of the US - Easterlin State Income Data.csv',index_col=0) # 3.1.2 Import historic CPI data historic_cpi_data=pd.read_csv('Historical Statistics of the US - cpi.csv',index_col=0) historic_cpi_data = historic_cpi_data/historic_cpi_data.loc[1929]*float(dataP.loc['1929']) # In[8]: # Const df_1840 = easterlin_data['Income per capita - 1840 - A [cur dollars]']/float(historic_cpi_data.loc[1840]) df_1880 = easterlin_data['Income per capita - 1880 [cur dollars]']/float(historic_cpi_data.loc[1890]) df_1900 = easterlin_data['Income per capita - 1900 [cur dollars]']/float(historic_cpi_data.loc[1900]) df = pd.DataFrame({'1840':df_1840,'1880':df_1880,'1900':df_1900}).transpose() # In[9]: df = pd.concat([dataY,df]).sort_index() # In[17]: df.loc['1880'].sort_values() # In[10]: # 3. Export data to csv series = dataY.sort_index() series = df.sort_index() dropCols = [u'AK', u'HI', u'New England', u'Mideast', u'Great Lakes', u'Plains', u'Southeast', u'Southwest', u'Rocky Mountain', u'Far West'] for c in dropCols: series = series.drop([c],axis=1) series.to_csv('stateIncomeData.csv',na_rep='NaN') # In[11]: len(dataY.columns) # In[12]: # 4. Export notebook to .py runProcs.exportNb('stateIncomeData')
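The core transformation in the notebook above is deflating nominal dollar figures by a price index; a toy version of that step with made-up numbers:

import pandas as pd

nominal = pd.Series({'2000': 30000.0, '2010': 42000.0})   # current dollars
deflator = pd.Series({'2000': 0.81, '2010': 0.96})        # price level, base year = 1.0
real = (nominal / deflator).round(2)                      # constant (base-year) dollars
print(real)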
mit
sumspr/scikit-learn
examples/linear_model/plot_lasso_and_elasticnet.py
249
1982
""" ======================================== Lasso and Elastic Net for Sparse Signals ======================================== Estimates Lasso and Elastic-Net regression models on a manually generated sparse signal corrupted with an additive noise. Estimated coefficients are compared with the ground-truth. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.metrics import r2_score ############################################################################### # generate some sparse data to play with np.random.seed(42) n_samples, n_features = 50, 200 X = np.random.randn(n_samples, n_features) coef = 3 * np.random.randn(n_features) inds = np.arange(n_features) np.random.shuffle(inds) coef[inds[10:]] = 0 # sparsify coef y = np.dot(X, coef) # add noise y += 0.01 * np.random.normal((n_samples,)) # Split data in train set and test set n_samples = X.shape[0] X_train, y_train = X[:n_samples / 2], y[:n_samples / 2] X_test, y_test = X[n_samples / 2:], y[n_samples / 2:] ############################################################################### # Lasso from sklearn.linear_model import Lasso alpha = 0.1 lasso = Lasso(alpha=alpha) y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test) r2_score_lasso = r2_score(y_test, y_pred_lasso) print(lasso) print("r^2 on test data : %f" % r2_score_lasso) ############################################################################### # ElasticNet from sklearn.linear_model import ElasticNet enet = ElasticNet(alpha=alpha, l1_ratio=0.7) y_pred_enet = enet.fit(X_train, y_train).predict(X_test) r2_score_enet = r2_score(y_test, y_pred_enet) print(enet) print("r^2 on test data : %f" % r2_score_enet) plt.plot(enet.coef_, label='Elastic net coefficients') plt.plot(lasso.coef_, label='Lasso coefficients') plt.plot(coef, '--', label='original coefficients') plt.legend(loc='best') plt.title("Lasso R^2: %f, Elastic Net R^2: %f" % (r2_score_lasso, r2_score_enet)) plt.show()
bsd-3-clause
teoliphant/scipy
scipy/stats/distributions.py
2
215895
# Functions to implement several important functions for # various Continous and Discrete Probability Distributions # # Author: Travis Oliphant 2002-2011 with contributions from # SciPy Developers 2004-2011 # import math import warnings from copy import copy from scipy.misc import comb, derivative from scipy import special from scipy import optimize from scipy import integrate from scipy.special import gammaln as gamln import inspect from numpy import all, where, arange, putmask, \ ravel, take, ones, sum, shape, product, repeat, reshape, \ zeros, floor, logical_and, log, sqrt, exp, arctanh, tan, sin, arcsin, \ arctan, tanh, ndarray, cos, cosh, sinh, newaxis, array, log1p, expm1 from numpy import atleast_1d, polyval, ceil, place, extract, \ any, argsort, argmax, vectorize, r_, asarray, nan, inf, pi, isinf, \ power, NINF, empty import numpy import numpy as np import numpy.random as mtrand from numpy import flatnonzero as nonzero import vonmises_cython from _tukeylambda_stats import tukeylambda_variance as _tlvar, \ tukeylambda_kurtosis as _tlkurt __all__ = [ 'rv_continuous', 'ksone', 'kstwobign', 'norm', 'alpha', 'anglit', 'arcsine', 'beta', 'betaprime', 'bradford', 'burr', 'fisk', 'cauchy', 'chi', 'chi2', 'cosine', 'dgamma', 'dweibull', 'erlang', 'expon', 'exponweib', 'exponpow', 'fatiguelife', 'foldcauchy', 'f', 'foldnorm', 'frechet_r', 'weibull_min', 'frechet_l', 'weibull_max', 'genlogistic', 'genpareto', 'genexpon', 'genextreme', 'gamma', 'gengamma', 'genhalflogistic', 'gompertz', 'gumbel_r', 'gumbel_l', 'halfcauchy', 'halflogistic', 'halfnorm', 'hypsecant', 'gausshyper', 'invgamma', 'invgauss', 'invweibull', 'johnsonsb', 'johnsonsu', 'laplace', 'levy', 'levy_l', 'levy_stable', 'logistic', 'loggamma', 'loglaplace', 'lognorm', 'gilbrat', 'maxwell', 'mielke', 'nakagami', 'ncx2', 'ncf', 't', 'nct', 'pareto', 'lomax', 'powerlaw', 'powerlognorm', 'powernorm', 'rdist', 'rayleigh', 'reciprocal', 'rice', 'recipinvgauss', 'semicircular', 'triang', 'truncexpon', 'truncnorm', 'tukeylambda', 'uniform', 'vonmises', 'wald', 'wrapcauchy', 'entropy', 'rv_discrete', 'binom', 'bernoulli', 'nbinom', 'geom', 'hypergeom', 'logser', 'poisson', 'planck', 'boltzmann', 'randint', 'zipf', 'dlaplace', 'skellam' ] floatinfo = numpy.finfo(float) gam = special.gamma random = mtrand.random_sample import types from scipy.misc import doccer sgf = vectorize try: from new import instancemethod except ImportError: # Python 3 def instancemethod(func, obj, cls): return types.MethodType(func, obj) # These are the docstring parts used for substitution in specific # distribution docstrings. docheaders = {'methods':"""\nMethods\n-------\n""", 'parameters':"""\nParameters\n---------\n""", 'notes':"""\nNotes\n-----\n""", 'examples':"""\nExamples\n--------\n"""} _doc_rvs = \ """rvs(%(shapes)s, loc=0, scale=1, size=1) Random variates. """ _doc_pdf = \ """pdf(x, %(shapes)s, loc=0, scale=1) Probability density function. """ _doc_logpdf = \ """logpdf(x, %(shapes)s, loc=0, scale=1) Log of the probability density function. """ _doc_pmf = \ """pmf(x, %(shapes)s, loc=0, scale=1) Probability mass function. """ _doc_logpmf = \ """logpmf(x, %(shapes)s, loc=0, scale=1) Log of the probability mass function. """ _doc_cdf = \ """cdf(x, %(shapes)s, loc=0, scale=1) Cumulative density function. """ _doc_logcdf = \ """logcdf(x, %(shapes)s, loc=0, scale=1) Log of the cumulative density function. """ _doc_sf = \ """sf(x, %(shapes)s, loc=0, scale=1) Survival function (1-cdf --- sometimes more accurate). 
""" _doc_logsf = \ """logsf(x, %(shapes)s, loc=0, scale=1) Log of the survival function. """ _doc_ppf = \ """ppf(q, %(shapes)s, loc=0, scale=1) Percent point function (inverse of cdf --- percentiles). """ _doc_isf = \ """isf(q, %(shapes)s, loc=0, scale=1) Inverse survival function (inverse of sf). """ _doc_moment = \ """moment(n, %(shapes)s, loc=0, scale=1) Non-central moment of order n """ _doc_stats = \ """stats(%(shapes)s, loc=0, scale=1, moments='mv') Mean('m'), variance('v'), skew('s'), and/or kurtosis('k'). """ _doc_entropy = \ """entropy(%(shapes)s, loc=0, scale=1) (Differential) entropy of the RV. """ _doc_fit = \ """fit(data, %(shapes)s, loc=0, scale=1) Parameter estimates for generic data. """ _doc_expect = \ """expect(func, %(shapes)s, loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds) Expected value of a function (of one argument) with respect to the distribution. """ _doc_expect_discrete = \ """expect(func, %(shapes)s, loc=0, lb=None, ub=None, conditional=False) Expected value of a function (of one argument) with respect to the distribution. """ _doc_median = \ """median(%(shapes)s, loc=0, scale=1) Median of the distribution. """ _doc_mean = \ """mean(%(shapes)s, loc=0, scale=1) Mean of the distribution. """ _doc_var = \ """var(%(shapes)s, loc=0, scale=1) Variance of the distribution. """ _doc_std = \ """std(%(shapes)s, loc=0, scale=1) Standard deviation of the distribution. """ _doc_interval = \ """interval(alpha, %(shapes)s, loc=0, scale=1) Endpoints of the range that contains alpha percent of the distribution """ _doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf, _doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf, _doc_logsf, _doc_ppf, _doc_isf, _doc_moment, _doc_stats, _doc_entropy, _doc_fit, _doc_expect, _doc_median, _doc_mean, _doc_var, _doc_std, _doc_interval]) # Note that the two lines for %(shapes) are searched for and replaced in # rv_continuous and rv_discrete - update there if the exact string changes _doc_default_callparams = \ """ Parameters ---------- x : array_like quantiles q : array_like lower or upper tail probability %(shapes)s : array_like shape parameters loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) size : int or tuple of ints, optional shape of random variates (default computed from input arguments ) moments : str, optional composed of letters ['mvsk'] specifying which moments to compute where 'm' = mean, 'v' = variance, 's' = (Fisher's) skew and 'k' = (Fisher's) kurtosis. (default='mv') """ _doc_default_longsummary = \ """Continuous random variables are defined from a standard form and may require some shape parameters to complete its specification. Any optional keyword parameters can be passed to the methods of the RV object as given below: """ _doc_default_frozen_note = \ """ Alternatively, the object may be called (as a function) to fix the shape, location, and scale parameters returning a "frozen" continuous RV object: rv = %(name)s(%(shapes)s, loc=0, scale=1) - Frozen RV object with the same methods but holding the given shape, location, and scale fixed. """ _doc_default_example = \ """Examples -------- >>> from scipy.stats import %(name)s >>> numargs = %(name)s.numargs >>> [ %(shapes)s ] = [0.9,] * numargs >>> rv = %(name)s(%(shapes)s) Display frozen pdf >>> x = np.linspace(0, np.minimum(rv.dist.b, 3)) >>> h = plt.plot(x, rv.pdf(x)) Here, ``rv.dist.b`` is the right endpoint of the support of ``rv.dist``. 
Check accuracy of cdf and ppf >>> prb = %(name)s.cdf(x, %(shapes)s) >>> h = plt.semilogy(np.abs(x - %(name)s.ppf(prb, %(shapes)s)) + 1e-20) Random number generation >>> R = %(name)s.rvs(%(shapes)s, size=100) """ _doc_default = ''.join([_doc_default_longsummary, _doc_allmethods, _doc_default_callparams, _doc_default_frozen_note, _doc_default_example]) _doc_default_before_notes = ''.join([_doc_default_longsummary, _doc_allmethods, _doc_default_callparams, _doc_default_frozen_note]) docdict = {'rvs':_doc_rvs, 'pdf':_doc_pdf, 'logpdf':_doc_logpdf, 'cdf':_doc_cdf, 'logcdf':_doc_logcdf, 'sf':_doc_sf, 'logsf':_doc_logsf, 'ppf':_doc_ppf, 'isf':_doc_isf, 'stats':_doc_stats, 'entropy':_doc_entropy, 'fit':_doc_fit, 'moment':_doc_moment, 'expect':_doc_expect, 'interval':_doc_interval, 'mean':_doc_mean, 'std':_doc_std, 'var':_doc_var, 'median':_doc_median, 'allmethods':_doc_allmethods, 'callparams':_doc_default_callparams, 'longsummary':_doc_default_longsummary, 'frozennote':_doc_default_frozen_note, 'example':_doc_default_example, 'default':_doc_default, 'before_notes':_doc_default_before_notes} # Reuse common content between continous and discrete docs, change some # minor bits. docdict_discrete = docdict.copy() docdict_discrete['pmf'] = _doc_pmf docdict_discrete['logpmf'] = _doc_logpmf docdict_discrete['expect'] = _doc_expect_discrete _doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf', 'ppf', 'isf', 'stats', 'entropy', 'expect', 'median', 'mean', 'var', 'std', 'interval'] for obj in _doc_disc_methods: docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '') docdict_discrete.pop('pdf') docdict_discrete.pop('logpdf') _doc_allmethods = ''.join([docdict_discrete[obj] for obj in _doc_disc_methods]) docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods docdict_discrete['longsummary'] = _doc_default_longsummary.replace(\ 'Continuous', 'Discrete') _doc_default_frozen_note = \ """ Alternatively, the object may be called (as a function) to fix the shape and location parameters returning a "frozen" discrete RV object: rv = %(name)s(%(shapes)s, loc=0) - Frozen RV object with the same methods but holding the given shape and location fixed. """ docdict_discrete['frozennote'] = _doc_default_frozen_note _doc_default_discrete_example = \ """Examples -------- >>> from scipy.stats import %(name)s >>> [ %(shapes)s ] = [<Replace with reasonable values>] >>> rv = %(name)s(%(shapes)s) Display frozen pmf >>> x = np.arange(0, np.minimum(rv.dist.b, 3)) >>> h = plt.vlines(x, 0, rv.pmf(x), lw=2) Here, ``rv.dist.b`` is the right endpoint of the support of ``rv.dist``. 
Check accuracy of cdf and ppf >>> prb = %(name)s.cdf(x, %(shapes)s) >>> h = plt.semilogy(np.abs(x - %(name)s.ppf(prb, %(shapes)s)) + 1e-20) Random number generation >>> R = %(name)s.rvs(%(shapes)s, size=100) """ docdict_discrete['example'] = _doc_default_discrete_example _doc_default_before_notes = ''.join([docdict_discrete['longsummary'], docdict_discrete['allmethods'], docdict_discrete['callparams'], docdict_discrete['frozennote']]) docdict_discrete['before_notes'] = _doc_default_before_notes _doc_default_disc = ''.join([docdict_discrete['longsummary'], docdict_discrete['allmethods'], docdict_discrete['frozennote'], docdict_discrete['example']]) docdict_discrete['default'] = _doc_default_disc # clean up all the separate docstring elements, we do not need them anymore for obj in [s for s in dir() if s.startswith('_doc_')]: exec('del ' + obj) del obj try: del s except NameError: # in Python 3, loop variables are not visible after the loop pass def _moment(data, n, mu=None): if mu is None: mu = data.mean() return ((data - mu)**n).mean() def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args): if (n==0): return 1.0 elif (n==1): if mu is None: val = moment_func(1,*args) else: val = mu elif (n==2): if mu2 is None or mu is None: val = moment_func(2,*args) else: val = mu2 + mu*mu elif (n==3): if g1 is None or mu2 is None or mu is None: val = moment_func(3,*args) else: mu3 = g1*(mu2**1.5) # 3rd central moment val = mu3+3*mu*mu2+mu**3 # 3rd non-central moment elif (n==4): if g1 is None or g2 is None or mu2 is None or mu is None: val = moment_func(4,*args) else: mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment mu3 = g1*(mu2**1.5) # 3rd central moment val = mu4+4*mu*mu3+6*mu*mu*mu2+mu**4 else: val = moment_func(n, *args) return val def _skew(data): """ skew is third central moment / variance**(1.5) """ data = np.ravel(data) mu = data.mean() m2 = ((data - mu)**2).mean() m3 = ((data - mu)**3).mean() return m3 / m2**1.5 def _kurtosis(data): """ kurtosis is fourth central moment / variance**2 - 3 """ data = np.ravel(data) mu = data.mean() m2 = ((data - mu)**2).mean() m4 = ((data - mu)**4).mean() return m4 / m2**2 - 3 # Frozen RV class class rv_frozen(object): def __init__(self, dist, *args, **kwds): self.args = args self.kwds = kwds self.dist = dist def pdf(self, x): #raises AttributeError in frozen discrete distribution return self.dist.pdf(x, *self.args, **self.kwds) def logpdf(self, x): return self.dist.logpdf(x, *self.args, **self.kwds) def cdf(self, x): return self.dist.cdf(x, *self.args, **self.kwds) def logcdf(self, x): return self.dist.logcdf(x, *self.args, **self.kwds) def ppf(self, q): return self.dist.ppf(q, *self.args, **self.kwds) def isf(self, q): return self.dist.isf(q, *self.args, **self.kwds) def rvs(self, size=None): kwds = self.kwds.copy() kwds.update({'size':size}) return self.dist.rvs(*self.args, **kwds) def sf(self, x): return self.dist.sf(x, *self.args, **self.kwds) def logsf(self, x): return self.dist.logsf(x, *self.args, **self.kwds) def stats(self, moments='mv'): kwds = self.kwds.copy() kwds.update({'moments':moments}) return self.dist.stats(*self.args, **kwds) def median(self): return self.dist.median(*self.args, **self.kwds) def mean(self): return self.dist.mean(*self.args, **self.kwds) def var(self): return self.dist.var(*self.args, **self.kwds) def std(self): return self.dist.std(*self.args, **self.kwds) def moment(self, n): return self.dist.moment(n, *self.args, **self.kwds) def entropy(self): return self.dist.entropy(*self.args, **self.kwds) def pmf(self,k): return 
self.dist.pmf(k, *self.args, **self.kwds) def logpmf(self,k): return self.dist.logpmf(k, *self.args, **self.kwds) def interval(self, alpha): return self.dist.interval(alpha, *self.args, **self.kwds) def valarray(shape,value=nan,typecode=None): """Return an array of all value. """ out = reshape(repeat([value],product(shape,axis=0),axis=0),shape) if typecode is not None: out = out.astype(typecode) if not isinstance(out, ndarray): out = asarray(out) return out # This should be rewritten def argsreduce(cond, *args): """Return the sequence of ravel(args[i]) where ravel(condition) is True in 1D. Examples -------- >>> import numpy as np >>> rand = np.random.random_sample >>> A = rand((4,5)) >>> B = 2 >>> C = rand((1,5)) >>> cond = np.ones(A.shape) >>> [A1,B1,C1] = argsreduce(cond,A,B,C) >>> B1.shape (20,) >>> cond[2,:] = 0 >>> [A2,B2,C2] = argsreduce(cond,A,B,C) >>> B2.shape (15,) """ newargs = atleast_1d(*args) if not isinstance(newargs, list): newargs = [newargs,] expand_arr = (cond==cond) return [extract(cond, arr1 * expand_arr) for arr1 in newargs] class rv_generic(object): """Class which encapsulates common functionality between rv_discrete and rv_continuous. """ def _fix_loc_scale(self, args, loc, scale=1): N = len(args) if N > self.numargs: if N == self.numargs + 1 and loc is None: # loc is given without keyword loc = args[-1] if N == self.numargs + 2 and scale is None: # loc and scale given without keyword loc, scale = args[-2:] args = args[:self.numargs] if scale is None: scale = 1.0 if loc is None: loc = 0.0 return args, loc, scale def _fix_loc(self, args, loc): args, loc, scale = self._fix_loc_scale(args, loc) return args, loc # These are actually called, and should not be overwritten if you # want to keep error checking. def rvs(self,*args,**kwds): """ Random variates of given type. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) size : int or tuple of ints, optional defining number of random variates (default=1) Returns ------- rvs : array_like random variates of given `size` """ kwd_names = ['loc', 'scale', 'size', 'discrete'] loc, scale, size, discrete = map(kwds.get, kwd_names, [None]*len(kwd_names)) args, loc, scale = self._fix_loc_scale(args, loc, scale) cond = logical_and(self._argcheck(*args),(scale >= 0)) if not all(cond): raise ValueError("Domain error in arguments.") # self._size is total size of all output values self._size = product(size, axis=0) if self._size is not None and self._size > 1: size = numpy.array(size, ndmin=1) if np.all(scale == 0): return loc*ones(size, 'd') vals = self._rvs(*args) if self._size is not None: vals = reshape(vals, size) vals = vals * scale + loc # Cast to int if discrete if discrete: if numpy.isscalar(vals): vals = int(vals) else: vals = vals.astype(int) return vals def median(self, *args, **kwds): """ Median of the distribution. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- median : float the median of the distribution. 
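# Minimal standalone sketch of what rv_frozen does: it forwards every call to
# the underlying distribution with the stored shape/loc/scale arguments.
# scipy.stats.gamma and the values below are used purely as an illustration.
import numpy as np
from scipy.stats import gamma

a, loc, scale = 2.5, 0.0, 1.5
frozen = gamma(a, loc=loc, scale=scale)   # rv_frozen instance

x = np.linspace(0.1, 10, 5)
# frozen.pdf(x) is equivalent to gamma.pdf(x, a, loc=loc, scale=scale)
assert np.allclose(frozen.pdf(x), gamma.pdf(x, a, loc=loc, scale=scale))
assert np.allclose(frozen.mean(), gamma.mean(a, loc=loc, scale=scale))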
See Also -------- self.ppf --- inverse of the CDF """ return self.ppf(0.5, *args, **kwds) def mean(self, *args, **kwds): """ Mean of the distribution Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- mean : float the mean of the distribution """ kwds['moments'] = 'm' res = self.stats(*args, **kwds) if isinstance(res, ndarray) and res.ndim == 0: return res[()] return res def var(self, *args, **kwds): """ Variance of the distribution Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- var : float the variance of the distribution """ kwds['moments'] = 'v' res = self.stats(*args, **kwds) if isinstance(res, ndarray) and res.ndim == 0: return res[()] return res def std(self, *args, **kwds): """ Standard deviation of the distribution. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- std : float standard deviation of the distribution """ kwds['moments'] = 'v' res = sqrt(self.stats(*args, **kwds)) return res def interval(self, alpha, *args, **kwds): """Confidence interval with equal areas around the median Parameters ---------- alpha : array_like float in [0,1] Probability that an rv will be drawn from the returned range arg1, arg2, ... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default = 0) scale : array_like, optional scale paramter (default = 1) Returns ------- a, b : array_like (float) end-points of range that contain alpha % of the rvs """ alpha = asarray(alpha) if any((alpha > 1) | (alpha < 0)): raise ValueError("alpha must be between 0 and 1 inclusive") q1 = (1.0-alpha)/2 q2 = (1.0+alpha)/2 a = self.ppf(q1, *args, **kwds) b = self.ppf(q2, *args, **kwds) return a, b ## continuous random variables: implement maybe later ## ## hf --- Hazard Function (PDF / SF) ## chf --- Cumulative hazard function (-log(SF)) ## psf --- Probability sparsity function (reciprocal of the pdf) in ## units of percent-point-function (as a function of q). ## Also, the derivative of the percent-point function. class rv_continuous(rv_generic): """ A generic continuous random variable class meant for subclassing. `rv_continuous` is a base class to construct specific distribution classes and instances from for continuous random variables. It cannot be used directly as a distribution. Parameters ---------- momtype : int, optional The type of generic moment calculation to use: 0 for pdf, 1 (default) for ppf. a : float, optional Lower bound of the support of the distribution, default is minus infinity. b : float, optional Upper bound of the support of the distribution, default is plus infinity. xa : float, optional DEPRECATED xb : float, optional DEPRECATED xtol : float, optional The tolerance for fixed point calculation for generic ppf. 
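# Standalone sketch of the interval() contract shown above: an equal-tailed
# interval around the median built from two ppf calls (norm and the loc/scale
# values here are illustrative choices, not part of the generic machinery).
from scipy.stats import norm

alpha = 0.95
lo, hi = norm.interval(alpha, loc=10.0, scale=2.0)
# the same endpoints obtained directly from the ppf
q1, q2 = (1.0 - alpha) / 2, (1.0 + alpha) / 2
assert abs(lo - norm.ppf(q1, loc=10.0, scale=2.0)) < 1e-12
assert abs(hi - norm.ppf(q2, loc=10.0, scale=2.0)) < 1e-12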
badvalue : object, optional The value in a result arrays that indicates a value that for which some argument restriction is violated, default is np.nan. name : str, optional The name of the instance. This string is used to construct the default example for distributions. longname : str, optional This string is used as part of the first line of the docstring returned when a subclass has no docstring of its own. Note: `longname` exists for backwards compatibility, do not use for new subclasses. shapes : str, optional The shape of the distribution. For example ``"m, n"`` for a distribution that takes two integers as the two shape arguments for all its methods. extradoc : str, optional, deprecated This string is used as the last part of the docstring returned when a subclass has no docstring of its own. Note: `extradoc` exists for backwards compatibility, do not use for new subclasses. Methods ------- rvs(<shape(s)>, loc=0, scale=1, size=1) random variates pdf(x, <shape(s)>, loc=0, scale=1) probability density function logpdf(x, <shape(s)>, loc=0, scale=1) log of the probability density function cdf(x, <shape(s)>, loc=0, scale=1) cumulative density function logcdf(x, <shape(s)>, loc=0, scale=1) log of the cumulative density function sf(x, <shape(s)>, loc=0, scale=1) survival function (1-cdf --- sometimes more accurate) logsf(x, <shape(s)>, loc=0, scale=1) log of the survival function ppf(q, <shape(s)>, loc=0, scale=1) percent point function (inverse of cdf --- quantiles) isf(q, <shape(s)>, loc=0, scale=1) inverse survival function (inverse of sf) moment(n, <shape(s)>, loc=0, scale=1) non-central n-th moment of the distribution. May not work for array arguments. stats(<shape(s)>, loc=0, scale=1, moments='mv') mean('m'), variance('v'), skew('s'), and/or kurtosis('k') entropy(<shape(s)>, loc=0, scale=1) (differential) entropy of the RV. fit(data, <shape(s)>, loc=0, scale=1) Parameter estimates for generic data expect(func=None, args=(), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds) Expected value of a function with respect to the distribution. Additional kwd arguments passed to integrate.quad median(<shape(s)>, loc=0, scale=1) Median of the distribution. mean(<shape(s)>, loc=0, scale=1) Mean of the distribution. std(<shape(s)>, loc=0, scale=1) Standard deviation of the distribution. var(<shape(s)>, loc=0, scale=1) Variance of the distribution. interval(alpha, <shape(s)>, loc=0, scale=1) Interval that with `alpha` percent probability contains a random realization of this distribution. __call__(<shape(s)>, loc=0, scale=1) Calling a distribution instance creates a frozen RV object with the same methods but holding the given shape, location, and scale fixed. See Notes section. **Parameters for Methods** x : array_like quantiles q : array_like lower or upper tail probability <shape(s)> : array_like shape parameters loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) size : int or tuple of ints, optional shape of random variates (default computed from input arguments ) moments : string, optional composed of letters ['mvsk'] specifying which moments to compute where 'm' = mean, 'v' = variance, 's' = (Fisher's) skew and 'k' = (Fisher's) kurtosis. 
(default='mv')
    n : int
        order of the moment to calculate in the ``moment`` method

    Notes
    -----
    **Methods that can be overwritten by subclasses**
    ::

      _rvs
      _pdf
      _cdf
      _sf
      _ppf
      _isf
      _stats
      _munp
      _entropy
      _argcheck

    There are additional (internal and private) generic methods that can
    be useful for cross-checking and for debugging, but might not work in
    all cases when directly called.

    **Frozen Distribution**

    Alternatively, the object may be called (as a function) to fix the shape,
    location, and scale parameters, returning a "frozen" continuous RV object:

    rv = generic(<shape(s)>, loc=0, scale=1)
        frozen RV object with the same methods but holding the given shape,
        location, and scale fixed

    **Subclassing**

    New random variables can be defined by subclassing the rv_continuous class
    and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
    to location 0 and scale 1), which will be given clean arguments (between
    ``a`` and ``b``) that pass the argument check method.

    If the default positive argument check is not correct for your RV, you
    will also need to re-define the ``_argcheck`` method.

    Correct, but potentially slow, defaults exist for the remaining methods;
    for speed and/or accuracy you can over-ride::

      _logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf

    Rarely would you override ``_isf``, ``_sf`` or ``_logsf``, but you could.

    Statistics are computed using numerical integration by default.
    For speed you can redefine this using ``_stats``:

     - take shape parameters and return mu, mu2, g1, g2
     - if you can't compute one of these, return it as None
     - can also be defined with a keyword argument ``moments=<str>``, where
       <str> is a string composed of 'm', 'v', 's', and/or 'k'.
       Only the components appearing in the string should be computed and
       returned in the order 'm', 'v', 's', or 'k', with missing values
       returned as None.

    Alternatively, you can override ``_munp``, which takes n and shape
    parameters and returns the n-th non-central moment of the distribution.

    Examples
    --------
    To create a new Gaussian distribution, we would do the following::

        class gaussian_gen(rv_continuous):
            "Gaussian distribution"
            def _pdf(self, x):
                ...
        ...
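# A runnable version of the subclassing sketch above, assuming only _pdf is
# overridden; cdf, ppf, rvs, etc. then come from the generic (slow but
# correct) defaults. The names and tolerance are illustrative choices.
import numpy as np
from scipy.stats import rv_continuous, norm

class gaussian_gen(rv_continuous):
    "Gaussian distribution defined through its standardized pdf"
    def _pdf(self, x):
        return np.exp(-x**2 / 2.0) / np.sqrt(2.0 * np.pi)

gaussian = gaussian_gen(name='gaussian')
# the generic cdf (numerical integration of _pdf) should match scipy's norm
assert abs(gaussian.cdf(1.0) - norm.cdf(1.0)) < 1e-6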
""" def __init__(self, momtype=1, a=None, b=None, xa=None, xb=None, xtol=1e-14, badvalue=None, name=None, longname=None, shapes=None, extradoc=None): rv_generic.__init__(self) if badvalue is None: badvalue = nan if name is None: name = 'Distribution' self.badvalue = badvalue self.name = name self.a = a self.b = b if a is None: self.a = -inf if b is None: self.b = inf if xa is not None: warnings.warn("The `xa` parameter is deprecated and will be " "removed in scipy 0.12", DeprecationWarning) if xb is not None: warnings.warn("The `xb` parameter is deprecated and will be " "removed in scipy 0.12", DeprecationWarning) self.xa = xa self.xb = xb self.xtol = xtol self._size = 1 self.m = 0.0 self.moment_type = momtype self.expandarr = 1 if not hasattr(self,'numargs'): #allows more general subclassing with *args cdf_signature = inspect.getargspec(self._cdf.im_func) numargs1 = len(cdf_signature[0]) - 2 pdf_signature = inspect.getargspec(self._pdf.im_func) numargs2 = len(pdf_signature[0]) - 2 self.numargs = max(numargs1, numargs2) #nin correction self.vecfunc = sgf(self._ppf_single_call,otypes='d') self.vecfunc.nin = self.numargs + 1 self.vecentropy = sgf(self._entropy,otypes='d') self.vecentropy.nin = self.numargs + 1 self.veccdf = sgf(self._cdf_single_call,otypes='d') self.veccdf.nin = self.numargs + 1 self.shapes = shapes self.extradoc = extradoc if momtype == 0: self.generic_moment = sgf(self._mom0_sc,otypes='d') else: self.generic_moment = sgf(self._mom1_sc,otypes='d') self.generic_moment.nin = self.numargs+1 # Because of the *args argument # of _mom0_sc, vectorize cannot count the number of arguments correctly. if longname is None: if name[0] in ['aeiouAEIOU']: hstr = "An " else: hstr = "A " longname = hstr + name # generate docstring for subclass instances if self.__doc__ is None: self._construct_default_doc(longname=longname, extradoc=extradoc) else: self._construct_doc() ## This only works for old-style classes... # self.__class__.__doc__ = self.__doc__ def _construct_default_doc(self, longname=None, extradoc=None): """Construct instance docstring from the default template.""" if longname is None: longname = 'A' if extradoc is None: extradoc = '' if extradoc.startswith('\n\n'): extradoc = extradoc[2:] self.__doc__ = ''.join(['%s continuous random variable.'%longname, '\n\n%(before_notes)s\n', docheaders['notes'], extradoc, '\n%(example)s']) self._construct_doc() def _construct_doc(self): """Construct the instance docstring with string substitutions.""" tempdict = docdict.copy() tempdict['name'] = self.name or 'distname' tempdict['shapes'] = self.shapes or '' if self.shapes is None: # remove shapes from call parameters if there are none for item in ['callparams', 'default', 'before_notes']: tempdict[item] = tempdict[item].replace(\ "\n%(shapes)s : array_like\n shape parameters", "") for i in range(2): if self.shapes is None: # necessary because we use %(shapes)s in two forms (w w/o ", ") self.__doc__ = self.__doc__.replace("%(shapes)s, ", "") self.__doc__ = doccer.docformat(self.__doc__, tempdict) def _ppf_to_solve(self, x, q,*args): return apply(self.cdf, (x, )+args)-q def _ppf_single_call(self, q, *args): left = right = None if self.a > -np.inf: left = self.a if self.b < np.inf: right = self.b factor = 10. if not left: # i.e. self.a = -inf left = -1.*factor while self._ppf_to_solve(left, q,*args) > 0.: right = left left *= factor # left is now such that cdf(left) < q if not right: # i.e. 
self.b = inf right = factor while self._ppf_to_solve(right, q,*args) < 0.: left = right right *= factor # right is now such that cdf(right) > q return optimize.brentq(self._ppf_to_solve, \ left, right, args=(q,)+args, xtol=self.xtol) # moment from definition def _mom_integ0(self, x,m,*args): return x**m * self.pdf(x,*args) def _mom0_sc(self, m,*args): return integrate.quad(self._mom_integ0, self.a, self.b, args=(m,)+args)[0] # moment calculated using ppf def _mom_integ1(self, q,m,*args): return (self.ppf(q,*args))**m def _mom1_sc(self, m,*args): return integrate.quad(self._mom_integ1, 0, 1,args=(m,)+args)[0] ## These are the methods you must define (standard form functions) def _argcheck(self, *args): # Default check for correct values on args and keywords. # Returns condition array of 1's where arguments are correct and # 0's where they are not. cond = 1 for arg in args: cond = logical_and(cond,(asarray(arg) > 0)) return cond def _pdf(self,x,*args): return derivative(self._cdf,x,dx=1e-5,args=args,order=5) ## Could also define any of these def _logpdf(self, x, *args): return log(self._pdf(x, *args)) ##(return 1-d using self._size to get number) def _rvs(self, *args): ## Use basic inverse cdf algorithm for RV generation as default. U = mtrand.sample(self._size) Y = self._ppf(U,*args) return Y def _cdf_single_call(self, x, *args): return integrate.quad(self._pdf, self.a, x, args=args)[0] def _cdf(self, x, *args): return self.veccdf(x,*args) def _logcdf(self, x, *args): return log(self._cdf(x, *args)) def _sf(self, x, *args): return 1.0-self._cdf(x,*args) def _logsf(self, x, *args): return log(self._sf(x, *args)) def _ppf(self, q, *args): return self.vecfunc(q,*args) def _isf(self, q, *args): return self._ppf(1.0-q,*args) #use correct _ppf for subclasses # The actual cacluation functions (no basic checking need be done) # If these are defined, the others won't be looked at. # Otherwise, the other set can be defined. def _stats(self,*args, **kwds): return None, None, None, None # Central moments def _munp(self,n,*args): return self.generic_moment(n,*args) def pdf(self,x,*args,**kwds): """ Probability density function at x of the given RV. Parameters ---------- x : array_like quantiles arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- pdf : ndarray Probability density function evaluated at x """ loc,scale=map(kwds.get,['loc','scale']) args, loc, scale = self._fix_loc_scale(args, loc, scale) x,loc,scale = map(asarray,(x,loc,scale)) args = tuple(map(asarray,args)) x = asarray((x-loc)*1.0/scale) cond0 = self._argcheck(*args) & (scale > 0) cond1 = (scale > 0) & (x >= self.a) & (x <= self.b) cond = cond0 & cond1 output = zeros(shape(cond),'d') putmask(output,(1-cond0)+np.isnan(x),self.badvalue) if any(cond): goodargs = argsreduce(cond, *((x,)+args+(scale,))) scale, goodargs = goodargs[-1], goodargs[:-1] place(output,cond,self._pdf(*goodargs) / scale) if output.ndim == 0: return output[()] return output def logpdf(self, x, *args, **kwds): """ Log of the probability density function at x of the given RV. This uses a more numerically accurate calculation if available. Parameters ---------- x : array_like quantiles arg1, arg2, arg3,... 
: array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- logpdf : array_like Log of the probability density function evaluated at x """ loc,scale=map(kwds.get,['loc','scale']) args, loc, scale = self._fix_loc_scale(args, loc, scale) x,loc,scale = map(asarray,(x,loc,scale)) args = tuple(map(asarray,args)) x = asarray((x-loc)*1.0/scale) cond0 = self._argcheck(*args) & (scale > 0) cond1 = (scale > 0) & (x >= self.a) & (x <= self.b) cond = cond0 & cond1 output = empty(shape(cond),'d') output.fill(NINF) putmask(output,(1-cond0)+np.isnan(x),self.badvalue) if any(cond): goodargs = argsreduce(cond, *((x,)+args+(scale,))) scale, goodargs = goodargs[-1], goodargs[:-1] place(output,cond,self._logpdf(*goodargs) - log(scale)) if output.ndim == 0: return output[()] return output def cdf(self,x,*args,**kwds): """ Cumulative distribution function at x of the given RV. Parameters ---------- x : array_like quantiles arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- cdf : array_like Cumulative distribution function evaluated at x """ loc,scale=map(kwds.get,['loc','scale']) args, loc, scale = self._fix_loc_scale(args, loc, scale) x,loc,scale = map(asarray,(x,loc,scale)) args = tuple(map(asarray,args)) x = (x-loc)*1.0/scale cond0 = self._argcheck(*args) & (scale > 0) cond1 = (scale > 0) & (x > self.a) & (x < self.b) cond2 = (x >= self.b) & cond0 cond = cond0 & cond1 output = zeros(shape(cond),'d') place(output,(1-cond0)+np.isnan(x),self.badvalue) place(output,cond2,1.0) if any(cond): #call only if at least 1 entry goodargs = argsreduce(cond, *((x,)+args)) place(output,cond,self._cdf(*goodargs)) if output.ndim == 0: return output[()] return output def logcdf(self,x,*args,**kwds): """ Log of the cumulative distribution function at x of the given RV. Parameters ---------- x : array_like quantiles arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- logcdf : array_like Log of the cumulative distribution function evaluated at x """ loc,scale=map(kwds.get,['loc','scale']) args, loc, scale = self._fix_loc_scale(args, loc, scale) x,loc,scale = map(asarray,(x,loc,scale)) args = tuple(map(asarray,args)) x = (x-loc)*1.0/scale cond0 = self._argcheck(*args) & (scale > 0) cond1 = (scale > 0) & (x > self.a) & (x < self.b) cond2 = (x >= self.b) & cond0 cond = cond0 & cond1 output = empty(shape(cond),'d') output.fill(NINF) place(output,(1-cond0)*(cond1==cond1)+np.isnan(x),self.badvalue) place(output,cond2,0.0) if any(cond): #call only if at least 1 entry goodargs = argsreduce(cond, *((x,)+args)) place(output,cond,self._logcdf(*goodargs)) if output.ndim == 0: return output[()] return output def sf(self,x,*args,**kwds): """ Survival function (1-cdf) at x of the given RV. Parameters ---------- x : array_like quantiles arg1, arg2, arg3,... 
: array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- sf : array_like Survival function evaluated at x """ loc,scale=map(kwds.get,['loc','scale']) args, loc, scale = self._fix_loc_scale(args, loc, scale) x,loc,scale = map(asarray,(x,loc,scale)) args = tuple(map(asarray,args)) x = (x-loc)*1.0/scale cond0 = self._argcheck(*args) & (scale > 0) cond1 = (scale > 0) & (x > self.a) & (x < self.b) cond2 = cond0 & (x <= self.a) cond = cond0 & cond1 output = zeros(shape(cond),'d') place(output,(1-cond0)+np.isnan(x),self.badvalue) place(output,cond2,1.0) if any(cond): goodargs = argsreduce(cond, *((x,)+args)) place(output,cond,self._sf(*goodargs)) if output.ndim == 0: return output[()] return output def logsf(self,x,*args,**kwds): """ Log of the survival function of the given RV. Returns the log of the "survival function," defined as (1 - `cdf`), evaluated at `x`. Parameters ---------- x : array_like quantiles arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- logsf : ndarray Log of the survival function evaluated at `x`. """ loc,scale=map(kwds.get,['loc','scale']) args, loc, scale = self._fix_loc_scale(args, loc, scale) x,loc,scale = map(asarray,(x,loc,scale)) args = tuple(map(asarray,args)) x = (x-loc)*1.0/scale cond0 = self._argcheck(*args) & (scale > 0) cond1 = (scale > 0) & (x > self.a) & (x < self.b) cond2 = cond0 & (x <= self.a) cond = cond0 & cond1 output = empty(shape(cond),'d') output.fill(NINF) place(output,(1-cond0)+np.isnan(x),self.badvalue) place(output,cond2,0.0) if any(cond): goodargs = argsreduce(cond, *((x,)+args)) place(output,cond,self._logsf(*goodargs)) if output.ndim == 0: return output[()] return output def ppf(self,q,*args,**kwds): """ Percent point function (inverse of cdf) at q of the given RV. Parameters ---------- q : array_like lower tail probability arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- x : array_like quantile corresponding to the lower tail probability q. """ loc,scale=map(kwds.get,['loc','scale']) args, loc, scale = self._fix_loc_scale(args, loc, scale) q,loc,scale = map(asarray,(q,loc,scale)) args = tuple(map(asarray,args)) cond0 = self._argcheck(*args) & (scale > 0) & (loc==loc) cond1 = (q > 0) & (q < 1) cond2 = (q==1) & cond0 cond = cond0 & cond1 output = valarray(shape(cond),value=self.a*scale + loc) place(output,(1-cond0)+(1-cond1)*(q!=0.0), self.badvalue) place(output,cond2,self.b*scale + loc) if any(cond): #call only if at least 1 entry goodargs = argsreduce(cond, *((q,)+args+(scale,loc))) scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] place(output,cond,self._ppf(*goodargs)*scale + loc) if output.ndim == 0: return output[()] return output def isf(self,q,*args,**kwds): """ Inverse survival function at q of the given RV. Parameters ---------- q : array_like upper tail probability arg1, arg2, arg3,... 
: array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- x : array_like quantile corresponding to the upper tail probability q. """ loc,scale=map(kwds.get,['loc','scale']) args, loc, scale = self._fix_loc_scale(args, loc, scale) q,loc,scale = map(asarray,(q,loc,scale)) args = tuple(map(asarray,args)) cond0 = self._argcheck(*args) & (scale > 0) & (loc==loc) cond1 = (q > 0) & (q < 1) cond2 = (q==1) & cond0 cond = cond0 & cond1 output = valarray(shape(cond),value=self.b) #place(output,(1-cond0)*(cond1==cond1), self.badvalue) place(output,(1-cond0)*(cond1==cond1)+(1-cond1)*(q!=0.0), self.badvalue) place(output,cond2,self.a) if any(cond): #call only if at least 1 entry goodargs = argsreduce(cond, *((q,)+args+(scale,loc))) #PB replace 1-q by q scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] place(output,cond,self._isf(*goodargs)*scale + loc) #PB use _isf instead of _ppf if output.ndim == 0: return output[()] return output def stats(self,*args,**kwds): """ Some statistics of the given RV Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) moments : string, optional composed of letters ['mvsk'] defining which moments to compute: 'm' = mean, 'v' = variance, 's' = (Fisher's) skew, 'k' = (Fisher's) kurtosis. (default='mv') Returns ------- stats : sequence of requested moments. """ loc,scale,moments=map(kwds.get,['loc','scale','moments']) N = len(args) if N > self.numargs: if N == self.numargs + 1 and loc is None: # loc is given without keyword loc = args[-1] if N == self.numargs + 2 and scale is None: # loc and scale given without keyword loc, scale = args[-2:] if N == self.numargs + 3 and moments is None: # loc, scale, and moments loc, scale, moments = args[-3:] args = args[:self.numargs] if scale is None: scale = 1.0 if loc is None: loc = 0.0 if moments is None: moments = 'mv' loc,scale = map(asarray,(loc,scale)) args = tuple(map(asarray,args)) cond = self._argcheck(*args) & (scale > 0) & (loc==loc) signature = inspect.getargspec(self._stats.im_func) if (signature[2] is not None) or ('moments' in signature[0]): mu, mu2, g1, g2 = self._stats(*args,**{'moments':moments}) else: mu, mu2, g1, g2 = self._stats(*args) if g1 is None: mu3 = None else: mu3 = g1*np.power(mu2,1.5) #(mu2**1.5) breaks down for nan and inf default = valarray(shape(cond), self.badvalue) output = [] # Use only entries that are valid in calculation if any(cond): goodargs = argsreduce(cond, *(args+(scale,loc))) scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2] if 'm' in moments: if mu is None: mu = self._munp(1.0,*goodargs) out0 = default.copy() place(out0,cond,mu*scale+loc) output.append(out0) if 'v' in moments: if mu2 is None: mu2p = self._munp(2.0,*goodargs) if mu is None: mu = self._munp(1.0,*goodargs) mu2 = mu2p - mu*mu if np.isinf(mu): #if mean is inf then var is also inf mu2 = np.inf out0 = default.copy() place(out0,cond,mu2*scale*scale) output.append(out0) if 's' in moments: if g1 is None: mu3p = self._munp(3.0,*goodargs) if mu is None: mu = self._munp(1.0,*goodargs) if mu2 is None: mu2p = self._munp(2.0,*goodargs) mu2 = mu2p - mu*mu mu3 = mu3p - 3*mu*mu2 - mu**3 g1 = mu3 / mu2**1.5 out0 
= default.copy() place(out0,cond,g1) output.append(out0) if 'k' in moments: if g2 is None: mu4p = self._munp(4.0,*goodargs) if mu is None: mu = self._munp(1.0,*goodargs) if mu2 is None: mu2p = self._munp(2.0,*goodargs) mu2 = mu2p - mu*mu if mu3 is None: mu3p = self._munp(3.0,*goodargs) mu3 = mu3p - 3*mu*mu2 - mu**3 mu4 = mu4p - 4*mu*mu3 - 6*mu*mu*mu2 - mu**4 g2 = mu4 / mu2**2.0 - 3.0 out0 = default.copy() place(out0,cond,g2) output.append(out0) else: #no valid args output = [] for _ in moments: out0 = default.copy() output.append(out0) if len(output) == 1: return output[0] else: return tuple(output) def moment(self, n, *args, **kwds): """ n'th order non-central moment of distribution. Parameters ---------- n : int, n>=1 Order of moment. arg1, arg2, arg3,... : float The shape parameter(s) for the distribution (see docstring of the instance object for more information). kwds : keyword arguments, optional These can include "loc" and "scale", as well as other keyword arguments relevant for a given distribution. """ loc = kwds.get('loc', 0) scale = kwds.get('scale', 1) if not (self._argcheck(*args) and (scale > 0)): return nan if (floor(n) != n): raise ValueError("Moment must be an integer.") if (n < 0): raise ValueError("Moment must be positive.") mu, mu2, g1, g2 = None, None, None, None if (n > 0) and (n < 5): signature = inspect.getargspec(self._stats.im_func) if (signature[2] is not None) or ('moments' in signature[0]): mdict = {'moments':{1:'m',2:'v',3:'vs',4:'vk'}[n]} else: mdict = {} mu, mu2, g1, g2 = self._stats(*args,**mdict) val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args) # Convert to transformed X = L + S*Y # so E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n,k)*(S/L)^k E[Y^k],k=0...n) if loc == 0: return scale**n * val else: result = 0 fac = float(scale) / float(loc) for k in range(n): valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args) result += comb(n,k,exact=True)*(fac**k) * valk result += fac**n * val return result * loc**n def _nnlf(self, x, *args): return -sum(self._logpdf(x, *args),axis=0) def nnlf(self, theta, x): # - sum (log pdf(x, theta),axis=0) # where theta are the parameters (including loc and scale) # try: loc = theta[-2] scale = theta[-1] args = tuple(theta[:-2]) except IndexError: raise ValueError("Not enough input arguments.") if not self._argcheck(*args) or scale <= 0: return inf x = asarray((x-loc) / scale) cond0 = (x <= self.a) | (x >= self.b) if (any(cond0)): return inf else: N = len(x) return self._nnlf(x, *args) + N*log(scale) # return starting point for fit (shape arguments + loc + scale) def _fitstart(self, data, args=None): if args is None: args = (1.0,)*self.numargs return args + self.fit_loc_scale(data, *args) # Return the (possibly reduced) function to optimize in order to find MLE # estimates for the .fit method def _reduce_func(self, args, kwds): args = list(args) Nargs = len(args) fixedn = [] index = range(Nargs) names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale'] x0 = [] for n, key in zip(index, names): if kwds.has_key(key): fixedn.append(n) args[n] = kwds[key] else: x0.append(args[n]) if len(fixedn) == 0: func = self.nnlf restore = None else: if len(fixedn) == len(index): raise ValueError("All parameters fixed. There is nothing to optimize.") def restore(args, theta): # Replace with theta for all numbers not in fixedn # This allows the non-fixed values to vary, but # we still call self.nnlf with all parameters. 
i = 0 for n in range(Nargs): if n not in fixedn: args[n] = theta[i] i += 1 return args def func(theta, x): newtheta = restore(args[:], theta) return self.nnlf(newtheta, x) return x0, func, restore, args def fit(self, data, *args, **kwds): """ Return MLEs for shape, location, and scale parameters from data. MLE stands for Maximum Likelihood Estimate. Starting estimates for the fit are given by input arguments; for any arguments not provided with starting estimates, ``self._fitstart(data)`` is called to generate such. One can hold some parameters fixed to specific values by passing in keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters) and ``floc`` and ``fscale`` (for location and scale parameters, respectively). Parameters ---------- data : array_like Data to use in calculating the MLEs. args : floats, optional Starting value(s) for any shape-characterizing arguments (those not provided will be determined by a call to ``_fitstart(data)``). No default value. kwds : floats, optional Starting values for the location and scale parameters; no default. Special keyword arguments are recognized as holding certain parameters fixed: f0...fn : hold respective shape parameters fixed. floc : hold location parameter fixed to specified value. fscale : hold scale parameter fixed to specified value. optimizer : The optimizer to use. The optimizer must take func, and starting position as the first two arguments, plus args (for extra arguments to pass to the function to be optimized) and disp=0 to suppress output as keyword arguments. Returns ------- shape, loc, scale : tuple of floats MLEs for any shape statistics, followed by those for location and scale. """ Narg = len(args) if Narg > self.numargs: raise ValueError("Too many input arguments.") start = [None]*2 if (Narg < self.numargs) or not (kwds.has_key('loc') and kwds.has_key('scale')): start = self._fitstart(data) # get distribution specific starting locations args += start[Narg:-2] loc = kwds.get('loc', start[-2]) scale = kwds.get('scale', start[-1]) args += (loc, scale) x0, func, restore, args = self._reduce_func(args, kwds) optimizer = kwds.get('optimizer', optimize.fmin) # convert string to function in scipy.optimize if not callable(optimizer) and isinstance(optimizer, (str, unicode)): if not optimizer.startswith('fmin_'): optimizer = "fmin_"+optimizer if optimizer == 'fmin_': optimizer = 'fmin' try: optimizer = getattr(optimize, optimizer) except AttributeError: raise ValueError("%s is not a valid optimizer" % optimizer) vals = optimizer(func,x0,args=(ravel(data),),disp=0) if restore is not None: vals = restore(args, vals) vals = tuple(vals) return vals def fit_loc_scale(self, data, *args): """ Estimate loc and scale parameters from data using 1st and 2nd moments. Parameters ---------- data : array_like Data to fit. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). Returns ------- Lhat : float Estimated location parameter for the data. Shat : float Estimated scale parameter for the data. """ mu, mu2 = self.stats(*args,**{'moments':'mv'}) tmp = asarray(data) muhat = tmp.mean() mu2hat = tmp.var() Shat = sqrt(mu2hat / mu2) Lhat = muhat - Shat*mu return Lhat, Shat @np.deprecate def est_loc_scale(self, data, *args): """This function is deprecated, use self.fit_loc_scale(data) instead.""" return self.fit_loc_scale(data, *args) def freeze(self,*args,**kwds): """Freeze the distribution for the given arguments. 
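# Standalone sketch of fit() with a fixed parameter, as described above: the
# floc keyword pins the location so only the shape and scale are optimized.
# The gamma distribution and the values below are illustrative assumptions.
import numpy as np
from scipy.stats import gamma

np.random.seed(0)
data = gamma.rvs(2.0, loc=0.0, scale=3.0, size=1000)

a_hat, loc_hat, scale_hat = gamma.fit(data, floc=0)
assert loc_hat == 0          # held fixed by the floc keyword
# a_hat and scale_hat are the MLEs computed with loc constrained to 0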
Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution. Should include all the non-optional arguments, may include ``loc`` and ``scale``. Returns ------- rv_frozen : rv_frozen instance The frozen distribution. """ return rv_frozen(self,*args,**kwds) def __call__(self, *args, **kwds): return self.freeze(*args, **kwds) def _entropy(self, *args): def integ(x): val = self._pdf(x, *args) return val*log(val) entr = -integrate.quad(integ,self.a,self.b)[0] if not np.isnan(entr): return entr else: # try with different limits if integration problems low,upp = self.ppf([0.001,0.999],*args) if np.isinf(self.b): upper = upp else: upper = self.b if np.isinf(self.a): lower = low else: lower = self.a return -integrate.quad(integ,lower,upper)[0] def entropy(self, *args, **kwds): """ Differential entropy of the RV. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). scale : array_like, optional Scale parameter (default=1). """ loc,scale=map(kwds.get,['loc','scale']) args, loc, scale = self._fix_loc_scale(args, loc, scale) args = tuple(map(asarray,args)) cond0 = self._argcheck(*args) & (scale > 0) & (loc==loc) output = zeros(shape(cond0),'d') place(output,(1-cond0),self.badvalue) goodargs = argsreduce(cond0, *args) #I don't know when or why vecentropy got broken when numargs == 0 if self.numargs == 0: place(output,cond0,self._entropy()+log(scale)) else: place(output,cond0,self.vecentropy(*goodargs)+log(scale)) return output def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds): """Calculate expected value of a function with respect to the distribution Location and scale only tested on a few examples. Parameters ---------- func : callable, optional Function for which integral is calculated. Takes only one argument. The default is the identity mapping f(x) = x. args : tuple, optional Argument (parameters) of the distribution. lb, ub : scalar, optional Lower and upper bound for integration. default is set to the support of the distribution. conditional : bool, optional If True, the integral is corrected by the conditional probability of the integration interval. The return value is the expectation of the function, conditional on being in the given interval. Default is False. Additional keyword arguments are passed to the integration routine. Returns ------- expected value : float Notes ----- This function has not been checked for it's behavior when the integral is not finite. The integration behavior is inherited from integrate.quad. """ lockwds = {'loc': loc, 'scale':scale} if func is None: def fun(x, *args): return x*self.pdf(x, *args, **lockwds) else: def fun(x, *args): return func(x)*self.pdf(x, *args, **lockwds) if lb is None: lb = loc + self.a * scale if ub is None: ub = loc + self.b * scale if conditional: invfac = (self.sf(lb, *args, **lockwds) - self.sf(ub, *args, **lockwds)) else: invfac = 1.0 kwds['args'] = args return integrate.quad(fun, lb, ub, **kwds)[0] / invfac _EULER = 0.577215664901532860606512090082402431042 # -special.psi(1) _ZETA3 = 1.202056903159594285399738161511449990765 # special.zeta(3,1) Apery's constant ## Kolmogorov-Smirnov one-sided and two-sided test statistics class ksone_gen(rv_continuous): """General Kolmogorov-Smirnov one-sided test. 
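# Standalone sketch of expect(): the expected value of a function under the
# distribution, here checked against the known second moment of an exponential
# with scale=2 (E[X**2] = 2*scale**2 = 8). The distribution choice is an
# illustrative assumption, not part of the generic machinery.
from scipy.stats import expon

second_moment = expon.expect(lambda x: x**2, loc=0, scale=2.0)
assert abs(second_moment - 8.0) < 1e-6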
%(default)s """ def _cdf(self,x,n): return 1.0-special.smirnov(n,x) def _ppf(self,q,n): return special.smirnovi(n,1.0-q) ksone = ksone_gen(a=0.0, name='ksone', shapes="n") class kstwobign_gen(rv_continuous): """Kolmogorov-Smirnov two-sided test for large N. %(default)s """ def _cdf(self,x): return 1.0-special.kolmogorov(x) def _sf(self,x): return special.kolmogorov(x) def _ppf(self,q): return special.kolmogi(1.0-q) kstwobign = kstwobign_gen(a=0.0, name='kstwobign') ## Normal distribution # loc = mu, scale = std # Keep these implementations out of the class definition so they can be reused # by other distributions. _norm_pdf_C = math.sqrt(2*pi) _norm_pdf_logC = math.log(_norm_pdf_C) def _norm_pdf(x): return exp(-x**2/2.0) / _norm_pdf_C def _norm_logpdf(x): return -x**2 / 2.0 - _norm_pdf_logC def _norm_cdf(x): return special.ndtr(x) def _norm_logcdf(x): return special.log_ndtr(x) def _norm_ppf(q): return special.ndtri(q) class norm_gen(rv_continuous): """A normal continuous random variable. The location (loc) keyword specifies the mean. The scale (scale) keyword specifies the standard deviation. %(before_notes)s Notes ----- The probability density function for `norm` is:: norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi) %(example)s """ def _rvs(self): return mtrand.standard_normal(self._size) def _pdf(self,x): return _norm_pdf(x) def _logpdf(self, x): return _norm_logpdf(x) def _cdf(self,x): return _norm_cdf(x) def _logcdf(self, x): return _norm_logcdf(x) def _sf(self, x): return _norm_cdf(-x) def _logsf(self, x): return _norm_logcdf(-x) def _ppf(self,q): return _norm_ppf(q) def _isf(self,q): return -_norm_ppf(q) def _stats(self): return 0.0, 1.0, 0.0, 0.0 def _entropy(self): return 0.5*(log(2*pi)+1) norm = norm_gen(name='norm') ## Alpha distribution ## class alpha_gen(rv_continuous): """An alpha continuous random variable. %(before_notes)s Notes ----- The probability density function for `alpha` is:: alpha.pdf(x,a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2), where ``Phi(alpha)`` is the normal CDF, ``x > 0``, and ``a > 0``. %(example)s """ def _pdf(self, x, a): return 1.0/(x**2)/special.ndtr(a)*_norm_pdf(a-1.0/x) def _logpdf(self, x, a): return -2*log(x) + _norm_logpdf(a-1.0/x) - log(special.ndtr(a)) def _cdf(self, x, a): return special.ndtr(a-1.0/x) / special.ndtr(a) def _ppf(self, q, a): return 1.0/asarray(a-special.ndtri(q*special.ndtr(a))) def _stats(self, a): return [inf]*2 + [nan]*2 alpha = alpha_gen(a=0.0, name='alpha', shapes='a') ## Anglit distribution ## class anglit_gen(rv_continuous): """An anglit continuous random variable. %(before_notes)s Notes ----- The probability density function for `anglit` is:: anglit.pdf(x) = sin(2*x + pi/2) = cos(2*x), for ``-pi/4 <= x <= pi/4``. %(example)s """ def _pdf(self, x): return cos(2*x) def _cdf(self, x): return sin(x+pi/4)**2.0 def _ppf(self, q): return (arcsin(sqrt(q))-pi/4) def _stats(self): return 0.0, pi*pi/16-0.5, 0.0, -2*(pi**4 - 96)/(pi*pi-8)**2 def _entropy(self): return 1-log(2) anglit = anglit_gen(a=-pi/4, b=pi/4, name='anglit') ## Arcsine distribution ## class arcsine_gen(rv_continuous): """An arcsine continuous random variable. %(before_notes)s Notes ----- The probability density function for `arcsine` is:: arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x))) for 0 < x < 1. 
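# Standalone sketch of why sf() exists as more than 1 - cdf(): far in the
# upper tail of the normal distribution, 1 - cdf() loses all precision while
# sf() is computed directly from the tail. The value x=10 is illustrative.
from scipy.stats import norm

x = 10.0
naive = 1.0 - norm.cdf(x)   # cdf(10) rounds to 1.0 in double precision, so this is 0.0
tail = norm.sf(x)           # ~7.6e-24, still representable
assert tail > 0.0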
%(example)s """ def _pdf(self, x): return 1.0/pi/sqrt(x*(1-x)) def _cdf(self, x): return 2.0/pi*arcsin(sqrt(x)) def _ppf(self, q): return sin(pi/2.0*q)**2.0 def _stats(self): #mup = 0.5, 3.0/8.0, 15.0/48.0, 35.0/128.0 mu = 0.5 mu2 = 1.0/8 g1 = 0 g2 = -3.0/2.0 return mu, mu2, g1, g2 def _entropy(self): return -0.24156447527049044468 arcsine = arcsine_gen(a=0.0, b=1.0, name='arcsine') ## Beta distribution ## class beta_gen(rv_continuous): """A beta continuous random variable. %(before_notes)s Notes ----- The probability density function for `beta` is:: beta.pdf(x, a, b) = gamma(a+b)/(gamma(a)*gamma(b)) * x**(a-1) * (1-x)**(b-1), for ``0 < x < 1``, ``a > 0``, ``b > 0``. %(example)s """ def _rvs(self, a, b): return mtrand.beta(a,b,self._size) def _pdf(self, x, a, b): Px = (1.0-x)**(b-1.0) * x**(a-1.0) Px /= special.beta(a,b) return Px def _logpdf(self, x, a, b): lPx = (b-1.0)*log(1.0-x) + (a-1.0)*log(x) lPx -= log(special.beta(a,b)) return lPx def _cdf(self, x, a, b): return special.btdtr(a,b,x) def _ppf(self, q, a, b): return special.btdtri(a,b,q) def _stats(self, a, b): mn = a *1.0 / (a + b) var = (a*b*1.0)/(a+b+1.0)/(a+b)**2.0 g1 = 2.0*(b-a)*sqrt((1.0+a+b)/(a*b)) / (2+a+b) g2 = 6.0*(a**3 + a**2*(1-2*b) + b**2*(1+b) - 2*a*b*(2+b)) g2 /= a*b*(a+b+2)*(a+b+3) return mn, var, g1, g2 def _fitstart(self, data): g1 = _skew(data) g2 = _kurtosis(data) def func(x): a, b = x sk = 2*(b-a)*sqrt(a + b + 1) / (a + b + 2) / sqrt(a*b) ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2) ku /= a*b*(a+b+2)*(a+b+3) ku *= 6 return [sk-g1, ku-g2] a, b = optimize.fsolve(func, (1.0, 1.0)) return super(beta_gen, self)._fitstart(data, args=(a,b)) def fit(self, data, *args, **kwds): floc = kwds.get('floc', None) fscale = kwds.get('fscale', None) if floc is not None and fscale is not None: # special case data = (ravel(data)-floc)/fscale xbar = data.mean() v = data.var(ddof=0) fac = xbar*(1-xbar)/v - 1 a = xbar * fac b = (1-xbar) * fac return a, b, floc, fscale else: # do general fit return super(beta_gen, self).fit(data, *args, **kwds) beta = beta_gen(a=0.0, b=1.0, name='beta', shapes='a, b') ## Beta Prime class betaprime_gen(rv_continuous): """A beta prima continuous random variable. %(before_notes)s Notes ----- The probability density function for `betaprime` is:: betaprime.pdf(x, a, b) = gamma(a+b) / (gamma(a)*gamma(b)) * x**(a-1) * (1-x)**(-a-b) for ``x > 0``, ``a > 0``, ``b > 0``. %(example)s """ def _rvs(self, a, b): u1 = gamma.rvs(a,size=self._size) u2 = gamma.rvs(b,size=self._size) return (u1 / u2) def _pdf(self, x, a, b): return 1.0/special.beta(a,b)*x**(a-1.0)/(1+x)**(a+b) def _logpdf(self, x, a, b): return (a-1.0)*log(x) - (a+b)*log(1+x) - log(special.beta(a,b)) def _cdf_skip(self, x, a, b): # remove for now: special.hyp2f1 is incorrect for large a x = where(x==1.0, 1.0-1e-6,x) return pow(x,a)*special.hyp2f1(a+b,a,1+a,-x)/a/special.beta(a,b) def _munp(self, n, a, b): if (n == 1.0): return where(b > 1, a/(b-1.0), inf) elif (n == 2.0): return where(b > 2, a*(a+1.0)/((b-2.0)*(b-1.0)), inf) elif (n == 3.0): return where(b > 3, a*(a+1.0)*(a+2.0)/((b-3.0)*(b-2.0)*(b-1.0)), inf) elif (n == 4.0): return where(b > 4, a*(a+1.0)*(a+2.0)*(a+3.0)/((b-4.0)*(b-3.0) \ *(b-2.0)*(b-1.0)), inf) else: raise NotImplementedError betaprime = betaprime_gen(a=0.0, b=500.0, name='betaprime', shapes='a, b') ## Bradford ## class bradford_gen(rv_continuous): """A Bradford continuous random variable. 
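# Standalone sketch of the beta fit special case above: with both floc and
# fscale fixed, a and b come from the sample mean and variance in closed form
# rather than from numerical optimization. Sample values are illustrative.
import numpy as np
from scipy.stats import beta

np.random.seed(1)
data = beta.rvs(2.0, 5.0, size=2000)

a_hat, b_hat, loc_hat, scale_hat = beta.fit(data, floc=0, fscale=1)
assert (loc_hat, scale_hat) == (0, 1)
# a_hat, b_hat are moment-based estimates computed from data.mean() / data.var()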
%(before_notes)s Notes ----- The probability density function for `bradford` is:: bradford.pdf(x, c) = c / (k * (1+c*x)), for ``0 < x < 1``, ``c > 0`` and ``k = log(1+c)``. %(example)s """ def _pdf(self, x, c): return c / (c*x + 1.0) / log(1.0+c) def _cdf(self, x, c): return log(1.0+c*x) / log(c+1.0) def _ppf(self, q, c): return ((1.0+c)**q-1)/c def _stats(self, c, moments='mv'): k = log(1.0+c) mu = (c-k)/(c*k) mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k) g1 = None g2 = None if 's' in moments: g1 = sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3)) g1 /= sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k) if 'k' in moments: g2 = c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3) \ + 6*c*k*k*(3*k-14) + 12*k**3 g2 /= 3*c*(c*(k-2)+2*k)**2 return mu, mu2, g1, g2 def _entropy(self, c): k = log(1+c) return k/2.0 - log(c/k) bradford = bradford_gen(a=0.0, b=1.0, name='bradford', shapes='c') ## Burr # burr with d=1 is called the fisk distribution class burr_gen(rv_continuous): """A Burr continuous random variable. %(before_notes)s Notes ----- The probability density function for `burr` is:: burr.pdf(x, c, d) = c * d * x**(-c-1) * (1+x**(-c))**(-d-1) for ``x > 0``. %(example)s """ def _pdf(self, x, c, d): return c*d*(x**(-c-1.0))*((1+x**(-c*1.0))**(-d-1.0)) def _cdf(self, x, c, d): return (1+x**(-c*1.0))**(-d**1.0) def _ppf(self, q, c, d): return (q**(-1.0/d)-1)**(-1.0/c) def _stats(self, c, d, moments='mv'): g2c, g2cd = gam(1-2.0/c), gam(2.0/c+d) g1c, g1cd = gam(1-1.0/c), gam(1.0/c+d) gd = gam(d) k = gd*g2c*g2cd - g1c**2 * g1cd**2 mu = g1c*g1cd / gd mu2 = k / gd**2.0 g1, g2 = None, None g3c, g3cd = None, None if 's' in moments: g3c, g3cd = gam(1-3.0/c), gam(3.0/c+d) g1 = 2*g1c**3 * g1cd**3 + gd*gd*g3c*g3cd - 3*gd*g2c*g1c*g1cd*g2cd g1 /= sqrt(k**3) if 'k' in moments: if g3c is None: g3c = gam(1-3.0/c) if g3cd is None: g3cd = gam(3.0/c+d) g4c, g4cd = gam(1-4.0/c), gam(4.0/c+d) g2 = 6*gd*g2c*g2cd * g1c**2 * g1cd**2 + gd**3 * g4c*g4cd g2 -= 3*g1c**4 * g1cd**4 -4*gd**2*g3c*g1c*g1cd*g3cd return mu, mu2, g1, g2 burr = burr_gen(a=0.0, name='burr', shapes="c, d") # Fisk distribution # burr is a generalization class fisk_gen(burr_gen): """A Fisk continuous random variable. The Fisk distribution is also known as the log-logistic distribution, and equals the Burr distribution with ``d=1``. %(before_notes)s See Also -------- burr %(example)s """ def _pdf(self, x, c): return burr_gen._pdf(self, x, c, 1.0) def _cdf(self, x, c): return burr_gen._cdf(self, x, c, 1.0) def _ppf(self, x, c): return burr_gen._ppf(self, x, c, 1.0) def _stats(self, c): return burr_gen._stats(self, c, 1.0) def _entropy(self, c): return 2 - log(c) fisk = fisk_gen(a=0.0, name='fisk', shapes='c') ## Cauchy # median = loc class cauchy_gen(rv_continuous): """A Cauchy continuous random variable. %(before_notes)s Notes ----- The probability density function for `cauchy` is:: cauchy.pdf(x) = 1 / (pi * (1 + x**2)) %(example)s """ def _pdf(self, x): return 1.0/pi/(1.0+x*x) def _cdf(self, x): return 0.5 + 1.0/pi*arctan(x) def _ppf(self, q): return tan(pi*q-pi/2.0) def _sf(self, x): return 0.5 - 1.0/pi*arctan(x) def _isf(self, q): return tan(pi/2.0-pi*q) def _stats(self): return inf, inf, nan, nan def _entropy(self): return log(4*pi) def _fitstart(data, args=None): return (0, 1) cauchy = cauchy_gen(name='cauchy') ## Chi ## (positive square-root of chi-square) ## chi(1, loc, scale) = halfnormal ## chi(2, 0, scale) = Rayleigh ## chi(3, 0, scale) = MaxWell class chi_gen(rv_continuous): """A chi continuous random variable. 
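# Standalone sketch of the relationship stated above: the Fisk (log-logistic)
# distribution is the Burr distribution with its second shape fixed at d=1.
# The grid of x values and shape c below are illustrative.
import numpy as np
from scipy.stats import burr, fisk

c = 3.0
x = np.linspace(0.1, 5.0, 7)
assert np.allclose(fisk.pdf(x, c), burr.pdf(x, c, 1.0))
assert np.allclose(fisk.cdf(x, c), burr.cdf(x, c, 1.0))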
%(before_notes)s Notes ----- The probability density function for `chi` is:: chi.pdf(x,df) = x**(df-1) * exp(-x**2/2) / (2**(df/2-1) * gamma(df/2)) for ``x > 0``. %(example)s """ def _rvs(self, df): return sqrt(chi2.rvs(df,size=self._size)) def _pdf(self, x, df): return x**(df-1.)*exp(-x*x*0.5)/(2.0)**(df*0.5-1)/gam(df*0.5) def _cdf(self, x, df): return special.gammainc(df*0.5,0.5*x*x) def _ppf(self, q, df): return sqrt(2*special.gammaincinv(df*0.5,q)) def _stats(self, df): mu = sqrt(2)*special.gamma(df/2.0+0.5)/special.gamma(df/2.0) mu2 = df - mu*mu g1 = (2*mu**3.0 + mu*(1-2*df))/asarray(mu2**1.5) g2 = 2*df*(1.0-df)-6*mu**4 + 4*mu**2 * (2*df-1) g2 /= asarray(mu2**2.0) return mu, mu2, g1, g2 chi = chi_gen(a=0.0, name='chi', shapes='df') ## Chi-squared (gamma-distributed with loc=0 and scale=2 and shape=df/2) class chi2_gen(rv_continuous): """A chi-squared continuous random variable. %(before_notes)s Notes ----- The probability density function for `chi2` is:: chi2.pdf(x,df) = 1 / (2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2) %(example)s """ def _rvs(self, df): return mtrand.chisquare(df,self._size) def _pdf(self, x, df): return exp(self._logpdf(x, df)) def _logpdf(self, x, df): #term1 = (df/2.-1)*log(x) #term1[(df==2)*(x==0)] = 0 #avoid 0*log(0)==nan return (df/2.-1)*log(x+1e-300) - x/2. - gamln(df/2.) - (log(2)*df)/2. ## Px = x**(df/2.0-1)*exp(-x/2.0) ## Px /= special.gamma(df/2.0)* 2**(df/2.0) ## return log(Px) def _cdf(self, x, df): return special.chdtr(df, x) def _sf(self, x, df): return special.chdtrc(df, x) def _isf(self, p, df): return special.chdtri(df, p) def _ppf(self, p, df): return self._isf(1.0-p, df) def _stats(self, df): mu = df mu2 = 2*df g1 = 2*sqrt(2.0/df) g2 = 12.0/df return mu, mu2, g1, g2 chi2 = chi2_gen(a=0.0, name='chi2', shapes='df') ## Cosine (Approximation to the Normal) class cosine_gen(rv_continuous): """A cosine continuous random variable. %(before_notes)s Notes ----- The cosine distribution is an approximation to the normal distribution. The probability density function for `cosine` is:: cosine.pdf(x) = 1/(2*pi) * (1+cos(x)) for ``-pi <= x <= pi``. %(example)s """ def _pdf(self, x): return 1.0/2/pi*(1+cos(x)) def _cdf(self, x): return 1.0/2/pi*(pi + x + sin(x)) def _stats(self): return 0.0, pi*pi/3.0-2.0, 0.0, -6.0*(pi**4-90)/(5.0*(pi*pi-6)**2) def _entropy(self): return log(4*pi)-1.0 cosine = cosine_gen(a=-pi, b=pi, name='cosine') ## Double Gamma distribution class dgamma_gen(rv_continuous): """A double gamma continuous random variable. %(before_notes)s Notes ----- The probability density function for `dgamma` is:: dgamma.pdf(x, a) = 1 / (2*gamma(a)) * abs(x)**(a-1) * exp(-abs(x)) for ``a > 0``. %(example)s """ def _rvs(self, a): u = random(size=self._size) return (gamma.rvs(a,size=self._size)*where(u>=0.5,1,-1)) def _pdf(self, x, a): ax = abs(x) return 1.0/(2*special.gamma(a))*ax**(a-1.0) * exp(-ax) def _logpdf(self, x, a): ax = abs(x) return (a-1.0)*log(ax) - ax - log(2) - gamln(a) def _cdf(self, x, a): fac = 0.5*special.gammainc(a,abs(x)) return where(x>0,0.5+fac,0.5-fac) def _sf(self, x, a): fac = 0.5*special.gammainc(a,abs(x)) #return where(x>0,0.5-0.5*fac,0.5+0.5*fac) return where(x>0,0.5-fac,0.5+fac) def _ppf(self, q, a): fac = special.gammainccinv(a,1-abs(2*q-1)) return where(q>0.5, fac, -fac) def _stats(self, a): mu2 = a*(a+1.0) return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0 dgamma = dgamma_gen(name='dgamma', shapes='a') ## Double Weibull distribution ## class dweibull_gen(rv_continuous): """A double Weibull continuous random variable. 
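# Standalone sketch of the comment above: chi-squared with df degrees of
# freedom equals a gamma distribution with shape df/2 and scale 2.
# df and the evaluation grid are illustrative choices.
import numpy as np
from scipy.stats import chi2, gamma

df = 5
x = np.linspace(0.1, 20.0, 9)
assert np.allclose(chi2.pdf(x, df), gamma.pdf(x, df / 2.0, scale=2.0))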
%(before_notes)s Notes ----- The probability density function for `dweibull` is:: dweibull.pdf(x, c) = c / 2 * abs(x)**(c-1) * exp(-abs(x)**c) %(example)s """ def _rvs(self, c): u = random(size=self._size) return weibull_min.rvs(c, size=self._size)*(where(u>=0.5,1,-1)) def _pdf(self, x, c): ax = abs(x) Px = c/2.0*ax**(c-1.0)*exp(-ax**c) return Px def _logpdf(self, x, c): ax = abs(x) return log(c) - log(2.0) + (c-1.0)*log(ax) - ax**c def _cdf(self, x, c): Cx1 = 0.5*exp(-abs(x)**c) return where(x > 0, 1-Cx1, Cx1) def _ppf_skip(self, q, c): fac = where(q<=0.5,2*q,2*q-1) fac = pow(asarray(log(1.0/fac)),1.0/c) return where(q>0.5,fac,-fac) def _stats(self, c): var = gam(1+2.0/c) return 0.0, var, 0.0, gam(1+4.0/c)/var dweibull = dweibull_gen(name='dweibull', shapes='c') ## ERLANG ## ## Special case of the Gamma distribution with shape parameter an integer. ## class erlang_gen(rv_continuous): """An Erlang continuous random variable. %(before_notes)s See Also -------- gamma Notes ----- The Erlang distribution is a special case of the Gamma distribution, with the shape parameter ``a`` an integer. Refer to the ``gamma`` distribution for further examples. """ def _rvs(self, a): return gamma.rvs(a, size=self._size) def _arg_check(self, a): return (a > 0) & (floor(a)==a) def _pdf(self, x, a): Px = (x)**(a-1.0)*exp(-x)/special.gamma(a) return Px def _logpdf(self, x, a): return (a-1.0)*log(x) - x - gamln(a) def _cdf(self, x, a): return special.gdtr(1.0,a,x) def _sf(self, x, a): return special.gdtrc(1.0,a,x) def _ppf(self, q, a): return special.gdtrix(1.0, a, q) def _stats(self, a): a = a*1.0 return a, a, 2/sqrt(a), 6/a def _entropy(self, a): return special.psi(a)*(1-a) + 1 + gamln(a) erlang = erlang_gen(a=0.0, name='erlang', shapes='a') ## Exponential (gamma distributed with a=1.0, loc=loc and scale=scale) ## scale == 1.0 / lambda class expon_gen(rv_continuous): """An exponential continuous random variable. %(before_notes)s Notes ----- The probability density function for `expon` is:: expon.pdf(x) = lambda * exp(- lambda*x) for ``x >= 0``. The scale parameter is equal to ``scale = 1.0 / lambda``. `expon` does not have shape parameters. %(example)s """ def _rvs(self): return mtrand.standard_exponential(self._size) def _pdf(self, x): return exp(-x) def _logpdf(self, x): return -x def _cdf(self, x): return -expm1(-x) def _ppf(self, q): return -log1p(-q) def _sf(self,x): return exp(-x) def _logsf(self, x): return -x def _isf(self,q): return -log(q) def _stats(self): return 1.0, 1.0, 2.0, 6.0 def _entropy(self): return 1.0 expon = expon_gen(a=0.0, name='expon') ## Exponentiated Weibull class exponweib_gen(rv_continuous): """An exponentiated Weibull continuous random variable. %(before_notes)s Notes ----- The probability density function for `exponweib` is:: exponweib.pdf(x, a, c) = a * c * (1-exp(-x**c))**(a-1) * exp(-x**c)*x**(c-1) for ``x > 0``, ``a > 0``, ``c > 0``. %(example)s """ def _pdf(self, x, a, c): exc = exp(-x**c) return a*c*(1-exc)**asarray(a-1) * exc * x**(c-1) def _logpdf(self, x, a, c): exc = exp(-x**c) return log(a) + log(c) + (a-1.)*log(1-exc) - x**c + (c-1.0)*log(x) def _cdf(self, x, a, c): exm1c = -expm1(-x**c) return (exm1c)**a def _ppf(self, q, a, c): return (-log1p(-q**(1.0/a)))**asarray(1.0/c) exponweib = exponweib_gen(a=0.0, name='exponweib', shapes="a, c") ## Exponential Power class exponpow_gen(rv_continuous): """An exponential power continuous random variable. 
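# Standalone sketch of the note above: expon has no shape parameter and its
# scale equals 1/lambda, so pdf(x) = lambda * exp(-lambda*x) for x >= 0.
# The rate lam and the grid below are illustrative.
import numpy as np
from scipy.stats import expon

lam = 0.5
x = np.linspace(0.0, 10.0, 6)
assert np.allclose(expon.pdf(x, scale=1.0 / lam), lam * np.exp(-lam * x))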
%(before_notes)s Notes ----- The probability density function for `exponpow` is:: exponpow.pdf(x, b) = b * x**(b-1) * exp(1+x**b - exp(x**b)) for ``x >= 0``, ``b > 0``. %(example)s """ def _pdf(self, x, b): xbm1 = x**(b-1.0) xb = xbm1 * x return exp(1)*b*xbm1 * exp(xb - exp(xb)) def _logpdf(self, x, b): xb = x**(b-1.0)*x return 1 + log(b) + (b-1.0)*log(x) + xb - exp(xb) def _cdf(self, x, b): return -expm1(-expm1(x**b)) def _sf(self, x, b): return exp(-expm1(x**b)) def _isf(self, x, b): return (log1p(-log(x)))**(1./b) def _ppf(self, q, b): return pow(log1p(-log1p(-q)), 1.0/b) exponpow = exponpow_gen(a=0.0, name='exponpow', shapes='b') ## Fatigue-Life (Birnbaum-Sanders) class fatiguelife_gen(rv_continuous): """A fatigue-life (Birnbaum-Sanders) continuous random variable. %(before_notes)s Notes ----- The probability density function for `fatiguelife` is:: fatiguelife.pdf(x,c) = (x+1) / (2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2)) for ``x > 0``. %(example)s """ def _rvs(self, c): z = norm.rvs(size=self._size) x = 0.5*c*z x2 = x*x t = 1.0 + 2*x2 + 2*x*sqrt(1 + x2) return t def _pdf(self, x, c): return (x+1)/asarray(2*c*sqrt(2*pi*x**3))*exp(-(x-1)**2/asarray((2.0*x*c**2))) def _logpdf(self, x, c): return log(x+1) - (x-1)**2 / (2.0*x*c**2) - log(2*c) - 0.5*(log(2*pi) + 3*log(x)) def _cdf(self, x, c): return special.ndtr(1.0/c*(sqrt(x)-1.0/asarray(sqrt(x)))) def _ppf(self, q, c): tmp = c*special.ndtri(q) return 0.25*(tmp + sqrt(tmp**2 + 4))**2 def _stats(self, c): c2 = c*c mu = c2 / 2.0 + 1 den = 5*c2 + 4 mu2 = c2*den /4.0 g1 = 4*c*sqrt(11*c2+6.0)/den**1.5 g2 = 6*c2*(93*c2+41.0) / den**2.0 return mu, mu2, g1, g2 fatiguelife = fatiguelife_gen(a=0.0, name='fatiguelife', shapes='c') ## Folded Cauchy class foldcauchy_gen(rv_continuous): """A folded Cauchy continuous random variable. %(before_notes)s Notes ----- The probability density function for `foldcauchy` is:: foldcauchy.pdf(x, c) = 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2)) for ``x >= 0``. %(example)s """ def _rvs(self, c): return abs(cauchy.rvs(loc=c,size=self._size)) def _pdf(self, x, c): return 1.0/pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2)) def _cdf(self, x, c): return 1.0/pi*(arctan(x-c) + arctan(x+c)) def _stats(self, c): return inf, inf, nan, nan foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy', shapes='c') ## F class f_gen(rv_continuous): """An F continuous random variable. %(before_notes)s Notes ----- The probability density function for `f` is:: df2**(df2/2) * df1**(df1/2) * x**(df1/2-1) F.pdf(x, df1, df2) = -------------------------------------------- (df2+df1*x)**((df1+df2)/2) * B(df1/2, df2/2) for ``x > 0``. 
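    As an illustrative sanity check (a sketch using only the public
    ``scipy.stats`` interface): if ``X`` follows an F distribution with
    ``(dfn, dfd)`` degrees of freedom, then ``1/X`` follows an F
    distribution with the degrees of freedom swapped.

    >>> from scipy.stats import f
    >>> import numpy as np
    >>> np.allclose(f.cdf(2.5, 3, 7), 1.0 - f.cdf(1.0/2.5, 7, 3))
    True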
%(example)s """ def _rvs(self, dfn, dfd): return mtrand.f(dfn, dfd, self._size) def _pdf(self, x, dfn, dfd): # n = asarray(1.0*dfn) # m = asarray(1.0*dfd) # Px = m**(m/2) * n**(n/2) * x**(n/2-1) # Px /= (m+n*x)**((n+m)/2)*special.beta(n/2,m/2) return exp(self._logpdf(x, dfn, dfd)) def _logpdf(self, x, dfn, dfd): n = 1.0*dfn m = 1.0*dfd lPx = m/2*log(m) + n/2*log(n) + (n/2-1)*log(x) lPx -= ((n+m)/2)*log(m+n*x) + special.betaln(n/2,m/2) return lPx def _cdf(self, x, dfn, dfd): return special.fdtr(dfn, dfd, x) def _sf(self, x, dfn, dfd): return special.fdtrc(dfn, dfd, x) def _ppf(self, q, dfn, dfd): return special.fdtri(dfn, dfd, q) def _stats(self, dfn, dfd): v2 = asarray(dfd*1.0) v1 = asarray(dfn*1.0) mu = where (v2 > 2, v2 / asarray(v2 - 2), inf) mu2 = 2*v2*v2*(v2+v1-2)/(v1*(v2-2)**2 * (v2-4)) mu2 = where(v2 > 4, mu2, inf) g1 = 2*(v2+2*v1-2)/(v2-6)*sqrt((2*v2-4)/(v1*(v2+v1-2))) g1 = where(v2 > 6, g1, nan) g2 = 3/(2*v2-16)*(8+g1*g1*(v2-6)) g2 = where(v2 > 8, g2, nan) return mu, mu2, g1, g2 f = f_gen(a=0.0, name='f', shapes="dfn, dfd") ## Folded Normal ## abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S) ## ## note: regress docs have scale parameter correct, but first parameter ## he gives is a shape parameter A = c * scale ## Half-normal is folded normal with shape-parameter c=0. class foldnorm_gen(rv_continuous): """A folded normal continuous random variable. %(before_notes)s Notes ----- The probability density function for `foldnorm` is:: foldnormal.pdf(x, c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2) for ``c >= 0``. %(example)s """ def _rvs(self, c): return abs(norm.rvs(loc=c,size=self._size)) def _pdf(self, x, c): return sqrt(2.0/pi)*cosh(c*x)*exp(-(x*x+c*c)/2.0) def _cdf(self, x, c,): return special.ndtr(x-c) + special.ndtr(x+c) - 1.0 def _stats(self, c): fac = special.erf(c/sqrt(2)) mu = sqrt(2.0/pi)*exp(-0.5*c*c)+c*fac mu2 = c*c + 1 - mu*mu c2 = c*c g1 = sqrt(2/pi)*exp(-1.5*c2)*(4-pi*exp(c2)*(2*c2+1.0)) g1 += 2*c*fac*(6*exp(-c2) + 3*sqrt(2*pi)*c*exp(-c2/2.0)*fac + \ pi*c*(fac*fac-1)) g1 /= pi*mu2**1.5 g2 = c2*c2+6*c2+3+6*(c2+1)*mu*mu - 3*mu**4 g2 -= 4*exp(-c2/2.0)*mu*(sqrt(2.0/pi)*(c2+2)+c*(c2+3)*exp(c2/2.0)*fac) g2 /= mu2**2.0 return mu, mu2, g1, g2 foldnorm = foldnorm_gen(a=0.0, name='foldnorm', shapes='c') ## Extreme Value Type II or Frechet ## (defined in Regress+ documentation as Extreme LB) as ## a limiting value distribution. ## class frechet_r_gen(rv_continuous): """A Frechet right (or Weibull minimum) continuous random variable. %(before_notes)s See Also -------- weibull_min : The same distribution as `frechet_r`. frechet_l, weibull_max Notes ----- The probability density function for `frechet_r` is:: frechet_r.pdf(x, c) = c * x**(c-1) * exp(-x**c) for ``x > 0``, ``c > 0``. %(example)s """ def _pdf(self, x, c): return c*pow(x,c-1)*exp(-pow(x,c)) def _logpdf(self, x, c): return log(c) + (c-1)*log(x) - pow(x,c) def _cdf(self, x, c): return -expm1(-pow(x,c)) def _ppf(self, q, c): return pow(-log1p(-q),1.0/c) def _munp(self, n, c): return special.gamma(1.0+n*1.0/c) def _entropy(self, c): return -_EULER / c - log(c) + _EULER + 1 frechet_r = frechet_r_gen(a=0.0, name='frechet_r', shapes='c') weibull_min = frechet_r_gen(a=0.0, name='weibull_min', shapes='c') class frechet_l_gen(rv_continuous): """A Frechet left (or Weibull maximum) continuous random variable. %(before_notes)s See Also -------- weibull_max : The same distribution as `frechet_l`. 
frechet_r, weibull_min Notes ----- The probability density function for `frechet_l` is:: frechet_l.pdf(x, c) = c * (-x)**(c-1) * exp(-(-x)**c) for ``x < 0``, ``c > 0``. %(example)s """ def _pdf(self, x, c): return c*pow(-x,c-1)*exp(-pow(-x,c)) def _cdf(self, x, c): return exp(-pow(-x,c)) def _ppf(self, q, c): return -pow(-log(q),1.0/c) def _munp(self, n, c): val = special.gamma(1.0+n*1.0/c) if (int(n) % 2): sgn = -1 else: sgn = 1 return sgn * val def _entropy(self, c): return -_EULER / c - log(c) + _EULER + 1 frechet_l = frechet_l_gen(b=0.0, name='frechet_l', shapes='c') weibull_max = frechet_l_gen(b=0.0, name='weibull_max', shapes='c') ## Generalized Logistic ## class genlogistic_gen(rv_continuous): """A generalized logistic continuous random variable. %(before_notes)s Notes ----- The probability density function for `genlogistic` is:: genlogistic.pdf(x, c) = c * exp(-x) / (1 + exp(-x))**(c+1) for ``x > 0``, ``c > 0``. %(example)s """ def _pdf(self, x, c): Px = c*exp(-x)/(1+exp(-x))**(c+1.0) return Px def _logpdf(self, x, c): return log(c) - x - (c+1.0)*log1p(exp(-x)) def _cdf(self, x, c): Cx = (1+exp(-x))**(-c) return Cx def _ppf(self, q, c): vals = -log(pow(q,-1.0/c)-1) return vals def _stats(self, c): zeta = special.zeta mu = _EULER + special.psi(c) mu2 = pi*pi/6.0 + zeta(2,c) g1 = -2*zeta(3,c) + 2*_ZETA3 g1 /= mu2**1.5 g2 = pi**4/15.0 + 6*zeta(4,c) g2 /= mu2**2.0 return mu, mu2, g1, g2 genlogistic = genlogistic_gen(name='genlogistic', shapes='c') ## Generalized Pareto class genpareto_gen(rv_continuous): """A generalized Pareto continuous random variable. %(before_notes)s Notes ----- The probability density function for `genpareto` is:: genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c) for ``c != 0``, and for ``x >= 0`` for all c, and ``x < 1/abs(c)`` for ``c < 0``. %(example)s """ def _argcheck(self, c): c = asarray(c) self.b = where(c < 0, 1.0/abs(c), inf) return where(c==0, 0, 1) def _pdf(self, x, c): Px = pow(1+c*x,asarray(-1.0-1.0/c)) return Px def _logpdf(self, x, c): return (-1.0-1.0/c) * np.log1p(c*x) def _cdf(self, x, c): return 1.0 - pow(1+c*x,asarray(-1.0/c)) def _ppf(self, q, c): vals = 1.0/c * (pow(1-q, -c)-1) return vals def _munp(self, n, c): k = arange(0,n+1) val = (-1.0/c)**n * sum(comb(n,k)*(-1)**k / (1.0-c*k),axis=0) return where(c*n < 1, val, inf) def _entropy(self, c): if (c > 0): return 1+c else: self.b = -1.0 / c return rv_continuous._entropy(self, c) genpareto = genpareto_gen(a=0.0, name='genpareto', shapes='c') ## Generalized Exponential class genexpon_gen(rv_continuous): """A generalized exponential continuous random variable. %(before_notes)s Notes ----- The probability density function for `genexpon` is:: genexpon.pdf(x, a, b, c) = (a + b * (1 - exp(-c*x))) * \ exp(-a*x - b*x + b/c * (1-exp(-c*x))) for ``x >= 0``, ``a,b,c > 0``. References ---------- H.K. Ryu, "An Extension of Marshall and Olkin's Bivariate Exponential Distribution", Journal of the American Statistical Association, 1993. N. Balakrishnan, "The Exponential Distribution: Theory, Methods and Applications", Asit P. Basu. %(example)s """ def _pdf(self, x, a, b, c): return (a+b*(-expm1(-c*x)))*exp((-a-b)*x+b*(-expm1(-c*x))/c) def _cdf(self, x, a, b, c): return -expm1((-a-b)*x + b*(-expm1(-c*x))/c) def _logpdf(self, x, a, b, c): return np.log(a+b*(-expm1(-c*x))) + (-a-b)*x+b*(-expm1(-c*x))/c genexpon = genexpon_gen(a=0.0, name='genexpon', shapes='a, b, c') ## Generalized Extreme Value ## c=0 is just gumbel distribution. 
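# A numerical illustration of the c == 0 special case mentioned in the
# surrounding comments (a sketch using the public scipy.stats interface;
# the x grid is arbitrary):
#
#     >>> from scipy.stats import genextreme, gumbel_r
#     >>> import numpy as np
#     >>> x = np.linspace(-2., 5., 9)
#     >>> np.allclose(genextreme.pdf(x, 0.0), gumbel_r.pdf(x))
#     True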
## This version does now accept c==0 ## Use gumbel_r for c==0 # new version by Per Brodtkorb, see ticket:767 # also works for c==0, special case is gumbel_r # increased precision for small c class genextreme_gen(rv_continuous): """A generalized extreme value continuous random variable. %(before_notes)s See Also -------- gumbel_r Notes ----- For ``c=0``, `genextreme` is equal to `gumbel_r`. The probability density function for `genextreme` is:: genextreme.pdf(x, c) = exp(-exp(-x))*exp(-x), for c==0 exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1), for x <= 1/c, c > 0 %(example)s """ def _argcheck(self, c): min = np.minimum max = np.maximum sml = floatinfo.machar.xmin #self.b = where(c > 0, 1.0 / c,inf) #self.a = where(c < 0, 1.0 / c, -inf) self.b = where(c > 0, 1.0 / max(c, sml),inf) self.a = where(c < 0, 1.0 / min(c,-sml), -inf) return where(abs(c)==inf, 0, 1) #True #(c!=0) def _pdf(self, x, c): ## ex2 = 1-c*x ## pex2 = pow(ex2,1.0/c) ## p2 = exp(-pex2)*pex2/ex2 ## return p2 cx = c*x logex2 = where((c==0)*(x==x),0.0,log1p(-cx)) logpex2 = where((c==0)*(x==x),-x,logex2/c) pex2 = exp(logpex2) # % Handle special cases logpdf = where((cx==1) | (cx==-inf),-inf,-pex2+logpex2-logex2) putmask(logpdf,(c==1) & (x==1),0.0) # logpdf(c==1 & x==1) = 0; % 0^0 situation return exp(logpdf) def _cdf(self, x, c): #return exp(-pow(1-c*x,1.0/c)) loglogcdf = where((c==0)*(x==x),-x,log1p(-c*x)/c) return exp(-exp(loglogcdf)) def _ppf(self, q, c): #return 1.0/c*(1.-(-log(q))**c) x = -log(-log(q)) return where((c==0)*(x==x),x,-expm1(-c*x)/c) def _stats(self,c): g = lambda n : gam(n*c+1) g1 = g(1) g2 = g(2) g3 = g(3); g4 = g(4) g2mg12 = where(abs(c)<1e-7,(c*pi)**2.0/6.0,g2-g1**2.0) gam2k = where(abs(c)<1e-7,pi**2.0/6.0, expm1(gamln(2.0*c+1.0)-2*gamln(c+1.0))/c**2.0); eps = 1e-14 gamk = where(abs(c)<eps,-_EULER,expm1(gamln(c+1))/c) m = where(c<-1.0,nan,-gamk) v = where(c<-0.5,nan,g1**2.0*gam2k) #% skewness sk1 = where(c<-1./3,nan,np.sign(c)*(-g3+(g2+2*g2mg12)*g1)/((g2mg12)**(3./2.))); sk = where(abs(c)<=eps**0.29,12*sqrt(6)*_ZETA3/pi**3,sk1) #% The kurtosis is: ku1 = where(c<-1./4,nan,(g4+(-4*g3+3*(g2+g2mg12)*g1)*g1)/((g2mg12)**2)) ku = where(abs(c)<=(eps)**0.23,12.0/5.0,ku1-3.0) return m,v,sk,ku def _munp(self, n, c): k = arange(0,n+1) vals = 1.0/c**n * sum(comb(n,k) * (-1)**k * special.gamma(c*k + 1),axis=0) return where(c*n > -1, vals, inf) genextreme = genextreme_gen(name='genextreme', shapes='c') ## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition) ## gamma(a, loc, scale) with a an integer is the Erlang distribution ## gamma(1, loc, scale) is the Exponential distribution ## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom. class gamma_gen(rv_continuous): """A gamma continuous random variable. %(before_notes)s See Also -------- erlang, expon Notes ----- The probability density function for `gamma` is:: gamma.pdf(x, a) = lambda**a * x**(a-1) * exp(-lambda*x) / gamma(a) for ``x >= 0``, ``a > 0``. Here ``gamma(a)`` refers to the gamma function. The scale parameter is equal to ``scale = 1.0 / lambda``. `gamma` has a shape parameter `a` which needs to be set explicitly. For instance: >>> from scipy.stats import gamma >>> rv = gamma(3., loc = 0., scale = 2.) produces a frozen form of `gamma` with shape ``a = 3.``, ``loc = 0.`` and ``lambda = 1./scale = 1./2.``. When ``a`` is an integer, `gamma` reduces to the Erlang distribution, and when ``a=1`` to the exponential distribution. 
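    A fixed-location fit uses a dedicated one-dimensional solve for the
    shape parameter (see the ``fit`` override below); a minimal sketch,
    assuming only the public interface:

    >>> from scipy.stats import gamma
    >>> data = gamma.rvs(3., loc=0., scale=2., size=1000)
    >>> a_hat, loc_hat, scale_hat = gamma.fit(data, floc=0)  # loc held at 0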
%(example)s """ def _rvs(self, a): return mtrand.standard_gamma(a, self._size) def _pdf(self, x, a): return exp(self._logpdf(x, a)) def _logpdf(self, x, a): return (a-1)*log(x) - x - gamln(a) def _cdf(self, x, a): return special.gammainc(a, x) def _ppf(self, q, a): return special.gammaincinv(a,q) def _stats(self, a): return a, a, 2.0/sqrt(a), 6.0/a def _entropy(self, a): return special.psi(a)*(1-a) + 1 + gamln(a) def _fitstart(self, data): a = 4 / _skew(data)**2 return super(gamma_gen, self)._fitstart(data, args=(a,)) def fit(self, data, *args, **kwds): floc = kwds.get('floc', None) if floc == 0: xbar = ravel(data).mean() logx_bar = ravel(log(data)).mean() s = log(xbar) - logx_bar def func(a): return log(a) - special.digamma(a) - s aest = (3-s + math.sqrt((s-3)**2 + 24*s)) / (12*s) xa = aest*(1-0.4) xb = aest*(1+0.4) a = optimize.brentq(func, xa, xb, disp=0) scale = xbar / a return a, floc, scale else: return super(gamma_gen, self).fit(data, *args, **kwds) gamma = gamma_gen(a=0.0, name='gamma', shapes='a') # Generalized Gamma class gengamma_gen(rv_continuous): """A generalized gamma continuous random variable. %(before_notes)s Notes ----- The probability density function for `gengamma` is:: gengamma.pdf(x, a, c) = abs(c) * x**(c*a-1) * exp(-x**c) / gamma(a) for ``x > 0``, ``a > 0``, and ``c != 0``. %(example)s """ def _argcheck(self, a, c): return (a > 0) & (c != 0) def _pdf(self, x, a, c): return abs(c)* exp((c*a-1)*log(x)-x**c- gamln(a)) def _cdf(self, x, a, c): val = special.gammainc(a,x**c) cond = c + 0*val return where(cond>0,val,1-val) def _ppf(self, q, a, c): val1 = special.gammaincinv(a,q) val2 = special.gammaincinv(a,1.0-q) ic = 1.0/c cond = c+0*val1 return where(cond > 0,val1**ic,val2**ic) def _munp(self, n, a, c): return special.gamma(a+n*1.0/c) / special.gamma(a) def _entropy(self, a,c): val = special.psi(a) return a*(1-val) + 1.0/c*val + gamln(a)-log(abs(c)) gengamma = gengamma_gen(a=0.0, name='gengamma', shapes="a, c") ## Generalized Half-Logistic ## class genhalflogistic_gen(rv_continuous): """A generalized half-logistic continuous random variable. %(before_notes)s Notes ----- The probability density function for `genhalflogistic` is:: genhalflogistic.pdf(x, c) = 2 * (1-c*x)**(1/c-1) / (1+(1-c*x)**(1/c))**2 for ``0 <= x <= 1/c``, and ``c > 0``. %(example)s """ def _argcheck(self, c): self.b = 1.0 / c return (c > 0) def _pdf(self, x, c): limit = 1.0/c tmp = asarray(1-c*x) tmp0 = tmp**(limit-1) tmp2 = tmp0*tmp return 2*tmp0 / (1+tmp2)**2 def _cdf(self, x, c): limit = 1.0/c tmp = asarray(1-c*x) tmp2 = tmp**(limit) return (1.0-tmp2) / (1+tmp2) def _ppf(self, q, c): return 1.0/c*(1-((1.0-q)/(1.0+q))**c) def _entropy(self,c): return 2 - (2*c+1)*log(2) genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic', shapes='c') ## Gompertz (Truncated Gumbel) ## Defined for x>=0 class gompertz_gen(rv_continuous): """A Gompertz (or truncated Gumbel) continuous random variable. %(before_notes)s Notes ----- The probability density function for `gompertz` is:: gompertz.pdf(x, c) = c * exp(x) * exp(-c*(exp(x)-1)) for ``x >= 0``, ``c > 0``. %(example)s """ def _pdf(self, x, c): ex = exp(x) return c*ex*exp(-c*(ex-1)) def _cdf(self, x, c): return 1.0-exp(-c*(exp(x)-1)) def _ppf(self, q, c): return log(1-1.0/c*log(1-q)) def _entropy(self, c): return 1.0 - log(c) - exp(c)*special.expn(1,c) gompertz = gompertz_gen(a=0.0, name='gompertz', shapes='c') ## Gumbel, Log-Weibull, Fisher-Tippett, Gompertz ## The left-skewed gumbel distribution. 
## and right-skewed are available as gumbel_l and gumbel_r class gumbel_r_gen(rv_continuous): """A right-skewed Gumbel continuous random variable. %(before_notes)s See Also -------- gumbel_l, gompertz, genextreme Notes ----- The probability density function for `gumbel_r` is:: gumbel_r.pdf(x) = exp(-(x + exp(-x))) The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett distribution. It is also related to the extreme value distribution, log-Weibull and Gompertz distributions. %(example)s """ def _pdf(self, x): ex = exp(-x) return ex*exp(-ex) def _logpdf(self, x): return -x - exp(-x) def _cdf(self, x): return exp(-exp(-x)) def _logcdf(self, x): return -exp(-x) def _ppf(self, q): return -log(-log(q)) def _stats(self): return _EULER, pi*pi/6.0, \ 12*sqrt(6)/pi**3 * _ZETA3, 12.0/5 def _entropy(self): return 1.0608407169541684911 gumbel_r = gumbel_r_gen(name='gumbel_r') class gumbel_l_gen(rv_continuous): """A left-skewed Gumbel continuous random variable. %(before_notes)s See Also -------- gumbel_r, gompertz, genextreme Notes ----- The probability density function for `gumbel_l` is:: gumbel_l.pdf(x) = exp(x - exp(x)) The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett distribution. It is also related to the extreme value distribution, log-Weibull and Gompertz distributions. %(example)s """ def _pdf(self, x): ex = exp(x) return ex*exp(-ex) def _logpdf(self, x): return x - exp(x) def _cdf(self, x): return 1.0-exp(-exp(x)) def _ppf(self, q): return log(-log(1-q)) def _stats(self): return -_EULER, pi*pi/6.0, \ -12*sqrt(6)/pi**3 * _ZETA3, 12.0/5 def _entropy(self): return 1.0608407169541684911 gumbel_l = gumbel_l_gen(name='gumbel_l') # Half-Cauchy class halfcauchy_gen(rv_continuous): """A Half-Cauchy continuous random variable. %(before_notes)s Notes ----- The probability density function for `halfcauchy` is:: halfcauchy.pdf(x) = 2 / (pi * (1 + x**2)) for ``x >= 0``. %(example)s """ def _pdf(self, x): return 2.0/pi/(1.0+x*x) def _logpdf(self, x): return np.log(2.0/pi) - np.log1p(x*x) def _cdf(self, x): return 2.0/pi*arctan(x) def _ppf(self, q): return tan(pi/2*q) def _stats(self): return inf, inf, nan, nan def _entropy(self): return log(2*pi) halfcauchy = halfcauchy_gen(a=0.0, name='halfcauchy') ## Half-Logistic ## class halflogistic_gen(rv_continuous): """A half-logistic continuous random variable. %(before_notes)s Notes ----- The probability density function for `halflogistic` is:: halflogistic.pdf(x) = 2 * exp(-x) / (1+exp(-x))**2 = 1/2 * sech(x/2)**2 for ``x >= 0``. %(example)s """ def _pdf(self, x): return 0.5/(cosh(x/2.0))**2.0 def _cdf(self, x): return tanh(x/2.0) def _ppf(self, q): return 2*arctanh(q) def _munp(self, n): if n==1: return 2*log(2) if n==2: return pi*pi/3.0 if n==3: return 9*_ZETA3 if n==4: return 7*pi**4 / 15.0 return 2*(1-pow(2.0,1-n))*special.gamma(n+1)*special.zeta(n,1) def _entropy(self): return 2-log(2) halflogistic = halflogistic_gen(a=0.0, name='halflogistic') ## Half-normal = chi(1, loc, scale) class halfnorm_gen(rv_continuous): """A half-normal continuous random variable. %(before_notes)s Notes ----- The probability density function for `halfnorm` is:: halfnorm.pdf(x) = sqrt(2/pi) * exp(-x**2/2) for ``x > 0``. 
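    `halfnorm` is the distribution of ``abs(Z)`` for a standard normal
    ``Z``, so its cdf is ``2*norm.cdf(x) - 1``. A quick numerical check
    (sketch, public interface only):

    >>> from scipy.stats import halfnorm, norm
    >>> import numpy as np
    >>> x = np.linspace(0., 3., 7)
    >>> np.allclose(halfnorm.cdf(x), 2*norm.cdf(x) - 1)
    True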
%(example)s """ def _rvs(self): return abs(norm.rvs(size=self._size)) def _pdf(self, x): return sqrt(2.0/pi)*exp(-x*x/2.0) def _logpdf(self, x): return 0.5 * np.log(2.0/pi) - x*x/2.0 def _cdf(self, x): return special.ndtr(x)*2-1.0 def _ppf(self, q): return special.ndtri((1+q)/2.0) def _stats(self): return sqrt(2.0/pi), 1-2.0/pi, sqrt(2)*(4-pi)/(pi-2)**1.5, \ 8*(pi-3)/(pi-2)**2 def _entropy(self): return 0.5*log(pi/2.0)+0.5 halfnorm = halfnorm_gen(a=0.0, name='halfnorm') ## Hyperbolic Secant class hypsecant_gen(rv_continuous): """A hyperbolic secant continuous random variable. %(before_notes)s Notes ----- The probability density function for `hypsecant` is:: hypsecant.pdf(x) = 1/pi * sech(x) %(example)s """ def _pdf(self, x): return 1.0/(pi*cosh(x)) def _cdf(self, x): return 2.0/pi*arctan(exp(x)) def _ppf(self, q): return log(tan(pi*q/2.0)) def _stats(self): return 0, pi*pi/4, 0, 2 def _entropy(self): return log(2*pi) hypsecant = hypsecant_gen(name='hypsecant') ## Gauss Hypergeometric class gausshyper_gen(rv_continuous): """A Gauss hypergeometric continuous random variable. %(before_notes)s Notes ----- The probability density function for `gausshyper` is:: gausshyper.pdf(x, a, b, c, z) = C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c) for ``0 <= x <= 1``, ``a > 0``, ``b > 0``, and ``C = 1 / (B(a,b) F[2,1](c, a; a+b; -z))`` %(example)s """ def _argcheck(self, a, b, c, z): return (a > 0) & (b > 0) & (c==c) & (z==z) def _pdf(self, x, a, b, c, z): Cinv = gam(a)*gam(b)/gam(a+b)*special.hyp2f1(c,a,a+b,-z) return 1.0/Cinv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c def _munp(self, n, a, b, c, z): fac = special.beta(n+a,b) / special.beta(a,b) num = special.hyp2f1(c,a+n,a+b+n,-z) den = special.hyp2f1(c,a,a+b,-z) return fac*num / den gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper', shapes="a, b, c, z") ## Inverted Gamma # special case of generalized gamma with c=-1 # class invgamma_gen(rv_continuous): """An inverted gamma continuous random variable. %(before_notes)s Notes ----- The probability density function for `invgamma` is:: invgamma.pdf(x, a) = x**(-a-1) / gamma(a) * exp(-1/x) for x > 0, a > 0. %(example)s """ def _pdf(self, x, a): return exp(self._logpdf(x,a)) def _logpdf(self, x, a): return (-(a+1)*log(x)-gamln(a) - 1.0/x) def _cdf(self, x, a): return 1.0-special.gammainc(a, 1.0/x) def _ppf(self, q, a): return 1.0/special.gammaincinv(a,1-q) def _munp(self, n, a): return exp(gamln(a-n) - gamln(a)) def _entropy(self, a): return a - (a+1.0)*special.psi(a) + gamln(a) invgamma = invgamma_gen(a=0.0, name='invgamma', shapes='a') ## Inverse Gaussian Distribution (used to be called 'invnorm' # scale is gamma from DATAPLOT and B from Regress class invgauss_gen(rv_continuous): """An inverse Gaussian continuous random variable. %(before_notes)s Notes ----- The probability density function for `invgauss` is:: invgauss.pdf(x, mu) = 1 / sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2)) for ``x > 0``. When `mu` is too small, evaluating the cumulative density function will be inaccurate due to ``cdf(mu -> 0) = inf * 0``. NaNs are returned for ``mu <= 0.0028``. %(example)s """ def _rvs(self, mu): return mtrand.wald(mu, 1.0, size=self._size) def _pdf(self, x, mu): return 1.0/sqrt(2*pi*x**3.0)*exp(-1.0/(2*x)*((x-mu)/mu)**2) def _logpdf(self, x, mu): return -0.5*log(2*pi) - 1.5*log(x) - ((x-mu)/mu)**2/(2*x) def _cdf(self, x, mu): fac = sqrt(1.0/x) # Numerical accuracy for small `mu` is bad. See #869. 
C1 = norm.cdf(fac*(x-mu)/mu) C1 += exp(1.0/mu) * norm.cdf(-fac*(x+mu)/mu) * exp(1.0/mu) return C1 def _stats(self, mu): return mu, mu**3.0, 3*sqrt(mu), 15*mu invgauss = invgauss_gen(a=0.0, name='invgauss', shapes="mu") ## Inverted Weibull class invweibull_gen(rv_continuous): """An inverted Weibull continuous random variable. %(before_notes)s Notes ----- The probability density function for `invweibull` is:: invweibull.pdf(x, c) = c * x**(-c-1) * exp(-x**(-c)) for ``x > 0``, ``c > 0``. %(example)s """ def _pdf(self, x, c): xc1 = x**(-c-1.0) #xc2 = xc1*x xc2 = x**(-c) xc2 = exp(-xc2) return c*xc1*xc2 def _cdf(self, x, c): xc1 = x**(-c) return exp(-xc1) def _ppf(self, q, c): return pow(-log(q),asarray(-1.0/c)) def _entropy(self, c): return 1+_EULER + _EULER / c - log(c) invweibull = invweibull_gen(a=0, name='invweibull', shapes='c') ## Johnson SB class johnsonsb_gen(rv_continuous): """A Johnson SB continuous random variable. %(before_notes)s See Also -------- johnsonsu Notes ----- The probability density function for `johnsonsb` is:: johnsonsb.pdf(x, a, b) = b / (x*(1-x)) * phi(a + b * log(x/(1-x))) for ``0 < x < 1`` and ``a,b > 0``, and ``phi`` is the normal pdf. %(example)s """ def _argcheck(self, a, b): return (b > 0) & (a==a) def _pdf(self, x, a, b): trm = norm.pdf(a+b*log(x/(1.0-x))) return b*1.0/(x*(1-x))*trm def _cdf(self, x, a, b): return norm.cdf(a+b*log(x/(1.0-x))) def _ppf(self, q, a, b): return 1.0/(1+exp(-1.0/b*(norm.ppf(q)-a))) johnsonsb = johnsonsb_gen(a=0.0, b=1.0, name='johnsonb', shapes="a, b") ## Johnson SU class johnsonsu_gen(rv_continuous): """A Johnson SU continuous random variable. %(before_notes)s See Also -------- johnsonsb Notes ----- The probability density function for `johnsonsu` is:: johnsonsu.pdf(x, a, b) = b / sqrt(x**2 + 1) * phi(a + b * log(x + sqrt(x**2 + 1))) for all ``x, a, b > 0``, and `phi` is the normal pdf. %(example)s """ def _argcheck(self, a, b): return (b > 0) & (a==a) def _pdf(self, x, a, b): x2 = x*x trm = norm.pdf(a+b*log(x+sqrt(x2+1))) return b*1.0/sqrt(x2+1.0)*trm def _cdf(self, x, a, b): return norm.cdf(a+b*log(x+sqrt(x*x+1))) def _ppf(self, q, a, b): return sinh((norm.ppf(q)-a)/b) johnsonsu = johnsonsu_gen(name='johnsonsu', shapes="a, b") ## Laplace Distribution class laplace_gen(rv_continuous): """A Laplace continuous random variable. %(before_notes)s Notes ----- The probability density function for `laplace` is:: laplace.pdf(x) = 1/2 * exp(-abs(x)) %(example)s """ def _rvs(self): return mtrand.laplace(0, 1, size=self._size) def _pdf(self, x): return 0.5*exp(-abs(x)) def _cdf(self, x): return where(x > 0, 1.0-0.5*exp(-x), 0.5*exp(x)) def _ppf(self, q): return where(q > 0.5, -log(2*(1-q)), log(2*q)) def _stats(self): return 0, 2, 0, 3 def _entropy(self): return log(2)+1 laplace = laplace_gen(name='laplace') ## Levy Distribution class levy_gen(rv_continuous): """A Levy continuous random variable. %(before_notes)s See Also -------- levy_stable, levy_l Notes ----- The probability density function for `levy` is:: levy.pdf(x) = 1 / (x * sqrt(2*pi*x)) * exp(-1/(2*x)) for ``x > 0``. This is the same as the Levy-stable distribution with a=1/2 and b=1. %(example)s """ def _pdf(self, x): return 1/sqrt(2*pi*x)/x*exp(-1/(2*x)) def _cdf(self, x): return 2*(1-norm._cdf(1/sqrt(x))) def _ppf(self, q): val = norm._ppf(1-q/2.0) return 1.0/(val*val) def _stats(self): return inf, inf, nan, nan levy = levy_gen(a=0.0,name="levy") ## Left-skewed Levy Distribution class levy_l_gen(rv_continuous): """A left-skewed Levy continuous random variable. 
%(before_notes)s See Also -------- levy, levy_stable Notes ----- The probability density function for `levy_l` is:: levy_l.pdf(x) = 1 / (abs(x) * sqrt(2*pi*abs(x))) * exp(-1/(2*abs(x))) for ``x < 0``. This is the same as the Levy-stable distribution with a=1/2 and b=-1. %(example)s """ def _pdf(self, x): ax = abs(x) return 1/sqrt(2*pi*ax)/ax*exp(-1/(2*ax)) def _cdf(self, x): ax = abs(x) return 2*norm._cdf(1/sqrt(ax))-1 def _ppf(self, q): val = norm._ppf((q+1.0)/2) return -1.0/(val*val) def _stats(self): return inf, inf, nan, nan levy_l = levy_l_gen(b=0.0, name="levy_l") ## Levy-stable Distribution (only random variates) class levy_stable_gen(rv_continuous): """A Levy-stable continuous random variable. %(before_notes)s See Also -------- levy, levy_l Notes ----- Levy-stable distribution (only random variates available -- ignore other docs) %(example)s """ def _rvs(self, alpha, beta): sz = self._size TH = uniform.rvs(loc=-pi/2.0,scale=pi,size=sz) W = expon.rvs(size=sz) if alpha==1: return 2/pi*(pi/2+beta*TH)*tan(TH)-beta*log((pi/2*W*cos(TH))/(pi/2+beta*TH)) # else ialpha = 1.0/alpha aTH = alpha*TH if beta==0: return W/(cos(TH)/tan(aTH)+sin(TH))*((cos(aTH)+sin(aTH)*tan(TH))/W)**ialpha # else val0 = beta*tan(pi*alpha/2) th0 = arctan(val0)/alpha val3 = W/(cos(TH)/tan(alpha*(th0+TH))+sin(TH)) res3 = val3*((cos(aTH)+sin(aTH)*tan(TH)-val0*(sin(aTH)-cos(aTH)*tan(TH)))/W)**ialpha return res3 def _argcheck(self, alpha, beta): if beta == -1: self.b = 0.0 elif beta == 1: self.a = 0.0 return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1) def _pdf(self, x, alpha, beta): raise NotImplementedError levy_stable = levy_stable_gen(name='levy_stable', shapes="alpha, beta") ## Logistic (special case of generalized logistic with c=1) ## Sech-squared class logistic_gen(rv_continuous): """A logistic continuous random variable. %(before_notes)s Notes ----- The probability density function for `logistic` is:: logistic.pdf(x) = exp(-x) / (1+exp(-x))**2 %(example)s """ def _rvs(self): return mtrand.logistic(size=self._size) def _pdf(self, x): ex = exp(-x) return ex / (1+ex)**2.0 def _cdf(self, x): return 1.0/(1+exp(-x)) def _ppf(self, q): return -log(1.0/q-1) def _stats(self): return 0, pi*pi/3.0, 0, 6.0/5.0 def _entropy(self): return 1.0 logistic = logistic_gen(name='logistic') ## Log Gamma # class loggamma_gen(rv_continuous): """A log gamma continuous random variable. %(before_notes)s Notes ----- The probability density function for `loggamma` is:: loggamma.pdf(x, c) = exp(c*x-exp(x)) / gamma(c) for all ``x, c > 0``. %(example)s """ def _rvs(self, c): return log(mtrand.gamma(c, size=self._size)) def _pdf(self, x, c): return exp(c*x-exp(x)-gamln(c)) def _cdf(self, x, c): return special.gammainc(c, exp(x)) def _ppf(self, q, c): return log(special.gammaincinv(c,q)) def _munp(self,n,*args): # use generic moment calculation using ppf return self._mom0_sc(n,*args) loggamma = loggamma_gen(name='loggamma', shapes='c') ## Log-Laplace (Log Double Exponential) ## class loglaplace_gen(rv_continuous): """A log-Laplace continuous random variable. %(before_notes)s Notes ----- The probability density function for `loglaplace` is:: loglaplace.pdf(x, c) = c / 2 * x**(c-1), for 0 < x < 1 = c / 2 * x**(-c-1), for x >= 1 for ``c > 0``. 
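    Both branches of the density meet at ``x = 1``, which is therefore the
    median for every ``c``. A minimal check (sketch, public interface
    only):

    >>> from scipy.stats import loglaplace
    >>> float(loglaplace.cdf(1.0, 2.5)) == 0.5
    True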
%(example)s """ def _pdf(self, x, c): cd2 = c/2.0 c = where(x < 1, c, -c) return cd2*x**(c-1) def _cdf(self, x, c): return where(x < 1, 0.5*x**c, 1-0.5*x**(-c)) def _ppf(self, q, c): return where(q < 0.5, (2.0*q)**(1.0/c), (2*(1.0-q))**(-1.0/c)) def _entropy(self, c): return log(2.0/c) + 1.0 loglaplace = loglaplace_gen(a=0.0, name='loglaplace', shapes='c') ## Lognormal (Cobb-Douglass) ## std is a shape parameter and is the variance of the underlying ## distribution. ## the mean of the underlying distribution is log(scale) class lognorm_gen(rv_continuous): """A lognormal continuous random variable. %(before_notes)s Notes ----- The probability density function for `lognorm` is:: lognorm.pdf(x, s) = 1 / (s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2) for ``x > 0``, ``s > 0``. If log x is normally distributed with mean mu and variance sigma**2, then x is log-normally distributed with shape paramter sigma and scale parameter exp(mu). %(example)s """ def _rvs(self, s): return exp(s * norm.rvs(size=self._size)) def _pdf(self, x, s): Px = exp(-log(x)**2 / (2*s**2)) return Px / (s*x*sqrt(2*pi)) def _cdf(self, x, s): return norm.cdf(log(x)/s) def _ppf(self, q, s): return exp(s*norm._ppf(q)) def _stats(self, s): p = exp(s*s) mu = sqrt(p) mu2 = p*(p-1) g1 = sqrt((p-1))*(2+p) g2 = numpy.polyval([1,2,3,0,-6.0],p) return mu, mu2, g1, g2 def _entropy(self, s): return 0.5*(1+log(2*pi)+2*log(s)) lognorm = lognorm_gen(a=0.0, name='lognorm', shapes='s') # Gibrat's distribution is just lognormal with s=1 class gilbrat_gen(lognorm_gen): """A Gilbrat continuous random variable. %(before_notes)s Notes ----- The probability density function for `gilbrat` is:: gilbrat.pdf(x) = 1/(x*sqrt(2*pi)) * exp(-1/2*(log(x))**2) %(example)s """ def _rvs(self): return lognorm_gen._rvs(self, 1.0) def _pdf(self, x): return lognorm_gen._pdf(self, x, 1.0) def _cdf(self, x): return lognorm_gen._cdf(self, x, 1.0) def _ppf(self, q): return lognorm_gen._ppf(self, q, 1.0) def _stats(self): return lognorm_gen._stats(self, 1.0) def _entropy(self): return 0.5*log(2*pi) + 0.5 gilbrat = gilbrat_gen(a=0.0, name='gilbrat') # MAXWELL class maxwell_gen(rv_continuous): """A Maxwell continuous random variable. %(before_notes)s Notes ----- A special case of a `chi` distribution, with ``df = 3``, ``loc = 0.0``, and given ``scale = 1.0 / sqrt(a)``, where a is the parameter used in the Mathworld description [1]_. The probability density function for `maxwell` is:: maxwell.pdf(x, a) = sqrt(2/pi)x**2 * exp(-x**2/2) for ``x > 0``. References ---------- .. [1] http://mathworld.wolfram.com/MaxwellDistribution.html %(example)s """ def _rvs(self): return chi.rvs(3.0,size=self._size) def _pdf(self, x): return sqrt(2.0/pi)*x*x*exp(-x*x/2.0) def _cdf(self, x): return special.gammainc(1.5,x*x/2.0) def _ppf(self, q): return sqrt(2*special.gammaincinv(1.5,q)) def _stats(self): val = 3*pi-8 return 2*sqrt(2.0/pi), 3-8/pi, sqrt(2)*(32-10*pi)/val**1.5, \ (-12*pi*pi + 160*pi - 384) / val**2.0 def _entropy(self): return _EULER + 0.5*log(2*pi)-0.5 maxwell = maxwell_gen(a=0.0, name='maxwell') # Mielke's Beta-Kappa class mielke_gen(rv_continuous): """A Mielke's Beta-Kappa continuous random variable. %(before_notes)s Notes ----- The probability density function for `mielke` is:: mielke.pdf(x, k, s) = k * x**(k-1) / (1+x**s)**(1+k/s) for ``x > 0``. 
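    A round-trip check of the closed-form cdf and ppf given below (a
    sketch, with arbitrary shape values ``k=2``, ``s=3``):

    >>> from scipy.stats import mielke
    >>> import numpy as np
    >>> q = np.array([0.25, 0.5, 0.75])
    >>> np.allclose(mielke.cdf(mielke.ppf(q, 2.0, 3.0), 2.0, 3.0), q)
    True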
%(example)s """ def _pdf(self, x, k, s): return k*x**(k-1.0) / (1.0+x**s)**(1.0+k*1.0/s) def _cdf(self, x, k, s): return x**k / (1.0+x**s)**(k*1.0/s) def _ppf(self, q, k, s): qsk = pow(q,s*1.0/k) return pow(qsk/(1.0-qsk),1.0/s) mielke = mielke_gen(a=0.0, name='mielke', shapes="k, s") # Nakagami (cf Chi) class nakagami_gen(rv_continuous): """A Nakagami continuous random variable. %(before_notes)s Notes ----- The probability density function for `nakagami` is:: nakagami.pdf(x, nu) = 2 * nu**nu / gamma(nu) * x**(2*nu-1) * exp(-nu*x**2) for ``x > 0``, ``nu > 0``. %(example)s """ def _pdf(self, x, nu): return 2*nu**nu/gam(nu)*(x**(2*nu-1.0))*exp(-nu*x*x) def _cdf(self, x, nu): return special.gammainc(nu,nu*x*x) def _ppf(self, q, nu): return sqrt(1.0/nu*special.gammaincinv(nu,q)) def _stats(self, nu): mu = gam(nu+0.5)/gam(nu)/sqrt(nu) mu2 = 1.0-mu*mu g1 = mu*(1-4*nu*mu2)/2.0/nu/mu2**1.5 g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1 g2 /= nu*mu2**2.0 return mu, mu2, g1, g2 nakagami = nakagami_gen(a=0.0, name="nakagami", shapes='nu') # Non-central chi-squared # nc is lambda of definition, df is nu class ncx2_gen(rv_continuous): """A non-central chi-squared continuous random variable. %(before_notes)s Notes ----- The probability density function for `ncx2` is:: ncx2.pdf(x, df, nc) = exp(-(nc+df)/2) * 1/2 * (x/nc)**((df-2)/4) * I[(df-2)/2](sqrt(nc*x)) for ``x > 0``. %(example)s """ def _rvs(self, df, nc): return mtrand.noncentral_chisquare(df,nc,self._size) def _logpdf(self, x, df, nc): a = asarray(df/2.0) fac = -nc/2.0 - x/2.0 + (a-1)*np.log(x) - a*np.log(2) - special.gammaln(a) return fac + np.nan_to_num(np.log(special.hyp0f1(a, nc * x/4.0))) def _pdf(self, x, df, nc): return np.exp(self._logpdf(x, df, nc)) def _cdf(self, x, df, nc): return special.chndtr(x,df,nc) def _ppf(self, q, df, nc): return special.chndtrix(q,df,nc) def _stats(self, df, nc): val = df + 2.0*nc return df + nc, 2*val, sqrt(8)*(val+nc)/val**1.5, \ 12.0*(val+2*nc)/val**2.0 ncx2 = ncx2_gen(a=0.0, name='ncx2', shapes="df, nc") # Non-central F class ncf_gen(rv_continuous): """A non-central F distribution continuous random variable. %(before_notes)s Notes ----- The probability density function for `ncf` is:: ncf.pdf(x, df1, df2, nc) = exp(nc/2 + nc*df1*x/(2*(df1*x+df2))) * df1**(df1/2) * df2**(df2/2) * x**(df1/2-1) * (df2+df1*x)**(-(df1+df2)/2) * gamma(df1/2)*gamma(1+df2/2) * L^{v1/2-1}^{v2/2}(-nc*v1*x/(2*(v1*x+v2))) / (B(v1/2, v2/2) * gamma((v1+v2)/2)) for ``df1, df2, nc > 0``. %(example)s """ def _rvs(self, dfn, dfd, nc): return mtrand.noncentral_f(dfn,dfd,nc,self._size) def _pdf_skip(self, x, dfn, dfd, nc): n1,n2 = dfn, dfd term = -nc/2+nc*n1*x/(2*(n2+n1*x)) + gamln(n1/2.)+gamln(1+n2/2.) 
term -= gamln((n1+n2)/2.0) Px = exp(term) Px *= n1**(n1/2) * n2**(n2/2) * x**(n1/2-1) Px *= (n2+n1*x)**(-(n1+n2)/2) Px *= special.assoc_laguerre(-nc*n1*x/(2.0*(n2+n1*x)),n2/2,n1/2-1) Px /= special.beta(n1/2,n2/2) #this function does not have a return # drop it for now, the generic function seems to work ok def _cdf(self, x, dfn, dfd, nc): return special.ncfdtr(dfn,dfd,nc,x) def _ppf(self, q, dfn, dfd, nc): return special.ncfdtri(dfn, dfd, nc, q) def _munp(self, n, dfn, dfd, nc): val = (dfn *1.0/dfd)**n term = gamln(n+0.5*dfn) + gamln(0.5*dfd-n) - gamln(dfd*0.5) val *= exp(-nc / 2.0+term) val *= special.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc) return val def _stats(self, dfn, dfd, nc): mu = where(dfd <= 2, inf, dfd / (dfd-2.0)*(1+nc*1.0/dfn)) mu2 = where(dfd <=4, inf, 2*(dfd*1.0/dfn)**2.0 * \ ((dfn+nc/2.0)**2.0 + (dfn+nc)*(dfd-2.0)) / \ ((dfd-2.0)**2.0 * (dfd-4.0))) return mu, mu2, None, None ncf = ncf_gen(a=0.0, name='ncf', shapes="dfn, dfd, nc") ## Student t distribution class t_gen(rv_continuous): """A Student's T continuous random variable. %(before_notes)s Notes ----- The probability density function for `t` is:: gamma((df+1)/2) t.pdf(x, df) = --------------------------------------------------- sqrt(pi*df) * gamma(df/2) * (1+x**2/df)**((df+1)/2) for ``df > 0``. %(example)s """ def _rvs(self, df): return mtrand.standard_t(df, size=self._size) #Y = f.rvs(df, df, size=self._size) #sY = sqrt(Y) #return 0.5*sqrt(df)*(sY-1.0/sY) def _pdf(self, x, df): r = asarray(df*1.0) Px = exp(gamln((r+1)/2)-gamln(r/2)) Px /= sqrt(r*pi)*(1+(x**2)/r)**((r+1)/2) return Px def _logpdf(self, x, df): r = df*1.0 lPx = gamln((r+1)/2)-gamln(r/2) lPx -= 0.5*log(r*pi) + (r+1)/2*log(1+(x**2)/r) return lPx def _cdf(self, x, df): return special.stdtr(df, x) def _sf(self, x, df): return special.stdtr(df, -x) def _ppf(self, q, df): return special.stdtrit(df, q) def _isf(self, q, df): return -special.stdtrit(df, q) def _stats(self, df): mu2 = where(df > 2, df / (df-2.0), inf) g1 = where(df > 3, 0.0, nan) g2 = where(df > 4, 6.0/(df-4.0), nan) return 0, mu2, g1, g2 t = t_gen(name='t', shapes="df") ## Non-central T distribution class nct_gen(rv_continuous): """A non-central Student's T continuous random variable. %(before_notes)s Notes ----- The probability density function for `nct` is:: df**(df/2) * gamma(df+1) nct.pdf(x, df, nc) = ---------------------------------------------------- 2**df*exp(nc**2/2) * (df+x**2)**(df/2) * gamma(df/2) for ``df > 0``, ``nc > 0``. %(example)s """ def _rvs(self, df, nc): return norm.rvs(loc=nc,size=self._size)*sqrt(df) / sqrt(chi2.rvs(df,size=self._size)) def _pdf(self, x, df, nc): n = df*1.0 nc = nc*1.0 x2 = x*x ncx2 = nc*nc*x2 fac1 = n + x2 trm1 = n/2.*log(n) + gamln(n+1) trm1 -= n*log(2)+nc*nc/2.+(n/2.)*log(fac1)+gamln(n/2.) 
Px = exp(trm1) valF = ncx2 / (2*fac1) trm1 = sqrt(2)*nc*x*special.hyp1f1(n/2+1,1.5,valF) trm1 /= asarray(fac1*special.gamma((n+1)/2)) trm2 = special.hyp1f1((n+1)/2,0.5,valF) trm2 /= asarray(sqrt(fac1)*special.gamma(n/2+1)) Px *= trm1+trm2 return Px def _cdf(self, x, df, nc): return special.nctdtr(df, nc, x) def _ppf(self, q, df, nc): return special.nctdtrit(df, nc, q) def _stats(self, df, nc, moments='mv'): mu, mu2, g1, g2 = None, None, None, None val1 = gam((df-1.0)/2.0) val2 = gam(df/2.0) if 'm' in moments: mu = nc*sqrt(df/2.0)*val1/val2 if 'v' in moments: var = (nc*nc+1.0)*df/(df-2.0) var -= nc*nc*df* val1**2 / 2.0 / val2**2 mu2 = var if 's' in moments: g1n = 2*nc*sqrt(df)*val1*((nc*nc*(2*df-7)-3)*val2**2 \ -nc*nc*(df-2)*(df-3)*val1**2) g1d = (df-3)*sqrt(2*df*(nc*nc+1)/(df-2) - \ nc*nc*df*(val1/val2)**2) * val2 * \ (nc*nc*(df-2)*val1**2 - \ 2*(nc*nc+1)*val2**2) g1 = g1n/g1d if 'k' in moments: g2n = 2*(-3*nc**4*(df-2)**2 *(df-3) *(df-4)*val1**4 + \ 2**(6-2*df) * nc*nc*(df-2)*(df-4)* \ (nc*nc*(2*df-7)-3)*pi* gam(df+1)**2 - \ 4*(nc**4*(df-5)-6*nc*nc-3)*(df-3)*val2**4) g2d = (df-3)*(df-4)*(nc*nc*(df-2)*val1**2 - \ 2*(nc*nc+1)*val2)**2 g2 = g2n / g2d return mu, mu2, g1, g2 nct = nct_gen(name="nct", shapes="df, nc") # Pareto class pareto_gen(rv_continuous): """A Pareto continuous random variable. %(before_notes)s Notes ----- The probability density function for `pareto` is:: pareto.pdf(x, b) = b / x**(b+1) for ``x >= 1``, ``b > 0``. %(example)s """ def _pdf(self, x, b): return b * x**(-b-1) def _cdf(self, x, b): return 1 - x**(-b) def _ppf(self, q, b): return pow(1-q, -1.0/b) def _stats(self, b, moments='mv'): mu, mu2, g1, g2 = None, None, None, None if 'm' in moments: mask = b > 1 bt = extract(mask,b) mu = valarray(shape(b),value=inf) place(mu, mask, bt / (bt-1.0)) if 'v' in moments: mask = b > 2 bt = extract( mask,b) mu2 = valarray(shape(b), value=inf) place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2) if 's' in moments: mask = b > 3 bt = extract( mask,b) g1 = valarray(shape(b), value=nan) vals = 2*(bt+1.0)*sqrt(b-2.0)/((b-3.0)*sqrt(b)) place(g1, mask, vals) if 'k' in moments: mask = b > 4 bt = extract( mask,b) g2 = valarray(shape(b), value=nan) vals = 6.0*polyval([1.0,1.0,-6,-2],bt)/ \ polyval([1.0,-7.0,12.0,0.0],bt) place(g2, mask, vals) return mu, mu2, g1, g2 def _entropy(self, c): return 1 + 1.0/c - log(c) pareto = pareto_gen(a=1.0, name="pareto", shapes="b") # LOMAX (Pareto of the second kind.) class lomax_gen(rv_continuous): """A Lomax (Pareto of the second kind) continuous random variable. %(before_notes)s Notes ----- The Lomax distribution is a special case of the Pareto distribution, with (loc=-1.0). The probability density function for `lomax` is:: lomax.pdf(x, c) = c / (1+x)**(c+1) for ``x >= 0``, ``c > 0``. %(example)s """ def _pdf(self, x, c): return c*1.0/(1.0+x)**(c+1.0) def _logpdf(self, x, c): return log(c) - (c+1)*log(1+x) def _cdf(self, x, c): return 1.0-1.0/(1.0+x)**c def _sf(self, x, c): return 1.0/(1.0+x)**c def _logsf(self, x, c): return -c*log(1+x) def _ppf(self, q, c): return pow(1.0-q,-1.0/c)-1 def _stats(self, c): mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk') return mu, mu2, g1, g2 def _entropy(self, c): return 1+1.0/c-log(c) lomax = lomax_gen(a=0.0, name="lomax", shapes="c") ## Power-function distribution ## Special case of beta dist. with d =1.0 class powerlaw_gen(rv_continuous): """A power-function continuous random variable. 
%(before_notes)s Notes ----- The probability density function for `powerlaw` is:: powerlaw.pdf(x, a) = a * x**(a-1) for ``0 <= x <= 1``, ``a > 0``. %(example)s """ def _pdf(self, x, a): return a*x**(a-1.0) def _logpdf(self, x, a): return log(a) + (a-1)*log(x) def _cdf(self, x, a): return x**(a*1.0) def _logcdf(self, x, a): return a*log(x) def _ppf(self, q, a): return pow(q, 1.0/a) def _stats(self, a): return (a / (a + 1.0), a / (a + 2.0) / (a + 1.0) ** 2, -2.0 * ((a - 1.0) / (a + 3.0)) * sqrt((a + 2.0) / a), 6 * polyval([1, -1, -6, 2], a) / (a * (a + 3.0) * (a + 4))) def _entropy(self, a): return 1 - 1.0/a - log(a) powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw", shapes="a") # Power log normal class powerlognorm_gen(rv_continuous): """A power log-normal continuous random variable. %(before_notes)s Notes ----- The probability density function for `powerlognorm` is:: powerlognorm.pdf(x, c, s) = c / (x*s) * phi(log(x)/s) * (Phi(-log(x)/s))**(c-1), where ``phi`` is the normal pdf, and ``Phi`` is the normal cdf, and ``x > 0``, ``s, c > 0``. %(example)s """ def _pdf(self, x, c, s): return c/(x*s)*norm.pdf(log(x)/s)*pow(norm.cdf(-log(x)/s),c*1.0-1.0) def _cdf(self, x, c, s): return 1.0 - pow(norm.cdf(-log(x)/s),c*1.0) def _ppf(self, q, c, s): return exp(-s*norm.ppf(pow(1.0-q,1.0/c))) powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm", shapes="c, s") # Power Normal class powernorm_gen(rv_continuous): """A power normal continuous random variable. %(before_notes)s Notes ----- The probability density function for `powernorm` is:: powernorm.pdf(x, c) = c * phi(x) * (Phi(-x))**(c-1) where ``phi`` is the normal pdf, and ``Phi`` is the normal cdf, and ``x > 0``, ``c > 0``. %(example)s """ def _pdf(self, x, c): return c*_norm_pdf(x)* \ (_norm_cdf(-x)**(c-1.0)) def _logpdf(self, x, c): return log(c) + _norm_logpdf(x) + (c-1)*_norm_logcdf(-x) def _cdf(self, x, c): return 1.0-_norm_cdf(-x)**(c*1.0) def _ppf(self, q, c): return -norm.ppf(pow(1.0-q,1.0/c)) powernorm = powernorm_gen(name='powernorm', shapes="c") # R-distribution ( a general-purpose distribution with a # variety of shapes. # FIXME: PPF does not work. class rdist_gen(rv_continuous): """An R-distributed continuous random variable. %(before_notes)s Notes ----- The probability density function for `rdist` is:: rdist.pdf(x, c) = (1-x**2)**(c/2-1) / B(1/2, c/2) for ``-1 <= x <= 1``, ``c > 0``. %(example)s """ def _pdf(self, x, c): return np.power((1.0-x*x),c/2.0-1) / special.beta(0.5,c/2.0) def _cdf_skip(self, x, c): #error inspecial.hyp2f1 for some values see tickets 758, 759 return 0.5 + x/special.beta(0.5,c/2.0)* \ special.hyp2f1(0.5,1.0-c/2.0,1.5,x*x) def _munp(self, n, c): return (1-(n % 2))*special.beta((n+1.0)/2,c/2.0) rdist = rdist_gen(a=-1.0, b=1.0, name="rdist", shapes="c") # Rayleigh distribution (this is chi with df=2 and loc=0.0) # scale is the mode. class rayleigh_gen(rv_continuous): """A Rayleigh continuous random variable. %(before_notes)s Notes ----- The probability density function for `rayleigh` is:: rayleigh.pdf(r) = r * exp(-r**2/2) for ``x >= 0``. 
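    As noted above, `rayleigh` is `chi` with ``df=2``; a quick numerical
    check (sketch, public interface only):

    >>> from scipy.stats import rayleigh, chi
    >>> import numpy as np
    >>> x = np.linspace(0.5, 4., 8)
    >>> np.allclose(rayleigh.pdf(x), chi.pdf(x, 2))
    True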
%(example)s """ def _rvs(self): return chi.rvs(2,size=self._size) def _pdf(self, r): return r*exp(-r*r/2.0) def _cdf(self, r): return 1.0-exp(-r*r/2.0) def _ppf(self, q): return sqrt(-2*log(1-q)) def _stats(self): val = 4-pi return np.sqrt(pi/2), val/2, 2*(pi-3)*sqrt(pi)/val**1.5, \ 6*pi/val-16/val**2 def _entropy(self): return _EULER/2.0 + 1 - 0.5*log(2) rayleigh = rayleigh_gen(a=0.0, name="rayleigh") # Reciprocal Distribution class reciprocal_gen(rv_continuous): """A reciprocal continuous random variable. %(before_notes)s Notes ----- The probability density function for `reciprocal` is:: reciprocal.pdf(x, a, b) = 1 / (x*log(b/a)) for ``a <= x <= b``, ``a, b > 0``. %(example)s """ def _argcheck(self, a, b): self.a = a self.b = b self.d = log(b*1.0 / a) return (a > 0) & (b > 0) & (b > a) def _pdf(self, x, a, b): # argcheck should be called before _pdf return 1.0/(x*self.d) def _logpdf(self, x, a, b): return -log(x) - log(self.d) def _cdf(self, x, a, b): return (log(x)-log(a)) / self.d def _ppf(self, q, a, b): return a*pow(b*1.0/a,q) def _munp(self, n, a, b): return 1.0/self.d / n * (pow(b*1.0,n) - pow(a*1.0,n)) def _entropy(self,a,b): return 0.5*log(a*b)+log(log(b/a)) reciprocal = reciprocal_gen(name="reciprocal", shapes="a, b") # Rice distribution # FIXME: PPF does not work. class rice_gen(rv_continuous): """A Rice continuous random variable. %(before_notes)s Notes ----- The probability density function for `rice` is:: rice.pdf(x, b) = x * exp(-(x**2+b**2)/2) * I[0](x*b) for ``x > 0``, ``b > 0``. %(example)s """ def _pdf(self, x, b): return x*exp(-(x*x+b*b)/2.0)*special.i0(x*b) def _logpdf(self, x, b): return log(x) - (x*x + b*b)/2.0 + log(special.i0(x*b)) def _munp(self, n, b): nd2 = n/2.0 n1 = 1+nd2 b2 = b*b/2.0 return 2.0**(nd2)*exp(-b2)*special.gamma(n1) * \ special.hyp1f1(n1,1,b2) rice = rice_gen(a=0.0, name="rice", shapes="b") # Reciprocal Inverse Gaussian # FIXME: PPF does not work. class recipinvgauss_gen(rv_continuous): """A reciprocal inverse Gaussian continuous random variable. %(before_notes)s Notes ----- The probability density function for `recipinvgauss` is:: recipinvgauss.pdf(x, mu) = 1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2)) for ``x >= 0``. %(example)s """ def _rvs(self, mu): #added, taken from invgauss return 1.0/mtrand.wald(mu, 1.0, size=self._size) def _pdf(self, x, mu): return 1.0/sqrt(2*pi*x)*exp(-(1-mu*x)**2.0 / (2*x*mu**2.0)) def _logpdf(self, x, mu): return -(1-mu*x)**2.0 / (2*x*mu**2.0) - 0.5*log(2*pi*x) def _cdf(self, x, mu): trm1 = 1.0/mu - x trm2 = 1.0/mu + x isqx = 1.0/sqrt(x) return 1.0-_norm_cdf(isqx*trm1)-exp(2.0/mu)*_norm_cdf(-isqx*trm2) recipinvgauss = recipinvgauss_gen(a=0.0, name='recipinvgauss', shapes="mu") # Semicircular class semicircular_gen(rv_continuous): """A semicircular continuous random variable. %(before_notes)s Notes ----- The probability density function for `semicircular` is:: semicircular.pdf(x) = 2/pi * sqrt(1-x**2) for ``-1 <= x <= 1``. %(example)s """ def _pdf(self, x): return 2.0/pi*sqrt(1-x*x) def _cdf(self, x): return 0.5+1.0/pi*(x*sqrt(1-x*x) + arcsin(x)) def _stats(self): return 0, 0.25, 0, -1.0 def _entropy(self): return 0.64472988584940017414 semicircular = semicircular_gen(a=-1.0, b=1.0, name="semicircular") # Triangular class triang_gen(rv_continuous): """A triangular continuous random variable. %(before_notes)s Notes ----- The triangular distribution can be represented with an up-sloping line from ``loc`` to ``(loc + c*scale)`` and then downsloping for ``(loc + c*scale)`` to ``(loc+scale)``. 
The standard form is in the range [0, 1] with c the mode. The location parameter shifts the start to `loc`. The scale parameter changes the width from 1 to `scale`. %(example)s """ def _rvs(self, c): return mtrand.triangular(0, c, 1, self._size) def _argcheck(self, c): return (c >= 0) & (c <= 1) def _pdf(self, x, c): return where(x < c, 2*x/c, 2*(1-x)/(1-c)) def _cdf(self, x, c): return where(x < c, x*x/c, (x*x-2*x+c)/(c-1)) def _ppf(self, q, c): return where(q < c, sqrt(c*q), 1-sqrt((1-c)*(1-q))) def _stats(self, c): return (c+1.0)/3.0, (1.0-c+c*c)/18, sqrt(2)*(2*c-1)*(c+1)*(c-2) / \ (5*(1.0-c+c*c)**1.5), -3.0/5.0 def _entropy(self,c): return 0.5-log(2) triang = triang_gen(a=0.0, b=1.0, name="triang", shapes="c") # Truncated Exponential class truncexpon_gen(rv_continuous): """A truncated exponential continuous random variable. %(before_notes)s Notes ----- The probability density function for `truncexpon` is:: truncexpon.pdf(x, b) = exp(-x) / (1-exp(-b)) for ``0 < x < b``. %(example)s """ def _argcheck(self, b): self.b = b return (b > 0) def _pdf(self, x, b): return exp(-x)/(1-exp(-b)) def _logpdf(self, x, b): return -x - log(1-exp(-b)) def _cdf(self, x, b): return (1.0-exp(-x))/(1-exp(-b)) def _ppf(self, q, b): return -log(1-q+q*exp(-b)) def _munp(self, n, b): #wrong answer with formula, same as in continuous.pdf #return gam(n+1)-special.gammainc(1+n,b) if n == 1: return (1-(b+1)*exp(-b))/(-expm1(-b)) elif n == 2: return 2*(1-0.5*(b*b+2*b+2)*exp(-b))/(-expm1(-b)) else: #return generic for higher moments #return rv_continuous._mom1_sc(self,n, b) return self._mom1_sc(n, b) def _entropy(self, b): eB = exp(b) return log(eB-1)+(1+eB*(b-1.0))/(1.0-eB) truncexpon = truncexpon_gen(a=0.0, name='truncexpon', shapes="b") # Truncated Normal class truncnorm_gen(rv_continuous): """A truncated normal continuous random variable. %(before_notes)s Notes ----- The standard form of this distribution is a standard normal truncated to the range [a,b] --- notice that a and b are defined over the domain of the standard normal. To convert clip values for a specific mean and standard deviation, use:: a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std %(example)s """ def _argcheck(self, a, b): self.a = a self.b = b self._nb = _norm_cdf(b) self._na = _norm_cdf(a) self._delta = self._nb - self._na self._logdelta = log(self._delta) return (a != b) # All of these assume that _argcheck is called first # and no other thread calls _pdf before. def _pdf(self, x, a, b): return _norm_pdf(x) / self._delta def _logpdf(self, x, a, b): return _norm_logpdf(x) - self._logdelta def _cdf(self, x, a, b): return (_norm_cdf(x) - self._na) / self._delta def _ppf(self, q, a, b): return norm._ppf(q*self._nb + self._na*(1.0-q)) def _stats(self, a, b): nA, nB = self._na, self._nb d = nB - nA pA, pB = _norm_pdf(a), _norm_pdf(b) mu = (pA - pB) / d #correction sign mu2 = 1 + (a*pA - b*pB) / d - mu*mu return mu, mu2, None, None truncnorm = truncnorm_gen(name='truncnorm', shapes="a, b") # Tukey-Lambda # FIXME: RVS does not work. class tukeylambda_gen(rv_continuous): """A Tukey-Lamdba continuous random variable. %(before_notes)s Notes ----- A flexible distribution, able to represent and interpolate between the following distributions: - Cauchy (lam=-1) - logistic (lam=0.0) - approx Normal (lam=0.14) - u-shape (lam = 0.5) - uniform from -1 to 1 (lam = 1) %(example)s """ def _argcheck(self, lam): # lam in RR. 
return np.ones(np.shape(lam), dtype=bool) def _pdf(self, x, lam): Fx = asarray(special.tklmbda(x,lam)) Px = Fx**(lam-1.0) + (asarray(1-Fx))**(lam-1.0) Px = 1.0/asarray(Px) return where((lam <= 0) | (abs(x) < 1.0/asarray(lam)), Px, 0.0) def _cdf(self, x, lam): return special.tklmbda(x, lam) def _ppf(self, q, lam): q = q*1.0 vals1 = (q**lam - (1-q)**lam)/lam vals2 = log(q/(1-q)) return where((lam == 0)&(q==q), vals2, vals1) def _stats(self, lam): return 0, _tlvar(lam), 0, _tlkurt(lam) def _entropy(self, lam): def integ(p): return log(pow(p,lam-1)+pow(1-p,lam-1)) return integrate.quad(integ,0,1)[0] tukeylambda = tukeylambda_gen(name='tukeylambda', shapes="lam") # Uniform class uniform_gen(rv_continuous): """A uniform continuous random variable. This distribution is constant between `loc` and ``loc + scale``. %(before_notes)s %(example)s """ def _rvs(self): return mtrand.uniform(0.0,1.0,self._size) def _pdf(self, x): return 1.0*(x==x) def _cdf(self, x): return x def _ppf(self, q): return q def _stats(self): return 0.5, 1.0/12, 0, -1.2 def _entropy(self): return 0.0 uniform = uniform_gen(a=0.0, b=1.0, name='uniform') # Von-Mises # if x is not in range or loc is not in range it assumes they are angles # and converts them to [-pi, pi] equivalents. eps = numpy.finfo(float).eps class vonmises_gen(rv_continuous): """A Von Mises continuous random variable. %(before_notes)s Notes ----- If `x` is not in range or `loc` is not in range it assumes they are angles and converts them to [-pi, pi] equivalents. The probability density function for `vonmises` is:: vonmises.pdf(x, b) = exp(b*cos(x)) / (2*pi*I[0](b)) for ``-pi <= x <= pi``, ``b > 0``. %(example)s """ def _rvs(self, b): return mtrand.vonmises(0.0, b, size=self._size) def _pdf(self, x, b): return exp(b*cos(x)) / (2*pi*special.i0(b)) def _cdf(self, x, b): return vonmises_cython.von_mises_cdf(b,x) def _stats_skip(self, b): return 0, None, 0, None vonmises = vonmises_gen(name='vonmises', shapes="b") ## Wald distribution (Inverse Normal with shape parameter mu=1.0) class wald_gen(invgauss_gen): """A Wald continuous random variable. %(before_notes)s Notes ----- The probability density function for `wald` is:: wald.pdf(x, a) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x)) for ``x > 0``. %(example)s """ def _rvs(self): return mtrand.wald(1.0, 1.0, size=self._size) def _pdf(self, x): return invgauss._pdf(x, 1.0) def _logpdf(self, x): return invgauss._logpdf(x, 1.0) def _cdf(self, x): return invgauss._cdf(x, 1.0) def _stats(self): return 1.0, 1.0, 3.0, 15.0 wald = wald_gen(a=0.0, name="wald") # Wrapped Cauchy class wrapcauchy_gen(rv_continuous): """A wrapped Cauchy continuous random variable. %(before_notes)s Notes ----- The probability density function for `wrapcauchy` is:: wrapcauchy.pdf(x, c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x))) for ``0 <= x <= 2*pi``, ``0 < c < 1``. 
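    A round-trip check of the ppf against the piecewise cdf defined below
    (a sketch, with an arbitrary shape ``c = 0.5``):

    >>> from scipy.stats import wrapcauchy
    >>> import numpy as np
    >>> q = np.array([0.2, 0.5, 0.8])
    >>> np.allclose(wrapcauchy.cdf(wrapcauchy.ppf(q, 0.5), 0.5), q)
    True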
%(example)s """ def _argcheck(self, c): return (c > 0) & (c < 1) def _pdf(self, x, c): return (1.0-c*c)/(2*pi*(1+c*c-2*c*cos(x))) def _cdf(self, x, c): output = 0.0*x val = (1.0+c)/(1.0-c) c1 = x<pi c2 = 1-c1 xp = extract( c1,x) #valp = extract(c1,val) xn = extract( c2,x) #valn = extract(c2,val) if (any(xn)): valn = extract(c2, np.ones_like(x)*val) xn = 2*pi - xn yn = tan(xn/2.0) on = 1.0-1.0/pi*arctan(valn*yn) place(output, c2, on) if (any(xp)): valp = extract(c1, np.ones_like(x)*val) yp = tan(xp/2.0) op = 1.0/pi*arctan(valp*yp) place(output, c1, op) return output def _ppf(self, q, c): val = (1.0-c)/(1.0+c) rcq = 2*arctan(val*tan(pi*q)) rcmq = 2*pi-2*arctan(val*tan(pi*(1-q))) return where(q < 1.0/2, rcq, rcmq) def _entropy(self, c): return log(2*pi*(1-c*c)) wrapcauchy = wrapcauchy_gen(a=0.0, b=2*pi, name='wrapcauchy', shapes="c") ### DISCRETE DISTRIBUTIONS ### def entropy(pk, qk=None, base=None): """ Calculate the entropy of a distribution for given probability values. If only probabilities `pk` are given, the entropy is calculated as ``S = -sum(pk * log(pk), axis=0)``. If `qk` is not None, then compute a relative entropy ``S = sum(pk * log(pk / qk), axis=0)``. This routine will normalize `pk` and `qk` if they don't sum to 1. Parameters ---------- pk : sequence Defines the (discrete) distribution. ``pk[i]`` is the (possibly unnormalized) probability of event ``i``. qk : sequence, optional Sequence against which the relative entropy is computed. Should be in the same format as `pk`. base : float, optional The logarithmic base to use, defaults to ``e`` (natural logarithm). Returns ------- S : float The calculated entropy. """ pk = asarray(pk) pk = 1.0* pk / sum(pk, axis=0) if qk is None: vec = where(pk == 0, 0.0, pk*log(pk)) else: qk = asarray(qk) if len(qk) != len(pk): raise ValueError("qk and pk must have same length.") qk = 1.0*qk / sum(qk, axis=0) # If qk is zero anywhere, then unless pk is zero at those places # too, the relative entropy is infinite. if any(take(pk, nonzero(qk == 0.0), axis=0) != 0.0, 0): return inf vec = where (pk == 0, 0.0, -pk*log(pk / qk)) S = -sum(vec, axis=0) if base is not None: S /= log(base) return S ## Handlers for generic case where xk and pk are given def _drv_pmf(self, xk, *args): try: return self.P[xk] except KeyError: return 0.0 def _drv_cdf(self, xk, *args): indx = argmax((self.xk>xk),axis=-1)-1 return self.F[self.xk[indx]] def _drv_ppf(self, q, *args): indx = argmax((self.qvals>=q),axis=-1) return self.Finv[self.qvals[indx]] def _drv_nonzero(self, k, *args): return 1 def _drv_moment(self, n, *args): n = asarray(n) return sum(self.xk**n[newaxis,...] * self.pk, axis=0) def _drv_moment_gen(self, t, *args): t = asarray(t) return sum(exp(self.xk * t[newaxis,...]) * self.pk, axis=0) def _drv2_moment(self, n, *args): """Non-central moment of discrete distribution.""" #many changes, originally not even a return tot = 0.0 diff = 1e100 #pos = self.a pos = max(0.0, 1.0*self.a) count = 0 #handle cases with infinite support ulimit = max(1000, (min(self.b,1000) + max(self.a,-1000))/2.0 ) llimit = min(-1000, (min(self.b,1000) + max(self.a,-1000))/2.0 ) while (pos <= self.b) and ((pos <= ulimit) or \ (diff > self.moment_tol)): diff = np.power(pos, n) * self.pmf(pos,*args) # use pmf because _pmf does not check support in randint # and there might be problems ? 
with correct self.a, self.b at this stage tot += diff pos += self.inc count += 1 if self.a < 0: #handle case when self.a = -inf diff = 1e100 pos = -self.inc while (pos >= self.a) and ((pos >= llimit) or \ (diff > self.moment_tol)): diff = np.power(pos, n) * self.pmf(pos,*args) #using pmf instead of _pmf, see above tot += diff pos -= self.inc count += 1 return tot def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm b = self.invcdf_b a = self.invcdf_a if isinf(b): # Be sure ending point is > q b = max(100*q,10) while 1: if b >= self.b: qb = 1.0; break qb = self._cdf(b,*args) if (qb < q): b += 10 else: break else: qb = 1.0 if isinf(a): # be sure starting point < q a = min(-100*q,-10) while 1: if a <= self.a: qb = 0.0; break qa = self._cdf(a,*args) if (qa > q): a -= 10 else: break else: qa = self._cdf(a, *args) while 1: if (qa == q): return a if (qb == q): return b if b == a+1: #testcase: return wrong number at lower index #python -c "from scipy.stats import zipf;print zipf.ppf(0.01,2)" wrong #python -c "from scipy.stats import zipf;print zipf.ppf([0.01,0.61,0.77,0.83],2)" #python -c "from scipy.stats import logser;print logser.ppf([0.1,0.66, 0.86,0.93],0.6)" if qa > q: return a else: return b c = int((a+b)/2.0) qc = self._cdf(c, *args) if (qc < q): a = c qa = qc elif (qc > q): b = c qb = qc else: return c def reverse_dict(dict): newdict = {} sorted_keys = copy(dict.keys()) sorted_keys.sort() for key in sorted_keys[::-1]: newdict[dict[key]] = key return newdict def make_dict(keys, values): d = {} for key, value in zip(keys, values): d[key] = value return d # Must over-ride one of _pmf or _cdf or pass in # x_k, p(x_k) lists in initialization class rv_discrete(rv_generic): """ A generic discrete random variable class meant for subclassing. `rv_discrete` is a base class to construct specific distribution classes and instances from for discrete random variables. rv_discrete can be used to construct an arbitrary distribution with defined by a list of support points and the corresponding probabilities. Parameters ---------- a : float, optional Lower bound of the support of the distribution, default: 0 b : float, optional Upper bound of the support of the distribution, default: plus infinity moment_tol : float, optional The tolerance for the generic calculation of moments values : tuple of two array_like (xk, pk) where xk are points (integers) with positive probability pk with sum(pk) = 1 inc : integer increment for the support of the distribution, default: 1 other values have not been tested badvalue : object, optional The value in (masked) arrays that indicates a value that should be ignored. name : str, optional The name of the instance. This string is used to construct the default example for distributions. longname : str, optional This string is used as part of the first line of the docstring returned when a subclass has no docstring of its own. Note: `longname` exists for backwards compatibility, do not use for new subclasses. shapes : str, optional The shape of the distribution. For example ``"m, n"`` for a distribution that takes two integers as the first two arguments for all its methods. extradoc : str, optional This string is used as the last part of the docstring returned when a subclass has no docstring of its own. Note: `extradoc` exists for backwards compatibility, do not use for new subclasses. 
Methods ------- generic.rvs(<shape(s)>, loc=0, size=1) random variates generic.pmf(x, <shape(s)>, loc=0) probability mass function logpmf(x, <shape(s)>, loc=0) log of the probability density function generic.cdf(x, <shape(s)>, loc=0) cumulative density function generic.logcdf(x, <shape(s)>, loc=0) log of the cumulative density function generic.sf(x, <shape(s)>, loc=0) survival function (1-cdf --- sometimes more accurate) generic.logsf(x, <shape(s)>, loc=0, scale=1) log of the survival function generic.ppf(q, <shape(s)>, loc=0) percent point function (inverse of cdf --- percentiles) generic.isf(q, <shape(s)>, loc=0) inverse survival function (inverse of sf) generic.moment(n, <shape(s)>, loc=0) non-central n-th moment of the distribution. May not work for array arguments. generic.stats(<shape(s)>, loc=0, moments='mv') mean('m', axis=0), variance('v'), skew('s'), and/or kurtosis('k') generic.entropy(<shape(s)>, loc=0) entropy of the RV generic.expect(func=None, args=(), loc=0, lb=None, ub=None, conditional=False) Expected value of a function with respect to the distribution. Additional kwd arguments passed to integrate.quad generic.median(<shape(s)>, loc=0) Median of the distribution. generic.mean(<shape(s)>, loc=0) Mean of the distribution. generic.std(<shape(s)>, loc=0) Standard deviation of the distribution. generic.var(<shape(s)>, loc=0) Variance of the distribution. generic.interval(alpha, <shape(s)>, loc=0) Interval that with `alpha` percent probability contains a random realization of this distribution. generic(<shape(s)>, loc=0) calling a distribution instance returns a frozen distribution Notes ----- You can construct an arbitrary discrete rv where ``P{X=xk} = pk`` by passing to the rv_discrete initialization method (through the values=keyword) a tuple of sequences (xk, pk) which describes only those values of X (xk) that occur with nonzero probability (pk). To create a new discrete distribution, we would do the following:: class poisson_gen(rv_continuous): #"Poisson distribution" def _pmf(self, k, mu): ... and create an instance:: poisson = poisson_gen(name="poisson", shapes="mu", longname='A Poisson') The docstring can be created from a template. Alternatively, the object may be called (as a function) to fix the shape and location parameters returning a "frozen" discrete RV object:: myrv = generic(<shape(s)>, loc=0) - frozen RV object with the same methods but holding the given shape and location fixed. Examples -------- Custom made discrete distribution: >>> import matplotlib.pyplot as plt >>> from scipy import stats >>> xk = np.arange(7) >>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.1, 0.1) >>> custm = stats.rv_discrete(name='custm', values=(xk, pk)) >>> h = plt.plot(xk, custm.pmf(xk)) Random number generation: >>> R = custm.rvs(size=100) Display frozen pmf: >>> numargs = generic.numargs >>> [ <shape(s)> ] = ['Replace with resonable value', ]*numargs >>> rv = generic(<shape(s)>) >>> x = np.arange(0, np.min(rv.dist.b, 3)+1) >>> h = plt.plot(x, rv.pmf(x)) Here, ``rv.dist.b`` is the right endpoint of the support of ``rv.dist``. 
Check accuracy of cdf and ppf: >>> prb = generic.cdf(x, <shape(s)>) >>> h = plt.semilogy(np.abs(x-generic.ppf(prb, <shape(s)>))+1e-20) """ def __init__(self, a=0, b=inf, name=None, badvalue=None, moment_tol=1e-8,values=None,inc=1,longname=None, shapes=None, extradoc=None): super(rv_generic,self).__init__() if badvalue is None: badvalue = nan if name is None: name = 'Distribution' self.badvalue = badvalue self.a = a self.b = b self.invcdf_a = a # what's the difference to self.a, .b self.invcdf_b = b self.name = name self.moment_tol = moment_tol self.inc = inc self._cdfvec = sgf(self._cdfsingle,otypes='d') self.return_integers = 1 self.vecentropy = vectorize(self._entropy) self.shapes = shapes self.extradoc = extradoc if values is not None: self.xk, self.pk = values self.return_integers = 0 indx = argsort(ravel(self.xk)) self.xk = take(ravel(self.xk),indx, 0) self.pk = take(ravel(self.pk),indx, 0) self.a = self.xk[0] self.b = self.xk[-1] self.P = make_dict(self.xk, self.pk) self.qvals = numpy.cumsum(self.pk,axis=0) self.F = make_dict(self.xk, self.qvals) self.Finv = reverse_dict(self.F) self._ppf = instancemethod(sgf(_drv_ppf,otypes='d'), self, rv_discrete) self._pmf = instancemethod(sgf(_drv_pmf,otypes='d'), self, rv_discrete) self._cdf = instancemethod(sgf(_drv_cdf,otypes='d'), self, rv_discrete) self._nonzero = instancemethod(_drv_nonzero, self, rv_discrete) self.generic_moment = instancemethod(_drv_moment, self, rv_discrete) self.moment_gen = instancemethod(_drv_moment_gen, self, rv_discrete) self.numargs=0 else: cdf_signature = inspect.getargspec(self._cdf.im_func) numargs1 = len(cdf_signature[0]) - 2 pmf_signature = inspect.getargspec(self._pmf.im_func) numargs2 = len(pmf_signature[0]) - 2 self.numargs = max(numargs1, numargs2) #nin correction needs to be after we know numargs #correct nin for generic moment vectorization self.vec_generic_moment = sgf(_drv2_moment, otypes='d') self.vec_generic_moment.nin = self.numargs + 2 self.generic_moment = instancemethod(self.vec_generic_moment, self, rv_discrete) #correct nin for ppf vectorization _vppf = sgf(_drv2_ppfsingle,otypes='d') _vppf.nin = self.numargs + 2 # +1 is for self self._vecppf = instancemethod(_vppf, self, rv_discrete) #now that self.numargs is defined, we can adjust nin self._cdfvec.nin = self.numargs + 1 # generate docstring for subclass instances if longname is None: if name[0] in ['aeiouAEIOU']: hstr = "An " else: hstr = "A " longname = hstr + name if self.__doc__ is None: self._construct_default_doc(longname=longname, extradoc=extradoc) else: self._construct_doc() ## This only works for old-style classes... 
# self.__class__.__doc__ = self.__doc__ def _construct_default_doc(self, longname=None, extradoc=None): """Construct instance docstring from the rv_discrete template.""" if extradoc is None: extradoc = '' if extradoc.startswith('\n\n'): extradoc = extradoc[2:] self.__doc__ = ''.join(['%s discrete random variable.'%longname, '\n\n%(before_notes)s\n', docheaders['notes'], extradoc, '\n%(example)s']) self._construct_doc() def _construct_doc(self): """Construct the instance docstring with string substitutions.""" tempdict = docdict_discrete.copy() tempdict['name'] = self.name or 'distname' tempdict['shapes'] = self.shapes or '' if self.shapes is None: # remove shapes from call parameters if there are none for item in ['callparams', 'default', 'before_notes']: tempdict[item] = tempdict[item].replace(\ "\n%(shapes)s : array_like\n shape parameters", "") for i in range(2): if self.shapes is None: # necessary because we use %(shapes)s in two forms (w w/o ", ") self.__doc__ = self.__doc__.replace("%(shapes)s, ", "") self.__doc__ = doccer.docformat(self.__doc__, tempdict) def _rvs(self, *args): return self._ppf(mtrand.random_sample(self._size),*args) def _nonzero(self, k, *args): return floor(k)==k def _argcheck(self, *args): cond = 1 for arg in args: cond &= (arg > 0) return cond def _pmf(self, k, *args): return self._cdf(k,*args) - self._cdf(k-1,*args) def _logpmf(self, k, *args): return log(self._pmf(k, *args)) def _cdfsingle(self, k, *args): m = arange(int(self.a),k+1) return sum(self._pmf(m,*args),axis=0) def _cdf(self, x, *args): k = floor(x) return self._cdfvec(k,*args) def _logcdf(self, x, *args): return log(self._cdf(x, *args)) def _sf(self, x, *args): return 1.0-self._cdf(x,*args) def _logsf(self, x, *args): return log(self._sf(x, *args)) def _ppf(self, q, *args): return self._vecppf(q, *args) def _isf(self, q, *args): return self._ppf(1-q,*args) def _stats(self, *args): return None, None, None, None def _munp(self, n, *args): return self.generic_moment(n, *args) def rvs(self, *args, **kwargs): """ Random variates of given type. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). size : int or tuple of ints, optional Defining number of random variates (default=1). Returns ------- rvs : array_like Random variates of given `size`. """ kwargs['discrete'] = True return super(rv_discrete, self).rvs(*args, **kwargs) def pmf(self, k,*args, **kwds): """ Probability mass function at k of the given RV. Parameters ---------- k : array_like quantiles arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional Location parameter (default=0). Returns ------- pmf : array_like Probability mass function evaluated at k """ loc = kwds.get('loc') args, loc = self._fix_loc(args, loc) k,loc = map(asarray,(k,loc)) args = tuple(map(asarray,args)) k = asarray((k-loc)) cond0 = self._argcheck(*args) cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k,*args) cond = cond0 & cond1 output = zeros(shape(cond),'d') place(output,(1-cond0) + np.isnan(k),self.badvalue) if any(cond): goodargs = argsreduce(cond, *((k,)+args)) place(output,cond,self._pmf(*goodargs)) if output.ndim == 0: return output[()] return output def logpmf(self, k,*args, **kwds): """ Log of the probability mass function at k of the given RV. 
Parameters ---------- k : array_like Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter. Default is 0. Returns ------- logpmf : array_like Log of the probability mass function evaluated at k. """ loc = kwds.get('loc') args, loc = self._fix_loc(args, loc) k,loc = map(asarray,(k,loc)) args = tuple(map(asarray,args)) k = asarray((k-loc)) cond0 = self._argcheck(*args) cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k,*args) cond = cond0 & cond1 output = empty(shape(cond),'d') output.fill(NINF) place(output,(1-cond0) + np.isnan(k),self.badvalue) if any(cond): goodargs = argsreduce(cond, *((k,)+args)) place(output,cond,self._logpmf(*goodargs)) if output.ndim == 0: return output[()] return output def cdf(self, k, *args, **kwds): """ Cumulative distribution function at k of the given RV. Parameters ---------- k : array_like, int Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- cdf : array_like Cumulative distribution function evaluated at k. """ loc = kwds.get('loc') args, loc = self._fix_loc(args, loc) k,loc = map(asarray,(k,loc)) args = tuple(map(asarray,args)) k = asarray((k-loc)) cond0 = self._argcheck(*args) cond1 = (k >= self.a) & (k < self.b) cond2 = (k >= self.b) cond = cond0 & cond1 output = zeros(shape(cond),'d') place(output,(1-cond0) + np.isnan(k),self.badvalue) place(output,cond2*(cond0==cond0), 1.0) if any(cond): goodargs = argsreduce(cond, *((k,)+args)) place(output,cond,self._cdf(*goodargs)) if output.ndim == 0: return output[()] return output def logcdf(self, k, *args, **kwds): """ Log of the cumulative distribution function at k of the given RV Parameters ---------- k : array_like, int Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- logcdf : array_like Log of the cumulative distribution function evaluated at k. """ loc = kwds.get('loc') args, loc = self._fix_loc(args, loc) k,loc = map(asarray,(k,loc)) args = tuple(map(asarray,args)) k = asarray((k-loc)) cond0 = self._argcheck(*args) cond1 = (k >= self.a) & (k < self.b) cond2 = (k >= self.b) cond = cond0 & cond1 output = empty(shape(cond),'d') output.fill(NINF) place(output,(1-cond0) + np.isnan(k),self.badvalue) place(output,cond2*(cond0==cond0), 0.0) if any(cond): goodargs = argsreduce(cond, *((k,)+args)) place(output,cond,self._logcdf(*goodargs)) if output.ndim == 0: return output[()] return output def sf(self,k,*args,**kwds): """ Survival function (1-cdf) at k of the given RV. Parameters ---------- k : array_like Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- sf : array_like Survival function evaluated at k. 
""" loc= kwds.get('loc') args, loc = self._fix_loc(args, loc) k,loc = map(asarray,(k,loc)) args = tuple(map(asarray,args)) k = asarray(k-loc) cond0 = self._argcheck(*args) cond1 = (k >= self.a) & (k <= self.b) cond2 = (k < self.a) & cond0 cond = cond0 & cond1 output = zeros(shape(cond),'d') place(output,(1-cond0) + np.isnan(k),self.badvalue) place(output,cond2,1.0) if any(cond): goodargs = argsreduce(cond, *((k,)+args)) place(output,cond,self._sf(*goodargs)) if output.ndim == 0: return output[()] return output def logsf(self,k,*args,**kwds): """ Log of the survival function (1-cdf) at k of the given RV Parameters ---------- k : array_like Quantiles. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- sf : array_like Survival function evaluated at k. """ loc= kwds.get('loc') args, loc = self._fix_loc(args, loc) k,loc = map(asarray,(k,loc)) args = tuple(map(asarray,args)) k = asarray(k-loc) cond0 = self._argcheck(*args) cond1 = (k >= self.a) & (k <= self.b) cond2 = (k < self.a) & cond0 cond = cond0 & cond1 output = empty(shape(cond),'d') output.fill(NINF) place(output,(1-cond0) + np.isnan(k),self.badvalue) place(output,cond2,0.0) if any(cond): goodargs = argsreduce(cond, *((k,)+args)) place(output,cond,self._logsf(*goodargs)) if output.ndim == 0: return output[()] return output def ppf(self,q,*args,**kwds): """ Percent point function (inverse of cdf) at q of the given RV Parameters ---------- q : array_like Lower tail probability. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). scale : array_like, optional Scale parameter (default=1). Returns ------- k : array_like Quantile corresponding to the lower tail probability, q. """ loc = kwds.get('loc') args, loc = self._fix_loc(args, loc) q,loc = map(asarray,(q,loc)) args = tuple(map(asarray,args)) cond0 = self._argcheck(*args) & (loc == loc) cond1 = (q > 0) & (q < 1) cond2 = (q==1) & cond0 cond = cond0 & cond1 output = valarray(shape(cond),value=self.badvalue,typecode='d') #output type 'd' to handle nin and inf place(output,(q==0)*(cond==cond), self.a-1) place(output,cond2,self.b) if any(cond): goodargs = argsreduce(cond, *((q,)+args+(loc,))) loc, goodargs = goodargs[-1], goodargs[:-1] place(output,cond,self._ppf(*goodargs) + loc) if output.ndim == 0: return output[()] return output def isf(self,q,*args,**kwds): """ Inverse survival function (1-sf) at q of the given RV. Parameters ---------- q : array_like Upper tail probability. arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). Returns ------- k : array_like Quantile corresponding to the upper tail probability, q. 
""" loc = kwds.get('loc') args, loc = self._fix_loc(args, loc) q,loc = map(asarray,(q,loc)) args = tuple(map(asarray,args)) cond0 = self._argcheck(*args) & (loc == loc) cond1 = (q > 0) & (q < 1) cond2 = (q==1) & cond0 cond = cond0 & cond1 #old: ## output = valarray(shape(cond),value=self.b,typecode='d') ## #typecode 'd' to handle nin and inf ## place(output,(1-cond0)*(cond1==cond1), self.badvalue) ## place(output,cond2,self.a-1) #same problem as with ppf # copied from ppf and changed output = valarray(shape(cond),value=self.badvalue,typecode='d') #output type 'd' to handle nin and inf place(output,(q==0)*(cond==cond), self.b) place(output,cond2,self.a-1) # call place only if at least 1 valid argument if any(cond): goodargs = argsreduce(cond, *((q,)+args+(loc,))) loc, goodargs = goodargs[-1], goodargs[:-1] place(output,cond,self._isf(*goodargs) + loc) #PB same as ticket 766 if output.ndim == 0: return output[()] return output def stats(self, *args, **kwds): """ Some statistics of the given discrete RV. Parameters ---------- arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional Location parameter (default=0). moments : string, optional Composed of letters ['mvsk'] defining which moments to compute: - 'm' = mean, - 'v' = variance, - 's' = (Fisher's) skew, - 'k' = (Fisher's) kurtosis. The default is'mv'. Returns ------- stats : sequence of requested moments. """ loc,moments=map(kwds.get,['loc','moments']) N = len(args) if N > self.numargs: if N == self.numargs + 1 and loc is None: # loc is given without keyword loc = args[-1] if N == self.numargs + 2 and moments is None: # loc, scale, and moments loc, moments = args[-2:] args = args[:self.numargs] if loc is None: loc = 0.0 if moments is None: moments = 'mv' loc = asarray(loc) args = tuple(map(asarray,args)) cond = self._argcheck(*args) & (loc==loc) signature = inspect.getargspec(self._stats.im_func) if (signature[2] is not None) or ('moments' in signature[0]): mu, mu2, g1, g2 = self._stats(*args,**{'moments':moments}) else: mu, mu2, g1, g2 = self._stats(*args) if g1 is None: mu3 = None else: mu3 = g1*(mu2**1.5) default = valarray(shape(cond), self.badvalue) output = [] # Use only entries that are valid in calculation goodargs = argsreduce(cond, *(args+(loc,))) loc, goodargs = goodargs[-1], goodargs[:-1] if 'm' in moments: if mu is None: mu = self._munp(1.0,*goodargs) out0 = default.copy() place(out0,cond,mu+loc) output.append(out0) if 'v' in moments: if mu2 is None: mu2p = self._munp(2.0,*goodargs) if mu is None: mu = self._munp(1.0,*goodargs) mu2 = mu2p - mu*mu out0 = default.copy() place(out0,cond,mu2) output.append(out0) if 's' in moments: if g1 is None: mu3p = self._munp(3.0,*goodargs) if mu is None: mu = self._munp(1.0,*goodargs) if mu2 is None: mu2p = self._munp(2.0,*goodargs) mu2 = mu2p - mu*mu mu3 = mu3p - 3*mu*mu2 - mu**3 g1 = mu3 / mu2**1.5 out0 = default.copy() place(out0,cond,g1) output.append(out0) if 'k' in moments: if g2 is None: mu4p = self._munp(4.0,*goodargs) if mu is None: mu = self._munp(1.0,*goodargs) if mu2 is None: mu2p = self._munp(2.0,*goodargs) mu2 = mu2p - mu*mu if mu3 is None: mu3p = self._munp(3.0,*goodargs) mu3 = mu3p - 3*mu*mu2 - mu**3 mu4 = mu4p - 4*mu*mu3 - 6*mu*mu*mu2 - mu**4 g2 = mu4 / mu2**2.0 - 3.0 out0 = default.copy() place(out0,cond,g2) output.append(out0) if len(output) == 1: return output[0] else: return tuple(output) def moment(self, n, *args, **kwds): # Non-central moments in standard form. 
""" n'th non-central moment of the distribution Parameters ---------- n : int, n>=1 order of moment arg1, arg2, arg3,...: float The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : float, optional location parameter (default=0) scale : float, optional scale parameter (default=1) """ loc = kwds.get('loc', 0) scale = kwds.get('scale', 1) if not (self._argcheck(*args) and (scale > 0)): return nan if (floor(n) != n): raise ValueError("Moment must be an integer.") if (n < 0): raise ValueError("Moment must be positive.") mu, mu2, g1, g2 = None, None, None, None if (n > 0) and (n < 5): signature = inspect.getargspec(self._stats.im_func) if (signature[2] is not None) or ('moments' in signature[0]): dict = {'moments':{1:'m',2:'v',3:'vs',4:'vk'}[n]} else: dict = {} mu, mu2, g1, g2 = self._stats(*args,**dict) val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args) # Convert to transformed X = L + S*Y # so E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n,k)*(S/L)^k E[Y^k],k=0...n) if loc == 0: return scale**n * val else: result = 0 fac = float(scale) / float(loc) for k in range(n): valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args) result += comb(n,k,exact=True)*(fac**k) * valk result += fac**n * val return result * loc**n def freeze(self, *args, **kwds): return rv_frozen(self, *args, **kwds) def _entropy(self, *args): if hasattr(self,'pk'): return entropy(self.pk) else: mu = int(self.stats(*args, **{'moments':'m'})) val = self.pmf(mu,*args) if (val==0.0): ent = 0.0 else: ent = -val*log(val) k = 1 term = 1.0 while (abs(term) > eps): val = self.pmf(mu+k,*args) if val == 0.0: term = 0.0 else: term = -val * log(val) val = self.pmf(mu-k,*args) if val != 0.0: term -= val*log(val) k += 1 ent += term return ent def entropy(self, *args, **kwds): loc= kwds.get('loc') args, loc = self._fix_loc(args, loc) loc = asarray(loc) args = map(asarray,args) cond0 = self._argcheck(*args) & (loc==loc) output = zeros(shape(cond0),'d') place(output,(1-cond0),self.badvalue) goodargs = argsreduce(cond0, *args) place(output,cond0,self.vecentropy(*goodargs)) return output def __call__(self, *args, **kwds): return self.freeze(*args,**kwds) def expect(self, func=None, args=(), loc=0, lb=None, ub=None, conditional=False): """calculate expected value of a function with respect to the distribution for discrete distribution Parameters ---------- fn : function (default: identity mapping) Function for which sum is calculated. Takes only one argument. args : tuple argument (parameters) of the distribution optional keyword parameters lb, ub : numbers lower and upper bound for integration, default is set to the support of the distribution, lb and ub are inclusive (ul<=k<=ub) conditional : boolean (False) If true then the expectation is corrected by the conditional probability of the integration interval. The return value is the expectation of the function, conditional on being in the given interval (k such that ul<=k<=ub). Returns ------- expected value : float Notes ----- * function is not vectorized * accuracy: uses self.moment_tol as stopping criterium for heavy tailed distribution e.g. 
zipf(4), accuracy for mean, variance in example is only 1e-5, increasing precision (moment_tol) makes zipf very slow * suppnmin=100 internal parameter for minimum number of points to evaluate could be added as keyword parameter, to evaluate functions with non-monotonic shapes, points include integers in (-suppnmin, suppnmin) * uses maxcount=1000 limits the number of points that are evaluated to break loop for infinite sums (a maximum of suppnmin+1000 positive plus suppnmin+1000 negative integers are evaluated) """ #moment_tol = 1e-12 # increase compared to self.moment_tol, # too slow for only small gain in precision for zipf #avoid endless loop with unbound integral, eg. var of zipf(2) maxcount = 1000 suppnmin = 100 #minimum number of points to evaluate (+ and -) if func is None: def fun(x): #loc and args from outer scope return (x+loc)*self._pmf(x, *args) else: def fun(x): #loc and args from outer scope return func(x+loc)*self._pmf(x, *args) # used pmf because _pmf does not check support in randint # and there might be problems(?) with correct self.a, self.b at this stage # maybe not anymore, seems to work now with _pmf self._argcheck(*args) # (re)generate scalar self.a and self.b if lb is None: lb = (self.a) else: lb = lb - loc #convert bound for standardized distribution if ub is None: ub = (self.b) else: ub = ub - loc #convert bound for standardized distribution if conditional: if np.isposinf(ub)[()]: #work around bug: stats.poisson.sf(stats.poisson.b, 2) is nan invfac = 1 - self.cdf(lb-1,*args) else: invfac = 1 - self.cdf(lb-1,*args) - self.sf(ub,*args) else: invfac = 1.0 tot = 0.0 low, upp = self._ppf(0.001, *args), self._ppf(0.999, *args) low = max(min(-suppnmin, low), lb) upp = min(max(suppnmin, upp), ub) supp = np.arange(low, upp+1, self.inc) #check limits #print 'low, upp', low, upp tot = np.sum(fun(supp)) diff = 1e100 pos = upp + self.inc count = 0 #handle cases with infinite support while (pos <= ub) and (diff > self.moment_tol) and count <= maxcount: diff = fun(pos) tot += diff pos += self.inc count += 1 if self.a < 0: #handle case when self.a = -inf diff = 1e100 pos = low - self.inc while (pos >= lb) and (diff > self.moment_tol) and count <= maxcount: diff = fun(pos) tot += diff pos -= self.inc count += 1 if count > maxcount: # fixme: replace with proper warning print 'sum did not converge' return tot/invfac # Binomial class binom_gen(rv_discrete): """A binomial discrete random variable. %(before_notes)s Notes ----- The probability mass function for `binom` is:: binom.pmf(k) = choose(n,k) * p**k * (1-p)**(n-k) for ``k`` in ``{0,1,...,n}``. `binom` takes ``n`` and ``p`` as shape parameters. 
%(example)s """ def _rvs(self, n, p): return mtrand.binomial(n,p,self._size) def _argcheck(self, n, p): self.b = n return (n>=0) & (p >= 0) & (p <= 1) def _logpmf(self, x, n, p): k = floor(x) combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1))) return combiln + k*np.log(p) + (n-k)*np.log(1-p) def _pmf(self, x, n, p): return exp(self._logpmf(x, n, p)) def _cdf(self, x, n, p): k = floor(x) vals = special.bdtr(k,n,p) return vals def _sf(self, x, n, p): k = floor(x) return special.bdtrc(k,n,p) def _ppf(self, q, n, p): vals = ceil(special.bdtrik(q,n,p)) vals1 = vals-1 temp = special.bdtr(vals1,n,p) return where(temp >= q, vals1, vals) def _stats(self, n, p): q = 1.0-p mu = n * p var = n * p * q g1 = (q-p) / sqrt(n*p*q) g2 = (1.0-6*p*q)/(n*p*q) return mu, var, g1, g2 def _entropy(self, n, p): k = r_[0:n+1] vals = self._pmf(k,n,p) lvals = where(vals==0,0.0,log(vals)) return -sum(vals*lvals,axis=0) binom = binom_gen(name='binom',shapes="n, p") # Bernoulli distribution class bernoulli_gen(binom_gen): """A Bernoulli discrete random variable. %(before_notes)s Notes ----- The probability mass function for `bernoulli` is:: bernoulli.pmf(k) = 1-p if k = 0 = p if k = 1 for ``k`` in ``{0,1}``. `bernoulli` takes ``p`` as shape parameter. %(example)s """ def _rvs(self, pr): return binom_gen._rvs(self, 1, pr) def _argcheck(self, pr): return (pr >=0 ) & (pr <= 1) def _logpmf(self, x, pr): return binom._logpmf(x, 1, pr) def _pmf(self, x, pr): return binom._pmf(x, 1, pr) def _cdf(self, x, pr): return binom._cdf(x, 1, pr) def _sf(self, x, pr): return binom._sf(x, 1, pr) def _ppf(self, q, pr): return binom._ppf(q, 1, pr) def _stats(self, pr): return binom._stats(1, pr) def _entropy(self, pr): return -pr*log(pr)-(1-pr)*log(1-pr) bernoulli = bernoulli_gen(b=1,name='bernoulli',shapes="p") # Negative binomial class nbinom_gen(rv_discrete): """A negative binomial discrete random variable. %(before_notes)s Notes ----- The probability mass function for `nbinom` is:: nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k for ``k >= 0``. `nbinom` takes ``n`` and ``p`` as shape parameters. %(example)s """ def _rvs(self, n, p): return mtrand.negative_binomial(n, p, self._size) def _argcheck(self, n, p): return (n >= 0) & (p >= 0) & (p <= 1) def _pmf(self, x, n, p): coeff = exp(gamln(n+x) - gamln(x+1) - gamln(n)) return coeff * power(p,n) * power(1-p,x) def _logpmf(self, x, n, p): coeff = gamln(n+x) - gamln(x+1) - gamln(n) return coeff + n*log(p) + x*log(1-p) def _cdf(self, x, n, p): k = floor(x) return special.betainc(n, k+1, p) def _sf_skip(self, x, n, p): #skip because special.nbdtrc doesn't work for 0<n<1 k = floor(x) return special.nbdtrc(k,n,p) def _ppf(self, q, n, p): vals = ceil(special.nbdtrik(q,n,p)) vals1 = (vals-1).clip(0.0, np.inf) temp = self._cdf(vals1,n,p) return where(temp >= q, vals1, vals) def _stats(self, n, p): Q = 1.0 / p P = Q - 1.0 mu = n*P var = n*P*Q g1 = (Q+P)/sqrt(n*P*Q) g2 = (1.0 + 6*P*Q) / (n*P*Q) return mu, var, g1, g2 nbinom = nbinom_gen(name='nbinom', shapes="n, p") ## Geometric distribution class geom_gen(rv_discrete): """A geometric discrete random variable. %(before_notes)s Notes ----- The probability mass function for `geom` is:: geom.pmf(k) = (1-p)**(k-1)*p for ``k >= 1``. `geom` takes ``p`` as shape parameter. 
%(example)s """ def _rvs(self, p): return mtrand.geometric(p,size=self._size) def _argcheck(self, p): return (p<=1) & (p >= 0) def _pmf(self, k, p): return (1-p)**(k-1) * p def _logpmf(self, k, p): return (k-1)*log(1-p) + p def _cdf(self, x, p): k = floor(x) return (1.0-(1.0-p)**k) def _sf(self, x, p): k = floor(x) return (1.0-p)**k def _ppf(self, q, p): vals = ceil(log(1.0-q)/log(1-p)) temp = 1.0-(1.0-p)**(vals-1) return where((temp >= q) & (vals > 0), vals-1, vals) def _stats(self, p): mu = 1.0/p qr = 1.0-p var = qr / p / p g1 = (2.0-p) / sqrt(qr) g2 = numpy.polyval([1,-6,6],p)/(1.0-p) return mu, var, g1, g2 geom = geom_gen(a=1,name='geom', longname="A geometric", shapes="p") ## Hypergeometric distribution class hypergeom_gen(rv_discrete): """A hypergeometric discrete random variable. The hypergeometric distribution models drawing objects from a bin. M is the total number of objects, n is total number of Type I objects. The random variate represents the number of Type I objects in N drawn without replacement from the total population. %(before_notes)s Notes ----- The probability mass function is defined as:: pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N), for N - (M-n) <= k <= min(m,N) Examples -------- >>> from scipy.stats import hypergeom Suppose we have a collection of 20 animals, of which 7 are dogs. Then if we want to know the probability of finding a given number of dogs if we choose at random 12 of the 20 animals, we can initialize a frozen distribution and plot the probability mass function: >>> [M, n, N] = [20, 7, 12] >>> rv = hypergeom(M, n, N) >>> x = np.arange(0, n+1) >>> pmf_dogs = rv.pmf(x) >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.plot(x, pmf_dogs, 'bo') >>> ax.vlines(x, 0, pmf_dogs, lw=2) >>> ax.set_xlabel('# of dogs in our group of chosen animals') >>> ax.set_ylabel('hypergeom PMF') >>> plt.show() Instead of using a frozen distribution we can also use `hypergeom` methods directly. 
To for example obtain the cumulative distribution function, use: >>> prb = hypergeom.cdf(x, M, n, N) And to generate random numbers: >>> R = hypergeom.rvs(M, n, N, size=10) """ def _rvs(self, M, n, N): return mtrand.hypergeometric(n,M-n,N,size=self._size) def _argcheck(self, M, n, N): cond = rv_discrete._argcheck(self,M,n,N) cond &= (n <= M) & (N <= M) self.a = N-(M-n) self.b = min(n,N) return cond def _logpmf(self, k, M, n, N): tot, good = M, n bad = tot - good return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \ - gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \ + gamln(N+1) def _pmf(self, k, M, n, N): #same as the following but numerically more precise #return comb(good,k) * comb(bad,N-k) / comb(tot,N) return exp(self._logpmf(k, M, n, N)) def _stats(self, M, n, N): tot, good = M, n n = good*1.0 m = (tot-good)*1.0 N = N*1.0 tot = m+n p = n/tot mu = N*p var = m*n*N*(tot-N)*1.0/(tot*tot*(tot-1)) g1 = (m - n)*(tot-2*N) / (tot-2.0)*sqrt((tot-1.0)/(m*n*N*(tot-N))) m2, m3, m4, m5 = m**2, m**3, m**4, m**5 n2, n3, n4, n5 = n**2, n**2, n**4, n**5 g2 = m3 - m5 + n*(3*m2-6*m3+m4) + 3*m*n2 - 12*m2*n2 + 8*m3*n2 + n3 \ - 6*m*n3 + 8*m2*n3 + m*n4 - n5 - 6*m3*N + 6*m4*N + 18*m2*n*N \ - 6*m3*n*N + 18*m*n2*N - 24*m2*n2*N - 6*n3*N - 6*m*n3*N \ + 6*n4*N + N*N*(6*m2 - 6*m3 - 24*m*n + 12*m2*n + 6*n2 + \ 12*m*n2 - 6*n3) return mu, var, g1, g2 def _entropy(self, M, n, N): k = r_[N-(M-n):min(n,N)+1] vals = self.pmf(k,M,n,N) lvals = where(vals==0.0,0.0,log(vals)) return -sum(vals*lvals,axis=0) def _sf(self, k, M, n, N): """More precise calculation, 1 - cdf doesn't cut it.""" # This for loop is needed because `k` can be an array. If that's the # case, the sf() method makes M, n and N arrays of the same shape. We # therefore unpack all inputs args, so we can do the manual integration. res = [] for quant, tot, good, draw in zip(k, M, n, N): # Manual integration over probability mass function. More accurate # than integrate.quad. k2 = np.arange(quant + 1, draw + 1) res.append(np.sum(self._pmf(k2, tot, good, draw))) return np.asarray(res) hypergeom = hypergeom_gen(name='hypergeom', shapes="M, n, N") ## Logarithmic (Log-Series), (Series) distribution # FIXME: Fails _cdfvec class logser_gen(rv_discrete): """A Logarithmic (Log-Series, Series) discrete random variable. %(before_notes)s Notes ----- The probability mass function for `logser` is:: logser.pmf(k) = - p**k / (k*log(1-p)) for ``k >= 1``. `logser` takes ``p`` as shape parameter. %(example)s """ def _rvs(self, pr): # looks wrong for pr>0.5, too few k=1 # trying to use generic is worse, no k=1 at all return mtrand.logseries(pr,size=self._size) def _argcheck(self, pr): return (pr > 0) & (pr < 1) def _pmf(self, k, pr): return -pr**k * 1.0 / k / log(1-pr) def _stats(self, pr): r = log(1-pr) mu = pr / (pr - 1.0) / r mu2p = -pr / r / (pr-1.0)**2 var = mu2p - mu*mu mu3p = -pr / r * (1.0+pr) / (1.0-pr)**3 mu3 = mu3p - 3*mu*mu2p + 2*mu**3 g1 = mu3 / var**1.5 mu4p = -pr / r * (1.0/(pr-1)**2 - 6*pr/(pr-1)**3 + \ 6*pr*pr / (pr-1)**4) mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4 g2 = mu4 / var**2 - 3.0 return mu, var, g1, g2 logser = logser_gen(a=1,name='logser', longname='A logarithmic', shapes='p') ## Poisson distribution class poisson_gen(rv_discrete): """A Poisson discrete random variable. %(before_notes)s Notes ----- The probability mass function for `poisson` is:: poisson.pmf(k) = exp(-mu) * mu**k / k! for ``k >= 0``. `poisson` takes ``mu`` as shape parameter. 
%(example)s """ def _rvs(self, mu): return mtrand.poisson(mu, self._size) def _logpmf(self, k, mu): Pk = k*log(mu)-gamln(k+1) - mu return Pk def _pmf(self, k, mu): return exp(self._logpmf(k, mu)) def _cdf(self, x, mu): k = floor(x) return special.pdtr(k,mu) def _sf(self, x, mu): k = floor(x) return special.pdtrc(k,mu) def _ppf(self, q, mu): vals = ceil(special.pdtrik(q,mu)) vals1 = vals-1 temp = special.pdtr(vals1,mu) return where((temp >= q), vals1, vals) def _stats(self, mu): var = mu tmp = asarray(mu) g1 = 1.0 / tmp g2 = 1.0 / tmp return mu, var, g1, g2 poisson = poisson_gen(name="poisson", longname='A Poisson', shapes="mu") ## (Planck) Discrete Exponential class planck_gen(rv_discrete): """A Planck discrete exponential random variable. %(before_notes)s Notes ----- The probability mass function for `planck` is:: planck.pmf(k) = (1-exp(-lambda))*exp(-lambda*k) for ``k*lambda >= 0``. `planck` takes ``lambda`` as shape parameter. %(example)s """ def _argcheck(self, lambda_): if (lambda_ > 0): self.a = 0 self.b = inf return 1 elif (lambda_ < 0): self.a = -inf self.b = 0 return 1 return 0 # lambda_ = 0 def _pmf(self, k, lambda_): fact = (1-exp(-lambda_)) return fact*exp(-lambda_*k) def _cdf(self, x, lambda_): k = floor(x) return 1-exp(-lambda_*(k+1)) def _ppf(self, q, lambda_): vals = ceil(-1.0/lambda_ * log1p(-q)-1) vals1 = (vals-1).clip(self.a, np.inf) temp = self._cdf(vals1, lambda_) return where(temp >= q, vals1, vals) def _stats(self, lambda_): mu = 1/(exp(lambda_)-1) var = exp(-lambda_)/(expm1(-lambda_))**2 g1 = 2*cosh(lambda_/2.0) g2 = 4+2*cosh(lambda_) return mu, var, g1, g2 def _entropy(self, lambda_): l = lambda_ C = (1-exp(-l)) return l*exp(-l)/C - log(C) planck = planck_gen(name='planck',longname='A discrete exponential ', shapes="lamda") class boltzmann_gen(rv_discrete): """A Boltzmann (Truncated Discrete Exponential) random variable. %(before_notes)s Notes ----- The probability mass function for `boltzmann` is:: boltzmann.pmf(k) = (1-exp(-lambda)*exp(-lambda*k)/(1-exp(-lambda*N)) for ``k = 0,...,N-1``. `boltzmann` takes ``lambda`` and ``N`` as shape parameters. %(example)s """ def _pmf(self, k, lambda_, N): fact = (1-exp(-lambda_))/(1-exp(-lambda_*N)) return fact*exp(-lambda_*k) def _cdf(self, x, lambda_, N): k = floor(x) return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N)) def _ppf(self, q, lambda_, N): qnew = q*(1-exp(-lambda_*N)) vals = ceil(-1.0/lambda_ * log(1-qnew)-1) vals1 = (vals-1).clip(0.0, np.inf) temp = self._cdf(vals1, lambda_, N) return where(temp >= q, vals1, vals) def _stats(self, lambda_, N): z = exp(-lambda_) zN = exp(-lambda_*N) mu = z/(1.0-z)-N*zN/(1-zN) var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2 trm = (1-zN)/(1-z) trm2 = (z*trm**2 - N*N*zN) g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN) g1 = g1 / trm2**(1.5) g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN) g2 = g2 / trm2 / trm2 return mu, var, g1, g2 boltzmann = boltzmann_gen(name='boltzmann',longname='A truncated discrete exponential ', shapes="lamda, N") ## Discrete Uniform class randint_gen(rv_discrete): """A uniform discrete random variable. %(before_notes)s Notes ----- The probability mass function for `randint` is:: randint.pmf(k) = 1./(max- min) for ``k = min,...,max``. `randint` takes ``min`` and ``max`` as shape parameters. 
%(example)s """ def _argcheck(self, min, max): self.a = min self.b = max-1 return (max > min) def _pmf(self, k, min, max): fact = 1.0 / (max - min) return fact def _cdf(self, x, min, max): k = floor(x) return (k-min+1)*1.0/(max-min) def _ppf(self, q, min, max): vals = ceil(q*(max-min)+min)-1 vals1 = (vals-1).clip(min, max) temp = self._cdf(vals1, min, max) return where(temp >= q, vals1, vals) def _stats(self, min, max): m2, m1 = asarray(max), asarray(min) mu = (m2 + m1 - 1.0) / 2 d = m2 - m1 var = (d-1)*(d+1.0)/12.0 g1 = 0.0 g2 = -6.0/5.0*(d*d+1.0)/(d-1.0)*(d+1.0) return mu, var, g1, g2 def _rvs(self, min, max=None): """An array of *size* random integers >= min and < max. If max is None, then range is >=0 and < min """ return mtrand.randint(min, max, self._size) def _entropy(self, min, max): return log(max-min) randint = randint_gen(name='randint',longname='A discrete uniform '\ '(random integer)', shapes="min, max") # Zipf distribution # FIXME: problems sampling. class zipf_gen(rv_discrete): """A Zipf discrete random variable. %(before_notes)s Notes ----- The probability mass function for `zipf` is:: zipf.pmf(k) = 1/(zeta(a)*k**a) for ``k >= 1``. `zipf` takes ``a`` as shape parameter. %(example)s """ def _rvs(self, a): return mtrand.zipf(a, size=self._size) def _argcheck(self, a): return a > 1 def _pmf(self, k, a): Pk = 1.0 / asarray(special.zeta(a,1) * k**a) return Pk def _munp(self, n, a): return special.zeta(a-n,1) / special.zeta(a,1) def _stats(self, a): sv = special.errprint(0) fac = asarray(special.zeta(a,1)) mu = special.zeta(a-1.0,1)/fac mu2p = special.zeta(a-2.0,1)/fac var = mu2p - mu*mu mu3p = special.zeta(a-3.0,1)/fac mu3 = mu3p - 3*mu*mu2p + 2*mu**3 g1 = mu3 / asarray(var**1.5) mu4p = special.zeta(a-4.0,1)/fac sv = special.errprint(sv) mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4 g2 = mu4 / asarray(var**2) - 3.0 return mu, var, g1, g2 zipf = zipf_gen(a=1,name='zipf', longname='A Zipf', shapes="a") # Discrete Laplacian class dlaplace_gen(rv_discrete): """A Laplacian discrete random variable. %(before_notes)s Notes ----- The probability mass function for `dlaplace` is:: dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k)) for ``a >0``. `dlaplace` takes ``a`` as shape parameter. %(example)s """ def _pmf(self, k, a): return tanh(a/2.0)*exp(-a*abs(k)) def _cdf(self, x, a): k = floor(x) ind = (k >= 0) const = exp(a)+1 return where(ind, 1.0-exp(-a*k)/const, exp(a*(k+1))/const) def _ppf(self, q, a): const = 1.0/(1+exp(-a)) cons2 = 1+exp(a) ind = q < const vals = ceil(where(ind, log(q*cons2)/a-1, -log((1-q)*cons2)/a)) vals1 = (vals-1) temp = self._cdf(vals1, a) return where(temp >= q, vals1, vals) def _stats_skip(self, a): # variance mu2 does not aggree with sample variance, # nor with direct calculation using pmf # remove for now because generic calculation works # except it does not show nice zeros for mean and skew(?) ea = exp(-a) e2a = exp(-2*a) e3a = exp(-3*a) e4a = exp(-4*a) mu2 = 2* (e2a + ea) / (1-ea)**3.0 mu4 = 2* (e4a + 11*e3a + 11*e2a + ea) / (1-ea)**5.0 return 0.0, mu2, 0.0, mu4 / mu2**2.0 - 3 def _entropy(self, a): return a / sinh(a) - log(tanh(a/2.0)) dlaplace = dlaplace_gen(a=-inf, name='dlaplace', longname='A discrete Laplacian', shapes="a") class skellam_gen(rv_discrete): """A Skellam discrete random variable. %(before_notes)s Notes ----- Probability distribution of the difference of two correlated or uncorrelated Poisson random variables. Let k1 and k2 be two Poisson-distributed r.v. with expected values lam1 and lam2. 
    Then, ``k1 - k2`` follows a Skellam distribution with parameters
    ``mu1 = lam1 - rho*sqrt(lam1*lam2)`` and
    ``mu2 = lam2 - rho*sqrt(lam1*lam2)``, where rho is the correlation
    coefficient between k1 and k2.  If the two Poisson-distributed r.v. are
    independent then ``rho = 0``.

    Parameters mu1 and mu2 must be strictly positive.

    For details see: http://en.wikipedia.org/wiki/Skellam_distribution

    `skellam` takes ``mu1`` and ``mu2`` as shape parameters.

    %(example)s

    """
    def _rvs(self, mu1, mu2):
        n = self._size
        return np.random.poisson(mu1, n) - np.random.poisson(mu2, n)

    def _pmf(self, x, mu1, mu2):
        px = np.where(x < 0, ncx2.pdf(2*mu2, 2*(1-x), 2*mu1)*2,
                      ncx2.pdf(2*mu1, 2*(x+1), 2*mu2)*2)
        # ncx2.pdf() returns nan's for extremely low probabilities
        return px

    def _cdf(self, x, mu1, mu2):
        x = np.floor(x)
        px = np.where(x < 0, ncx2.cdf(2*mu2, -2*x, 2*mu1),
                      1 - ncx2.cdf(2*mu1, 2*(x+1), 2*mu2))
        return px

    # enable later
##    def _cf(self, w, mu1, mu2):
##        # characteristic function
##        poisscf = poisson._cf
##        return poisscf(w, mu1) * poisscf(-w, mu2)

    def _stats(self, mu1, mu2):
        mean = mu1 - mu2
        var = mu1 + mu2
        g1 = mean / np.sqrt((var)**3)
        g2 = 1 / var
        return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam',
                      shapes="mu1,mu2")
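
# A minimal, illustrative sanity check of the discrete machinery defined above
# (a sketch only, not part of the library's own tests); the underscore-prefixed
# names below are hypothetical and exist only for this example.
if __name__ == '__main__':
    _xk = np.arange(4)
    _pk = np.array([0.1, 0.2, 0.3, 0.4])
    # an arbitrary discrete distribution built from (support, probability) pairs
    _custm = rv_discrete(name='_custm', values=(_xk, _pk))
    # the cdf evaluated on the support points equals the running sum of pk
    assert np.allclose(_custm.cdf(_xk), np.cumsum(_pk))
    # the entropy() helper computes the Shannon entropy -sum(pk*log(pk))
    assert np.allclose(entropy(_pk), -np.sum(_pk * np.log(_pk)))
    # the Skellam pmf defined above sums to ~1 over a wide integer support
    _k = np.arange(-20, 21)
    assert np.allclose(np.sum(skellam.pmf(_k, 3.0, 2.0)), 1.0, atol=1e-6)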
bsd-3-clause
teonlamont/mne-python
mne/time_frequency/tests/test_tfr.py
3
25782
import numpy as np import os.path as op from numpy.testing import (assert_array_almost_equal, assert_array_equal, assert_equal) import pytest import mne from mne import Epochs, read_events, pick_types, create_info, EpochsArray from mne.io import read_raw_fif from mne.utils import _TempDir, run_tests_if_main, requires_h5py, grand_average from mne.time_frequency.tfr import (morlet, tfr_morlet, _make_dpss, tfr_multitaper, AverageTFR, read_tfrs, write_tfrs, combine_tfr, cwt, _compute_tfr, EpochsTFR) from mne.time_frequency import tfr_array_multitaper, tfr_array_morlet from mne.viz.utils import _fake_click from itertools import product import matplotlib matplotlib.use('Agg') # for testing don't use X server data_path = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') raw_fname = op.join(data_path, 'test_raw.fif') event_fname = op.join(data_path, 'test-eve.fif') raw_ctf_fname = op.join(data_path, 'test_ctf_raw.fif') def test_tfr_ctf(): """Test that TFRs can be calculated on CTF data.""" raw = read_raw_fif(raw_ctf_fname).crop(0, 1) raw.apply_gradient_compensation(3) events = mne.make_fixed_length_events(raw, duration=0.5) epochs = mne.Epochs(raw, events) for method in (tfr_multitaper, tfr_morlet): method(epochs, [10], 1) # smoke test def test_morlet(): """Test morlet with and without zero mean.""" Wz = morlet(1000, [10], 2., zero_mean=True) W = morlet(1000, [10], 2., zero_mean=False) assert (np.abs(np.mean(np.real(Wz[0]))) < 1e-5) assert (np.abs(np.mean(np.real(W[0]))) > 1e-3) def test_time_frequency(): """Test time-frequency transform (PSD and ITC).""" # Set parameters event_id = 1 tmin = -0.2 tmax = 0.498 # Allows exhaustive decimation testing # Setup for reading the raw data raw = read_raw_fif(raw_fname) events = read_events(event_fname) include = [] exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more # picks MEG gradiometers picks = pick_types(raw.info, meg='grad', eeg=False, stim=False, include=include, exclude=exclude) picks = picks[:2] epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks) data = epochs.get_data() times = epochs.times nave = len(data) epochs_nopicks = Epochs(raw, events, event_id, tmin, tmax) freqs = np.arange(6, 20, 5) # define frequencies of interest n_cycles = freqs / 4. # Test first with a single epoch power, itc = tfr_morlet(epochs[0], freqs=freqs, n_cycles=n_cycles, use_fft=True, return_itc=True) # Now compute evoked evoked = epochs.average() power_evoked = tfr_morlet(evoked, freqs, n_cycles, use_fft=True, return_itc=False) pytest.raises(ValueError, tfr_morlet, evoked, freqs, 1., return_itc=True) power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=True, return_itc=True) power_, itc_ = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles, use_fft=True, return_itc=True, decim=slice(0, 2)) # Test picks argument and average parameter pytest.raises(ValueError, tfr_morlet, epochs, freqs=freqs, n_cycles=n_cycles, return_itc=True, average=False) power_picks, itc_picks = \ tfr_morlet(epochs_nopicks, freqs=freqs, n_cycles=n_cycles, use_fft=True, return_itc=True, picks=picks, average=True) epochs_power_picks = \ tfr_morlet(epochs_nopicks, freqs=freqs, n_cycles=n_cycles, use_fft=True, return_itc=False, picks=picks, average=False) power_picks_avg = epochs_power_picks.average() # the actual data arrays here are equivalent, too... 
assert_array_almost_equal(power.data, power_picks.data) assert_array_almost_equal(power.data, power_picks_avg.data) assert_array_almost_equal(itc.data, itc_picks.data) assert_array_almost_equal(power.data, power_evoked.data) # complex output pytest.raises(ValueError, tfr_morlet, epochs, freqs, n_cycles, return_itc=False, average=True, output="complex") pytest.raises(ValueError, tfr_morlet, epochs, freqs, n_cycles, output="complex", average=False, return_itc=True) epochs_power_complex = tfr_morlet(epochs, freqs, n_cycles, output="complex", average=False, return_itc=False) epochs_power_2 = abs(epochs_power_complex) epochs_power_3 = epochs_power_2.copy() epochs_power_3.data[:] = np.inf # test that it's actually copied assert_array_almost_equal(epochs_power_2.data, epochs_power_picks.data) power_2 = epochs_power_2.average() assert_array_almost_equal(power_2.data, power.data) print(itc) # test repr print(itc.ch_names) # test property itc += power # test add itc -= power # test sub power = power.apply_baseline(baseline=(-0.1, 0), mode='logratio') assert 'meg' in power assert 'grad' in power assert 'mag' not in power assert 'eeg' not in power assert_equal(power.nave, nave) assert_equal(itc.nave, nave) assert (power.data.shape == (len(picks), len(freqs), len(times))) assert (power.data.shape == itc.data.shape) assert (power_.data.shape == (len(picks), len(freqs), 2)) assert (power_.data.shape == itc_.data.shape) assert (np.sum(itc.data >= 1) == 0) assert (np.sum(itc.data <= 0) == 0) # grand average itc2 = itc.copy() itc2.info['bads'] = [itc2.ch_names[0]] # test channel drop gave = grand_average([itc2, itc]) assert_equal(gave.data.shape, (itc2.data.shape[0] - 1, itc2.data.shape[1], itc2.data.shape[2])) assert_equal(itc2.ch_names[1:], gave.ch_names) assert_equal(gave.nave, 2) itc2.drop_channels(itc2.info["bads"]) assert_array_almost_equal(gave.data, itc2.data) itc2.data = np.ones(itc2.data.shape) itc.data = np.zeros(itc.data.shape) itc2.nave = 2 itc.nave = 1 itc.drop_channels([itc.ch_names[0]]) combined_itc = combine_tfr([itc2, itc]) assert_array_almost_equal(combined_itc.data, np.ones(combined_itc.data.shape) * 2 / 3) # more tests power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2, use_fft=False, return_itc=True) assert (power.data.shape == (len(picks), len(freqs), len(times))) assert (power.data.shape == itc.data.shape) assert (np.sum(itc.data >= 1) == 0) assert (np.sum(itc.data <= 0) == 0) tfr = tfr_morlet(epochs[0], freqs, use_fft=True, n_cycles=2, average=False, return_itc=False).data[0] assert (tfr.shape == (len(picks), len(freqs), len(times))) tfr2 = tfr_morlet(epochs[0], freqs, use_fft=True, n_cycles=2, decim=slice(0, 2), average=False, return_itc=False).data[0] assert (tfr2.shape == (len(picks), len(freqs), 2)) single_power = tfr_morlet(epochs, freqs, 2, average=False, return_itc=False).data single_power2 = tfr_morlet(epochs, freqs, 2, decim=slice(0, 2), average=False, return_itc=False).data single_power3 = tfr_morlet(epochs, freqs, 2, decim=slice(1, 3), average=False, return_itc=False).data single_power4 = tfr_morlet(epochs, freqs, 2, decim=slice(2, 4), average=False, return_itc=False).data assert_array_almost_equal(np.mean(single_power, axis=0), power.data) assert_array_almost_equal(np.mean(single_power2, axis=0), power.data[:, :, :2]) assert_array_almost_equal(np.mean(single_power3, axis=0), power.data[:, :, 1:3]) assert_array_almost_equal(np.mean(single_power4, axis=0), power.data[:, :, 2:4]) power_pick = power.pick_channels(power.ch_names[:10:2]) 
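    # pick_channels with ch_names[:10:2] and drop_channels with the complementary
    # ch_names[1:10:2] must leave the same channel subset; the assertions below
    # check names and shapes before equalizing the two TFR objects.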
assert_equal(len(power_pick.ch_names), len(power.ch_names[:10:2])) assert_equal(power_pick.data.shape[0], len(power.ch_names[:10:2])) power_drop = power.drop_channels(power.ch_names[1:10:2]) assert_equal(power_drop.ch_names, power_pick.ch_names) assert_equal(power_pick.data.shape[0], len(power_drop.ch_names)) mne.equalize_channels([power_pick, power_drop]) assert_equal(power_pick.ch_names, power_drop.ch_names) assert_equal(power_pick.data.shape, power_drop.data.shape) # Test decimation: # 2: multiple of len(times) even # 3: multiple odd # 8: not multiple, even # 9: not multiple, odd for decim in [2, 3, 8, 9]: for use_fft in [True, False]: power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2, use_fft=use_fft, return_itc=True, decim=decim) assert_equal(power.data.shape[2], np.ceil(float(len(times)) / decim)) freqs = list(range(50, 55)) decim = 2 _, n_chan, n_time = data.shape tfr = tfr_morlet(epochs[0], freqs, 2., decim=decim, average=False, return_itc=False).data[0] assert_equal(tfr.shape, (n_chan, len(freqs), n_time // decim)) # Test cwt modes Ws = morlet(512, [10, 20], n_cycles=2) pytest.raises(ValueError, cwt, data[0, :, :], Ws, mode='foo') for use_fft in [True, False]: for mode in ['same', 'valid', 'full']: cwt(data[0], Ws, use_fft=use_fft, mode=mode) # Test decim parameter checks pytest.raises(TypeError, tfr_morlet, epochs, freqs=freqs, n_cycles=n_cycles, use_fft=True, return_itc=True, decim='decim') # When convolving in time, wavelets must not be longer than the data pytest.raises(ValueError, cwt, data[0, :, :Ws[0].size - 1], Ws, use_fft=False) with pytest.warns(UserWarning, match='one of the wavelets is longer'): cwt(data[0, :, :Ws[0].size - 1], Ws, use_fft=True) # Check for off-by-one errors when using wavelets with an even number of # samples psd = cwt(data[0], [Ws[0][:-1]], use_fft=False, mode='full') assert_equal(psd.shape, (2, 1, 420)) def test_dpsswavelet(): """Test DPSS tapers.""" freqs = np.arange(5, 25, 3) Ws = _make_dpss(1000, freqs=freqs, n_cycles=freqs / 2., time_bandwidth=4.0, zero_mean=True) assert (len(Ws) == 3) # 3 tapers expected # Check that zero mean is true assert (np.abs(np.mean(np.real(Ws[0][0]))) < 1e-5) assert (len(Ws[0]) == len(freqs)) # As many wavelets as asked for @pytest.mark.slowtest def test_tfr_multitaper(): """Test tfr_multitaper.""" sfreq = 200.0 ch_names = ['SIM0001', 'SIM0002'] ch_types = ['grad', 'grad'] info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types) n_times = int(sfreq) # Second long epochs n_epochs = 3 seed = 42 rng = np.random.RandomState(seed) noise = 0.1 * rng.randn(n_epochs, len(ch_names), n_times) t = np.arange(n_times, dtype=np.float) / sfreq signal = np.sin(np.pi * 2. * 50. * t) # 50 Hz sinusoid signal signal[np.logical_or(t < 0.45, t > 0.55)] = 0. # Hard windowing on_time = np.logical_and(t >= 0.45, t <= 0.55) signal[on_time] *= np.hanning(on_time.sum()) # Ramping dat = noise + signal reject = dict(grad=4000.) 
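    # One synthetic event per epoch so that EpochsArray can segment the
    # simulated 50 Hz bursts constructed above, with amplitude rejection applied.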
events = np.empty((n_epochs, 3), int) first_event_sample = 100 event_id = dict(sin50hz=1) for k in range(n_epochs): events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz'] epochs = EpochsArray(data=dat, info=info, events=events, event_id=event_id, reject=reject) freqs = np.arange(35, 70, 5, dtype=np.float) power, itc = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2., time_bandwidth=4.0) power2, itc2 = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2., time_bandwidth=4.0, decim=slice(0, 2)) picks = np.arange(len(ch_names)) power_picks, itc_picks = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2., time_bandwidth=4.0, picks=picks) power_epochs = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2., time_bandwidth=4.0, return_itc=False, average=False) power_averaged = power_epochs.average() power_evoked = tfr_multitaper(epochs.average(), freqs=freqs, n_cycles=freqs / 2., time_bandwidth=4.0, return_itc=False, average=False).average() print(power_evoked) # test repr for EpochsTFR # Test channel picking power_epochs_picked = power_epochs.copy().drop_channels(['SIM0002']) assert_equal(power_epochs_picked.data.shape, (3, 1, 7, 200)) assert_equal(power_epochs_picked.ch_names, ['SIM0001']) pytest.raises(ValueError, tfr_multitaper, epochs, freqs=freqs, n_cycles=freqs / 2., return_itc=True, average=False) # test picks argument assert_array_almost_equal(power.data, power_picks.data) assert_array_almost_equal(power.data, power_averaged.data) assert_array_almost_equal(power.times, power_epochs.times) assert_array_almost_equal(power.times, power_averaged.times) assert_equal(power.nave, power_averaged.nave) assert_equal(power_epochs.data.shape, (3, 2, 7, 200)) assert_array_almost_equal(itc.data, itc_picks.data) # one is squared magnitude of the average (evoked) and # the other is average of the squared magnitudes (epochs PSD) # so values shouldn't match, but shapes should assert_array_equal(power.data.shape, power_evoked.data.shape) pytest.raises(AssertionError, assert_array_almost_equal, power.data, power_evoked.data) tmax = t[np.argmax(itc.data[0, freqs == 50, :])] fmax = freqs[np.argmax(power.data[1, :, t == 0.5])] assert (tmax > 0.3 and tmax < 0.7) assert not np.any(itc.data < 0.) assert (fmax > 40 and fmax < 60) assert (power2.data.shape == (len(picks), len(freqs), 2)) assert (power2.data.shape == itc2.data.shape) # Test decim parameter checks and compatibility between wavelets length # and instance length in the time dimension. 
pytest.raises(TypeError, tfr_multitaper, epochs, freqs=freqs, n_cycles=freqs / 2., time_bandwidth=4.0, decim=(1,)) pytest.raises(ValueError, tfr_multitaper, epochs, freqs=freqs, n_cycles=1000, time_bandwidth=4.0) def test_crop(): """Test TFR cropping.""" data = np.zeros((3, 2, 3)) times = np.array([.1, .2, .3]) freqs = np.array([.10, .20]) info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000., ['mag', 'mag', 'mag']) tfr = AverageTFR(info, data=data, times=times, freqs=freqs, nave=20, comment='test', method='crazy-tfr') tfr.crop(0.2, 0.3) assert_array_equal(tfr.times, [0.2, 0.3]) assert_equal(tfr.data.shape[-1], 2) @requires_h5py def test_io(): """Test TFR IO capacities.""" tempdir = _TempDir() fname = op.join(tempdir, 'test-tfr.h5') data = np.zeros((3, 2, 3)) times = np.array([.1, .2, .3]) freqs = np.array([.10, .20]) info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000., ['mag', 'mag', 'mag']) tfr = AverageTFR(info, data=data, times=times, freqs=freqs, nave=20, comment='test', method='crazy-tfr') tfr.save(fname) tfr2 = read_tfrs(fname, condition='test') assert_array_equal(tfr.data, tfr2.data) assert_array_equal(tfr.times, tfr2.times) assert_array_equal(tfr.freqs, tfr2.freqs) assert_equal(tfr.comment, tfr2.comment) assert_equal(tfr.nave, tfr2.nave) pytest.raises(IOError, tfr.save, fname) tfr.comment = None tfr.save(fname, overwrite=True) assert_equal(read_tfrs(fname, condition=0).comment, tfr.comment) tfr.comment = 'test-A' tfr2.comment = 'test-B' fname = op.join(tempdir, 'test2-tfr.h5') write_tfrs(fname, [tfr, tfr2]) tfr3 = read_tfrs(fname, condition='test-A') assert_equal(tfr.comment, tfr3.comment) assert (isinstance(tfr.info, mne.Info)) tfrs = read_tfrs(fname, condition=None) assert_equal(len(tfrs), 2) tfr4 = tfrs[1] assert_equal(tfr2.comment, tfr4.comment) pytest.raises(ValueError, read_tfrs, fname, condition='nonono') # Test save of EpochsTFR. data = np.zeros((5, 3, 2, 3)) tfr = EpochsTFR(info, data=data, times=times, freqs=freqs, comment='test', method='crazy-tfr') tfr.save(fname, True) read_tfr = read_tfrs(fname)[0] assert_array_equal(tfr.data, read_tfr.data) def test_plot(): """Test TFR plotting.""" import matplotlib.pyplot as plt data = np.zeros((3, 2, 3)) times = np.array([.1, .2, .3]) freqs = np.array([.10, .20]) info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000., ['mag', 'mag', 'mag']) tfr = AverageTFR(info, data=data, times=times, freqs=freqs, nave=20, comment='test', method='crazy-tfr') tfr.plot([1, 2], title='title', colorbar=False, mask=np.ones(tfr.data.shape[1:], bool)) plt.close('all') ax = plt.subplot2grid((2, 2), (0, 0)) ax2 = plt.subplot2grid((2, 2), (1, 1)) ax3 = plt.subplot2grid((2, 2), (0, 1)) tfr.plot(picks=[0, 1, 2], axes=[ax, ax2, ax3]) plt.close('all') tfr.plot([1, 2], title='title', colorbar=False, exclude='bads') plt.close('all') tfr.plot_topo(picks=[1, 2]) plt.close('all') fig = tfr.plot(picks=[1], cmap='RdBu_r') # interactive mode on by default fig.canvas.key_press_event('up') fig.canvas.key_press_event(' ') fig.canvas.key_press_event('down') cbar = fig.get_axes()[0].CB # Fake dragging with mouse. 
ax = cbar.cbar.ax _fake_click(fig, ax, (0.1, 0.1)) _fake_click(fig, ax, (0.1, 0.2), kind='motion') _fake_click(fig, ax, (0.1, 0.3), kind='release') _fake_click(fig, ax, (0.1, 0.1), button=3) _fake_click(fig, ax, (0.1, 0.2), button=3, kind='motion') _fake_click(fig, ax, (0.1, 0.3), kind='release') fig.canvas.scroll_event(0.5, 0.5, -0.5) # scroll down fig.canvas.scroll_event(0.5, 0.5, 0.5) # scroll up plt.close('all') def test_plot_joint(): """Test TFR joint plotting.""" import matplotlib.pyplot as plt raw = read_raw_fif(raw_fname) times = np.linspace(-0.1, 0.1, 200) n_freqs = 3 nave = 1 rng = np.random.RandomState(42) data = rng.randn(len(raw.ch_names), n_freqs, len(times)) tfr = AverageTFR(raw.info, data, times, np.arange(n_freqs), nave) topomap_args = {'res': 8, 'contours': 0, 'sensors': False} for combine in ('mean', 'rms', None): tfr.plot_joint(title='auto', colorbar=True, combine=combine, topomap_args=topomap_args) plt.close('all') # check various timefreqs for timefreqs in ( {(tfr.times[0], tfr.freqs[1]): (0.1, 0.5), (tfr.times[-1], tfr.freqs[-1]): (0.2, 0.6)}, [(tfr.times[1], tfr.freqs[1])]): tfr.plot_joint(timefreqs=timefreqs, topomap_args=topomap_args) plt.close('all') # test bad timefreqs timefreqs = ([(-100, 1)], tfr.times[1], [1], [(tfr.times[1], tfr.freqs[1], tfr.freqs[1])]) for these_timefreqs in timefreqs: pytest.raises(ValueError, tfr.plot_joint, these_timefreqs) # test that the object is not internally modified tfr_orig = tfr.copy() tfr.plot_joint(baseline=(0, None), exclude=[tfr.ch_names[0]], topomap_args=topomap_args) plt.close('all') assert_array_equal(tfr.data, tfr_orig.data) assert (set(tfr.ch_names) == set(tfr_orig.ch_names)) assert (set(tfr.times) == set(tfr_orig.times)) def test_add_channels(): """Test tfr splitting / re-appending channel types.""" data = np.zeros((6, 2, 3)) times = np.array([.1, .2, .3]) freqs = np.array([.10, .20]) info = mne.create_info( ['MEG 001', 'MEG 002', 'MEG 003', 'EEG 001', 'EEG 002', 'STIM 001'], 1000., ['mag', 'mag', 'mag', 'eeg', 'eeg', 'stim']) tfr = AverageTFR(info, data=data, times=times, freqs=freqs, nave=20, comment='test', method='crazy-tfr') tfr_eeg = tfr.copy().pick_types(meg=False, eeg=True) tfr_meg = tfr.copy().pick_types(meg=True) tfr_stim = tfr.copy().pick_types(meg=False, stim=True) tfr_eeg_meg = tfr.copy().pick_types(meg=True, eeg=True) tfr_new = tfr_meg.copy().add_channels([tfr_eeg, tfr_stim]) assert all(ch in tfr_new.ch_names for ch in tfr_stim.ch_names + tfr_meg.ch_names) tfr_new = tfr_meg.copy().add_channels([tfr_eeg]) assert all(ch in tfr_new.ch_names for ch in tfr.ch_names if ch != 'STIM 001') assert_array_equal(tfr_new.data, tfr_eeg_meg.data) assert all(ch not in tfr_new.ch_names for ch in tfr_stim.ch_names) # Now test errors tfr_badsf = tfr_eeg.copy() tfr_badsf.info['sfreq'] = 3.1415927 tfr_eeg = tfr_eeg.crop(-.1, .1) pytest.raises(RuntimeError, tfr_meg.add_channels, [tfr_badsf]) pytest.raises(AssertionError, tfr_meg.add_channels, [tfr_eeg]) pytest.raises(ValueError, tfr_meg.add_channels, [tfr_meg]) pytest.raises(TypeError, tfr_meg.add_channels, tfr_badsf) def test_compute_tfr(): """Test _compute_tfr function.""" # Set parameters event_id = 1 tmin = -0.2 tmax = 0.498 # Allows exhaustive decimation testing # Setup for reading the raw data raw = read_raw_fif(raw_fname) events = read_events(event_fname) exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more # picks MEG gradiometers picks = pick_types(raw.info, meg='grad', eeg=False, stim=False, include=[], exclude=exclude) picks = picks[:2] epochs = 
Epochs(raw, events, event_id, tmin, tmax, picks=picks) data = epochs.get_data() sfreq = epochs.info['sfreq'] freqs = np.arange(10, 20, 3).astype(float) # Check all combination of options for func, use_fft, zero_mean, output in product( (tfr_array_multitaper, tfr_array_morlet), (False, True), (False, True), ('complex', 'power', 'phase', 'avg_power_itc', 'avg_power', 'itc')): # Check exception if (func == tfr_array_multitaper) and (output == 'phase'): pytest.raises(NotImplementedError, func, data, sfreq=sfreq, freqs=freqs, output=output) continue # Check runs out = func(data, sfreq=sfreq, freqs=freqs, use_fft=use_fft, zero_mean=zero_mean, n_cycles=2., output=output) # Check shapes shape = np.r_[data.shape[:2], len(freqs), data.shape[2]] if ('avg' in output) or ('itc' in output): assert_array_equal(shape[1:], out.shape) else: assert_array_equal(shape, out.shape) # Check types if output in ('complex', 'avg_power_itc'): assert_equal(np.complex, out.dtype) else: assert_equal(np.float, out.dtype) assert (np.all(np.isfinite(out))) # Check errors params for _data in (None, 'foo', data[0]): pytest.raises(ValueError, _compute_tfr, _data, freqs, sfreq) for _freqs in (None, 'foo', [[0]]): pytest.raises(ValueError, _compute_tfr, data, _freqs, sfreq) for _sfreq in (None, 'foo'): pytest.raises(ValueError, _compute_tfr, data, freqs, _sfreq) for key in ('output', 'method', 'use_fft', 'decim', 'n_jobs'): for value in (None, 'foo'): kwargs = {key: value} # FIXME pep8 pytest.raises(ValueError, _compute_tfr, data, freqs, sfreq, **kwargs) # No time_bandwidth param in morlet pytest.raises(ValueError, _compute_tfr, data, freqs, sfreq, method='morlet', time_bandwidth=1) # No phase in multitaper XXX Check ? pytest.raises(NotImplementedError, _compute_tfr, data, freqs, sfreq, method='multitaper', output='phase') # Inter-trial coherence tests out = _compute_tfr(data, freqs, sfreq, output='itc', n_cycles=2.) assert (np.sum(out >= 1) == 0) assert (np.sum(out <= 0) == 0) # Check decim shapes # 2: multiple of len(times) even # 3: multiple odd # 8: not multiple, even # 9: not multiple, odd for decim in (2, 3, 8, 9, slice(0, 2), slice(1, 3), slice(2, 4)): _decim = slice(None, None, decim) if isinstance(decim, int) else decim n_time = len(np.arange(data.shape[2])[_decim]) shape = np.r_[data.shape[:2], len(freqs), n_time] for method in ('multitaper', 'morlet'): # Single trials out = _compute_tfr(data, freqs, sfreq, method=method, decim=decim, n_cycles=2.) assert_array_equal(shape, out.shape) # Averages out = _compute_tfr(data, freqs, sfreq, method=method, decim=decim, output='avg_power', n_cycles=2.) assert_array_equal(shape[1:], out.shape) run_tests_if_main()
bsd-3-clause
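The decimation shape checks in the test file above follow from simple slicing arithmetic; the sketch below is a minimal, self-contained NumPy illustration of the same bookkeeping (the sample count is made up).

# An integer decim keeps every decim-th time sample, so the decimated
# length is ceil(n_times / decim); a slice keeps exactly the sliced samples.
import numpy as np

n_times = 420  # hypothetical number of time samples
for decim in (2, 3, 8, 9):
    kept = np.arange(n_times)[::decim]
    assert len(kept) == int(np.ceil(float(n_times) / decim))
assert len(np.arange(n_times)[slice(0, 2)]) == 2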
rschenck/Capsid_IDP_Classifier
development/tuning_and_validating.py
1
9852
#!/usr/bin/env python import sys import operator import pandas as pd import numpy as np from sklearn import cross_validation from sklearn.ensemble import ExtraTreesClassifier from sklearn.cross_validation import train_test_split from sklearn.preprocessing import label_binarize from sklearn.metrics import roc_curve, auc from sklearn.metrics import confusion_matrix import matplotlib.pyplot as plt from scipy import interp from dataset import load_data # obtains the classifications from the final curated dataset def get_targets(): with open('/Users/schencro/Desktop/FINAL_DATASET/Curated_Dataset/FINAL_CURATED_TABLE.csv','r') as table: typed = {} for line in table: line = line.split(',') acc = line[1].rstrip(' ') typed.update({acc:line[2]}) return typed # obtain FINAL_DATASET for model (all data) def get_data(): with open('/Users/schencro/Desktop/FINAL_DATASET/Curated_Dataset/FINAL_CURATED_SCORES.csv', 'r') as scores: scores = scores.readlines() formatted = [] for item in scores: item = item.rstrip('\n') item = item.split(',') sample = [item[0]] for i in range(1, len(item)): ind = float(item[i]) sample.append(ind) formatted.append(sample) scores = None return formatted # get arrays after fetching the proper classification and getting that classifications set of scores def get_arrays(types, scores): order_types = [] out_scores = [] for item in scores: acc = item[0] ctype = types[acc] order_types.append(ctype) del item[0] out_scores.append(item) # the arrays needed for cross validation type_array = np.asarray(order_types) scores = np.asarray(out_scores) # cleanup item = None ourder_types = None out_scores = None return scores, type_array # ExtraTreesClassifier model def extratrees_model(x, y): clf = ExtraTreesClassifier(n_estimators=25, class_weight={"Type A":0.3,"Type B":0.5,"Neither":0.2}, bootstrap=False, max_features=125, criterion='gini', n_jobs=-1) clf = clf.fit(x, y) return clf # Voting model def results_vote(x, y): pass # Section for running loops on different parameters def tune_model_parameters(data, targets): # cross validate and tuning of the ExtraTreesClassifier parameters my_range = range(1,20) n_scores = [] for n in my_range: clf = ExtraTreesClassifier(n_estimators=25, class_weight={"Type A":0.3,"Type B":0.5,"Neither":0.2}, bootstrap=False, max_features=125, criterion='gini', n_jobs=-1) scores = cross_validation.cross_val_score(clf, data, targets, cv=10, scoring='accuracy') n_scores.append(scores.mean()) plt.plot(my_range,n_scores) plt.xlabel('Number of Trees in the Forest') plt.ylabel('Cross-Validated Accuracy (10-fold Mean)') plt.show() #plt.savefig('/Users/ryan/Desktop/FINAL_DATASET/Curated_Dataset/Capsid_Classifier/max_features_10_126.png', bbox_inches = 'tight') # get the parameter with the maximum mean output m = max(n_scores) mi = min(n_scores) print 'Max Accuracy: ' + repr(m) index = [i for i, j in enumerate(n_scores) if j == m] for i in index: print 'Parameter value max: ' + repr(my_range[i]) indexmi = [i for i, j in enumerate(n_scores) if j == mi] print 'Min Accuracy: ' + repr(mi) for i in indexmi: print 'Parameter value min: ' + repr(my_range[i]) # get ROC curves for the predictions def get_roc(data, targets): # binarize the classifactions bi_targets = label_binarize(targets, classes=['Type A', 'Type B', 'Neither']) #print bi_targets #print targets n_classes = bi_targets.shape[1] #print n_classes # shuffle and split training and test sets X_train, X_test, y_train, y_test = train_test_split(data, bi_targets, train_size=.8) # convert array to array of strings instead of 
arrays of arrays for the classifier (for the weights) string_test = [] for i in range(0, len(y_train)): string_test.append(str(y_train[i])) string_test = np.asarray(string_test) clf = ExtraTreesClassifier(n_estimators=25, class_weight={"[1 0 0]":0.4,"[0 1 0]":0.5,"[0 1 0]":0.1}, bootstrap=False, max_features=125, criterion='gini', n_jobs = -1) model = clf.fit(X_train, string_test) y_score = model.predict(X_test) # get output of scores from string list into a np array array_scores = [] for item in y_score: ind = item.split(' ') ind0 = ind[0].lstrip('[') ind1 = ind[1] ind2 = ind[2].rstrip(']') ind = [int(ind0),int(ind1), int(ind2)] array_scores.append(ind) array_scores = np.asarray(array_scores) print array_scores # Compute ROC curve and ROC area for each class fpr = dict() tpr = dict() roc_auc = dict() for i in range(n_classes): fpr[i], tpr[i], _ = roc_curve(y_test[:, i], array_scores[:, i]) roc_auc[i] = auc(fpr[i], tpr[i]) # Compute micro-average ROC curve and ROC area fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), array_scores.ravel()) roc_auc["micro"] = auc(fpr["micro"], tpr["micro"]) ''' plt.figure() plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2]) plt.plot([0, 1], [0, 1], 'k--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristic example') plt.legend(loc="lower right") plt.show() ''' # Plot ROC curves for the multiclass problem # Compute macro-average ROC curve and ROC area # First aggregate all false positive rates all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)])) # Then interpolate all ROC curves at this points mean_tpr = np.zeros_like(all_fpr) for i in range(n_classes): mean_tpr += interp(all_fpr, fpr[i], tpr[i]) # Finally average it and compute AUC mean_tpr /= n_classes fpr["macro"] = all_fpr tpr["macro"] = mean_tpr roc_auc["macro"] = auc(fpr["macro"], tpr["macro"]) # Plot all ROC curves plt.figure() plt.plot(fpr["micro"], tpr["micro"], label='micro-average ROC curve (area = {0:0.2f})' ''.format(roc_auc["micro"]), linewidth=2) plt.plot(fpr["macro"], tpr["macro"], label='macro-average ROC curve (area = {0:0.2f})' ''.format(roc_auc["macro"]), linewidth=2) for i in range(n_classes): plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})' ''.format(i, roc_auc[i])) plt.plot([0, 1], [0, 1], 'k--') plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.05]) plt.xlabel('False Positive Rate') plt.ylabel('True Positive Rate') plt.title('Receiver operating characteristics') plt.legend(loc="lower right") plt.savefig('/Users/schencro/Desktop/FINAL_DATASET/Curated_Dataset/Capsid_Classifier/ROC_curves.eps', bbox_inches = 'tight') # plot confusion matrices def plot_confusion_matrix(cm, labels, title='Confusion matrix', cmap=plt.cm.Greens): plt.imshow(cm, interpolation='nearest', cmap=cmap) plt.title(title) plt.colorbar() tick_marks = np.arange(len(labels)) plt.xticks(tick_marks, labels, rotation=45) plt.yticks(tick_marks, labels) plt.tight_layout() plt.ylabel('True label') plt.xlabel('Predicted label') def cm_model_p1(X_train, y_train): clf = ExtraTreesClassifier(n_estimators=25, class_weight={"Type A":0.3,"Type B":0.5,"Neither":0.2}, bootstrap=False, max_features=125, criterion='gini', n_jobs=-1) model = clf.fit(X_train, y_train) return model def cm_model_p2(model, X_test): # generate 100 predictions and vote for the majority for final prediction hundred_pred = [] for i in range(0,100): y_pred = model.predict(X_test) 
hundred_pred.append(y_pred) final_pred = [] for i in range(0, len(hundred_pred[0])): types = [] for k,t in enumerate(hundred_pred): types.append(hundred_pred[k][i]) counts = [types.count('Type A'),types.count('Type B'),types.count('Neither')] index, value = max(enumerate(counts), key=operator.itemgetter(1)) if index == 0: final_pred.append('Type A') elif index == 1: final_pred.append('Type B') elif index == 2: final_pred.append('Neither') else: pass y_pred = np.asarray(final_pred) return y_pred # Generate confusion matrix def get_conf_matrix(data, targets): # shuffle and split training and test sets X_train, X_test, y_train, y_test = train_test_split(data, targets, train_size=.8) # gets the model for predictions model = cm_model_p1(X_train, y_train) # generate 100 confusion matrices, get mean value for each out_cm = np.zeros((3,3)) for i in range(0,100): y_pred = cm_model_p2(model, X_test) # Compute confusion matrix labels = ['Type A', 'Type B', 'Neither'] cm = confusion_matrix(y_test, y_pred, labels=labels) np.set_printoptions(precision=2) # Normalize the confusion matrix by row (i.e by the number of samples # in each class) cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis] out_cm += cm_normalized print out_cm cm_normalized = np.divide(out_cm, 100.0) print('Normalized confusion matrix (Mean of 100 predictions)') print(cm_normalized) plt.figure() plot_confusion_matrix(cm_normalized, labels, title='Normalized confusion matrix') # plt.show() plt.savefig('/Users/schencro/Desktop/FINAL_DATASET/Curated_Dataset/Capsid_Classifier/confusion_matrix_RYANFINAL_100mean.eps', bbox_inches = 'tight') def main(): ''' # Use these three to get the data loaded, targets loaded, and the accessions stripped (Otherwise use dataset.py load_data()) # get classifications type_dict = get_targets() # load data scores = get_data() # get arrays of scores and targets data, targets = get_arrays(type_dict, scores) ''' data, targets = load_data() # tune model parameters #tune_model_parameters(data,targets) # get ROC curves #get_roc(data, targets) # get confusion matrix get_conf_matrix(data, targets) '''I WANT TO RE-RUN the ROC curves and the Confusion matrix data using predictions from a cross-validation rather than train/test_split''' if __name__ == "__main__": main()
gpl-2.0
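The row normalisation used in get_conf_matrix above reduces to dividing each confusion-matrix row by the number of true samples in that class; a small sketch with made-up labels and predictions:

import numpy as np
from sklearn.metrics import confusion_matrix

# Toy labels/predictions, for illustration only.
y_true = ['Type A', 'Type A', 'Type B', 'Neither', 'Type B', 'Type A']
y_pred = ['Type A', 'Type B', 'Type B', 'Neither', 'Type B', 'Type A']
labels = ['Type A', 'Type B', 'Neither']

cm = confusion_matrix(y_true, y_pred, labels=labels)
# Normalise each row by the class count so rows sum to 1.
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print(cm_normalized)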
rvraghav93/scikit-learn
examples/neighbors/plot_nearest_centroid.py
38
1817
""" =============================== Nearest Centroid Classification =============================== Sample usage of Nearest Centroid classification. It will plot the decision boundaries for each class. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap from sklearn import datasets from sklearn.neighbors import NearestCentroid n_neighbors = 15 # import some data to play with iris = datasets.load_iris() # we only take the first two features. We could avoid this ugly # slicing by using a two-dim dataset X = iris.data[:, :2] y = iris.target h = .02 # step size in the mesh # Create color maps cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF']) cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF']) for shrinkage in [None, .2]: # we create an instance of Neighbours Classifier and fit the data. clf = NearestCentroid(shrink_threshold=shrinkage) clf.fit(X, y) y_pred = clf.predict(X) print(shrinkage, np.mean(y == y_pred)) # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, x_max]x[y_min, y_max]. x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.figure() plt.pcolormesh(xx, yy, Z, cmap=cmap_light) # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold, edgecolor='b', s=20) plt.title("3-Class classification (shrink_threshold=%r)" % shrinkage) plt.axis('tight') plt.show()
bsd-3-clause
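A minimal non-plotting usage sketch of NearestCentroid with and without shrinkage (toy data, illustrative only):

import numpy as np
from sklearn.neighbors import NearestCentroid

X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([0, 0, 0, 1, 1, 1])

for shrinkage in [None, .2]:
    clf = NearestCentroid(shrink_threshold=shrinkage)
    clf.fit(X, y)
    print(shrinkage, clf.predict([[0.8, 1.0]]))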
cbpygit/pypmj
projects/scattering/photonic_crystals/slabs/hexagonal/half_spaces/hex_plane_tools.py
1
4284
from scipy.linalg import expm, norm import numpy as np def rot_mat(axis, theta): return expm(np.cross(np.eye(3), axis/norm(axis)*theta)) def rotate_vector(v, axis, theta): M = rot_mat(axis, theta) return np.tensordot(M,v,axes=([0],[1])).T #np.dot(M, v) def rotate_around_z(v, theta): return rotate_vector(v, np.array([0.,0.,1.]), theta) def is_odd(num): return num & 0x1 def is_inside_hexagon(x, y, d=None, x0=0., y0=0.): p_eps = 10.*np.finfo(float).eps if d is None: d = y.max() - y.min() + p_eps dx = np.abs(x - x0)/d dy = np.abs(y - y0)/d a = 0.25 * np.sqrt(3.0) return np.logical_and(dx <= a, a*dy + 0.25*dx <= 0.5*a) def get_hex_plane(plane_idx, inradius, z_height, z_center, np_xy, np_z): # We use 10* float machine precision to correct the ccordinates # to avoid leaving the computational domain due to precision # problems p_eps = 10.*np.finfo(float).eps ri = inradius # short for inradius rc = inradius/np.sqrt(3.)*2. # short for circumradius if np_z == 'auto': np_z = int(np.round(float(np_xy)/2./rc*z_height)) # XY-plane (no hexagonal shape!) if plane_idx == 6: X = np.linspace(-ri+p_eps, ri-p_eps, np_xy) Y = np.linspace(-rc+p_eps, rc-p_eps, np_xy) XY = np.meshgrid(X,Y) XYrs = np.concatenate((XY[0][..., np.newaxis], XY[1][..., np.newaxis]), axis=2) Z = np.ones((np_xy, np_xy, 1))*z_center pl = np.concatenate((XYrs, Z), axis=2) pl = pl.reshape(-1, pl.shape[-1]) # Restrict to hexagon idx_hex = is_inside_hexagon(pl[:,0], pl[:,1]) return pl[idx_hex] # Vertical planes elif plane_idx < 6: r = rc if is_odd(plane_idx) else ri r = r-p_eps xy_line = np.empty((np_xy,2)) xy_line[:,0] = np.linspace(-r, r, np_xy) xy_line[:,1] = 0. z_points = np.linspace(0.+p_eps, z_height-p_eps, np_z) # Construct the plane plane = np.empty((np_xy*np_z, 3)) for i, xy in enumerate(xy_line): for j, z in enumerate(z_points): idx = i*np_z + j plane[idx, :2] = xy plane[idx, 2] = z # Rotate the plane return rotate_around_z(plane, plane_idx*np.pi/6.) else: raise ValueError('`plane_idx` must be in [0...6].') def get_hex_planes_point_list(inradius, z_height, z_center, np_xy, np_z, plane_indices=[0,1,2,3,6]): # Construct the desired planes planes = [] for i in plane_indices: planes.append(get_hex_plane(i, inradius, z_height, z_center, np_xy, np_z)) # Flatten and save lengths lengths = [len(p) for p in planes] return np.vstack(planes), np.array(lengths) def hex_planes_point_list_for_keys(keys, plane_indices=[0,1,2,3,6]): if not 'uol' in keys: keys['uol'] = 1.e-9 inradius = keys['p'] * keys['uol'] /2. z_height = (keys['h'] + keys['h_sub'] + keys['h_sup']) * keys['uol'] z_center = (keys['h_sub']+keys['h']/2.) * keys['uol'] np_xy = keys['hex_np_xy'] if not 'hex_np_z' in keys: np_z = 'auto' return get_hex_planes_point_list(inradius, z_height, z_center, np_xy, np_z) def plane_idx_iter(lengths_): """Yields the plane index plus lower index `idx_i` and upper index `idx_f` of the point list representing this plane (i.e. pointlist[idx_i:idx_f]). """ i = 0 while i < len(lengths_): yield i, lengths_[:i].sum(), lengths_[:(i+1)].sum() i += 1 def plot_planes(pointlist, lengths): import matplotlib.pyplot as plt import seaborn as sns from mpl_toolkits.mplot3d import Axes3D fig = plt.figure() ax = fig.add_subplot(111, projection='3d') colors = sns.color_palette('husl', len(lengths)) for i, idx_i, idx_f in plane_idx_iter(lengths): pl = pointlist[idx_i:idx_f] ax.scatter(pl[:,0], pl[:,1], pl[:,2], s=10., c=colors[i], label='plane {}'.format(i+1), linewidth=0.) _ = plt.legend(loc='upper left')
gpl-3.0
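A quick sanity check (illustrative, not part of the module above) that the matrix-exponential rotation reproduces a familiar case: rotating the x unit vector by 90 degrees about z yields the y unit vector.

import numpy as np
from scipy.linalg import expm, norm

def rot_mat(axis, theta):
    # expm of the scaled cross-product (skew-symmetric) matrix is the
    # Rodrigues rotation by theta about the given axis.
    return expm(np.cross(np.eye(3), axis / norm(axis) * theta))

M = rot_mat(np.array([0., 0., 1.]), np.pi / 2)
print(np.round(M.dot(np.array([1., 0., 0.])), 6))  # -> [0. 1. 0.]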
moonbury/notebooks
github/MatplotlibCookbook/Chapter 8/wx-supershape-1.py
3
1121
import wx, numpy from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg from matplotlib.figure import Figure def supershape_radius(phi, a, b, m, n1, n2, n3): theta = .25 * m * phi cos = numpy.fabs(numpy.cos(theta) / a) ** n2 sin = numpy.fabs(numpy.sin(theta) / b) ** n3 r = (cos + sin) ** (-1. / n1) r /= numpy.max(r) return r class SuperShapeFrame(wx.Frame): def __init__(self, parent, id, title): wx.Frame.__init__(self, parent, id, title, style = wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER, size = (480, 480)) self.fig = Figure((6, 6), dpi = 80) self.panel = wx.Panel(self, -1) sizer = wx.BoxSizer(wx.VERTICAL) sizer.Add(FigureCanvasWxAgg(self.panel, -1, self.fig), 1) self.panel.SetSizer(sizer) self.draw_figure() def draw_figure(self): phi = numpy.linspace(0, 2 * numpy.pi, 1024) r = supershape_radius(phi, 1, 1, 3, 2, 18, 18) ax = self.fig.add_subplot(111, polar = True) ax.plot(phi, r, lw = 3.) self.fig.canvas.draw() app = wx.App(redirect = True) top = SuperShapeFrame(None, -1, 'SuperShape') top.Show() app.MainLoop()
gpl-3.0
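The same superformula radius can be previewed without wxPython using a plain Matplotlib polar plot; a minimal sketch with the parameters from the example above:

import numpy as np
import matplotlib.pyplot as plt

def supershape_radius(phi, a, b, m, n1, n2, n3):
    theta = .25 * m * phi
    cos = np.fabs(np.cos(theta) / a) ** n2
    sin = np.fabs(np.sin(theta) / b) ** n3
    r = (cos + sin) ** (-1. / n1)
    return r / np.max(r)

phi = np.linspace(0, 2 * np.pi, 1024)
r = supershape_radius(phi, 1, 1, 3, 2, 18, 18)
ax = plt.subplot(111, polar=True)
ax.plot(phi, r, lw=3.)
plt.show()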
imitrichev/cantera
interfaces/cython/cantera/examples/reactors/sensitivity1.py
4
2165
""" Constant-pressure, adiabatic kinetics simulation with sensitivity analysis """ import sys import numpy as np import cantera as ct gri3 = ct.Solution('gri30.xml') temp = 1500.0 pres = ct.one_atm gri3.TPX = temp, pres, 'CH4:0.1, O2:2, N2:7.52' r = ct.IdealGasConstPressureReactor(gri3, name='R1') sim = ct.ReactorNet([r]) # enable sensitivity with respect to the rates of the first 10 # reactions (reactions 0 through 9) for i in range(10): r.add_sensitivity_reaction(i) # set the tolerances for the solution and for the sensitivity coefficients sim.rtol = 1.0e-6 sim.atol = 1.0e-15 sim.rtol_sensitivity = 1.0e-6 sim.atol_sensitivity = 1.0e-6 n_times = 400 tim = np.zeros(n_times) data = np.zeros((n_times,6)) time = 0.0 for n in range(n_times): time += 5.0e-6 sim.advance(time) tim[n] = 1000 * time data[n,0] = r.T data[n,1:4] = r.thermo['OH','H','CH4'].X # sensitivity of OH to reaction 2 data[n,4] = sim.sensitivity('OH',2) # sensitivity of OH to reaction 3 data[n,5] = sim.sensitivity('OH',3) print('%10.3e %10.3f %10.3f %14.6e %10.3f %10.3f' % (sim.time, r.T, r.thermo.P, r.thermo.u, data[n,4], data[n,5])) # plot the results if matplotlib is installed. # see http://matplotlib.org/ to get it if '--plot' in sys.argv: import matplotlib.pyplot as plt plt.subplot(2,2,1) plt.plot(tim,data[:,0]) plt.xlabel('Time (ms)') plt.ylabel('Temperature (K)') plt.subplot(2,2,2) plt.plot(tim,data[:,1]) plt.xlabel('Time (ms)') plt.ylabel('OH Mole Fraction') plt.subplot(2,2,3) plt.plot(tim,data[:,2]) plt.xlabel('Time (ms)') plt.ylabel('H Mole Fraction') plt.subplot(2,2,4) plt.plot(tim,data[:,3]) plt.xlabel('Time (ms)') plt.ylabel('H2 Mole Fraction') plt.tight_layout() plt.figure(2) plt.plot(tim,data[:,4],'-',tim,data[:,5],'-g') plt.legend([sim.sensitivity_parameter_name(2),sim.sensitivity_parameter_name(3)],'best') plt.xlabel('Time (ms)') plt.ylabel('OH Sensitivity') plt.tight_layout() plt.show() else: print("""To view a plot of these results, run this script with the option '--plot""")
bsd-3-clause
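A condensed sketch of the same sensitivity workflow (one tracked reaction and a single advance; mechanism file name and reactor calls as in the example above):

import cantera as ct

gas = ct.Solution('gri30.xml')
gas.TPX = 1500.0, ct.one_atm, 'CH4:0.1, O2:2, N2:7.52'
r = ct.IdealGasConstPressureReactor(gas, name='R1')
sim = ct.ReactorNet([r])

r.add_sensitivity_reaction(2)     # track a single reaction
sim.rtol_sensitivity = 1.0e-6
sim.atol_sensitivity = 1.0e-6

sim.advance(1.0e-3)
# Parameter index 0 refers to the only sensitivity parameter added above.
print(r.T, sim.sensitivity('OH', 0))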
SEMAFORInformatik/femagtools
femagtools/forcedens.py
1
6880
# -*- coding: utf-8 -*- """ femagtools.forcedens ~~~~~~~~~~~~~~~~~~~~ Read Force Density Plot Files """ import os import re import glob import numpy as np import logging logger = logging.getLogger('femagtools.forcedens') filename_pat = re.compile(r'^(\w+)_(\d{3}).PLT(\d+)') num_pat = re.compile(r'([+-]?\d+(?:\.\d+)?(?:[eE][+-]\d+)?)\s*') pos_pat = re.compile(r'^\s*POSITION\s*\[(\w+)\]') unit_pat = re.compile(r'\[([^\]]+)') def _readSections(f): """return list of PLT sections sections are surrounded by lines starting with '[***' or 2d arrays with 7 columns Args: param f (file) PLT file to be read Returns: list of sections """ section = [] for line in f: if line.startswith('[****') or pos_pat.match(line): if section: if len(section) > 2 and section[1].startswith('Date'): yield section[1:] else: yield section if line.startswith('[****'): section = [] else: section = [line.strip()] else: section.append(line.strip()) yield section class ForceDensity(object): def __init__(self): self.type = '' self.positions = [] pass def __read_version(self, content): rec = content[0].split(' ') if len(rec) > 3: self.version = rec[3] else: self.version = rec[-1] def __read_project_filename(self, content): self.project = content[1].strip() def __read_nodes_and_mesh(self, content): self.nodes, self.elements, self.quality = \ [float(r[0]) for r in [num_pat.findall(l) for l in content[:3]]] for l in content[3:]: m = re.match(r'\*+([^\*]+)\*+', l) if m: self.type = m.group(1).strip() return def __read_date_and_title(self, content): d = content[0].split(':')[1].strip().split() dd, MM, yy = d[0].split('.') hh, mm = ''.join(d[1:-1]).split('.') self.date = '{}-{}-{}T{:02}:{:02}'.format( yy, MM, dd, int(hh), int(mm)) if len(content) > 6: self.title = content[2].strip() + ', ' + content[6].strip() else: self.title = content[2].strip() self.current = float(num_pat.findall(content[4])[0]) def __read_filename(self, content): self.filename = content[0].split(':')[1].strip() def __read_position(self, content): d = dict(position=float(num_pat.findall(content[0])[-1]), unit=unit_pat.findall(content[0].split()[1])[0]) cols = content[2].split() labels = cols[::2] # either X, FN, FT, B_N, B_T # or X FX FY B_X B_Y d['column_units'] = {k: u for k, u in zip(labels, [unit_pat.findall(u)[0] for u in cols[1::2]])} m = [] for l in content[4:]: rec = l.split()[1:] if len(rec) == len(labels): m.append([float(x) for x in rec]) d.update({k: v for k, v in zip(labels, list(zip(*m)))}) self.positions.append(d) def read(self, filename): with open(filename) as f: for s in _readSections(f.readlines()): logger.debug('Section %s' % s[0:2]) if s[0].startswith('FEMAG'): self.__read_version(s) elif s[0].startswith('Project'): self.__read_project_filename(s) elif s[0].startswith('Number'): self.__read_nodes_and_mesh(s) elif s[0].startswith('File'): self.__read_filename(s) elif s[0].startswith('Date'): self.__read_date_and_title(s) elif s[0].startswith('POSITION'): self.__read_position(s) def fft(self): """return FFT of FN""" import scipy.fftpack try: ntiles = int(360/self.positions[0]['X'][-1]) FN = np.tile( np.array([p['FN'][:-1] for p in self.positions[:-1]]), (ntiles, ntiles)) except AttributeError: return [] N = FN.shape[0] fdn = scipy.fftpack.fft2(FN) dim = N//ntiles//2 return np.abs(fdn)[1:dim, 1:dim]/N def items(self): return [(k, getattr(self, k)) for k in ('version', 'type', 'title', 'current', 'filename', 'date', 'positions')] def __str__(self): "return string format of this object" if self.type: return "\n".join([ 'FEMAG {}: 
{}'.format(self.version, self.type), 'File: {} {}'.format(self.filename, self.date)] + ['{}: {}'.format(k, v) for k, v in self.items()]) return "{}" def read(filename): f = ForceDensity() f.read(filename) return f def readall(workdir='.'): """collect all recent PLT files returns list of ForceDensity objects """ plt = dict() pltfiles = sorted(glob.glob(os.path.join(workdir, '*_*.PLT*'))) base = os.path.basename(pltfiles[-1]) lastserie = filename_pat.match(base).groups()[1] for p in pltfiles: base = os.path.basename(p) m = filename_pat.match(base) if m and lastserie == m.groups()[1]: model, i, k = m.groups() fdens = ForceDensity() fdens.read(p) logging.info("%s: %s", p, fdens.title) if model in plt: plt[model].append(fdens) else: plt[model] = [fdens] return plt if __name__ == "__main__": import matplotlib.pyplot as pl import femagtools.plot import sys if len(sys.argv) == 2: filename = sys.argv[1] else: filename = sys.stdin.readline().strip() fdens = read(filename) # Show the results title = '{}, Rotor position {}'.format( fdens.title, fdens.positions[0]['position']) pos = fdens.positions[0]['X'] FT_FN = (fdens.positions[0]['FT'], fdens.positions[0]['FN']) femagtools.plot.forcedens(title, pos, FT_FN) pl.show() title = 'Force Density Harmonics' femagtools.plot.forcedens_fft(title, fdens) pl.show()
bsd-2-clause
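Illustrative usage of the module above; the PLT file name is hypothetical, and the attributes echoed here are the ones populated by ForceDensity.read().

import femagtools.forcedens as forcedens

fdens = forcedens.read('example_000.PLT0')   # hypothetical file name
print(fdens.title, fdens.date)
first = fdens.positions[0]
print(first['position'], first['unit'], sorted(first['column_units']))
harmonics = fdens.fft()   # 2-D FFT magnitude of the normal force density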
jstoxrocky/statsmodels
statsmodels/examples/ex_kernel_regression2.py
34
1511
# -*- coding: utf-8 -*- """ Created on Wed Jan 02 13:43:44 2013 Author: Josef Perktold """ from __future__ import print_function import numpy as np import numpy.testing as npt import statsmodels.nonparametric.api as nparam if __name__ == '__main__': np.random.seed(500) nobs = [250, 1000][0] sig_fac = 1 x = np.random.uniform(-2, 2, size=nobs) x.sort() y_true = np.sin(x*5)/x + 2*x y = y_true + sig_fac * (np.sqrt(np.abs(3+x))) * np.random.normal(size=nobs) model = nparam.KernelReg(endog=[y], exog=[x], reg_type='lc', var_type='c', bw='cv_ls', defaults=nparam.EstimatorSettings(efficient=True)) sm_bw = model.bw sm_mean, sm_mfx = model.fit() model1 = nparam.KernelReg(endog=[y], exog=[x], reg_type='lc', var_type='c', bw='cv_ls') mean1, mfx1 = model1.fit() model2 = nparam.KernelReg(endog=[y], exog=[x], reg_type='ll', var_type='c', bw='cv_ls') mean2, mfx2 = model2.fit() print(model.bw) print(model1.bw) print(model2.bw) import matplotlib.pyplot as plt fig = plt.figure() ax = fig.add_subplot(1,1,1) ax.plot(x, y, 'o', alpha=0.5) ax.plot(x, y_true, lw=2, label='DGP mean') ax.plot(x, sm_mean, lw=2, label='kernel mean') ax.plot(x, mean2, lw=2, label='kernel mean') ax.legend() plt.show()
bsd-3-clause
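A minimal sketch of the same KernelReg call on a smaller toy problem (data generation mirrors the example above):

import numpy as np
import statsmodels.nonparametric.api as nparam

np.random.seed(0)
x = np.sort(np.random.uniform(-2, 2, size=200))
y = np.sin(x * 5) / x + 2 * x + np.sqrt(np.abs(3 + x)) * np.random.normal(size=200)

model = nparam.KernelReg(endog=[y], exog=[x], reg_type='ll',
                         var_type='c', bw='cv_ls')
mean, mfx = model.fit()
print(model.bw)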
petebachant/CFT-vectors
cft_vectors.py
1
18584
#!/usr/bin/env python """ This script generates a force and velocity vector diagram for a cross-flow turbine. """ from __future__ import division, print_function import numpy as np import matplotlib import matplotlib.pyplot as plt import pandas as pd from scipy.interpolate import interp1d import seaborn as sns from pxl.styleplot import set_sns import os # Define some colors (some from the Seaborn deep palette) blue = sns.color_palette()[0] green = sns.color_palette()[1] dark_gray = (0.3, 0.3, 0.3) red = sns.color_palette()[2] purple = sns.color_palette()[3] tan = sns.color_palette()[4] light_blue = sns.color_palette()[5] def load_foildata(): """Loads NACA 0020 airfoil data at Re = 2.1 x 10^5.""" Re = 2.1e5 foil = "0020" fname = "NACA {}_T1_Re{:.3f}_M0.00_N9.0.dat".format(foil, Re/1e6) fpath = "data/{}".format(fname) alpha, cl, cd = np.loadtxt(fpath, skiprows=14, unpack=True) if alpha[0] != 0.0: alpha = np.append([0.0], alpha[:-1]) cl = np.append([1e-12], cl[:-1]) cd = np.append(cd[0], cd[:-1]) # Mirror data about 0 degrees AoA since it's a symmetrical foil alpha = np.append(-np.flipud(alpha), alpha) cl = np.append(-np.flipud(cl), cl) cd = np.append(np.flipud(cd), cd) df = pd.DataFrame() df["alpha_deg"] = alpha df["cl"] = cl df["cd"] = cd return df def lookup_foildata(alpha_deg): """Lookup foil characteristics at given angle of attack.""" alpha_deg = np.asarray(alpha_deg) df = load_foildata() df["alpha_rad"] = np.deg2rad(df.alpha_deg) f_cl = interp1d(df.alpha_deg, df.cl, bounds_error=False) f_cd = interp1d(df.alpha_deg, df.cd, bounds_error=False) f_ct = interp1d(df.alpha_deg, df.cl*np.sin(df.alpha_rad) \ - df.cd*np.cos(df.alpha_rad), bounds_error=False) cl, cd, ct = f_cl(alpha_deg), f_cd(alpha_deg), f_ct(alpha_deg) return {"cl": cl, "cd": cd, "ct": ct} def calc_cft_ctorque(tsr=2.0, chord=0.14, R=0.5): """Calculate the geometric torque coefficient for a CFT.""" U_infty = 1.0 omega = tsr*U_infty/R theta_blade_deg = np.arange(0, 721) theta_blade_rad = np.deg2rad(theta_blade_deg) blade_vel_mag = omega*R blade_vel_x = blade_vel_mag*np.cos(theta_blade_rad) blade_vel_y = blade_vel_mag*np.sin(theta_blade_rad) u = U_infty # No induction rel_vel_mag = np.sqrt((blade_vel_x + u)**2 + blade_vel_y**2) rel_vel_x = u + blade_vel_x rel_vel_y = blade_vel_y relvel_dot_bladevel = (blade_vel_x*rel_vel_x + blade_vel_y*rel_vel_y) alpha_rad = np.arccos(relvel_dot_bladevel/(rel_vel_mag*blade_vel_mag)) alpha_rad[theta_blade_deg > 180] *= -1 alpha_deg = np.rad2deg(alpha_rad) foil_coeffs = lookup_foildata(alpha_deg) ctorque = foil_coeffs["ct"]*chord/(2*R)*rel_vel_mag**2/U_infty**2 cdx = -foil_coeffs["cd"]*np.sin(np.pi/2 - alpha_rad + theta_blade_rad) clx = foil_coeffs["cl"]*np.cos(np.pi/2 - alpha_rad - theta_blade_rad) df = pd.DataFrame() df["theta"] = theta_blade_deg df["alpha_deg"] = alpha_deg df["rel_vel_mag"] = rel_vel_mag df["ctorque"] = ctorque df["cdrag"] = clx + cdx return df def mag(v): """ Return magnitude of 2-D vector (input as a tuple, list, or NumPy array). 
""" return np.sqrt(v[0]**2 + v[1]**2) def rotate(v, rad): """Rotate a 2-D vector by rad radians.""" dc, ds = np.cos(rad), np.sin(rad) x, y = v[0], v[1] x, y = dc*x - ds*y, ds*x + dc*y return np.array((x, y)) def gen_naca_points(naca="0020", c=100, npoints=100, tuples=True): """Generate points for a NACA foil.""" x = np.linspace(0, 1, npoints)*c t = float(naca[2:])/100.0 y = 5.0*t*c*(0.2969*np.sqrt(x/c) - 0.1260*(x/c) - 0.3516*(x/c)**2 \ + 0.2843*(x/c)**3 - 0.1015*(x/c)**4) y = np.append(y, -y[::-1]) x = np.append(x, x[::-1]) if tuples: return np.array([(x0, y0) for x0, y0 in zip(x, y)]) else: return x, y def test_gen_naca_points(): points = gen_naca_points() x = [] y = [] for p in points: x.append(p[0]) y.append(p[1]) fig, ax = plt.subplots() ax.plot(x, y, "o") ax.set_aspect(1) plt.show() def plot_radius(ax, theta_deg=0): """Plot radius at given azimuthal angle.""" r = 0.495 theta_rad = np.deg2rad(theta_deg) x2, y2 = r*np.cos(theta_rad), r*np.sin(theta_rad) ax.plot((0, x2), (0, y2), "gray", linewidth=2) def plot_center(ax, length=0.07, linewidth=1.2): """Plot centermark at origin.""" ax.plot((0, 0), (-length/2, length/2), lw=linewidth, color="black") ax.plot((-length/2, length/2), (0, 0), lw=linewidth, color="black") def make_naca_path(c=0.3, theta_deg=0.0): verts = gen_naca_points(c=c) verts = np.array([rotate(v, -np.pi/2) for v in verts]) verts += (0.5, c/4) theta_rad = np.deg2rad(theta_deg) verts = np.array([rotate(v, theta_rad) for v in verts]) p = matplotlib.path.Path(verts, closed=True) return p def plot_foil(ax, c=0.3, theta_deg=0.0): """Plot the foil shape using a matplotlib patch.""" p = matplotlib.patches.PathPatch(make_naca_path(c, theta_deg), facecolor="gray", linewidth=1, edgecolor="gray") ax.add_patch(p) def plot_blade_path(ax, R=0.5): """Plot blade path as a dashed line.""" p = plt.Circle((0, 0), R, linestyle="dashed", edgecolor="black", facecolor="none", linewidth=1) ax.add_patch(p) def plot_vectors(fig, ax, theta_deg=0.0, tsr=2.0, c=0.3, label=False): """Plot blade velocity, free stream velocity, relative velocity, lift, and drag vectors. 
""" r = 0.5 u_infty = 0.26 theta_deg %= 360 theta_rad = np.deg2rad(theta_deg) blade_xy = r*np.cos(theta_rad), r*np.sin(theta_rad) head_width = 0.04 head_length = 0.11 linewidth = 1.5 # Function for plotting vector labels def plot_label(text, x, y, dx, dy, text_width=0.09, text_height=0.03, sign=-1, dist=1.0/3.0): text_width *= plt.rcParams["font.size"]/12*6/fig.get_size_inches()[1] text_height *= plt.rcParams["font.size"]/12*6/fig.get_size_inches()[1] dvec = np.array((dx, dy)) perp_vec = rotate(dvec, np.pi/2) perp_vec /= mag(perp_vec) if theta_deg > 270: diag = text_height else: diag = np.array((text_width, text_height)) # Projection of text diagonal vector onto normal vector proj = np.dot(diag, perp_vec) if sign != -1: proj = 0 # Text is on right side of vector if theta_deg > 180: sign *= -1 dxlab, dylab = perp_vec*(np.abs(proj) + .01)*sign xlab, ylab = x + dx*dist + dxlab, y + dy*dist + dylab ax.text(xlab, ylab, text) # Make blade velocity vector x1, y1 = rotate((0.5, tsr*u_infty), np.deg2rad(theta_deg)) dx, dy = np.array(blade_xy) - np.array((x1, y1)) blade_vel = np.array((dx, dy)) ax.arrow(x1, y1, dx, dy, head_width=head_width, head_length=head_length, length_includes_head=True, color=dark_gray, linewidth=linewidth) if label: plot_label(r"$-\omega r$", x1, y1, dx*0.25, dy*0.5) # Make chord line vector x1c, y1c = np.array((x1, y1)) - np.array((dx, dy))*0.5 x2c, y2c = np.array((x1, y1)) + np.array((dx, dy))*2 ax.plot([x1c, x2c], [y1c, y2c], marker=None, color="k", linestyle="-.", zorder=1) # Make free stream velocity vector y1 += u_infty ax.arrow(x1, y1, 0, -u_infty, head_width=head_width, head_length=head_length, length_includes_head=True, color=blue, linewidth=linewidth) u_infty = np.array((0, -u_infty)) if label: dy = -mag(u_infty) plot_label(r"$U_\mathrm{in}$", x1, y1, 0, dy, text_width=0.1) # Make relative velocity vector dx, dy = np.array(blade_xy) - np.array((x1, y1)) rel_vel = u_infty + blade_vel ax.plot((x1, x1 + dx), (y1, y1 + dy), lw=0) ax.arrow(x1, y1, dx, dy, head_width=head_width, head_length=head_length, length_includes_head=True, color=tan, linewidth=linewidth) if label: plot_label(r"$U_\mathrm{rel}$", x1, y1, dx, dy, sign=1, text_width=0.11) # Calculate angle between blade vel and rel vel alpha_deg = np.rad2deg(np.arccos(np.dot(blade_vel/mag(blade_vel), rel_vel/mag(rel_vel)))) if theta_deg > 180: alpha_deg *= -1 # Make drag vector drag_amplify = 3.0 data = lookup_foildata(alpha_deg) drag = data["cd"]*mag(rel_vel)**2*drag_amplify if drag < 0.4/drag_amplify: hs = 0.5 else: hs = 1 dx, dy = drag*np.array((dx, dy))/mag((dx, dy)) ax.arrow(blade_xy[0], blade_xy[1], dx, dy, head_width=head_width*hs, head_length=head_length*hs, length_includes_head=True, color=red, linewidth=linewidth) if label: plot_label(r"$F_d$", blade_xy[0], blade_xy[1], dx, dy, sign=-1, dist=0.66) # Make lift vector lift_amplify = 1.5 lift = data["cl"]*mag(rel_vel)**2*lift_amplify dx, dy = rotate((dx, dy), -np.pi/2)/mag((dx, dy))*lift if np.abs(lift) < 0.4/lift_amplify: hs = 0.5 else: hs = 1 ax.plot((blade_xy[0], blade_xy[0] + dx), (blade_xy[1], blade_xy[1] + dy), linewidth=0) ax.arrow(blade_xy[0], blade_xy[1], dx, dy, head_width=head_width*hs, head_length=head_length*hs, length_includes_head=True, color=green, linewidth=linewidth) if label: plot_label(r"$F_l$", blade_xy[0], blade_xy[1], dx, dy, sign=-1, text_width=0.12, text_height=0.02, dist=0.66) # Label radius if label: plot_label("$r$", 0, 0, blade_xy[0], blade_xy[1], text_width=0.04, text_height=0.04) # Label angle of attack if label: ast = 
"simple,head_width={},tail_width={},head_length={}".format( head_width*8, linewidth/16, head_length*8) xy = blade_xy - rel_vel/mag(rel_vel)*0.2 ax.annotate(r"$\alpha$", xy=xy, xycoords="data", xytext=(37.5, 22.5), textcoords="offset points", arrowprops=dict(arrowstyle=ast, ec="none", connectionstyle="arc3,rad=0.1", color="k")) xy = blade_xy - blade_vel/mag(blade_vel)*0.2 ax.annotate("", xy=xy, xycoords="data", xytext=(-15, -30), textcoords="offset points", arrowprops=dict(arrowstyle=ast, ec="none", connectionstyle="arc3,rad=-0.1", color="k")) # Label azimuthal angle if label: xy = np.array(blade_xy)*0.6 ast = "simple,head_width={},tail_width={},head_length={}".format( head_width*5.5, linewidth/22, head_length*5.5) ax.annotate(r"$\theta$", xy=xy, xycoords="data", xytext=(0.28, 0.12), textcoords="data", arrowprops=dict(arrowstyle=ast, ec="none", connectionstyle="arc3,rad=0.1", color="k")) ax.annotate("", xy=(0.41, 0), xycoords="data", xytext=(0.333, 0.12), textcoords="data", arrowprops=dict(arrowstyle=ast, ec="none", connectionstyle="arc3,rad=-0.1", color="k")) # Label pitching moment if label: xy = np.array(blade_xy)*1.1 - blade_vel/mag(blade_vel) * c/4 ast = "simple,head_width={},tail_width={},head_length={}".format( head_width*8, linewidth/16, head_length*8) ax.annotate(r"", xy=xy, xycoords="data", xytext=(25, -15), textcoords="offset points", arrowprops=dict(arrowstyle=ast, ec="none", connectionstyle="arc3,rad=0.6", color="k")) plot_label(r"$M$", xy[0], xy[1], 0.1, 0.1, sign=-1, dist=0.66) return {"u_infty": u_infty, "blade_vel": blade_vel, "rel_vel": rel_vel} def plot_alpha(ax=None, tsr=2.0, theta=None, alpha_ss=None, **kwargs): """Plot angle of attack versus azimuthal angle.""" if theta is not None: theta %= 360 if ax is None: fig, ax = plt.subplots() df = calc_cft_ctorque(tsr=tsr) ax.plot(df.theta, df.alpha_deg, **kwargs) ax.set_ylabel(r"$\alpha$ (degrees)") ax.set_xlabel(r"$\theta$ (degrees)") ax.set_xlim((0, 360)) ylim = np.round(df.alpha_deg.max() + 5) ax.set_ylim((-ylim, ylim)) if theta is not None: f = interp1d(df.theta, df.alpha_deg) ax.plot(theta, f(theta), "ok") if alpha_ss is not None: ax.hlines((alpha_ss, -alpha_ss), 0, 360, linestyles="dashed") def plot_rel_vel_mag(ax=None, tsr=2.0, theta=None, **kwargs): """Plot relative velocity magnitude versus azimuthal angle.""" if theta is not None: theta %= 360 if ax is None: fig, ax = plt.subplots() df = calc_cft_ctorque(tsr=tsr) ax.plot(df.theta, df.rel_vel_mag, **kwargs) ax.set_ylabel(r"$|\vec{U}_\mathrm{rel}|$") ax.set_xlabel(r"$\theta$ (degrees)") ax.set_xlim((0, 360)) if theta is not None: f = interp1d(df.theta, df.rel_vel_mag) ax.plot(theta, f(theta), "ok") def plot_alpha_relvel_all(tsrs=np.arange(1.5, 6.1, 1.0), save=False): """Plot angle of attack and relative velocity magnitude for a list of TSRs. Figure will have two subplots in a single row. 
""" fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(7.5, 3.0)) cm = plt.cm.get_cmap("Reds") for tsr in tsrs: color = cm(tsr/np.max(tsrs)) plot_alpha(ax=ax1, tsr=tsr, label=r"$\lambda = {}$".format(tsr), color=color) plot_rel_vel_mag(ax=ax2, tsr=tsr, color=color) [a.set_xticks(np.arange(0, 361, 60)) for a in (ax1, ax2)] ax1.legend(loc=(0.17, 1.1), ncol=len(tsrs)) ax1.set_ylim((-45, 45)) ax1.set_yticks(np.arange(-45, 46, 15)) ax2.set_ylabel(r"$|\vec{U}_\mathrm{rel}|/U_\infty$") fig.tight_layout() if save: fig.savefig("figures/alpha_deg_urel_geom.pdf", bbox_inches="tight") def plot_ctorque(ax=None, tsr=2.0, theta=None, **kwargs): """Plot torque coefficient versus azimuthal angle.""" theta %= 360 if ax is None: fig, ax = plt.subplots() df = calc_cft_ctorque(tsr=tsr) ax.plot(df.theta, df.ctorque, **kwargs) ax.set_ylabel("Torque coeff.") ax.set_xlabel(r"$\theta$ (degrees)") ax.set_xlim((0, 360)) if theta is not None: f = interp1d(df.theta, df.ctorque) ax.plot(theta, f(theta), "ok") def plot_diagram(fig=None, ax=None, theta_deg=0.0, tsr=2.0, label=False, save=False, axis="on", full_view=True): """Plot full vector diagram.""" if ax is None: fig, ax = plt.subplots(figsize=(6, 6)) plot_blade_path(ax) if label: # Create dashed line for x-axis ax.plot((-0.5, 0.5), (0, 0), linestyle="dashed", color="k", zorder=1) plot_foil(ax, c=0.3, theta_deg=theta_deg) plot_radius(ax, theta_deg) plot_center(ax) plot_vectors(fig, ax, theta_deg, tsr, label=label) # Figure formatting if full_view: ax.set_xlim((-1, 1)) ax.set_ylim((-1, 1)) ax.set_aspect(1) ax.set_xticks([]) ax.set_yticks([]) ax.axis(axis) if save: fig.savefig("figures/cft-vectors.pdf") def plot_all(theta_deg=0.0, tsr=2.0, scale=1.0, full_view=True): """Create diagram and plots of kinematics in a single figure.""" fig = plt.figure(figsize=(7.5*scale, 4.75*scale)) # Draw vector diagram ax1 = plt.subplot2grid((3, 3), (0, 0), colspan=2, rowspan=3) plot_diagram(fig, ax1, theta_deg, tsr, axis="on", full_view=full_view) # Plot angle of attack ax2 = plt.subplot2grid((3, 3), (0, 2)) plot_alpha(ax2, tsr=tsr, theta=theta_deg, alpha_ss=18, color=light_blue) # Plot relative velocity magnitude ax3 = plt.subplot2grid((3, 3), (1, 2)) plot_rel_vel_mag(ax3, tsr=tsr, theta=theta_deg, color=tan) # Plot torque coefficient ax4 = plt.subplot2grid((3, 3), (2, 2)) plot_ctorque(ax4, tsr=tsr, theta=theta_deg, color=purple) fig.tight_layout() return fig def make_frame(t): """Make a frame for a movie.""" sec_per_rev = 5.0 deg = t/sec_per_rev*360 return mplfig_to_npimage(plot_all(deg, scale=2.0)) def make_animation(filetype="mp4", fps=30): """Make animation video.""" if not os.path.isdir("videos"): os.mkdir("videos") animation = VideoClip(make_frame, duration=5.0) if "mp4" in filetype.lower(): animation.write_videofile("videos/cft-animation.mp4", fps=fps) elif "gif" in filetype.lower(): animation.write_gif("videos/cft-animation.gif", fps=fps) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description="Create cross-flow turbine \ vector diagrams.") parser.add_argument("create", choices=["figure", "diagram", "animation"], help="Either create a static figure or animation") parser.add_argument("--angle", type=float, default=60.0, help="Angle (degrees) to create figure") parser.add_argument("--show", action="store_true", default=False) parser.add_argument("--save", "-s", action="store_true", default=False, help="Save figure") args = parser.parse_args() if args.save: if not os.path.isdir("figures"): os.mkdir("figures") if args.create == "diagram": 
set_sns(font_scale=2) plot_diagram(theta_deg=args.angle, label=True, axis="off", save=args.save) elif args.create == "figure": set_sns() plot_alpha_relvel_all(save=args.save) elif args.create == "animation": set_sns(font_scale=2) from moviepy.editor import VideoClip from moviepy.video.io.bindings import mplfig_to_npimage make_animation() if args.show: plt.show()
mit
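The geometric angle-of-attack calculation in calc_cft_ctorque can be reproduced stand-alone; a sketch for a single tip speed ratio (no induction, unit free stream), assuming the same sign convention as above:

import numpy as np

tsr, R, U = 2.0, 0.5, 1.0
theta_deg = np.arange(0, 361)
theta = np.deg2rad(theta_deg)

blade_vel = tsr * U * np.array([np.cos(theta), np.sin(theta)])
rel_vel = np.array([U + blade_vel[0], blade_vel[1]])

cos_alpha = (blade_vel * rel_vel).sum(axis=0) / (
    np.linalg.norm(blade_vel, axis=0) * np.linalg.norm(rel_vel, axis=0))
alpha = np.rad2deg(np.arccos(cos_alpha))
alpha[theta_deg > 180] *= -1
print(alpha.max())   # peak geometric angle of attack in degrees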
ilo10/scikit-learn
examples/applications/svm_gui.py
287
11161
""" ========== Libsvm GUI ========== A simple graphical frontend for Libsvm mainly intended for didactic purposes. You can create data points by point and click and visualize the decision region induced by different kernels and parameter settings. To create positive examples click the left mouse button; to create negative examples click the right button. If all examples are from the same class, it uses a one-class SVM. """ from __future__ import division, print_function print(__doc__) # Author: Peter Prettenhoer <peter.prettenhofer@gmail.com> # # License: BSD 3 clause import matplotlib matplotlib.use('TkAgg') from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg from matplotlib.figure import Figure from matplotlib.contour import ContourSet import Tkinter as Tk import sys import numpy as np from sklearn import svm from sklearn.datasets import dump_svmlight_file from sklearn.externals.six.moves import xrange y_min, y_max = -50, 50 x_min, x_max = -50, 50 class Model(object): """The Model which hold the data. It implements the observable in the observer pattern and notifies the registered observers on change event. """ def __init__(self): self.observers = [] self.surface = None self.data = [] self.cls = None self.surface_type = 0 def changed(self, event): """Notify the observers. """ for observer in self.observers: observer.update(event, self) def add_observer(self, observer): """Register an observer. """ self.observers.append(observer) def set_surface(self, surface): self.surface = surface def dump_svmlight_file(self, file): data = np.array(self.data) X = data[:, 0:2] y = data[:, 2] dump_svmlight_file(X, y, file) class Controller(object): def __init__(self, model): self.model = model self.kernel = Tk.IntVar() self.surface_type = Tk.IntVar() # Whether or not a model has been fitted self.fitted = False def fit(self): print("fit the model") train = np.array(self.model.data) X = train[:, 0:2] y = train[:, 2] C = float(self.complexity.get()) gamma = float(self.gamma.get()) coef0 = float(self.coef0.get()) degree = int(self.degree.get()) kernel_map = {0: "linear", 1: "rbf", 2: "poly"} if len(np.unique(y)) == 1: clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()], gamma=gamma, coef0=coef0, degree=degree) clf.fit(X) else: clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C, gamma=gamma, coef0=coef0, degree=degree) clf.fit(X, y) if hasattr(clf, 'score'): print("Accuracy:", clf.score(X, y) * 100) X1, X2, Z = self.decision_surface(clf) self.model.clf = clf self.model.set_surface((X1, X2, Z)) self.model.surface_type = self.surface_type.get() self.fitted = True self.model.changed("surface") def decision_surface(self, cls): delta = 1 x = np.arange(x_min, x_max + delta, delta) y = np.arange(y_min, y_max + delta, delta) X1, X2 = np.meshgrid(x, y) Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()]) Z = Z.reshape(X1.shape) return X1, X2, Z def clear_data(self): self.model.data = [] self.fitted = False self.model.changed("clear") def add_example(self, x, y, label): self.model.data.append((x, y, label)) self.model.changed("example_added") # update decision surface if already fitted. self.refit() def refit(self): """Refit the model if already fitted. """ if self.fitted: self.fit() class View(object): """Test docstring. 
""" def __init__(self, root, controller): f = Figure() ax = f.add_subplot(111) ax.set_xticks([]) ax.set_yticks([]) ax.set_xlim((x_min, x_max)) ax.set_ylim((y_min, y_max)) canvas = FigureCanvasTkAgg(f, master=root) canvas.show() canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1) canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1) canvas.mpl_connect('button_press_event', self.onclick) toolbar = NavigationToolbar2TkAgg(canvas, root) toolbar.update() self.controllbar = ControllBar(root, controller) self.f = f self.ax = ax self.canvas = canvas self.controller = controller self.contours = [] self.c_labels = None self.plot_kernels() def plot_kernels(self): self.ax.text(-50, -60, "Linear: $u^T v$") self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$") self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$") def onclick(self, event): if event.xdata and event.ydata: if event.button == 1: self.controller.add_example(event.xdata, event.ydata, 1) elif event.button == 3: self.controller.add_example(event.xdata, event.ydata, -1) def update_example(self, model, idx): x, y, l = model.data[idx] if l == 1: color = 'w' elif l == -1: color = 'k' self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0) def update(self, event, model): if event == "examples_loaded": for i in xrange(len(model.data)): self.update_example(model, i) if event == "example_added": self.update_example(model, -1) if event == "clear": self.ax.clear() self.ax.set_xticks([]) self.ax.set_yticks([]) self.contours = [] self.c_labels = None self.plot_kernels() if event == "surface": self.remove_surface() self.plot_support_vectors(model.clf.support_vectors_) self.plot_decision_surface(model.surface, model.surface_type) self.canvas.draw() def remove_surface(self): """Remove old decision surface.""" if len(self.contours) > 0: for contour in self.contours: if isinstance(contour, ContourSet): for lineset in contour.collections: lineset.remove() else: contour.remove() self.contours = [] def plot_support_vectors(self, support_vectors): """Plot the support vectors by placing circles over the corresponding data points and adds the circle collection to the contours list.""" cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1], s=80, edgecolors="k", facecolors="none") self.contours.append(cs) def plot_decision_surface(self, surface, type): X1, X2, Z = surface if type == 0: levels = [-1.0, 0.0, 1.0] linestyles = ['dashed', 'solid', 'dashed'] colors = 'k' self.contours.append(self.ax.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)) elif type == 1: self.contours.append(self.ax.contourf(X1, X2, Z, 10, cmap=matplotlib.cm.bone, origin='lower', alpha=0.85)) self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k', linestyles=['solid'])) else: raise ValueError("surface type unknown") class ControllBar(object): def __init__(self, root, controller): fm = Tk.Frame(root) kernel_group = Tk.Frame(fm) Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel, value=0, command=controller.refit).pack(anchor=Tk.W) Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel, value=1, command=controller.refit).pack(anchor=Tk.W) Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel, value=2, command=controller.refit).pack(anchor=Tk.W) kernel_group.pack(side=Tk.LEFT) valbox = Tk.Frame(fm) controller.complexity = Tk.StringVar() controller.complexity.set("1.0") c = Tk.Frame(valbox) Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT) Tk.Entry(c, width=6, 
textvariable=controller.complexity).pack( side=Tk.LEFT) c.pack() controller.gamma = Tk.StringVar() controller.gamma.set("0.01") g = Tk.Frame(valbox) Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT) Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT) g.pack() controller.degree = Tk.StringVar() controller.degree.set("3") d = Tk.Frame(valbox) Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT) Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT) d.pack() controller.coef0 = Tk.StringVar() controller.coef0.set("0") r = Tk.Frame(valbox) Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT) Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT) r.pack() valbox.pack(side=Tk.LEFT) cmap_group = Tk.Frame(fm) Tk.Radiobutton(cmap_group, text="Hyperplanes", variable=controller.surface_type, value=0, command=controller.refit).pack(anchor=Tk.W) Tk.Radiobutton(cmap_group, text="Surface", variable=controller.surface_type, value=1, command=controller.refit).pack(anchor=Tk.W) cmap_group.pack(side=Tk.LEFT) train_button = Tk.Button(fm, text='Fit', width=5, command=controller.fit) train_button.pack() fm.pack(side=Tk.LEFT) Tk.Button(fm, text='Clear', width=5, command=controller.clear_data).pack(side=Tk.LEFT) def get_parser(): from optparse import OptionParser op = OptionParser() op.add_option("--output", action="store", type="str", dest="output", help="Path where to dump data.") return op def main(argv): op = get_parser() opts, args = op.parse_args(argv[1:]) root = Tk.Tk() model = Model() controller = Controller(model) root.wm_title("Scikit-learn Libsvm GUI") view = View(root, controller) model.add_observer(view) Tk.mainloop() if opts.output: model.dump_svmlight_file(opts.output) if __name__ == "__main__": main(sys.argv)
bsd-3-clause
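A minimal, GUI-free sketch of what the libsvm GUI record above does when "Fit" is pressed: the kernel, C, gamma, degree and coef0 fields of the ControllBar map onto a scikit-learn SVC. The toy data and the fit_svc helper below are illustrative assumptions, not part of the original application.

import numpy as np
from sklearn import svm


def fit_svc(X, y, kernel="rbf", C=1.0, gamma=0.01, degree=3, coef0=0.0):
    # Map the GUI parameter fields onto an SVC and fit it on the given points.
    clf = svm.SVC(kernel=kernel, C=C, gamma=gamma, degree=degree, coef0=coef0)
    clf.fit(X, y)
    return clf


if __name__ == "__main__":
    rng = np.random.RandomState(0)          # hypothetical stand-in for clicked points
    X = rng.randn(20, 2)
    y = np.where(X[:, 0] + X[:, 1] > 0, 1, -1)
    clf = fit_svc(X, y)
    print("number of support vectors:", len(clf.support_vectors_))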
Geosyntec/wqio
docs/conf.py
2
10120
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # wqio documentation build configuration file, created by # sphinx-quickstart on Sun May 22 14:36:00 2016. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import shlex import sphinx import matplotlib as mpl mpl.use("agg") import seaborn clean_bkgd = {"axes.facecolor": "none", "figure.facecolor": "none"} seaborn.set(style="ticks", rc=clean_bkgd) numpydoc_show_class_members = False autodoc_member_order = "bysource" # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. sys.path.insert(0, os.path.abspath("sphinxext")) extensions = [ "sphinx.ext.autodoc", "sphinx.ext.doctest", "sphinx.ext.intersphinx", "sphinx.ext.todo", "sphinx.ext.mathjax", "sphinx.ext.viewcode", #'plot_generator', #'plot_directive', "numpydoc", "ipython_directive", "ipython_console_highlighting", "sphinx_gallery.gen_gallery", ] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = ".rst" # The encoding of source files. # source_encoding = 'utf-8-sig' # Include the example source for plots in API docs plot_include_source = True plot_formats = [("png", 90)] plot_html_show_formats = False plot_html_show_source_link = False # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = "index" # General information about the project. project = "wqio" copyright = "2016, Paul Hobson (Geosyntec Consultants)" author = "Paul Hobson (Geosyntec Consultants)" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = "0.5.1" # The full version, including alpha/beta/rc tags. release = "0.5.1" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. 
pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = "sphinx_rtd_theme" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. # "<project> v<release> documentation" by default. html_title = 'wqio v0.5.1' # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (relative to this directory) to use as a favicon of # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not None, a 'Last updated on:' timestamp is inserted at every page # bottom, using the given strftime format. # The empty string is equivalent to '%b %d, %Y'. # html_last_updated_fmt = None # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh' # html_search_language = 'en' # A dictionary with options for the search language support, empty by default. 
# 'ja' uses this config value. # 'zh' user can custom change `jieba` dictionary path. # html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. # html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = "wqiodoc" # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ( master_doc, "wqio.tex", "wqio Documentation", "Paul Hobson (Geosyntec Consultants)", "manual", ) ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [(master_doc, "wqio", "wqio Documentation", [author], 1)] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( master_doc, "wqio", "wqio Documentation", author, "wqio", "One line description of project.", "Miscellaneous", ) ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {"https://docs.python.org/": None}
bsd-3-clause
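The conf.py record above forces a transparent seaborn/matplotlib background so generated figures blend with the HTML theme. A minimal standalone sketch of the same setup follows; the plotted data and the output filename are illustrative assumptions.

import matplotlib
matplotlib.use("agg")  # non-interactive backend, as in the conf.py above
import matplotlib.pyplot as plt
import seaborn

clean_bkgd = {"axes.facecolor": "none", "figure.facecolor": "none"}
seaborn.set(style="ticks", rc=clean_bkgd)

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4], marker="o")
fig.savefig("example.png", dpi=90)  # 90 dpi mirrors the plot_formats setting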
armanpazouki/chrono
src/demos/python/demo_crank_plot.py
1
5655
#------------------------------------------------------------------------------ # Name: pychrono example # Purpose: # # Author: Alessandro Tasora # # Created: 1/01/2019 # Copyright: (c) ProjectChrono 2019 #------------------------------------------------------------------------------ import pychrono.core as chrono import pychrono.irrlicht as chronoirr import matplotlib.pyplot as plt import numpy as np print ("Example: create a slider crank and plot results"); # Change this path to asset path, if running from other working dir. # It must point to the data folder, containing GUI assets (textures, fonts, meshes, etc.) chrono.SetChronoDataPath("../../../data/") # --------------------------------------------------------------------- # # Create the simulation system and add items # mysystem = chrono.ChSystemNSC() # Some data shared in the following crank_center = chrono.ChVectorD(-1,0.5,0) crank_rad = 0.4 crank_thick = 0.1 rod_length = 1.5 # Create four rigid bodies: the truss, the crank, the rod, the piston. # Create the floor truss mfloor = chrono.ChBodyEasyBox(3, 1, 3, 1000) mfloor.SetPos(chrono.ChVectorD(0,-0.5,0)) mfloor.SetBodyFixed(True) mysystem.Add(mfloor) # Create the flywheel crank mcrank = chrono.ChBodyEasyCylinder(crank_rad, crank_thick, 1000) mcrank.SetPos(crank_center + chrono.ChVectorD(0, 0, -0.1)) # Since ChBodyEasyCylinder creates a vertical (y up) cylinder, here rotate it: mcrank.SetRot(chrono.Q_ROTATE_Y_TO_Z) mysystem.Add(mcrank) # Create a stylized rod mrod = chrono.ChBodyEasyBox(rod_length, 0.1, 0.1, 1000) mrod.SetPos(crank_center + chrono.ChVectorD(crank_rad+rod_length/2 , 0, 0)) mysystem.Add(mrod) # Create a stylized piston mpiston = chrono.ChBodyEasyCylinder(0.2, 0.3, 1000) mpiston.SetPos(crank_center + chrono.ChVectorD(crank_rad+rod_length, 0, 0)) mpiston.SetRot(chrono.Q_ROTATE_Y_TO_X) mysystem.Add(mpiston) # Now create constraints and motors between the bodies. # Create crank-truss joint: a motor that spins the crank flywheel my_motor = chrono.ChLinkMotorRotationSpeed() my_motor.Initialize(mcrank, # the first connected body mfloor, # the second connected body chrono.ChFrameD(crank_center)) # where to create the motor in abs.space my_angularspeed = chrono.ChFunction_Const(chrono.CH_C_PI) # ang.speed: 180°/s my_motor.SetMotorFunction(my_angularspeed) mysystem.Add(my_motor) # Create crank-rod joint mjointA = chrono.ChLinkLockRevolute() mjointA.Initialize(mrod, mcrank, chrono.ChCoordsysD( crank_center + chrono.ChVectorD(crank_rad,0,0) )) mysystem.Add(mjointA) # Create rod-piston joint mjointB = chrono.ChLinkLockRevolute() mjointB.Initialize(mpiston, mrod, chrono.ChCoordsysD( crank_center + chrono.ChVectorD(crank_rad+rod_length,0,0) )) mysystem.Add(mjointB) # Create piston-truss joint mjointC = chrono.ChLinkLockPrismatic() mjointC.Initialize(mpiston, mfloor, chrono.ChCoordsysD( crank_center + chrono.ChVectorD(crank_rad+rod_length,0,0), chrono.Q_ROTATE_Z_TO_X) ) mysystem.Add(mjointC) # --------------------------------------------------------------------- # # Create an Irrlicht application to visualize the system # myapplication = chronoirr.ChIrrApp(mysystem, 'PyChrono example', chronoirr.dimension2du(1024,768)) myapplication.AddTypicalSky() myapplication.AddTypicalLogo(chrono.GetChronoDataPath() + 'logo_pychrono_alpha.png') myapplication.AddTypicalCamera(chronoirr.vector3df(1,1,3), chronoirr.vector3df(0,1,0)) myapplication.AddTypicalLights() # ==IMPORTANT!== Use this function for adding a ChIrrNodeAsset to all items # in the system. 
These ChIrrNodeAsset assets are 'proxies' to the Irrlicht meshes. # If you need a finer control on which item really needs a visualization proxy in # Irrlicht, just use application.AssetBind(myitem); on a per-item basis. myapplication.AssetBindAll(); # ==IMPORTANT!== Use this function for 'converting' into Irrlicht meshes the assets # that you added to the bodies into 3D shapes, they can be visualized by Irrlicht! myapplication.AssetUpdateAll(); # --------------------------------------------------------------------- # # Run the simulation # # Initialize these lists to store values to plot. array_time = [] array_angle = [] array_pos = [] array_speed = [] myapplication.SetTimestep(0.005) # Run the interactive simulation loop while(myapplication.GetDevice().run()): # for plotting, append instantaneous values: array_time.append(mysystem.GetChTime()) array_angle.append(my_motor.GetMotorRot()) array_pos.append(mpiston.GetPos().x) array_speed.append(mpiston.GetPos_dt().x) # here happens the visualization and step time integration myapplication.BeginScene() myapplication.DrawAll() myapplication.DoStep() myapplication.EndScene() # stop simulation after 2 seconds if mysystem.GetChTime() > 2: myapplication.GetDevice().closeDevice() # Use matplotlib to make two plots when simulation ended: fig, (ax1, ax2) = plt.subplots(2, sharex = True) ax1.plot(array_angle, array_pos) ax1.set(ylabel='position [m]') ax1.grid() ax2.plot(array_angle, array_speed, 'r--') ax2.set(ylabel='speed [m]',xlabel='angle [rad]') ax2.grid() # trick to plot \pi on x axis of plots instead of 1 2 3 4 etc. plt.xticks(np.linspace(0, 2*np.pi, 5),['0','$\pi/2$','$\pi$','$3\pi/2$','$2\pi$'])
bsd-3-clause
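The Chrono demo above records piston position and speed against crank angle and plots them in two shared-axis panels. The sketch below reproduces that figure from closed-form slider-crank kinematics instead of a simulation, using the demo's crank radius (0.4 m), rod length (1.5 m) and motor speed (pi rad/s); the analytic expression measures piston position from the crank centre and the 400-point grid is an assumption for illustration.

import numpy as np
import matplotlib.pyplot as plt

r, l, omega = 0.4, 1.5, np.pi
angle = np.linspace(0.0, 2.0 * np.pi, 400)
# Piston position along x for a crank angle theta (classic slider-crank relation).
pos = r * np.cos(angle) + np.sqrt(l**2 - (r * np.sin(angle))**2)
# Speed = d(pos)/dt = d(pos)/d(theta) * omega.
speed = np.gradient(pos, angle) * omega

fig, (ax1, ax2) = plt.subplots(2, sharex=True)
ax1.plot(angle, pos)
ax1.set(ylabel='position [m]')
ax1.grid()
ax2.plot(angle, speed, 'r--')
ax2.set(ylabel='speed [m/s]', xlabel='angle [rad]')
ax2.grid()
plt.xticks(np.linspace(0, 2 * np.pi, 5),
           ['0', r'$\pi/2$', r'$\pi$', r'$3\pi/2$', r'$2\pi$'])
plt.show()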
sevenian3/ChromaStarPy
LevelPopsGasServer.py
1
55996
# -*- coding: utf-8 -*- """ Created on Mon Apr 24 14:13:47 2017 @author: ishort """ import math import Useful import ToolBox #import numpy #JB# #from matplotlib.pyplot import plot, title, show, scatter #storage for fits (not all may be used) uw = [] uwa = [] uwb = [] uwStage = [] uwbStage = [] uwu = [] uwl = [] uua=[] uub=[] """ #a function to create a cubic function fit extrapolation def cubicFit(x,y): coeffs = numpy.polyfit(x,y,3) #returns an array of coefficents for the cubic fit of the form #Ax^3 + Bx^2 + Cx + D as [A,B,C,D] return coeffs #this will work for any number of data points! def valueFromFit(fit,x): #return the value y for a given fit, at point x return (fit[0]*(x**3)+fit[1]*(x**2)+fit[2]*x+fit[3]) #holds the five temperature at which we have partition function data """ masterTemp = [130, 500, 3000, 8000, 10000] #JB# #def levelPops(lam0In, logNStage, chiL, log10UwStage, gwL, numDeps, temp): def levelPops(lam0In, logNStage, chiL, logUw, gwL, numDeps, temp): """ Returns depth distribution of occupation numbers in lower level of b-b transition, // Input parameters: // lam0 - line centre wavelength in nm // logNStage - log_e density of absorbers in relevent ion stage (cm^-3) // logFlu - log_10 oscillator strength (unitless) // chiL - energy of lower atomic E-level of b-b transition in eV // Also needs atsmopheric structure information: // numDeps // temp structure """ c = Useful.c() logC = Useful.logC() k = Useful.k() logK = Useful.logK() logH = Useful.logH() logEe = Useful.logEe() logMe = Useful.logMe() ln10 = math.log(10.0) logE = math.log10(math.e); #// for debug output log2pi = math.log(2.0 * math.pi) log2 = math.log(2.0) #//double logNl = logNlIn * ln10; // Convert to base e #// Parition functions passed in are 2-element vectore with remperature-dependent base 10 log Us #// Convert to natural logs: #double thisLogUw, Ttheta; thisLogUw = 0.0 # //default initialization #logUw = [ 0.0 for i in range(5) ] logE10 = math.log(10.0) #print("log10UwStage ", log10UwStage) #for kk in range(len(logUw)): # logUw[kk] = logE10*log10UwStage[kk] #// lburns new loop logGwL = math.log(gwL) #//System.out.println("chiL before: " + chiL); #// If we need to subtract chiI from chiL, do so *before* converting to tiny numbers in ergs! #////For testing with Ca II lines using gS3 internal line list only: #//boolean ionized = true; #//if (ionized) { #// //System.out.println("ionized, doing chiL - chiI: " + ionized); #// // chiL = chiL - chiI; #// chiL = chiL - 6.113; #// } #// // #//Log of line-center wavelength in cm logLam0 = math.log(lam0In) #// * 1.0e-7); #// energy of b-b transition logTransE = logH + logC - logLam0 #//ergs if (chiL <= 0.0): chiL = 1.0e-49 logChiL = math.log(chiL) + Useful.logEv() #// Convert lower E-level from eV to ergs logBoltzFacL = logChiL - Useful.logK() #// Pre-factor for exponent of excitation Boltzmann factor boltzFacL = math.exp(logBoltzFacL) boltzFacGround = 0.0 / k #//I know - its zero, but let's do it this way anyway' #// return a 1D numDeps array of logarithmic number densities #// level population of lower level of bb transition (could be in either stage I or II!) 
logNums = [ 0.0 for i in range(numDeps)] #double num, logNum, expFac; #JB# #print("thisLogUw:",numpy.shape(logUw)) logUwFit = ToolBox.cubicFit(masterTemp,logUw)#u(T) fit uw.append(logUwFit) #JB# for id in range(numDeps): #//Determine temperature dependenet partition functions Uw: #Ttheta = 5040.0 / temp[0][id] #//NEW Determine temperature dependent partition functions Uw: lburns thisTemp = temp[0][id] """ if (Ttheta >= 1.0): thisLogUw = logUw[0] if (Ttheta <= 0.5): thisLogUw = logUw[1] if (Ttheta > 0.5 and Ttheta < 1.0): thisLogUw = ( logUw[1] * (Ttheta - 0.5)/(1.0 - 0.5) ) \ + ( logUw[0] * (1.0 - Ttheta)/(1.0 - 0.5) ) """ #JB# thisLogUw = ToolBox.valueFromFit(logUwFit,thisTemp)#u(T) value extrapolated #JB# if (thisTemp >= 10000.0): thisLogUw = logUw[4] if (thisTemp <= 130.0): thisLogUw = logUw[0] """ if (thisTemp > 130 and thisTemp <= 500): thisLogUw = logUw[1] * (thisTemp - 130)/(500 - 130) \ + logUw[0] * (500 - thisTemp)/(500 - 130) if (thisTemp > 500 and thisTemp <= 3000): thisLogUw = logUw[2] * (thisTemp - 500)/(3000 - 500) \ + logUw[1] * (3000 - thisTemp)/(3000 - 500) if (thisTemp > 3000 and thisTemp <= 8000): thisLogUw = logUw[3] * (thisTemp - 3000)/(8000 - 3000) \ + logUw[2] * (8000 - thisTemp)/(8000 - 3000) if (thisTemp > 8000 and thisTemp < 10000): thisLogUw = logUw[4] * (thisTemp - 8000)/(10000 - 8000) \ + logUw[3] * (10000 - thisTemp)/(10000 - 8000) """ #print("logUw ", logUw, " thisLogUw ", thisLogUw) #//System.out.println("LevPops: ionized branch taken, ionized = " + ionized); #// Take stat weight of ground state as partition function: logNums[id] = logNStage[id] - boltzFacL / temp[0][id] + logGwL - thisLogUw #// lower level of b-b transition #print("LevelPopsServer.stagePops id ", id, " logNStage[id] ", logNStage[id], " boltzFacL ", boltzFacL, " temp[0][id] ", temp[0][id], " logGwL ", logGwL, " thisLogUw ", thisLogUw, " logNums[id] ", logNums[id]); #// System.out.println("LevelPops: id, logNums[0][id], logNums[1][id], logNums[2][id], logNums[3][id]: " + id + " " #// + Math.exp(logNums[0][id]) + " " #// + Math.exp(logNums[1][id]) + " " #// + Math.exp(logNums[2][id]) + " " #// + Math.exp(logNums[3][id])); #//System.out.println("LevelPops: id, logNums[0][id], logNums[1][id], logNums[2][id], logNums[3][id], logNums[4][id]: " + id + " " #// + logE * (logNums[0][id]) + " " #// + logE * (logNums[1][id]) + " " #// + logE * (logNums[2][id]) + " " # // + logE * (logNums[3][id]) + " " #// + logE * (logNums[4][id]) ); #//System.out.println("LevelPops: id, logIonFracI, logIonFracII: " + id + " " + logE*logIonFracI + " " + logE*logIonFracII #// + "logNum, logNumI, logNums[0][id], logNums[1][id] " #// + logE*logNum + " " + logE*logNumI + " " + logE*logNums[0][id] + " " + logE*logNums[1][id]); #//System.out.println("LevelPops: id, logIonFracI: " + id + " " + logE*logIonFracI #// + "logNums[0][id], boltzFacL/temp[0][id], logNums[2][id]: " #// + logNums[0][id] + " " + boltzFacL/temp[0][id] + " " + logNums[2][id]); #//id loop #stop #print (uw) return logNums #//This version - ionization equilibrium *WITHOUT* molecules - logNum is TOTAL element population #def stagePops2(logNum, Ne, chiIArr, log10UwAArr, \ # numMols, logNumB, dissEArr, log10UwBArr, logQwABArr, logMuABArr, \ # numDeps, temp): def stagePops(logNum, Ne, chiIArr, logUw, \ numDeps, temp): #line 1: //species A data - ionization equilibrium of A #line 2: //data for set of species "B" - molecular equlibrium for set {AB} """Ionization equilibrium routine WITHOUT molecule formation: // Returns depth distribution of ionization stage 
populations // Input parameters: // logNum - array with depth-dependent total element number densities (cm^-3) // chiI1 - ground state ionization energy of neutral stage // chiI2 - ground state ionization energy of singly ionized stage // Also needs atsmopheric structure information: // numDeps // temp structure // rho structure // Atomic element A is the one whose ionization fractions are being computed // """ ln10 = math.log(10.0) logE = math.log10(math.e) #// for debug output log2pi = math.log(2.0 * math.pi) log2 = math.log(2.0) numStages = len(chiIArr) #// + 1; //need one more stage above the highest stage to be populated #// var numMols = dissEArr.length; #// Parition functions passed in are 2-element vectore with remperature-dependent base 10 log Us #// Convert to natural logs: #double Ttheta, thisTemp; #//Default initializations: #//We need one more stage in size of saha factor than number of stages we're actualy populating thisLogUw = [ 0.0 for i in range(numStages+1) ] for i in range(numStages+1): thisLogUw[i] = 0.0 logE10 = math.log(10.0) #//atomic ionization stage Boltzmann factors: #double logChiI, logBoltzFacI; boltzFacI = [ 0.0 for i in range(numStages) ] #print("numStages ", numStages, " Useful.logEv ", Useful.logEv()) for i in range(numStages): #print("i ", i, " chiIArr ", chiIArr[i]) logChiI = math.log(chiIArr[i]) + Useful.logEv() logBoltzFacI = logChiI - Useful.logK() boltzFacI[i] = math.exp(logBoltzFacI) logSahaFac = log2 + (3.0 / 2.0) * (log2pi + Useful.logMe() + Useful.logK() - 2.0 * Useful.logH()) #// return a 2D 5 x numDeps array of logarithmic number densities #// Row 0: neutral stage ground state population #// Row 1: singly ionized stage ground state population #// Row 2: doubly ionized stage ground state population #// Row 3: triply ionized stage ground state population #// Row 4: quadruply ionized stage ground state population #double[][] logNums = new double[numStages][numDeps]; logNums = [ [ 0.0 for i in range(numDeps)] for j in range(numStages) ] #//We need one more stage in size of saha factor than number of stages we're actualy populating #// for index accounting pirposes #// For atomic ionization stages: logSaha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ] saha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ] #// logIonFrac = [ 0.0 for i in range(numStages) ] #double expFac, logNe; #// Now - molecular variables: thisLogUwA = 0.0 #// element A #thisLogQwAB = math.log(300.0) #//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A #// for molecule formation: logUwA = [ 0.0 for i in range(5) ] #JB# uua=[] #uub=[] #qwab=[] for iStg in range(numStages): currentUwArr=list(logUw[iStg])#u(T) determined values UwFit = ToolBox.cubicFit(masterTemp,currentUwArr)#u(T) fit uua.append(UwFit) #print(logUw[iStg]) for id in range(numDeps): #//// reduce or enhance number density by over-all Rosseland opcity scale parameter #// #//Row 1 of Ne is log_e Ne in cm^-3 logNe = Ne[1][id] #//Determine temperature dependent partition functions Uw: thisTemp = temp[0][id] #Ttheta = 5040.0 / thisTemp #JB# #use temps and partition values to create a function #then use said function to extrapolate values for all points thisLogUw[numStages] = 0.0 for iStg in range(numStages): thisLogUw[iStg] = ToolBox.valueFromFit(uua[iStg],thisTemp)#u(T) value extrapolated #JB# #// NEW Determine temperature dependent partition functions Uw: lburns if (thisTemp <= 130.0): for iStg in range(numStages): thisLogUw[iStg] = 
logUw[iStg][0] #for iMol in range(numMols): # thisLogUwB[iMol] = logUwB[iMol][0] if (thisTemp >= 10000.0): for iStg in range(numStages): thisLogUw[iStg] = logUw[iStg][4] #for iMol in range(numMols): # thisLogUwB[iMol] = logUwB[iMol][4] #//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A #// for molecule formation: thisLogUwA = thisLogUw[0]; #//Ionization stage Saha factors: for iStg in range(numStages): #print("iStg ", iStg) logSaha[iStg+1][iStg] = logSahaFac - logNe - (boltzFacI[iStg] /temp[0][id]) + (3.0 * temp[1][id] / 2.0) + thisLogUw[iStg+1] - thisLogUw[iStg] saha[iStg+1][iStg] = math.exp(logSaha[iStg+1][iStg]) #//Compute log of denominator is ionization fraction, f_stage denominator = 1.0 #//default initialization - leading term is always unity #//ion stage contributions: for jStg in range(1, numStages+1): addend = 1.0 #//default initialization for product series for iStg in range(jStg): #//console.log("jStg " + jStg + " saha[][] indices " + (iStg+1) + " " + iStg); addend = addend * saha[iStg+1][iStg] denominator = denominator + addend #// logDenominator = math.log(denominator) logIonFrac[0] = -1.0 * logDenominator #// log ionization fraction in stage I for jStg in range(1, numStages): addend = 0.0 #//default initialization for product series for iStg in range(jStg): #//console.log("jStg " + jStg + " saha[][] indices " + (iStg+1) + " " + iStg); addend = addend + logSaha[iStg+1][iStg] logIonFrac[jStg] = addend - logDenominator for iStg in range(numStages): logNums[iStg][id] = logNum[id] + logIonFrac[iStg] #//id loop return logNums; #//end method stagePops #end method levelPops #def stagePops2(logNum, Ne, chiIArr, log10UwAArr, \ # numMols, logNumB, dissEArr, log10UwBArr, logQwABArr, logMuABArr, \ # numDeps, temp): def stagePops2(logNum, Ne, chiIArr, logUw, \ numMols, logNumB, dissEArr, logUwB, logQwABArr, logMuABArr, \ numDeps, temp): #line 1: //species A data - ionization equilibrium of A #line 2: //data for set of species "B" - molecular equlibrium for set {AB} """Ionization equilibrium routine that accounts for molecule formation: // Returns depth distribution of ionization stage populations // Input parameters: // logNum - array with depth-dependent total element number densities (cm^-3) // chiI1 - ground state ionization energy of neutral stage // chiI2 - ground state ionization energy of singly ionized stage // Also needs atsmopheric structure information: // numDeps // temp structure // rho structure // Atomic element A is the one whose ionization fractions are being computed // Element B refers to array of other species with which A forms molecules AB """ ln10 = math.log(10.0) logE = math.log10(math.e) #// for debug output log2pi = math.log(2.0 * math.pi) log2 = math.log(2.0) numStages = len(chiIArr) #// + 1; //need one more stage above the highest stage to be populated #// var numMols = dissEArr.length; #// Parition functions passed in are 2-element vectore with remperature-dependent base 10 log Us #// Convert to natural logs: #double Ttheta, thisTemp; #//Default initializations: #//We need one more stage in size of saha factor than number of stages we're actualy populating thisLogUw = [ 0.0 for i in range(numStages+1) ] for i in range(numStages+1): thisLogUw[i] = 0.0 logE10 = math.log(10.0) #//atomic ionization stage Boltzmann factors: #double logChiI, logBoltzFacI; boltzFacI = [ 0.0 for i in range(numStages) ] #print("numStages ", numStages, " Useful.logEv ", Useful.logEv()) for i in range(numStages): #print("i ", i, " chiIArr ", 
chiIArr[i]) logChiI = math.log(chiIArr[i]) + Useful.logEv() logBoltzFacI = logChiI - Useful.logK() boltzFacI[i] = math.exp(logBoltzFacI) logSahaFac = log2 + (3.0 / 2.0) * (log2pi + Useful.logMe() + Useful.logK() - 2.0 * Useful.logH()) #// return a 2D 5 x numDeps array of logarithmic number densities #// Row 0: neutral stage ground state population #// Row 1: singly ionized stage ground state population #// Row 2: doubly ionized stage ground state population #// Row 3: triply ionized stage ground state population #// Row 4: quadruply ionized stage ground state population #double[][] logNums = new double[numStages][numDeps]; logNums = [ [ 0.0 for i in range(numDeps)] for j in range(numStages) ] #//We need one more stage in size of saha factor than number of stages we're actualy populating #// for index accounting pirposes #// For atomic ionization stages: logSaha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ] saha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ] #// logIonFrac = [ 0.0 for i in range(numStages) ] #double expFac, logNe; #// Now - molecular variables: #//Treat at least one molecule - if there are really no molecules for an atomic species, #//there will be one phantom molecule in the denominator of the ionization fraction #//with an impossibly high dissociation energy ifMols = True if (numMols == 0): ifMols = False numMols = 1 #//This should be inherited, but let's make sure: dissEArr[0] = 19.0 #//eV #//Molecular partition functions - default initialization: #double[] thisLogUwB = new double[numMols]; thisLogUwB = [ 0.0 for i in range(numMols) ] for iMol in range(numMols): thisLogUwB[iMol] = 0.0 #// variable for temp-dependent computed partn fn of array element B thisLogUwA = 0.0 #// element A thisLogQwAB = math.log(300.0) #//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A #// for molecule formation: logUwA = [ 0.0 for i in range(5) ] if (numMols > 0): for kk in range(len(logUwA)): logUwA[kk] = logUw[0][kk] #// lburns #//} #//// Molecular partition functions: #//Molecular dissociation Boltzmann factors: boltzFacIAB = [ 0.0 for i in range(numMols) ] logMolSahaFac = [ 0.0 for i in range(numMols) ] #//if (numMols > 0){ #double logDissE, logBoltzFacIAB; for iMol in range(numMols): logDissE = math.log(dissEArr[iMol]) + Useful.logEv() logBoltzFacIAB = logDissE - Useful.logK() boltzFacIAB[iMol] = math.exp(logBoltzFacIAB) logMolSahaFac[iMol] = (3.0 / 2.0) * (log2pi + logMuABArr[iMol] + Useful.logK() - 2.0 * Useful.logH()) #//console.log("iMol " + iMol + " dissEArr[iMol] " + dissEArr[iMol] + " logDissE " + logE*logDissE + " logBoltzFacIAB " + logE*logBoltzFacIAB + " boltzFacIAB[iMol] " + boltzFacIAB[iMol] + " logMuABArr " + logE*logMuABArr[iMol] + " logMolSahaFac " + logE*logMolSahaFac[iMol]); #//} #// For molecular species: logSahaMol = [ 0.0 for i in range(numMols) ] invSahaMol = [ 0.0 for i in range(numMols) ] #JB# uua=[] uub=[] qwab=[] for iStg in range(numStages): currentUwArr=list(logUw[iStg])#u(T) determined values UwFit = ToolBox.cubicFit(masterTemp,currentUwArr)#u(T) fit uua.append(UwFit) #print(logUw[iStg]) for iMol in range(numMols): currentUwBArr=list(logUwB[iMol])#u(T) determined values UwBFit = ToolBox.cubicFit(masterTemp,currentUwBArr)#u(T) fit uub.append(UwBFit) for id in range(numDeps): #//// reduce or enhance number density by over-all Rosseland opcity scale parameter #// #//Row 1 of Ne is log_e Ne in cm^-3 logNe = Ne[1][id] #//Determine temperature dependent partition functions Uw: 
thisTemp = temp[0][id] #Ttheta = 5040.0 / thisTemp #JB# #use temps and partition values to create a function #then use said function to extrapolate values for all points thisLogUw[numStages] = 0.0 for iStg in range(numStages): thisLogUw[iStg] = ToolBox.valueFromFit(uua[iStg],thisTemp)#u(T) value extrapolated for iMol in range(numMols): thisLogUwB[iMol] = ToolBox.valueFromFit(uub[iMol],thisTemp)#u(T) value extrapolated #JB# #// NEW Determine temperature dependent partition functions Uw: lburns if (thisTemp <= 130.0): for iStg in range(numStages): thisLogUw[iStg] = logUw[iStg][0] for iMol in range(numMols): thisLogUwB[iMol] = logUwB[iMol][0] if (thisTemp >= 10000.0): for iStg in range(numStages): thisLogUw[iStg] = logUw[iStg][4] for iMol in range(numMols): thisLogUwB[iMol] = logUwB[iMol][4] for iMol in range(numMols): if (thisTemp < 3000.0): thisLogQwAB = ( logQwABArr[iMol][1] * (3000.0 - thisTemp)/(3000.0 - 500.0) ) \ + ( logQwABArr[iMol][2] * (thisTemp - 500.0)/(3000.0 - 500.0) ) if ( (thisTemp >= 3000.0) and (thisTemp <= 8000.0) ): thisLogQwAB = ( logQwABArr[iMol][2] * (8000.0 - thisTemp)/(8000.0 - 3000.0) ) \ + ( logQwABArr[iMol][3] * (thisTemp - 3000.0)/(8000.0 - 3000.0) ) if ( thisTemp > 8000.0 ): thisLogQwAB = ( logQwABArr[iMol][3] * (10000.0 - thisTemp)/(10000.0 - 8000.0) ) \ + ( logQwABArr[iMol][4] * (thisTemp - 8000.0)/(10000.0 - 8000.0) ) #// iMol loop #//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A #// for molecule formation: thisLogUwA = thisLogUw[0]; #//Ionization stage Saha factors: for iStg in range(numStages): #print("iStg ", iStg) logSaha[iStg+1][iStg] = logSahaFac - logNe - (boltzFacI[iStg] /temp[0][id]) + (3.0 * temp[1][id] / 2.0) + thisLogUw[iStg+1] - thisLogUw[iStg] saha[iStg+1][iStg] = math.exp(logSaha[iStg+1][iStg]) #//Molecular Saha factors: for iMol in range(numMols): logSahaMol[iMol] = logMolSahaFac[iMol] - logNumB[iMol][id] - (boltzFacIAB[iMol] / temp[0][id]) + (3.0 * temp[1][id] / 2.0) + thisLogUwB[iMol] + thisLogUwA - thisLogQwAB #//For denominator of ionization fraction, we need *inverse* molecular Saha factors (N_AB/NI): logSahaMol[iMol] = -1.0 * logSahaMol[iMol] invSahaMol[iMol] = math.exp(logSahaMol[iMol]) #//Compute log of denominator is ionization fraction, f_stage denominator = 1.0 #//default initialization - leading term is always unity #//ion stage contributions: for jStg in range(1, numStages+1): addend = 1.0 #//default initialization for product series for iStg in range(jStg): #//console.log("jStg " + jStg + " saha[][] indices " + (iStg+1) + " " + iStg); addend = addend * saha[iStg+1][iStg] denominator = denominator + addend #//molecular contribution if (ifMols == True): for iMol in range(numMols): denominator = denominator + invSahaMol[iMol] #// logDenominator = math.log(denominator) logIonFrac[0] = -1.0 * logDenominator #// log ionization fraction in stage I for jStg in range(1, numStages): addend = 0.0 #//default initialization for product series for iStg in range(jStg): #//console.log("jStg " + jStg + " saha[][] indices " + (iStg+1) + " " + iStg); addend = addend + logSaha[iStg+1][iStg] logIonFrac[jStg] = addend - logDenominator for iStg in range(numStages): logNums[iStg][id] = logNum[id] + logIonFrac[iStg] #//id loop return logNums; #//end method stagePops def stagePops3(logNum, Ne, chiIArr, logUw, numDeps, temp): #Version for ChromaStarPyGas: logNum is now *neutral stage* population from Phil # Bennett's GAS package #line 1: //species A data - ionization equilibrium of A #line 2: //data for set 
of species "B" - molecular equlibrium for set {AB} """Ionization equilibrium routine that accounts for molecule formation: // Returns depth distribution of ionization stage populations // Input parameters: // logNum - array with depth-dependent neutral stage number densities (cm^-3) // chiI1 - ground state ionization energy of neutral stage // chiI2 - ground state ionization energy of singly ionized stage // Also needs atsmopheric structure information: // numDeps // temp structure // rho structure // Atomic element A is the one whose ionization fractions are being computed // Element B refers to array of other species with which A forms molecules AB """ ln10 = math.log(10.0) logE = math.log10(math.e) #// for debug output log2pi = math.log(2.0 * math.pi) log2 = math.log(2.0) numStages = len(chiIArr) #// + 1; //need one more stage above the highest stage to be populated #// var numMols = dissEArr.length; #// Parition functions passed in are 2-element vectore with remperature-dependent base 10 log Us #// Convert to natural logs: #double Ttheta, thisTemp; #//Default initializations: #//We need one more stage in size of saha factor than number of stages we're actualy populating thisLogUw = [ 0.0 for i in range(numStages+1) ] for i in range(numStages+1): thisLogUw[i] = 0.0 logE10 = math.log(10.0) #//atomic ionization stage Boltzmann factors: #double logChiI, logBoltzFacI; boltzFacI = [ 0.0 for i in range(numStages) ] #print("numStages ", numStages, " Useful.logEv ", Useful.logEv()) for i in range(numStages): #print("i ", i, " chiIArr ", chiIArr[i]) logChiI = math.log(chiIArr[i]) + Useful.logEv() logBoltzFacI = logChiI - Useful.logK() boltzFacI[i] = math.exp(logBoltzFacI) logSahaFac = log2 + (3.0 / 2.0) * (log2pi + Useful.logMe() + Useful.logK() - 2.0 * Useful.logH()) #// return a 2D 5 x numDeps array of logarithmic number densities #// Row 0: neutral stage ground state population #// Row 1: singly ionized stage ground state population #// Row 2: doubly ionized stage ground state population #// Row 3: triply ionized stage ground state population #// Row 4: quadruply ionized stage ground state population #double[][] logNums = new double[numStages][numDeps]; logNums = [ [ 0.0 for i in range(numDeps)] for j in range(numStages) ] #//We need one more stage in size of saha factor than number of stages we're actualy populating #// for index accounting pirposes #// For atomic ionization stages: #logSaha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ] #saha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ] #// #logIonFrac = [ 0.0 for i in range(numStages) ] #double expFac, logNe; #JB# uua=[] uub=[] qwab=[] for iStg in range(numStages): currentUwArr=list(logUw[iStg])#u(T) determined values UwFit = ToolBox.cubicFit(masterTemp,currentUwArr)#u(T) fit uua.append(UwFit) #print(logUw[iStg]) for id in range(numDeps): #//// reduce or enhance number density by over-all Rosseland opcity scale parameter #// #//Row 1 of Ne is log_e Ne in cm^-3 logNe = Ne[1][id] #//Determine temperature dependent partition functions Uw: thisTemp = temp[0][id] #Ttheta = 5040.0 / thisTemp #JB# #use temps and partition values to create a function #then use said function to extrapolate values for all points thisLogUw[numStages] = 0.0 for iStg in range(numStages): thisLogUw[iStg] = ToolBox.valueFromFit(uua[iStg],thisTemp)#u(T) value extrapolated #JB# #// NEW Determine temperature dependent partition functions Uw: lburns if (thisTemp <= 130.0): for iStg in range(numStages): thisLogUw[iStg] = 
logUw[iStg][0] if (thisTemp >= 10000.0): for iStg in range(numStages): thisLogUw[iStg] = logUw[iStg][4] #//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A #// for molecule formation: #thisLogUwA = thisLogUw[0]; #//Ionization stage Saha factors: logNums[0][id] = logNum[id] for iStg in range(1, numStages): #print("iStg ", iStg) thisLogSaha = logSahaFac - logNe - (boltzFacI[iStg-1] /temp[0][id]) + (3.0 * temp[1][id] / 2.0) + thisLogUw[iStg] - thisLogUw[iStg-1] #saha[iStg+1][iStg] = math.exp(logSaha[iStg+1][iStg]) logNums[iStg][id] = logNums[iStg-1][id] + thisLogSaha #//id loop return logNums; #//end method stagePops #def sahaRHS(chiI, log10UwUArr, log10UwLArr, temp): def sahaRHS(chiI, logUwU, logUwL, temp): """RHS of partial pressure formulation of Saha equation in standard form (N_U*P_e/N_L on LHS) // Returns depth distribution of LHS: Phi(T) === N_U*P_e/N_L (David Gray notation) // Input parameters: // chiI - ground state ionization energy of lower stage // log10UwUArr, log10UwLArr - array of temperature-dependent partition function for upper and lower ionization stage // Also needs atsmopheric structure information: // numDeps // temp structure // // Atomic element "A" is the one whose ionization fractions are being computed // Element "B" refers to array of other species with which A forms molecules "AB" """ ln10 = math.log(10.0) logE = math.log10(math.e) #// for debug output log2pi = math.log(2.0 * math.pi) log2 = math.log(2.0) #// var numMols = dissEArr.length; #// Parition functions passed in are 2-element vectore with remperature-dependent base 10 log Us #// Convert to natural logs: #double Ttheta, thisTemp; #//Default initializations: #//We need one more stage in size of saha factor than number of stages we're actualy populating thisLogUwU = 0.0 thisLogUwL = 0.0 logE10 = math.log(10.0) #//We need one more stage in size of saha factor than number of stages we're actualy populating #logUwU = [0.0 for i in range(5)] #logUwL = [0.0 for i in range(5)] for kk in range(len(logUwL)): logUwU[kk] = logUwL[kk] # logUwL[kk] = logE10*log10UwLArr[kk] #//System.out.println("chiL before: " + chiL); #// If we need to subtract chiI from chiL, do so *before* converting to tiny numbers in ergs! #//atomic ionization stage Boltzmann factors: #double logChiI, logBoltzFacI; #double boltzFacI; logChiI = math.log(chiI) + Useful.logEv() logBoltzFacI = logChiI - Useful.logK() boltzFacI = math.exp(logBoltzFacI) #//Extra factor of k to get k^5/2 in the P_e formulation of Saha Eq. 
logSahaFac = log2 + (3.0 / 2.0) * (log2pi + Useful.logMe() + Useful.logK() - 2.0 * Useful.logH()) + Useful.logK() #//double[] logLHS = new double[numDeps]; #double logLHS; #// For atomic ionization stages: #double logSaha, saha, expFac; #// for (int id = 0; id < numDeps; id++) { #// #//Determine temperature dependent partition functions Uw: thisTemp = temp[0] #Ttheta = 5040.0 / thisTemp """ if (Ttheta >= 1.0): thisLogUwU = logUwU[0] thisLogUwL = logUwL[0] if (Ttheta <= 0.5): thisLogUwU = logUwU[1] thisLogUwL = logUwL[1] if (Ttheta > 0.5 and Ttheta < 1.0): thisLogUwU = ( logUwU[1] * (Ttheta - 0.5)/(1.0 - 0.5) ) + ( logUwU[0] * (1.0 - Ttheta)/(1.0 - 0.5) ) thisLogUwL = ( logUwL[1] * (Ttheta - 0.5)/(1.0 - 0.5) ) + ( logUwL[0] * (1.0 - Ttheta)/(1.0 - 0.5) ) """ #JB# currentUwUArr=list(logUwU)#u(T) determined values UwUFit = ToolBox.cubicFit(masterTemp,currentUwUArr)#u(T) fit thisLogUwU = ToolBox.valueFromFit(UwUFit,thisTemp)#u(T) value extrapolated currentUwLArr=list(logUwL)#u(T) determined values UwLFit = ToolBox.cubicFit(masterTemp,currentUwLArr)#u(T) fit thisLogUwL = ToolBox.valueFromFit(UwLFit,thisTemp)#u(T) value extrapolated #JB# #will need to do this one in Main as it goes through its own loop of temp #if thisTemp == superTemp[0][len(superTemp[0])]: # uwu.append(UwUFit) # uwl.append(UwLFit) # #JB# if (thisTemp <= 130.0): thisLogUwU = logUwU[0] thisLogUwL = logUwL[0] if (thisTemp >= 10000.0): thisLogUwU = logUwU[4] thisLogUwL = logUwL[4] """ if (thisTemp > 130 and thisTemp <= 500): thisLogUwU = logUwU[1] * (thisTemp - 130)/(500 - 130) \ + logUwU[0] * (500 - thisTemp)/(500 - 130) thisLogUwL = logUwL[1] * (thisTemp - 130)/(500 - 130) \ + logUwL[0] * (500 - thisTemp)/(500 - 130) if (thisTemp > 500 and thisTemp <= 3000): thisLogUwU = logUwU[2] * (thisTemp - 500)/(3000 - 500) \ + logUwU[1] * (3000 - thisTemp)/(3000 - 500) thisLogUwL = logUwL[2] * (thisTemp - 500)/(3000 - 500) \ + logUwL[1] * (3000 - thisTemp)/(3000 - 500) if (thisTemp > 3000 and thisTemp <= 8000): thisLogUwU = logUwU[3] * (thisTemp - 3000)/(8000 - 3000) \ + logUwU[2] * (8000 - thisTemp)/(8000 - 3000) thisLogUwL = logUwL[3] * (thisTemp - 3000)/(8000 - 3000) \ + logUwL[2] * (8000 - thisTemp)/(8000 - 3000) if (thisTemp > 8000 and thisTemp < 10000): thisLogUwU = logUwU[4] * (thisTemp - 8000)/(10000 - 8000) \ + logUwU[3] * (10000 - thisTemp)/(10000 - 8000) thisLogUwL = logUwL[4] * (thisTemp - 8000)/(10000 - 8000) \ + logUwL[3] * (10000 - thisTemp)/(10000 - 8000) if (thisTemp >= 10000): thisLogUwU = logUwU[4] thisLogUwL = logUwL[4] """ #//Ionization stage Saha factors: #//Need T_kin^5/2 in the P_e formulation of Saha Eq. 
logSaha = logSahaFac - (boltzFacI /temp[0]) + (5.0 * temp[1] / 2.0) + thisLogUwU - thisLogUwL #// saha = Math.exp(logSaha); #//logLHS[id] = logSaha; logLHS = logSaha; #// } //id loop return logLHS; #JB #return [logLHS,[[UwUFit,thisLogUwU],[UwLFit,thisLogUwL]]] #// # } //end method sahaRHS #def molPops(nmrtrLogNumB, nmrtrDissE, log10UwA, nmrtrLog10UwB, nmrtrLogQwAB, nmrtrLogMuAB, \ # numMolsB, logNumB, dissEArr, log10UwBArr, logQwABArr, logMuABArr, \ # logGroundRatio, numDeps, temp): def molPops(nmrtrLogNumB, nmrtrDissE, logUwA, nmrtrLogUwB, nmrtrLogQwAB, nmrtrLogMuAB, \ numMolsB, logNumB, dissEArr, logUwB, logQwABArr, logMuABArr, \ logGroundRatio, numDeps, temp): # line 1: //species A data - ionization equilibrium of A # //data for set of species "B" - molecular equlibrium for set {AB} """Diatomic molecular equilibrium routine that accounts for molecule formation: // Returns depth distribution of molecular population // Input parameters: // logNum - array with depth-dependent total element number densities (cm^-3) // chiI1 - ground state ionization energy of neutral stage // chiI2 - ground state ionization energy of singly ionized stage // Also needs atsmopheric structure information: // numDeps // temp structure // rho structure // // Atomic element "A" is the one kept on the LHS of the master fraction, whose ionization fractions are included // in the denominator of the master fraction // Element "B" refers to array of other sintpecies with which A forms molecules "AB" """ logE = math.log10(math.e) #// for debug output #//System.out.println("molPops: nmrtrDissE " + nmrtrDissE + " log10UwA " + log10UwA[0] + " " + log10UwA[1] + " nmrtrLog10UwB " + #// nmrtrLog10UwB[0] + " " + nmrtrLog10UwB[1] + " nmrtrLog10QwAB " + logE*nmrtrLogQwAB[2] + " nmrtrLogMuAB " + logE*nmrtrLogMuAB #// + " numMolsB " + numMolsB + " dissEArr " + dissEArr[0] + " log10UwBArr " + log10UwBArr[0][0] + " " + log10UwBArr[0][1] + " log10QwABArr " + #// logE*logQwABArr[0][2] + " logMuABArr " + logE*logMuABArr[0]); #//System.out.println("Line: nmrtrLog10UwB[0] " + logE*nmrtrLog10UwB[0] + " nmrtrLog10UwB[1] " + logE*nmrtrLog10UwB[1]); ln10 = math.log(10.0) log2pi = math.log(2.0 * math.pi) log2 = math.log(2.0) logE10 = math.log(10.0) #// Convert to natural logs: #double Ttheta, thisTemp; #//Treat at least one molecule - if there are really no molecules for an atomic species, #//there will be one phantom molecule in the denominator of the ionization fraction #//with an impossibly high dissociation energy if (numMolsB == 0): numMolsB = 1 #//This should be inherited, but let's make sure: dissEArr[0] = 29.0 #//eV #//var molPops = function(logNum, numeratorLogNumB, numeratorDissE, numeratorLog10UwA, numeratorLog10QwAB, numeratorLogMuAB, //species A data - ionization equilibrium of A #//Molecular partition functions - default initialization: thisLogUwB = [0.0 for i in range(numMolsB)] for iMol in range(numMolsB): thisLogUwB[iMol] = 0.0 #// variable for temp-dependent computed partn fn of array element B thisLogUwA = 0.0 #// element A nmrtrThisLogUwB = 0.0 #// element A thisLogQwAB = math.log(300.0) nmrtrThisLogQwAB = math.log(300.0) #//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A #// for molecule formation: #logUwA = [0.0 for i in range(5)] #nmrtrLogUwB = [0.0 for i in range(5)] #for kk in range(len(logUwA)): #logUwA[kk] = logE10*log10UwA[kk] #nmrtrLogUwB[kk] = logE10*nmrtrLog10UwB[kk] #// lburns #// Array of elements B for all molecular species AB: #double[][] logUwB = new 
double[numMolsB][2]; #logUwB = [ [ 0.0 for i in range(5) ] for j in range(numMolsB) ] #//if (numMolsB > 0){ #for iMol in range(numMolsB): # for kk in range(5): # logUwB[iMol][kk] = logE10*log10UwBArr[iMol][kk] # // lburns new loop #//} #// Molecular partition functions: #// double nmrtrLogQwAB = logE10*nmrtrLog10QwAB; #// double[] logQwAB = new double[numMolsB]; #// //if (numMolsB > 0){ #// for (int iMol = 0; iMol < numMolsB; iMol++){ #// logQwAB[iMol] = logE10*log10QwABArr[iMol]; #// } # //} #//Molecular dissociation Boltzmann factors: nmrtrBoltzFacIAB = 0.0 nmrtrLogMolSahaFac = 0.0 logDissE = math.log(nmrtrDissE) + Useful.logEv() #//System.out.println("logDissE " + logE*logDissE) logBoltzFacIAB = logDissE - Useful.logK() #//System.out.println("logBoltzFacIAB " + logE*logBoltzFacIAB); nmrtrBoltzFacIAB = math.exp(logBoltzFacIAB) nmrtrLogMolSahaFac = (3.0 / 2.0) * (log2pi + nmrtrLogMuAB + Useful.logK() - 2.0 * Useful.logH()) #//System.out.println("nmrtrLogMolSahaFac " + logE*nmrtrLogMolSahaFac); #//System.out.println("nmrtrDissE " + nmrtrDissE + " logDissE " + logE*logDissE + " logBoltzFacIAB " + logE*logBoltzFacIAB + " nmrtrBoltzFacIAB " + nmrtrBoltzFacIAB + " nmrtrLogMuAB " + logE*nmrtrLogMuAB + " nmrtrLogMolSahaFac " + logE*nmrtrLogMolSahaFac); boltzFacIAB = [0.0 for i in range(numMolsB)] logMolSahaFac = [0.0 for i in range(numMolsB)] #//if (numMolsB > 0){ for iMol in range(numMolsB): logDissE = math.log(dissEArr[iMol]) + Useful.logEv() logBoltzFacIAB = logDissE - Useful.logK() boltzFacIAB[iMol] = math.exp(logBoltzFacIAB) logMolSahaFac[iMol] = (3.0 / 2.0) * (log2pi + logMuABArr[iMol] + Useful.logK() - 2.0 * Useful.logH()) #//System.out.println("logMolSahaFac[iMol] " + logE*logMolSahaFac[iMol]); #//System.out.println("iMol " + iMol + " dissEArr[iMol] " + dissEArr[iMol] + " logDissE " + logE*logDissE + " logBoltzFacIAB " + logE*logBoltzFacIAB + " boltzFacIAB[iMol] " + boltzFacIAB[iMol] + " logMuABArr " + logE*logMuABArr[iMol] + " logMolSahaFac " + logE*logMolSahaFac[iMol]); #//double[] logNums = new double[numDeps] #//} #// For molecular species: #double nmrtrSaha, nmrtrLogSahaMol, nmrtrLogInvSahaMol; //, nmrtrInvSahaMol; logMolFrac = [0.0 for i in range(numDeps)] logSahaMol = [0.0 for i in range(numMolsB)] invSahaMol = [0.0 for i in range(numMolsB)] #JB# currentUwAArr=list(logUwA)#u(T) determined values UwAFit = ToolBox.cubicFit(masterTemp, currentUwAArr)#u(T) fit nmrtrLogUwBArr=list(nmrtrLogUwB)#u(T) determined values nmrtrLogUwBFit = ToolBox.cubicFit(masterTemp, nmrtrLogUwBArr)#u(T) fit #uwa.append(UwAFit) #uwb.append(nmrtrLogUwBFit) uwbFits=[] qwabFit = [] for iMol in range(numMolsB): currentUwBArr=list(logUwB[iMol]) UwBFit = ToolBox.cubicFit(masterTemp, currentUwBArr) uwbFits.append(UwBFit) currentLogQwABArr=list(logQwABArr[iMol])#u(T) determined values QwABFit = ToolBox.cubicFit(masterTemp, currentLogQwABArr)#u(T) fit qwabFit.append(QwABFit) #nmrtrQwABArr=list(nmrtrLogQwAB)#u(T) determined values #nmrtrQwABFit = ToolBox.cubicFit(masterTemp, nmrtrQwABArr)#u(T) fit #for Mols in range(numMolsB): # currentLogUwBArr=list(logUwB[Mols])#u(T) determined values # UwBFit=cubicFit(masterTemp,currentLogUwBArr)#u(T) fit #JB# #// temps=[] #valb=[] #vala=[] #valnb=[] #valqab=[] #valnmrtrqwb=[] #// System.out.println("molPops: id nmrtrLogNumB logNumBArr[0] logGroundRatio"); for id in range(numDeps): #//System.out.format("%03d, %21.15f, %21.15f, %21.15f, %n", id, logE*nmrtrLogNumB[id], logE*logNumB[0][id], logE*logGroundRatio[id]); #//// reduce or enhance number density by over-all Rosseland opcity 
scale parameter #//Determine temparature dependent partition functions Uw: thisTemp = temp[0][id] temps.append(thisTemp) #Ttheta = 5040.0 / thisTemp """ if (Ttheta >= 1.0): thisLogUwA = logUwA[0] nmrtrThisLogUwB = nmrtrLogUwB[0] for iMol in range(numMolsB): thisLogUwB[iMol] = logUwB[iMol][0] if (Ttheta <= 0.5): thisLogUwA = logUwA[1] nmrtrThisLogUwB = nmrtrLogUwB[1] for iMol in range(numMolsB): thisLogUwB[iMol] = logUwB[iMol][1] if (Ttheta > 0.5 and Ttheta < 1.0): thisLogUwA = ( logUwA[1] * ((Ttheta - 0.5)/(1.0 - 0.5)) ) \ + ( logUwA[0] * ((1.0 - Ttheta)/(1.0 - 0.5)) ) nmrtrThisLogUwB = ( nmrtrLogUwB[1] * ((Ttheta - 0.5)/(1.0 - 0.5)) ) \ + ( nmrtrLogUwB[0] * ((1.0 - Ttheta)/(1.0 - 0.5)) ) for iMol in range(numMolsB): thisLogUwB[iMol] = ( logUwB[iMol][1] * ((Ttheta - 0.5)/(1.0 - 0.5)) ) \ + ( logUwB[iMol][0] * ((1.0 - Ttheta)/(1.0 - 0.5)) ) """ #JB# thisLogUwA = float(ToolBox.valueFromFit(UwAFit,thisTemp))#u(T) value extrapolated #vala.append(thisLogUwA) nmrtrThisLogUwB = float(ToolBox.valueFromFit(nmrtrLogUwBFit,thisTemp))#u(T) value extrapolated #valnb.append(nmrtrThisLogUwB) #for iMol in range(numMolsB): # thisLogUwB[iMol]=logUwB[iMol] for iMol in range(numMolsB): thisLogUwB[iMol] = ToolBox.valueFromFit(uwbFits[iMol],thisTemp)#u(T) value extrapolated #valb.append(thisLogUwB[iMol]) #// NEW Determine temperature dependent partition functions Uw: lburns thisTemp = temp[0][id] if (thisTemp <= 130.0): thisLogUwA = logUwA[0] nmrtrThisLogUwB = nmrtrLogUwB[0] for iMol in range(numMolsB): thisLogUwB[iMol] = logUwB[iMol][0] if (thisTemp >= 10000.0): thisLogUwA = logUwA[4] nmrtrThisLogUwB = nmrtrLogUwB[4] for iMol in range(numMolsB): thisLogUwB[iMol] = logUwB[iMol][4] """ if (thisTemp > 130 and thisTemp <= 500): thisLogUwA = logUwA[1] * (thisTemp - 130)/(500 - 130) \ + logUwA[0] * (500 - thisTemp)/(500 - 130) nmrtrThisLogUwB = nmrtrLogUwB[1] * (thisTemp - 130)/(500 - 130) \ + nmrtrLogUwB[0] * (500 - thisTemp)/(500 - 130) for iMol in range(numMolsB): thisLogUwB[iMol] = logUwB[iMol][1] * (thisTemp - 130)/(500 - 130) \ + logUwB[iMol][0] * (500 - thisTemp)/(500 - 130) if (thisTemp > 500 and thisTemp <= 3000): thisLogUwA = logUwA[2] * (thisTemp - 500)/(3000 - 500) \ + logUwA[1] * (3000 - thisTemp)/(3000 - 500) nmrtrThisLogUwB = nmrtrLogUwB[2] * (thisTemp - 500)/(3000 - 500) \ + nmrtrLogUwB[1] * (3000 - thisTemp)/(3000 - 500) for iMol in range(numMolsB): thisLogUwB[iMol] = logUwB[iMol][2] * (thisTemp - 500)/(3000 - 500) \ + logUwB[iMol][1] * (3000 - thisTemp)/(3000 - 500) if (thisTemp > 3000 and thisTemp <= 8000): thisLogUwA = logUwA[3] * (thisTemp - 3000)/(8000 - 3000) \ + logUwA[2] * (8000 - thisTemp)/(8000 - 3000) nmrtrThisLogUwB = nmrtrLogUwB[3] * (thisTemp - 3000)/(8000 - 3000) \ + nmrtrLogUwB[2] * (8000 - thisTemp)/(8000 - 3000) for iMol in range(numMolsB): thisLogUwB[iMol] = logUwB[iMol][3] * (thisTemp - 3000)/(8000 - 3000) \ + logUwB[iMol][2] * (8000 - thisTemp)/(8000 - 3000) if (thisTemp > 8000 and thisTemp < 10000): thisLogUwA = logUwA[4] * (thisTemp - 8000)/(10000 - 8000) \ + logUwA[3] * (10000 - thisTemp)/(10000 - 8000) nmrtrThisLogUwB = nmrtrLogUwB[4] * (thisTemp - 8000)/(10000 - 8000) \ + nmrtrLogUwB[3] * (10000 - thisTemp)/(10000 - 8000) for iMol in range(numMolsB): thisLogUwB[iMol] = logUwB[iMol][4] * (thisTemp - 8000)/(10000 - 8000) \ + logUwB[iMol][3] * (10000 - thisTemp)/(10000 - 8000) if (thisTemp >= 10000): thisLogUwA = logUwA[4] nmrtrThisLogUwB = nmrtrLogUwB[4] for iMol in range(numMolsB): thisLogUwB[iMol] = logUwB[iMol][4] """ #iMol loops for Q's for iMol in range(numMolsB): if 
(thisTemp < 3000.0): thisLogQwAB = ( logQwABArr[iMol][1] * (3000.0 - thisTemp)/(3000.0 - 500.0) ) \ + ( logQwABArr[iMol][2] * (thisTemp - 500.0)/(3000.0 - 500.0) ) if ( (thisTemp >= 3000.0) and (thisTemp <= 8000.0) ): thisLogQwAB = ( logQwABArr[iMol][2] * (8000.0 - thisTemp)/(8000.0 - 3000.0) ) \ + ( logQwABArr[iMol][3] * (thisTemp - 3000.0)/(8000.0 - 3000.0) ) if ( thisTemp > 8000.0 ): thisLogQwAB = ( logQwABArr[iMol][3] * (10000.0 - thisTemp)/(10000.0 - 8000.0) ) \ + ( logQwABArr[iMol][4] * (thisTemp - 8000.0)/(10000.0 - 8000.0) ) if (thisTemp < 3000.0): nmrtrThisLogQwAB = ( nmrtrLogQwAB[1] * (3000.0 - thisTemp)/(3000.0 - 500.0) ) \ + ( nmrtrLogQwAB[2] * (thisTemp - 500.0)/(3000.0 - 500.0) ) if ( (thisTemp >= 3000.0) and (thisTemp <= 8000.0) ): nmrtrThisLogQwAB = ( nmrtrLogQwAB[2] * (8000.0 - thisTemp)/(8000.0 - 3000.0) ) \ + ( nmrtrLogQwAB[3] * (thisTemp - 3000.0)/(8000.0 - 3000.0) ) if ( thisTemp > 8000.0 ): nmrtrThisLogQwAB = ( nmrtrLogQwAB[3] * (10000.0 - thisTemp)/(10000.0 - 8000.0) ) \ + ( nmrtrLogQwAB[4] * (thisTemp - 8000.0)/(10000.0 - 8000.0) ) #//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A #// for molecule formation: # //Ionization stage Saha factors: #//System.out.println("id " + id + " nmrtrLogNumB[id] " + logE*nmrtrLogNumB[id]); # // if (id == 16){ # // System.out.println("id " + id + " nmrtrLogNumB[id] " + logE*nmrtrLogNumB[id] + " pp nmrtB " + (logE*(nmrtrLogNumB[id]+temp[1][id]+Useful.logK())) + " nmrtrThisLogUwB " + logE*nmrtrThisLogUwB + " thisLogUwA " + logE*thisLogUwA + " nmrtrLogQwAB " + logE*nmrtrThisLogQwAB); # //System.out.println("nmrtrThisLogUwB " + logE*nmrtrThisLogUwB + " thisLogUwA " + logE*thisLogUwA + " nmrtrThisLogQwAB " + logE*nmrtrThisLogQwAB); # // } nmrtrLogSahaMol = nmrtrLogMolSahaFac - nmrtrLogNumB[id] - (nmrtrBoltzFacIAB / temp[0][id]) + (3.0 * temp[1][id] / 2.0) + nmrtrThisLogUwB + thisLogUwA - nmrtrThisLogQwAB nmrtrLogInvSahaMol = -1.0 * nmrtrLogSahaMol #//System.out.println("nmrtrLogInvSahaMol " + logE*nmrtrLogInvSahaMol); #//nmrtrInvSahaMol = Math.exp(nmrtrLogSahaMol); #// if (id == 16){ #// System.out.println("nmrtrLogInvSahaMol " + logE*nmrtrLogInvSahaMol); #// } #// if (id == 16){ #// System.out.println("nmrtrBoltzFacIAB " + nmrtrBoltzFacIAB + " nmrtrThisLogUwB " + logE*nmrtrThisLogUwB + " thisLogUwA " + logE*thisLogUwA + " nmrtrThisLogQwAB " + nmrtrThisLogQwAB); #// System.out.println("nmrtrLogSahaMol " + logE*nmrtrLogSahaMol); // + " nmrtrInvSahaMol " + nmrtrInvSahaMol); #// } #//Molecular Saha factors: for iMol in range(numMolsB): #//System.out.println("iMol " + iMol + " id " + id + " logNumB[iMol][id] " + logE*nmrtrLogNumB[id]); #//System.out.println("iMol " + iMol + " thisLogUwB[iMol] " + logE*thisLogUwB[iMol] + " thisLogUwA " + logE*thisLogUwA + " thisLogQwAB " + logE*thisLogQwAB); logSahaMol[iMol] = logMolSahaFac[iMol] - logNumB[iMol][id] - (boltzFacIAB[iMol] / temp[0][id]) + (3.0 * temp[1][id] / 2.0) + float(thisLogUwB[iMol]) + thisLogUwA - thisLogQwAB #//For denominator of ionization fraction, we need *inverse* molecular Saha factors (N_AB/NI): logSahaMol[iMol] = -1.0 * logSahaMol[iMol] invSahaMol[iMol] = math.exp(logSahaMol[iMol]) #//TEST invSahaMol[iMol] = 1.0e-99; //test #// if (id == 16){ #// System.out.println("iMol " + iMol + " boltzFacIAB[iMol] " + boltzFacIAB[iMol] + " thisLogUwB[iMol] " + logE*thisLogUwB[iMol] + " logQwAB[iMol] " + logE*thisLogQwAB + " logNumB[iMol][id] " + logE*logNumB[iMol][id] + " logMolSahaFac[iMol] " + logE*logMolSahaFac[iMol]); #// 
System.out.println("iMol " + iMol + " logSahaMol " + logE*logSahaMol[iMol] + " invSahaMol[iMol] " + invSahaMol[iMol]); #// } #//Compute log of denominator is ionization fraction, f_stage # //default initialization # // - ratio of total atomic particles in all ionization stages to number in ground state: denominator = math.exp(logGroundRatio[id]) #//default initialization - ratio of total atomic particles in all ionization stages to number in ground state #//molecular contribution for iMol in range(numMolsB): #// if (id == 16){ #// System.out.println("invSahaMol[iMol] " + invSahaMol[iMol] + " denominator " + denominator); #// } denominator = denominator + invSahaMol[iMol] #// logDenominator = math.log(denominator) #//System.out.println("logGroundRatio[id] " + logE*logGroundRatio[id] + " logDenominator " + logE*logDenominator); #// if (id == 16){ #// System.out.println("id " + id + " logGroundRatio " + logGroundRatio[id] + " logDenominator " + logDenominator); #// } #//if (id == 36){ #// System.out.println("logDenominator " + logE*logDenominator); #// } #//var logDenominator = Math.log( 1.0 + saha21 + (saha32 * saha21) + (saha43 * saha32 * saha21) + (saha54 * saha43 * saha32 * saha21) ); logMolFrac[id] = nmrtrLogInvSahaMol - logDenominator #// if (id == 16){ #// System.out.println("id " + id + " logMolFrac[id] " + logE*logMolFrac[id]); #// } #//logNums[id] = logNum[id] + logMolFrac; #} //id loop #JB - check (never used)# #print(uwa) #print(uwb) #title("logUwA") """ plot(temps,vala) tempT=[] for t in masterTemp: tempT.append(valueFromFit(UwAFit,t)) scatter(masterTemp,(tempT)) show() #title("nmrtrlogUwB") plot(temps,valnb) tempT=[] for t in masterTemp: tempT.append(valueFromFit(nmrtrLogUwBFit,t)) scatter(masterTemp,(tempT)) show() #title("logUwB") plot(temps,valb) tempT=[] for t in masterTemp: tempT.append(valueFromFit(UwBFit,t)) scatter(masterTemp,(tempT)) show() #title("logQwAB") plot(temps,valqab) tempT=[] for t in masterTemp: tempT.append(valueFromFit(QwABFit,t)) scatter(masterTemp,(tempT)) show() #title("nmrtrlogQwAB") plot(temps,valnmrtrqwb) tempT=[] for t in masterTemp: tempT.append(valueFromFit(nmrtrQwABFit,t)) scatter(masterTemp,(tempT)) show() """ #JB# return logMolFrac #//end method stagePops
mit
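The stagePops routine above obtains its temperature-dependent partition functions two ways: the active path evaluates fitted U(T) curves with ToolBox.valueFromFit, while the commented-out fallback interpolates log U_w linearly between tabulated values at 130, 500, 3000, 8000 and 10000 K and clamps to the end values outside that range. Below is a minimal sketch of that piecewise-linear fallback using numpy.interp; the node temperatures and the five-point table layout come from the code above, but the sample table values are made up for illustration.

import numpy as np

# Tabulation temperatures used by the clamped branches above (K).
T_NODES = np.array([130.0, 500.0, 3000.0, 8000.0, 10000.0])

def interp_log_uw(log_uw_table, temp):
    # np.interp is piecewise linear and clamps to the first/last table value
    # outside the node range, matching the explicit thisTemp <= 130 /
    # thisTemp >= 10000 branches in the routine above.
    return np.interp(temp, T_NODES, log_uw_table)

# Illustrative (made-up) five-point log U_w table.
log_uw_a = np.array([0.00, 0.05, 0.30, 0.80, 1.10])

for t in (100.0, 1500.0, 5000.0, 12000.0):
    print(t, interp_log_uw(log_uw_a, t))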
joshloyal/scikit-learn
benchmarks/bench_plot_lasso_path.py
84
4005
"""Benchmarks of Lasso regularization path computation using Lars and CD The input data is mostly low rank but is a fat infinite tail. """ from __future__ import print_function from collections import defaultdict import gc import sys from time import time import numpy as np from sklearn.linear_model import lars_path from sklearn.linear_model import lasso_path from sklearn.datasets.samples_generator import make_regression def compute_bench(samples_range, features_range): it = 0 results = defaultdict(lambda: []) max_it = len(samples_range) * len(features_range) for n_samples in samples_range: for n_features in features_range: it += 1 print('====================') print('Iteration %03d of %03d' % (it, max_it)) print('====================') dataset_kwargs = { 'n_samples': n_samples, 'n_features': n_features, 'n_informative': n_features / 10, 'effective_rank': min(n_samples, n_features) / 10, #'effective_rank': None, 'bias': 0.0, } print("n_samples: %d" % n_samples) print("n_features: %d" % n_features) X, y = make_regression(**dataset_kwargs) gc.collect() print("benchmarking lars_path (with Gram):", end='') sys.stdout.flush() tstart = time() G = np.dot(X.T, X) # precomputed Gram matrix Xy = np.dot(X.T, y) lars_path(X, y, Xy=Xy, Gram=G, method='lasso') delta = time() - tstart print("%0.3fs" % delta) results['lars_path (with Gram)'].append(delta) gc.collect() print("benchmarking lars_path (without Gram):", end='') sys.stdout.flush() tstart = time() lars_path(X, y, method='lasso') delta = time() - tstart print("%0.3fs" % delta) results['lars_path (without Gram)'].append(delta) gc.collect() print("benchmarking lasso_path (with Gram):", end='') sys.stdout.flush() tstart = time() lasso_path(X, y, precompute=True) delta = time() - tstart print("%0.3fs" % delta) results['lasso_path (with Gram)'].append(delta) gc.collect() print("benchmarking lasso_path (without Gram):", end='') sys.stdout.flush() tstart = time() lasso_path(X, y, precompute=False) delta = time() - tstart print("%0.3fs" % delta) results['lasso_path (without Gram)'].append(delta) return results if __name__ == '__main__': from mpl_toolkits.mplot3d import axes3d # register the 3d projection import matplotlib.pyplot as plt samples_range = np.linspace(10, 2000, 5).astype(np.int) features_range = np.linspace(10, 2000, 5).astype(np.int) results = compute_bench(samples_range, features_range) max_time = max(max(t) for t in results.values()) fig = plt.figure('scikit-learn Lasso path benchmark results') i = 1 for c, (label, timings) in zip('bcry', sorted(results.items())): ax = fig.add_subplot(2, 2, i, projection='3d') X, Y = np.meshgrid(samples_range, features_range) Z = np.asarray(timings).reshape(samples_range.shape[0], features_range.shape[0]) # plot the actual surface ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8) # dummy point plot to stick the legend to since surface plot do not # support legends (yet?) # ax.plot([1], [1], [1], color=c, label=label) ax.set_xlabel('n_samples') ax.set_ylabel('n_features') ax.set_zlabel('Time (s)') ax.set_zlim3d(0.0, max_time * 1.1) ax.set_title(label) # ax.legend() i += 1 plt.show()
bsd-3-clause
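The benchmark above follows the same pattern for every solver: force a garbage collection, start the clock, run the path computation once, and record the elapsed wall time. Here is a self-contained sketch of that timing pattern; the timed workload is a stand-in (a Gram-matrix product like the np.dot(X.T, X) precomputation in the benchmark), not one of the lasso solvers themselves.

import gc
import time

import numpy as np

def time_call(fn, *args, **kwargs):
    # Collect garbage first so the GC does not fire inside the timed region.
    gc.collect()
    tstart = time.time()
    fn(*args, **kwargs)
    return time.time() - tstart

X = np.random.rand(500, 200)
delta = time_call(np.dot, X.T, X)
print("%0.3fs" % delta)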
kyoren/https-github.com-h2oai-h2o-3
py2/h2o_gbm.py
30
16328
import re, random, math import h2o_args import h2o_nodes import h2o_cmd from h2o_test import verboseprint, dump_json, check_sandbox_for_errors def plotLists(xList, xLabel=None, eListTitle=None, eList=None, eLabel=None, fListTitle=None, fList=None, fLabel=None, server=False): if h2o_args.python_username!='kevin': return # Force matplotlib to not use any Xwindows backend. if server: import matplotlib matplotlib.use('Agg') import pylab as plt print "xList", xList print "eList", eList print "fList", fList font = {'family' : 'normal', 'weight' : 'normal', 'size' : 26} ### plt.rc('font', **font) plt.rcdefaults() if eList: if eListTitle: plt.title(eListTitle) plt.figure() plt.plot (xList, eList) plt.xlabel(xLabel) plt.ylabel(eLabel) plt.draw() plt.savefig('eplot.jpg',format='jpg') # Image.open('testplot.jpg').save('eplot.jpg','JPEG') if fList: if fListTitle: plt.title(fListTitle) plt.figure() plt.plot (xList, fList) plt.xlabel(xLabel) plt.ylabel(fLabel) plt.draw() plt.savefig('fplot.jpg',format='jpg') # Image.open('fplot.jpg').save('fplot.jpg','JPEG') if eList or fList: plt.show() # pretty print a cm that the C def pp_cm(jcm, header=None): # header = jcm['header'] # hack col index header for now..where do we get it? header = ['"%s"'%i for i in range(len(jcm[0]))] # cm = ' '.join(header) cm = '{0:<8}'.format('') for h in header: cm = '{0}|{1:<8}'.format(cm, h) cm = '{0}|{1:<8}'.format(cm, 'error') c = 0 for line in jcm: lineSum = sum(line) if c < 0 or c >= len(line): raise Exception("Error in h2o_gbm.pp_cm. c: %s line: %s len(line): %s jcm: %s" % (c, line, len(line), dump_json(jcm))) print "c:", c, "line:", line errorSum = lineSum - line[c] if (lineSum>0): err = float(errorSum) / lineSum else: err = 0.0 fl = '{0:<8}'.format(header[c]) for num in line: fl = '{0}|{1:<8}'.format(fl, num) fl = '{0}|{1:<8.2f}'.format(fl, err) cm = "{0}\n{1}".format(cm, fl) c += 1 return cm def pp_cm_summary(cm): # hack cut and past for now (should be in h2o_gbm.py? scoresList = cm totalScores = 0 totalRight = 0 # individual scores can be all 0 if nothing for that output class # due to sampling classErrorPctList = [] predictedClassDict = {} # may be missing some? so need a dict? for classIndex,s in enumerate(scoresList): classSum = sum(s) if classSum == 0 : # why would the number of scores for a class be 0? # in any case, tolerate. (it shows up in test.py on poker100) print "classIndex:", classIndex, "classSum", classSum, "<- why 0?" else: if classIndex >= len(s): print "Why is classindex:", classIndex, 'for s:"', s else: # H2O should really give me this since it's in the browser, but it doesn't classRightPct = ((s[classIndex] + 0.0)/classSum) * 100 totalRight += s[classIndex] classErrorPct = 100 - classRightPct classErrorPctList.append(classErrorPct) ### print "s:", s, "classIndex:", classIndex print "class:", classIndex, "classSum", classSum, "classErrorPct:", "%4.2f" % classErrorPct # gather info for prediction summary for pIndex,p in enumerate(s): if pIndex not in predictedClassDict: predictedClassDict[pIndex] = p else: predictedClassDict[pIndex] += p totalScores += classSum print "Predicted summary:" # FIX! Not sure why we weren't working with a list..hack with dict for now for predictedClass,p in predictedClassDict.items(): print str(predictedClass)+":", p # this should equal the num rows in the dataset if full scoring? 
(minus any NAs) print "totalScores:", totalScores print "totalRight:", totalRight if totalScores != 0: pctRight = 100.0 * totalRight/totalScores else: pctRight = 0.0 print "pctRight:", "%5.2f" % pctRight pctWrong = 100 - pctRight print "pctWrong:", "%5.2f" % pctWrong return pctWrong # I just copied and changed GBM to GBM. Have to update to match GBM params and responses def pickRandGbmParams(paramDict, params): colX = 0 randomGroupSize = random.randint(1,len(paramDict)) for i in range(randomGroupSize): randomKey = random.choice(paramDict.keys()) randomV = paramDict[randomKey] randomValue = random.choice(randomV) params[randomKey] = randomValue # compare this glm to last one. since the files are concatenations, # the results should be similar? 10% of first is allowed delta def compareToFirstGbm(self, key, glm, firstglm): # if isinstance(firstglm[key], list): # in case it's not a list allready (err is a list) verboseprint("compareToFirstGbm key:", key) verboseprint("compareToFirstGbm glm[key]:", glm[key]) # key could be a list or not. if a list, don't want to create list of that list # so use extend on an empty list. covers all cases? if type(glm[key]) is list: kList = glm[key] firstkList = firstglm[key] elif type(glm[key]) is dict: raise Exception("compareToFirstGLm: Not expecting dict for " + key) else: kList = [glm[key]] firstkList = [firstglm[key]] for k, firstk in zip(kList, firstkList): # delta must be a positive number ? delta = .1 * abs(float(firstk)) msg = "Too large a delta (" + str(delta) + ") comparing current and first for: " + key self.assertAlmostEqual(float(k), float(firstk), delta=delta, msg=msg) self.assertGreaterEqual(abs(float(k)), 0.0, str(k) + " abs not >= 0.0 in current") def goodXFromColumnInfo(y, num_cols=None, missingValuesDict=None, constantValuesDict=None, enumSizeDict=None, colTypeDict=None, colNameDict=None, keepPattern=None, key=None, timeoutSecs=120, forRF=False, noPrint=False): y = str(y) # if we pass a key, means we want to get the info ourselves here if key is not None: (missingValuesDict, constantValuesDict, enumSizeDict, colTypeDict, colNameDict) = \ h2o_cmd.columnInfoFromInspect(key, exceptionOnMissingValues=False, max_column_display=99999999, timeoutSecs=timeoutSecs) num_cols = len(colNameDict) # now remove any whose names don't match the required keepPattern if keepPattern is not None: keepX = re.compile(keepPattern) else: keepX = None x = range(num_cols) # need to walk over a copy, cause we change x xOrig = x[:] ignore_x = [] # for use by RF for k in xOrig: name = colNameDict[k] # remove it if it has the same name as the y output if str(k)== y: # if they pass the col index as y if not noPrint: print "Removing %d because name: %s matches output %s" % (k, str(k), y) x.remove(k) # rf doesn't want it in ignore list # ignore_x.append(k) elif name == y: # if they pass the name as y if not noPrint: print "Removing %d because name: %s matches output %s" % (k, name, y) x.remove(k) # rf doesn't want it in ignore list # ignore_x.append(k) elif keepX is not None and not keepX.match(name): if not noPrint: print "Removing %d because name: %s doesn't match desired keepPattern %s" % (k, name, keepPattern) x.remove(k) ignore_x.append(k) # missing values reports as constant also. so do missing first. 
# remove all cols with missing values # could change it against num_rows for a ratio elif k in missingValuesDict: value = missingValuesDict[k] if not noPrint: print "Removing %d with name: %s because it has %d missing values" % (k, name, value) x.remove(k) ignore_x.append(k) elif k in constantValuesDict: value = constantValuesDict[k] if not noPrint: print "Removing %d with name: %s because it has constant value: %s " % (k, name, str(value)) x.remove(k) ignore_x.append(k) # this is extra pruning.. # remove all cols with enums, if not already removed elif k in enumSizeDict: value = enumSizeDict[k] if not noPrint: print "Removing %d %s because it has enums of size: %d" % (k, name, value) x.remove(k) ignore_x.append(k) if not noPrint: print "x has", len(x), "cols" print "ignore_x has", len(ignore_x), "cols" x = ",".join(map(str,x)) ignore_x = ",".join(map(str,ignore_x)) if not noPrint: print "\nx:", x print "\nignore_x:", ignore_x if forRF: return ignore_x else: return x def showGBMGridResults(GBMResult, expectedErrorMax, classification=True): # print "GBMResult:", dump_json(GBMResult) jobs = GBMResult['jobs'] print "GBM jobs:", jobs for jobnum, j in enumerate(jobs): _distribution = j['_distribution'] model_key = j['destination_key'] job_key = j['job_key'] # inspect = h2o_cmd.runInspect(key=model_key) # print "jobnum:", jobnum, dump_json(inspect) gbmTrainView = h2o_cmd.runGBMView(model_key=model_key) print "jobnum:", jobnum, dump_json(gbmTrainView) if classification: cms = gbmTrainView['gbm_model']['cms'] cm = cms[-1]['_arr'] # take the last one print "GBM cms[-1]['_predErr']:", cms[-1]['_predErr'] print "GBM cms[-1]['_classErr']:", cms[-1]['_classErr'] pctWrongTrain = pp_cm_summary(cm); if pctWrongTrain > expectedErrorMax: raise Exception("Should have < %s error here. pctWrongTrain: %s" % (expectedErrorMax, pctWrongTrain)) errsLast = gbmTrainView['gbm_model']['errs'][-1] print "\nTrain", jobnum, job_key, "\n==========\n", "pctWrongTrain:", pctWrongTrain, "errsLast:", errsLast print "GBM 'errsLast'", errsLast print pp_cm(cm) else: print "\nTrain", jobnum, job_key, "\n==========\n", "errsLast:", errsLast print "GBMTrainView errs:", gbmTrainView['gbm_model']['errs'] def simpleCheckGBMView(node=None, gbmv=None, noPrint=False, **kwargs): if not node: node = h2o_nodes.nodes[0] if 'warnings' in gbmv: warnings = gbmv['warnings'] # catch the 'Failed to converge" for now for w in warnings: if not noPrint: print "\nwarning:", w if ('Failed' in w) or ('failed' in w): raise Exception(w) if 'cm' in gbmv: cm = gbmv['cm'] # only one else: if 'gbm_model' in gbmv: gbm_model = gbmv['gbm_model'] else: raise Exception("no gbm_model in gbmv? %s" % dump_json(gbmv)) cms = gbm_model['cms'] print "number of cms:", len(cms) print "FIX! need to add reporting of h2o's _perr per class error" # FIX! what if regression. is rf only classification? print "cms[-1]['_arr']:", cms[-1]['_arr'] print "cms[-1]['_predErr']:", cms[-1]['_predErr'] print "cms[-1]['_classErr']:", cms[-1]['_classErr'] ## print "cms[-1]:", dump_json(cms[-1]) ## for i,c in enumerate(cms): ## print "cm %s: %s" % (i, c['_arr']) cm = cms[-1]['_arr'] # take the last one scoresList = cm used_trees = gbm_model['N'] errs = gbm_model['errs'] print "errs[0]:", errs[0] print "errs[-1]:", errs[-1] print "errs:", errs # if we got the ntree for comparison. Not always there in kwargs though! param_ntrees = kwargs.get('ntrees',None) if (param_ntrees is not None and used_trees != param_ntrees): raise Exception("used_trees should == param_ntree. 
used_trees: %s" % used_trees) if (used_trees+1)!=len(cms) or (used_trees+1)!=len(errs): raise Exception("len(cms): %s and len(errs): %s should be one more than N %s trees" % (len(cms), len(errs), used_trees)) totalScores = 0 totalRight = 0 # individual scores can be all 0 if nothing for that output class # due to sampling classErrorPctList = [] predictedClassDict = {} # may be missing some? so need a dict? for classIndex,s in enumerate(scoresList): classSum = sum(s) if classSum == 0 : # why would the number of scores for a class be 0? does GBM CM have entries for non-existent classes # in a range??..in any case, tolerate. (it shows up in test.py on poker100) if not noPrint: print "class:", classIndex, "classSum", classSum, "<- why 0?" else: # H2O should really give me this since it's in the browser, but it doesn't classRightPct = ((s[classIndex] + 0.0)/classSum) * 100 totalRight += s[classIndex] classErrorPct = round(100 - classRightPct, 2) classErrorPctList.append(classErrorPct) ### print "s:", s, "classIndex:", classIndex if not noPrint: print "class:", classIndex, "classSum", classSum, "classErrorPct:", "%4.2f" % classErrorPct # gather info for prediction summary for pIndex,p in enumerate(s): if pIndex not in predictedClassDict: predictedClassDict[pIndex] = p else: predictedClassDict[pIndex] += p totalScores += classSum #**************************** if not noPrint: print "Predicted summary:" # FIX! Not sure why we weren't working with a list..hack with dict for now for predictedClass,p in predictedClassDict.items(): print str(predictedClass)+":", p # this should equal the num rows in the dataset if full scoring? (minus any NAs) print "totalScores:", totalScores print "totalRight:", totalRight if totalScores != 0: pctRight = 100.0 * totalRight/totalScores else: pctRight = 0.0 pctWrong = 100 - pctRight print "pctRight:", "%5.2f" % pctRight print "pctWrong:", "%5.2f" % pctWrong #**************************** # more testing for GBMView # it's legal to get 0's for oobe error # if sample_rate = 1 sample_rate = kwargs.get('sample_rate', None) validation = kwargs.get('validation', None) if (sample_rate==1 and not validation): pass elif (totalScores<=0 or totalScores>5e9): raise Exception("scores in GBMView seems wrong. scores:", scoresList) varimp = gbm_model['varimp'] treeStats = gbm_model['treeStats'] if not treeStats: raise Exception("treeStats not right?: %s" % dump_json(treeStats)) # print "json:", dump_json(gbmv) data_key = gbm_model['_dataKey'] model_key = gbm_model['_key'] classification_error = pctWrong if not noPrint: if 'minLeaves' not in treeStats or not treeStats['minLeaves']: raise Exception("treeStats seems to be missing minLeaves %s" % dump_json(treeStats)) print """ Leaves: {0} / {1} / {2} Depth: {3} / {4} / {5} Err: {6:0.2f} % """.format( treeStats['minLeaves'], treeStats['meanLeaves'], treeStats['maxLeaves'], treeStats['minDepth'], treeStats['meanDepth'], treeStats['maxDepth'], classification_error, ) ### modelInspect = node.inspect(model_key) dataInspect = h2o_cmd.runInspect(key=data_key) check_sandbox_for_errors() return (round(classification_error,2), classErrorPctList, totalScores)
apache-2.0
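pp_cm and pp_cm_summary above both reduce a square confusion matrix, whose rows are the true classes, to per-class and overall error rates. A stripped-down version of the overall percent-wrong computation follows, with a made-up 3-class matrix; the real helpers additionally guard against empty classes and print a per-class breakdown.

def cm_percent_wrong(cm):
    # Row i holds the prediction counts for true class i, so the diagonal
    # entry is the number of correct predictions for that class.
    total = sum(sum(row) for row in cm)
    right = sum(row[i] for i, row in enumerate(cm))
    if total == 0:
        return 0.0
    return 100.0 * (1.0 - float(right) / total)

cm = [[50, 2, 1],
      [4, 45, 3],
      [0, 5, 40]]
print("pctWrong: %5.2f" % cm_percent_wrong(cm))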
nigroup/pypet
pypet/tests/profiling/speed_analysis/storage_analysis/avg_runtima_as_function_of_length_plot_times.py
2
3376
__author__ = 'robert' from pypet import Environment, Trajectory from pypet.tests.testutils.ioutils import make_temp_dir, get_log_config import os import matplotlib.pyplot as plt import numpy as np import time import numpy as np import scipy.sparse as spsp from pycallgraph import PyCallGraph, Config, GlobbingFilter from pycallgraph.output import GraphvizOutput from pycallgraph.color import Color class CustomOutput(GraphvizOutput): def node_color(self, node): value = float(node.time.fraction) return Color.hsv(value / 2 + .5, value, 0.9) def edge_color(self, edge): value = float(edge.time.fraction) return Color.hsv(value / 2 + .5, value, 0.7) def job(traj): traj.f_ares('$set.$', 42, comment='A result') def get_runtime(length): filename = os.path.join('tmp', 'hdf5', 'many_runs.hdf5') with Environment(filename = filename, log_levels=20, report_progress=(0.0000002, 'progress', 50), overwrite_file=True, purge_duplicate_comments=False, log_stdout=False, summary_tables=False, small_overview_tables=False) as env: traj = env.v_traj traj.par.f_apar('x', 0, 'parameter') traj.f_explore({'x': range(length)}) max_run = 100 for idx in range(len(traj)): if idx > max_run: traj.f_get_run_information(idx, copy=False)['completed'] = 1 traj.f_store() if not os.path.isdir('./tmp'): os.mkdir('tmp') graphviz = CustomOutput() graphviz.output_file = './tmp/run_profile_storage_%d.png' % len(traj) service_filter = GlobbingFilter(include=['*storageservice.*']) config = Config(groups=True, verbose=True) config.trace_filter = service_filter print('RUN PROFILE') with PyCallGraph(config=config, output=graphviz): # start = time.time() # env.f_run(job) # end = time.time() for irun in range(100): traj._make_single_run(irun+len(traj)/2) # Measure start time traj._set_start() traj.f_ares('$set.$', 42, comment='A result') traj._set_finish() traj._store_final(store_data=2) traj._finalize_run() print('STARTING_to_PLOT') print('DONE RUN PROFILE') # dicts = [traj.f_get_run_information(x) for x in range(min(len(traj), max_run))] # total = end - start # return total/float(min(len(traj), max_run)), total/float(min(len(traj), max_run)) * len(traj) def main(): lengths = [1000, 1000000] runtimes = [get_runtime(x) for x in lengths] # avg_runtimes = [x[0] for x in runtimes] # summed_runtime = [x[1] for x in runtimes] # plt.subplot(2, 1, 1) # plt.semilogx(list(reversed(lengths)), list(reversed(avg_runtimes)), linewidth=2) # plt.xlabel('Runs') # plt.ylabel('t[s]') # plt.title('Average Runtime per single run') # plt.grid() # plt.subplot(2, 1, 2) # plt.loglog(lengths, summed_runtime, linewidth=2) # plt.grid() # plt.xlabel('Runs') # plt.ylabel('t[s]') # plt.title('Total runtime of experiment') # plt.savefig('avg_runtime_as_func_of_lenght_100') # plt.show() if __name__ == '__main__': main()
bsd-3-clause
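The profiling script above narrows the PyCallGraph trace to the storage service with a GlobbingFilter and renders the result through a GraphvizOutput subclass. Below is a trimmed sketch of that tracing pattern, reusing only the pycallgraph calls that already appear in the script (PyCallGraph, Config, GlobbingFilter, GraphvizOutput); the traced function is a placeholder, and running it assumes pycallgraph and Graphviz are installed.

from pycallgraph import PyCallGraph, Config, GlobbingFilter
from pycallgraph.output import GraphvizOutput

def work():
    # Placeholder for the storage-heavy code being profiled.
    return sum(i * i for i in range(10000))

graphviz = GraphvizOutput()
graphviz.output_file = 'profile.png'

# Only trace calls whose qualified name matches the pattern, mirroring
# the '*storageservice.*' filter used in the script above.
config = Config(groups=True, verbose=True)
config.trace_filter = GlobbingFilter(include=['__main__.*'])

with PyCallGraph(config=config, output=graphviz):
    work()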
zmlabe/IceVarFigs
Scripts/SeaIce/NSIDCseaice_quartiles.py
1
7079
""" Reads in current year's Arctic sea ice extent from Sea Ice Index 3 (NSIDC) Website : ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/north/daily/data/ Author : Zachary M. Labe Date : 5 September 2016 """ ### Import modules import numpy as np import urllib.request import urllib as UL import datetime import matplotlib.pyplot as plt ### Directory and time directoryfigure = './Figures/' now = datetime.datetime.now() currentmn = str(now.month) currentdy = str(now.day) currentyr = str(now.year) currenttime = currentmn + '_' + currentdy + '_' + currentyr currentdoy = now.timetuple().tm_yday ### Load url url = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/north/daily/data/' \ 'N_seaice_extent_daily_v3.0.csv' ### Read file raw_data = UL.request.urlopen(url) dataset = np.genfromtxt(raw_data, skip_header=2,delimiter=',', usecols=[0,1,2,3,4]) print('\nCompleted: Read sea ice data!') ### Set missing data to nan dataset[np.where(dataset==-9999)] = np.nan ### Variables year = dataset[:,0] month = dataset[:,1] day = dataset[:,2] ice = dataset[:,3] missing = dataset[:,4] ### Call present year yr2018 = np.where(year == 2018)[0] ice18 = ice[yr2018] ### Ice Conversion iceval = ice18 * 1e6 ### Printing info print('\n----- NSIDC Arctic Sea Ice -----') print('Current Date =', now.strftime("%Y-%m-%d %H:%M"), '\n') print('SIE Date = %s/%s/%s' % (int(month[-1]),int(day[-1]),int(year[-1]))) print('Current SIE = %s km^2 \n' % (iceval[-1])) print('1-day change SIE = %s km^2' % (iceval[-1]-iceval[-2])) print('7-day change SIE = %s km^2 \n' % (iceval[-1]-iceval[-8])) ########################################################################### ########################################################################### ########################################################################### ### Reads in 1981-2010 means ### Load url url2 = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/north/daily/data/' \ 'N_seaice_extent_climatology_1981-2010_v3.0.csv' ### Read file raw_data2 = UL.request.urlopen(url2) dataset2 = np.genfromtxt(raw_data2, skip_header=2,delimiter=',', usecols=[0,1,2,3,4,5,6,7]) ### Create variables doy = dataset2[:,0] meanice = dataset2[:,1] * 1e6 std = dataset2[:,2] ### Quartiles quartile10 = dataset2[:,3] quartile25 = dataset2[:,4] quartile50 = dataset2[:,5] quartile75 = dataset2[:,6] quartile90 = dataset2[:,7] ### Anomalies currentanom = iceval[-1]-meanice[currentdoy-2] ### Printing info print('Current anomaly = %s km^2 \n' % currentanom) ### Selected other years for comparisons yr2007 = np.where(year == 2007)[0] yr2012 = np.where(year == 2012)[0] yr2016 = np.where(year == 2016)[0] sie7 = ice[yr2007] sie12 = ice[yr2012] sie16 = ice[yr2016] ########################################################################### ########################################################################### ########################################################################### ### Create plot plt.rc('text',usetex=True) plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']}) plt.rc('savefig',facecolor='black') plt.rc('axes',edgecolor='white') plt.rc('xtick',color='white') plt.rc('ytick',color='white') plt.rc('axes',labelcolor='white') plt.rc('axes',facecolor='black') fig = plt.figure() ax = plt.subplot(111) xlabels = [r'Jan',r'Feb',r'Mar',r'Apr',r'May',r'Jun',r'Jul', r'Aug',r'Sep',r'Oct',r'Nov',r'Dec',r'Jan'] plt.xticks(np.arange(0,361,30.4),xlabels,rotation=0) ylabels = map(str,np.arange(2,19,2)) plt.yticks(np.arange(2,19,2),ylabels) plt.ylim([2,18]) plt.xlim([0,360]) strmonth = 
xlabels[int(currentmn)-1] asof = strmonth + ' ' + currentdy + ', ' + currentyr ### Adjust axes in time series plots def adjust_spines(ax, spines): for loc, spine in ax.spines.items(): if loc in spines: spine.set_position(('outward', 5)) else: spine.set_color('none') if 'left' in spines: ax.yaxis.set_ticks_position('left') else: ax.yaxis.set_ticks([]) if 'bottom' in spines: ax.xaxis.set_ticks_position('bottom') else: ax.xaxis.set_ticks([]) ax.tick_params('both',length=5.5,width=2,which='major') adjust_spines(ax, ['left','bottom']) ax.spines['top'].set_color('none') ax.spines['right'].set_color('none') ax.spines['bottom'].set_linewidth(2) ax.spines['left'].set_linewidth(2) upper2std = (meanice/1e6)+(std*2) lower2std = (meanice/1e6)-(std*2) ax.grid(zorder=1,color='w',alpha=0.2) plt.plot(ice18,linewidth=1.8,color='aqua',zorder=9,label=r'Current Year (2018)') plt.plot(doy,upper2std,color='white',alpha=0.7,zorder=3,linewidth=0.1) plt.plot(doy,lower2std,color='white',alpha=0.7,zorder=4,linewidth=0.1) plt.plot(doy,quartile10,color='m',alpha=0.7,zorder=3,linewidth=0.4) plt.plot(doy,quartile25,color='cornflowerblue',alpha=0.7,zorder=4,linewidth=0.4) plt.plot(doy,quartile75,color='cornflowerblue',alpha=0.7,zorder=4,linewidth=0.4) plt.plot(doy,quartile90,color='m',alpha=0.7,zorder=3,linewidth=0.4) ax.fill_between(doy, lower2std, upper2std, facecolor='white', alpha=0.35, label=r'$\pm$2 standard deviations',zorder=2) plt.plot(doy,quartile50,color='gold',alpha=1,zorder=3,linewidth=2, label=r'Median (1981-2010)') ax.fill_between(doy, quartile90, quartile75, facecolor='m', alpha=0.55, label=r'10-90th percentiles',zorder=2) ax.fill_between(doy, quartile10, quartile25, facecolor='m', alpha=0.55, zorder=2) ax.fill_between(doy, quartile25, quartile50, facecolor='cornflowerblue', alpha=0.6, zorder=2) ax.fill_between(doy, quartile50, quartile75, facecolor='cornflowerblue', alpha=0.6, label=r'25-75th percentiles',zorder=2) plt.scatter(doy[currentdoy-3],ice[-1],s=10,color='aqua',zorder=9) plt.ylabel(r'\textbf{Extent} [$\times$10$^{6}$ km$^2$]',fontsize=15, color='darkgrey') le = plt.legend(shadow=False,fontsize=6,loc='upper left', bbox_to_anchor=(0.473, 1.011),fancybox=True,ncol=2) for text in le.get_texts(): text.set_color('w') plt.title(r'\textbf{ARCTIC SEA ICE}', fontsize=21,color='darkgrey') plt.text(doy[currentdoy]-5,ice[-1]-1.35,r'\textbf{2018}', fontsize=13.5,rotation='horizontal',ha='left',color='aqua') plt.text(0.5,3.1,r'\textbf{DATA:} National Snow \& Ice Data Center, Boulder CO', fontsize=5.5,rotation='horizontal',ha='left',color='darkgrey') plt.text(0.5,2.6,r'\textbf{SOURCE:} ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/', fontsize=5.5,rotation='horizontal',ha='left',color='darkgrey') plt.text(0.5,2.1,r'\textbf{GRAPHIC:} Zachary Labe (@ZLabe)', fontsize=5.5,rotation='horizontal',ha='left',color='darkgrey') fig.subplots_adjust(top=0.91) ### Save figure plt.savefig(directoryfigure + 'nsidc_sie_quartiles_currentyear.png',dpi=300)
mit
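The sea-ice script computes the current anomaly by subtracting the 1981-2010 climatological mean extent at the matching day of year (currentanom = iceval[-1] - meanice[currentdoy-2]). The same lookup-and-subtract step is shown in isolation below with made-up numbers and a simplified zero-based index; the real script reads both series from the NSIDC FTP files.

import numpy as np

# Made-up daily climatology (km^2), indexed by day of year minus one,
# and a made-up current extent for day of year 250.
meanice = np.linspace(14.0e6, 5.0e6, 365)
current_doy = 250
current_extent = 4.6e6

anomaly = current_extent - meanice[current_doy - 1]
print("Current anomaly = %s km^2" % anomaly)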
csyhuang/hn2016_falwa
hn2016_falwa/beta_version.py
1
20465
def input_jk_output_index(j,k,kmax): return j*(kmax) + k def extrap1d(interpolator): xs = interpolator.x ys = interpolator.y def pointwise(x): if x < xs[0]: return ys[0]+(x-xs[0])*(ys[1]-ys[0])/(xs[1]-xs[0]) elif x > xs[-1]: return ys[-1]+(x-xs[-1])*(ys[-1]-ys[-2])/(xs[-1]-xs[-2]) else: return interpolator(x) def ufunclike(xs): from scipy import array return array(map(pointwise, array(xs))) return ufunclike def solve_uref_both_bc(tstamp, zmum, FAWA_cos, ylat, ephalf2, Delta_PT, zm_PT, Input_B0, Input_B1, use_real_Data=True, plot_all_ref_quan=False): """ Compute equivalent latitude and wave activity on a barotropic sphere. Parameters ---------- tstamp : string Time stamp of the snapshot of the field. znum : ndarray Zonal mean wind. FAWA_cos : ndarray Zonal mean finite-amplitude wave activity. ylat : sequence or array_like 1-d numpy array of latitude (in degree) with equal spacing in ascending order; dimension = nlat. ephalf2 : ndarray Epsilon in Nakamura and Solomon (2010). Delta_PT : ndarray \Delta \Theta in Nakamura and Solomon (2010); upper-boundary conditions. zm_PT : ndarray Zonal mean potential temperature. Input_B0 : sequence or array_like Zonal-mean surface wave activity for the lowest layer (k=0). Part of the lower-boundary condition. Input_B1 : sequence or array_like Zonal-mean surface wave activity for the second lowest layer (k=1). Part of the lower-boundary condition. use_real_Data : boolean Whether to use input data to compute the reference states. By detault True. If false, randomly generated arrays will be used. plot_all_ref_quan : boolean Whether to plot the solved reference states using matplotlib library. By default False. For debugging. Returns ------- u_MassCorr_regular_noslip : ndarray 2-d numpy array of mass correction \Delta u in NS10 with no-slip lower boundary conditions; dimension = (kmax,nlat). u_Ref_regular_noslip : ndarray 2-d numpy array of zonal wind reference state u_ref in NS10 with no-slip lower boundary conditions; dimension = (kmax,nlat). T_MassCorr_regular_noslip : ndarray 2-d numpy array of adjustment in reference temperature \Delta T in NS10 with no-slip lower boundary conditions; dimension = (kmax,nlat). T_Ref_regular_noslip : ndarray 2-d numpy array of adjustment in reference temperature T_ref in NS10 with no-slip lower boundary conditions; dimension = (kmax,nlat). u_MassCorr_regular_adiab : ndarray 2-d numpy array of mass correction \Delta u in NS10 with adiabatic lower boundary conditions; dimension = (kmax,nlat). u_Ref_regular_adiab : ndarray 2-d numpy array of zonal wind reference state u_ref in NS10 with adiabatic lower boundary conditions; dimension = (kmax,nlat). T_MassCorr_regular_adiab : ndarray 2-d numpy array of adjustment in reference temperature \Delta T in NS10 with adiabatic lower boundary conditions; dimension = (kmax,nlat). T_Ref_regular_adiab : ndarray 2-d numpy array of adjustment in reference temperature T_ref in NS10 with adiabatic lower boundary conditions; dimension = (kmax,nlat). """ # zm_PT = zonal mean potential temperature # Import necessary modules from math import pi, exp from scipy import interpolate from scipy.sparse import csc_matrix from scipy.sparse.linalg import spsolve from copy import copy import numpy as np import itertools if plot_all_ref_quan: import matplotlib.pyplot as plt # === Parameters (should be input externally. To be modified) === dz = 1000. # vertical z spacing (m) aa = 6378000. # planetary radius r0 = 287. # gas constant hh = 7000. # scale height cp = 1004. 
# specific heat rkappa = r0/cp om = 7.29e-5 # angular velocity of the earth # === These changes with input variables' dimensions === nlat = FAWA_cos.shape[-1] jmax1 = nlat//4 dm = 1./float(jmax1+1) # gaussian latitude spacing gl = np.array([(j+1)*dm for j in range(jmax1)]) # This is sin / mu gl_2 = np.array([j*dm for j in range(jmax1+2)]) # This is sin / mu cosl = np.sqrt(1.-gl**2) #cosl_2 = np.sqrt(1.-gl_2**2) alat = np.arcsin(gl)*180./pi alat_2 = np.arcsin(gl_2)*180./pi dmdz = (dm/dz) # **** Get from input these parameters **** kmax = FAWA_cos.shape[0] #height = np.array([i for i in range(kmax)]) # in [km] # **** Initialize Coefficients **** c_a = np.zeros((jmax1, kmax)) c_b = np.zeros((jmax1, kmax)) c_c = np.zeros((jmax1, kmax)) c_d = np.zeros((jmax1, kmax)) c_e = np.zeros((jmax1, kmax)) c_f = np.zeros((jmax1, kmax)) # --- Initialize interpolated variables --- zmu1 = np.zeros((jmax1, kmax)) cx1 = np.zeros((jmax1, kmax)) cor1 = np.zeros((jmax1, kmax)) ephalf = np.zeros((jmax1, kmax)) Delta_PT1 = np.zeros((jmax1+2)) zm_PT1 = np.zeros((jmax1, kmax)) Input_B0_1 = np.zeros((jmax1+2)) Input_B1_1 = np.zeros((jmax1+2)) # --- Define Epsilon as a function of y and z --- # **** Interpolate to gaussian latitude **** if use_real_Data: # print 'use_real_Data' for vv1,vvm in zip([zmu1,cx1,zm_PT1] , [zmum,FAWA_cos,zm_PT]): f_toGaussian = interpolate.interp1d(ylat[:],vvm[:,:].T,axis=0, kind='linear') #[jmax x kmax] vv1[:,:] = f_toGaussian(alat[:]) #vv1[:,:] = vvm[:,:] #vv1[-1,:] = vvm[:,-1] # --- Interpolation of ephalf --- f_ep_toGaussian = interpolate.interp1d(ylat[:],ephalf2[:,:].T,axis=0, kind='linear') #[jmax x kmax] ephalf[:,:] = f_ep_toGaussian(alat[:]) # --- Interpolation of Delta_PT --- #f_DT_toGaussian = extrap1d( interpolate.interp1d(ylat[:],Delta_PT[:], kind='linear') ) # This is txt in Noboru's code f_DT_toGaussian = interpolate.interp1d(ylat[:],Delta_PT[:], kind='linear',fill_value='extrapolate') Delta_PT1[:] = f_DT_toGaussian(alat_2[:]) # --- Interpolation of Input_B0_1 --- #f_B0_toGaussian = extrap1d( interpolate.interp1d(ylat[:],Input_B0[:], kind='linear') ) # This is txt in Noboru's code f_B0_toGaussian = interpolate.interp1d(ylat[:],Input_B0[:], kind='linear',fill_value='extrapolate') # This is txt in Noboru's code Input_B0_1[:] = f_B0_toGaussian(alat_2[:]) # --- Interpolation of Input_B1_1 --- # f_B1_toGaussian = extrap1d( interpolate.interp1d(ylat[:],Input_B1[:], kind='linear') ) # This is txt in Noboru's code f_B1_toGaussian = interpolate.interp1d(ylat[:],Input_B1[:], kind='linear',fill_value='extrapolate') # This is txt in Noboru's code Input_B1_1[:] = f_B1_toGaussian(alat_2[:]) else: # Use random matrix here just to test! zmu1 = np.random.rand(jmax1, kmax)+np.ones((jmax1, kmax))*1.e-8 cx1 = np.random.rand(jmax1, kmax)+np.ones((jmax1, kmax))*1.e-8 #cor1 = np.random.rand(jmax1, kmax)+np.ones((jmax1, kmax))*1.e-8 # --- Added on Aug 1, 2016 --- cor1 = 2.*om*gl[:,np.newaxis] * np.ones((jmax1, kmax)) #cor1[0] = cor1[1]*0.5 # OLD: qxx0 = -cx1*cosl[:,np.newaxis]/cor1 #qxx0 = np.empty((jmax1, kmax)) qxx0 = -cx1/cor1 # Input of LWA has cosine. c_f[0,:] = qxx0[1,:] - 2*qxx0[0,:] c_f[-1,:] = qxx0[-2,:] - 2*qxx0[-1,:] c_f[1:-1,:] = qxx0[:-2,:] + qxx0[2:,:] - 2*qxx0[1:-1,:] #c_f[:,0] = 0.0 # --- Aug 9: Lower Adiabatic boundary conditions --- Input_dB0 = np.zeros((jmax1)) Input_dB1 = np.zeros((jmax1)) uz1 = np.zeros((jmax1)) # prefac = - r0 * cosl[1:-1]**2 * dz / (cor1[1:-1,-2]**2 * aa**2 * hh * dm**2) * exp(-rkappa*(kmax-2.)/7.) 
# OLD: Input_dB0[:] = Input_B0_1[:-2]*cosl_2[:-2] + Input_B0_1[2:]*cosl_2[2:] - 2*Input_B0_1[1:-1]*cosl_2[1:-1] Input_dB0[:] = Input_B0_1[:-2] + Input_B0_1[2:] - 2*Input_B0_1[1:-1] # OLD: Input_dB1[:] = Input_B1_1[:-2]*cosl_2[:-2] + Input_B1_1[2:]*cosl_2[2:] - 2*Input_B1_1[1:-1]*cosl_2[1:-1] Input_dB1[:] = Input_B1_1[:-2] + Input_B1_1[2:] - 2*Input_B1_1[1:-1] # This is supposed to be correct but gave weird results. uz1[:] = - r0 * cosl[:]**2 * Input_dB1[:] * 2*dz / (cor1[:,1]**2 * aa**2 * hh * dm**2) * exp(-rkappa*(1.)/7.) \ - r0 * cosl[:]**2 * Input_dB0[:] * 2*dz / (cor1[:,0]**2 * aa**2 * hh * dm**2) * exp(-rkappa*(0.)/7.) # **** Upper Boundary Condition (Come back later) **** uz2 = np.zeros((jmax1)) dDelta_PT1 = (Delta_PT1[2:]-Delta_PT1[:-2]) # Numerical trick: Replace uz2[1] with an extrapolated value # Original correct one: # uz2[1:-1] = - r0 * cosl[1:-1]**2 * exp(-rkappa*(kmax-2.)/7.) * dDelta_PT1 / (cor1[1:-1,-2]**2 * aa * hh * dmdz) uz2[:] = - r0 * cosl[:]**2 * exp(-rkappa*(kmax-2.)/7.) * dDelta_PT1 / (cor1[:,-2]**2 * aa * hh * dmdz) # **** Initialize the coefficients a,b,c,d,e,f **** c_a[:,:] = 1.0 c_b[:,:] = 1.0 c_c[:,1:-1] = dmdz**2 *ephalf[:,1:-1]*exp(-dz/(2*hh)) # This one should be correct c_d[:,1:-1] = dmdz**2 *ephalf[:,0:-2]*exp(dz/(2*hh)) # Check convention of ephalf c_e[:,1:-1] = -(c_a[:,1:-1]+c_b[:,1:-1]+c_c[:,1:-1]+c_d[:,1:-1]) b = np.zeros((jmax1*kmax)) row_index=[] col_index=[] coeff = [] jrange = range(jmax1) krange = range(1,kmax-1) for j, k in itertools.product(jrange, krange): # for j in range(jmax1): # for k in range(1,kmax-1): ind = input_jk_output_index(j,k,kmax) b[ind] = c_f[j,k] if (j<jmax1-1): # A[ind,input_jk_output_index(j+1,k,kmax)] = c_a[j,k] row_index.append(ind) col_index.append(input_jk_output_index(j+1,k,kmax)) coeff.append(c_a[j,k]) if (j>0): # A[ind,input_jk_output_index(j-1,k,kmax)] = c_b[j,k] row_index.append(ind) col_index.append(input_jk_output_index(j-1,k,kmax)) coeff.append(c_b[j,k]) # A[ind,input_jk_output_index(j,k+1,kmax)] = c_c[j,k] row_index.append(ind) col_index.append(input_jk_output_index(j,k+1,kmax)) coeff.append(c_c[j,k]) # A[ind,input_jk_output_index(j,k-1,kmax)] = c_d[j,k] row_index.append(ind) col_index.append(input_jk_output_index(j,k-1,kmax)) coeff.append(c_d[j,k]) # A[ind,input_jk_output_index(j,k,kmax)] = c_e[j,k] row_index.append(ind) col_index.append(input_jk_output_index(j,k,kmax)) coeff.append(c_e[j,k]) # ==== Upper boundary condition - thermal wind ==== # for j in range(1,jmax1-1): for j in range(jmax1): ind1 = input_jk_output_index(j,kmax-1,kmax) b[ind1] = uz2[j] #- r0 * cosl[j]**2 * exp(-rkappa*(kmax-2.)/7.) * (Delta_PT1[j+1]-Delta_PT1[j-1])/ (cor1[j,-2]**2 * aa * hh * dmdz) # A[ind1,ind1] = 1.0 row_index.append(ind1) col_index.append(ind1) coeff.append(1.0) # A[ind1,input_jk_output_index(j,kmax-3,kmax)] = -1.0 row_index.append(ind1) col_index.append(input_jk_output_index(j,kmax-3,kmax)) coeff.append(-1.0) # Try sparse matrix # print 'try sparse matrix' # A = csc_matrix((coeff_noslip, (row_index, col_index)), shape=(jmax1*kmax,jmax1*kmax)) # print 'shape of A=',A.shape # print 'Does it work?' # # csc_matrix((data, (row_ind, col_ind)), [shape=(M, N)]) # where data, row_ind and col_ind satisfy the relationship a[row_ind[k], col_ind[k]] = data[k]. # A[ind1,input_jk_output_index(j,kmax-3,kmax)] = -1.0 #uz2[1:-1] = - r0 * cosl[1:-1]**2 * exp(-rkappa*(kmax-2.)/7.) 
* (Delta_PT1[2:]-Delta_PT1[:-2]) / (cor1[1:-1,-2]**2 * aa * hh * dmdz) # === Make a copy to deal with adiabatic boundary condition === # A: no-slip # A_adiab: adiabatic boundary conditions row_index_adiab = copy(row_index) col_index_adiab = copy(col_index) coeff_adiab = copy(coeff) b_adiab = np.copy(b) # print 'does it work till here?' # A_adiab = np.copy(A) # ==== Lower boundary condition - adiabatic (k=0) ==== for j in range(jmax1): ind0 = input_jk_output_index(j,0,kmax) b_adiab[ind0] = uz1[j] # A_adiab[ind0,ind0] = -1.0 # k=0 row_index_adiab.append(ind0) col_index_adiab.append(ind0) coeff_adiab.append(-1.0) # A_adiab[ind0,input_jk_output_index(j,2,kmax)] = 1.0 # k=2 row_index_adiab.append(ind0) col_index_adiab.append(input_jk_output_index(j,2,kmax)) coeff_adiab.append(1.0) A_adiab = csc_matrix((coeff_adiab, (row_index_adiab, col_index_adiab)), shape=(jmax1*kmax,jmax1*kmax)) # ==== Lower boundary condition - no-slip (k=0) ==== for j in range(jmax1): ind = input_jk_output_index(j,0,kmax) b[ind] = zmu1[j,0]*cosl[j]/cor1[j,0] # A[ind,ind] = 1.0 row_index.append(ind) col_index.append(ind) coeff.append(1.0) A = csc_matrix((coeff, (row_index, col_index)), shape=(jmax1*kmax,jmax1*kmax)) # print 'is it ok till here????' # === Solving the linear system === u2_adiab = spsolve(A_adiab, b_adiab) u2 = spsolve(A, b) # === Mapping back to 2D matrix === u_adiab = np.zeros((jmax1+2,kmax)) u = np.zeros((jmax1+2,kmax)) for j in range(jmax1): for k in range(kmax): u_adiab[j+1,k] = u2_adiab[j*kmax + k] u[j+1,k] = u2[j*kmax + k] u_MassCorr_adiab = np.zeros_like(u_adiab) u_MassCorr_noslip = np.zeros_like(u) # u_MassCorr[1:-1,:] = u[1:-1,:] * cor1[1:-1,:] / cosl[1:-1,np.newaxis] u_MassCorr_adiab[1:-1,:] = u_adiab[1:-1,:] * cor1 / cosl[:,np.newaxis] u_MassCorr_noslip[1:-1,:] = u[1:-1,:] * cor1 / cosl[:,np.newaxis] # --- Initialize T_MassCorr to be output --- u_Ref_regular_adiab = np.zeros_like(zmum) u_Ref_regular_noslip = np.zeros_like(zmum) u_MassCorr_regular_adiab = np.zeros_like(zmum) u_MassCorr_regular_noslip = np.zeros_like(zmum) T_Ref_regular_adiab = np.zeros_like(zmum) T_Ref_regular_noslip = np.zeros_like(zmum) T_MassCorr_regular_adiab = np.zeros_like(zmum) T_MassCorr_regular_noslip = np.zeros_like(zmum) for u_MassCorr,u_MassCorr_regular,u_Ref_regular,T_MassCorr_regular,T_Ref_regular,BCstring in \ zip([u_MassCorr_adiab,u_MassCorr_noslip],\ [u_MassCorr_regular_adiab,u_MassCorr_regular_noslip],\ [u_Ref_regular_adiab,u_Ref_regular_noslip],\ [T_MassCorr_regular_adiab,T_MassCorr_regular_noslip],\ [T_Ref_regular_adiab,T_Ref_regular_noslip],\ ['Adiabatic','Noslip']): # ---- Back out temperature correction here ----- T_MassCorr = np.zeros_like(u_MassCorr) for k in range(1,kmax-2): for j in range(2,jmax1,2): # This is temperature not potential temperature!!! Need to check. 
# print 'alat['+str(j)+']=',alat[j] # T_MassCorr[j,k] = T_MassCorr[j-2,k] - (2.*om*gl[j])*aa*hh*dmdz / (r0 * cosl[j]) * (u_MassCorr[j,k+1]-u_MassCorr[j,k-1]) T_MassCorr[j,k] = T_MassCorr[j-2,k] - (2.*om*gl[j-1])*aa*hh*dmdz / (r0 * cosl[j-1]) * (u_MassCorr[j-1,k+1]-u_MassCorr[j-1,k-1]) # ---- First do interpolation (gl is regular grid) ---- # f_Todd = interpolate.interp1d(gl[:-1:2],T_MassCorr[1:-1:2,k]) #[jmax x kmax] #f_Todd = interpolate.interp1d(gl_2[::2],T_MassCorr[::2,k]) #[jmax x kmax] #f_Todd_ex = extrap1d(f_Todd) f_Todd = interpolate.interp1d(gl_2[::2],T_MassCorr[::2,k], kind='linear',fill_value='extrapolate') T_MassCorr[:,k] = f_Todd(gl_2[:]) # T_MassCorr[:,k] = f_Todd_ex(gl_2[:]) # Get all the points interpolated # ---- Then do domain average ---- T_MC_mean = np.mean(T_MassCorr[:,k]) T_MassCorr[:,k] -= T_MC_mean # --- First, interpolate MassCorr back to regular grid first --- f_u_MassCorr = interpolate.interp1d(alat_2,u_MassCorr,axis=0, kind='linear') #[jmax x kmax] u_MassCorr_regular[:,-nlat//2:] = f_u_MassCorr(ylat[-nlat//2:]).T f_T_MassCorr = interpolate.interp1d(alat_2,T_MassCorr,axis=0, kind='linear') #[jmax x kmax] T_MassCorr_regular[:,-nlat//2:] = f_T_MassCorr(ylat[-nlat//2:]).T u_Ref = zmum[:,-nlat//2:] - u_MassCorr_regular[:,-nlat//2:] T_ref = zm_PT[:,-nlat//2:] * np.exp(-np.arange(kmax)/7. * rkappa)[:,np.newaxis] - T_MassCorr_regular[:,-nlat//2:] u_Ref_regular[:,-nlat//2:] = u_Ref T_Ref_regular[:,-nlat//2:] = T_ref # #plot_all_ref_quan = False if plot_all_ref_quan: # --- height coordinate --- height = np.array([i for i in range(kmax)]) # in [km] # --- Colorbar scale --- contour_int = np.arange(-120,145,5) dT_contour_int = np.arange(-120,81,5) T_contour_int = np.arange(160,321,5) # --- Start plotting figure --- fig = plt.subplots(figsize=(12,12)) plt.subplot(221) plt.contourf(ylat[-nlat//2:],height[:-2],u_MassCorr_regular[:-2,-nlat//2:],contour_int) plt.colorbar() c1=plt.contour(ylat[-nlat//2:],height[:-2],u_MassCorr_regular[:-2,-nlat//2:],contour_int[::2],colors='k') plt.clabel(c1,c1.levels,inline=True, fmt='%d', fontsize=10) plt.title('$\Delta$ u '+tstamp) plt.ylabel('height (km)') plt.subplot(222) plt.contourf(ylat[-nlat//2:],height[:-2],u_Ref[:-2,:],contour_int) plt.colorbar() c2=plt.contour(ylat[-nlat//2:],height[:-2],u_Ref[:-2,:],contour_int[::2],colors='k') plt.clabel(c2,c2.levels,inline=True, fmt='%d', fontsize=10) plt.title('$u_{REF}$ ('+BCstring+' BC)') plt.subplot(223) plt.contourf(ylat[-nlat//2:],height[:-2],T_MassCorr_regular[:-2,-nlat//2:],dT_contour_int) plt.colorbar() c3=plt.contour(ylat[-nlat//2:],height[:-2],T_MassCorr_regular[:-2,-nlat//2:],dT_contour_int,colors='k') plt.clabel(c3,c3.levels,inline=True, fmt='%d', fontsize=10) plt.title('$\Delta$ T') plt.ylabel('height (km)') plt.subplot(224) plt.contourf(ylat[-nlat//2:],height[:-2],T_ref[:-2,:],T_contour_int) plt.colorbar() c4=plt.contour(ylat[-nlat//2:],height[:-2],T_ref[:-2,:],T_contour_int[::2],colors='k') plt.clabel(c4,c4.levels,inline=True, fmt='%d', fontsize=10) plt.title('$T_{REF}$') plt.ylabel('height (km)') plt.tight_layout() plt.show() #plt.savefig('/home/csyhuang/Dropbox/Research-code/Sep12_test3_'+BCstring+'_'+tstamp+'.png') plt.close() # This is for only outputing Delta_u and Uref for no-slip and adiabatic boundary conditions. 
return u_MassCorr_regular_noslip,u_Ref_regular_noslip,T_MassCorr_regular_noslip,T_Ref_regular_noslip, u_MassCorr_regular_adiab,u_Ref_regular_adiab,T_MassCorr_regular_adiab,T_Ref_regular_adiab # --- As a test whether the function Solve_Uref is working --- if __name__ == "__main__": import matplotlib.pyplot as plt import numpy as np nlat = 121 kmax = 49 jmax1 = nlat # The codes below is just for testing purpose tstamp = 'random' ylat = np.linspace(-90,90,121,endpoint=True) t1 = np.random.rand(nlat,kmax)+np.ones((nlat,kmax))*0.001 t2 = np.random.rand(nlat,kmax)+np.ones((nlat,kmax))*0.001 t3 = np.random.rand(nlat,kmax)+np.ones((nlat,kmax))*0.001 Delta_PT = np.random.rand(nlat)+np.ones((nlat))*0.001 zm_PT = np.random.rand(nlat,kmax)+np.ones((nlat,kmax))*0.001 Input_B0 = np.random.rand(nlat)+np.ones((nlat))*0.001 Input_B1 = np.random.rand(nlat)+np.ones((nlat))*0.001 eh = np.random.rand(jmax1, kmax)+np.ones((jmax1, kmax))*0.001 Delta_PT = np.sort(np.random.rand(jmax1)) xxx = solve_uref_both_bc(tstamp,t1,t2,ylat,t3,Delta_PT,zm_PT,Input_B0,Input_B1,use_real_Data=True) print(xxx)
mit
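solve_uref_both_bc above assembles its boundary-value problem by appending to three parallel lists (row_index, col_index, coeff), building a csc_matrix from them in one call, and solving with spsolve. The same assemble-and-solve pattern is shown below on a tiny tridiagonal system, just to isolate the mechanics.

import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import spsolve

rows, cols, coeff = [], [], []

def add(i, j, v):
    rows.append(i)
    cols.append(j)
    coeff.append(v)

# Accumulate coefficients the same way the solver above does, then build
# the sparse matrix in a single csc_matrix((data, (row, col))) call.
n = 3
b = np.array([1.0, 2.0, 3.0])
for i in range(n):
    add(i, i, 2.0)
    if i > 0:
        add(i, i - 1, -1.0)
    if i < n - 1:
        add(i, i + 1, -1.0)

A = csc_matrix((coeff, (rows, cols)), shape=(n, n))
x = spsolve(A, b)
print(x)              # solution of A x = b
print(A.dot(x) - b)   # residual, ~0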
microhh/microhh
kernel_tuner/statistics.py
5
1185
import matplotlib.pyplot as pl
import numpy as np
import json
import glob

pl.close('all')
pl.ion()

def get_timings(kernel_name, gridsizes):
    dt = np.zeros_like(gridsizes, dtype=float)
    for i, gridsize in enumerate(gridsizes):
        with open('{0}_{1:03d}.json'.format(kernel_name, gridsize)) as f:
            data = json.load(f)
            timings = data[0]
            fastest = 1e9
            for timing in timings:
                fastest = min(fastest, timing['time'])
            dt[i] = fastest
    return dt

if __name__ == '__main__':
    gridsize = np.arange(32, 513, 32)

    normal = get_timings('diff_c_g', gridsize)
    smem = get_timings('diff_c_g_smem', gridsize)
    fac = gridsize**3

    pl.figure(figsize=(8, 4))

    pl.subplot(121)
    pl.plot(gridsize, normal / fac, 'k-x', label='non smem')
    pl.plot(gridsize, smem / fac, 'r-x', label='smem')
    pl.ylim(0, 2e-7)
    pl.ylabel('time/gridpoint (s)')
    pl.xlabel('gridpoints (-)')
    pl.legend()
    pl.grid()

    pl.subplot(122)
    pl.plot(gridsize, normal / smem, 'k-x')
    pl.ylabel('non_smem/smem (-)')
    pl.xlabel('gridpoints (-)')
    pl.grid()

    pl.tight_layout()
gpl-3.0
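get_timings above keeps the smallest 'time' entry among all benchmarked configurations stored in a kernel-tuner result file (a JSON array whose first element is the list of timing records). The same selection can be written with min() and a key function; a small sketch on an in-memory list with made-up configurations:

# Illustrative timing records in the shape get_timings iterates over.
timings = [
    {'block_size_x': 32,  'time': 1.84},
    {'block_size_x': 64,  'time': 1.37},
    {'block_size_x': 128, 'time': 1.52},
]

fastest = min(timings, key=lambda t: t['time'])
print(fastest['time'], fastest)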
altairpearl/scikit-learn
examples/manifold/plot_mds.py
88
2731
""" ========================= Multi-dimensional scaling ========================= An illustration of the metric and non-metric MDS on generated noisy data. The reconstructed points using the metric MDS and non metric MDS are slightly shifted to avoid overlapping. """ # Author: Nelle Varoquaux <nelle.varoquaux@gmail.com> # License: BSD print(__doc__) import numpy as np from matplotlib import pyplot as plt from matplotlib.collections import LineCollection from sklearn import manifold from sklearn.metrics import euclidean_distances from sklearn.decomposition import PCA n_samples = 20 seed = np.random.RandomState(seed=3) X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float) X_true = X_true.reshape((n_samples, 2)) # Center the data X_true -= X_true.mean() similarities = euclidean_distances(X_true) # Add noise to the similarities noise = np.random.rand(n_samples, n_samples) noise = noise + noise.T noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0 similarities += noise mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed, dissimilarity="precomputed", n_jobs=1) pos = mds.fit(similarities).embedding_ nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12, dissimilarity="precomputed", random_state=seed, n_jobs=1, n_init=1) npos = nmds.fit_transform(similarities, init=pos) # Rescale the data pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum()) npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum()) # Rotate the data clf = PCA(n_components=2) X_true = clf.fit_transform(X_true) pos = clf.fit_transform(pos) npos = clf.fit_transform(npos) fig = plt.figure(1) ax = plt.axes([0., 0., 1., 1.]) s = 100 plt.scatter(X_true[:, 0], X_true[:, 1], color='navy', s=s, lw=0, label='True Position') plt.scatter(pos[:, 0], pos[:, 1], color='turquoise', s=s, lw=0, label='MDS') plt.scatter(npos[:, 0], npos[:, 1], color='darkorange', s=s, lw=0, label='NMDS') plt.legend(scatterpoints=1, loc='best', shadow=False) similarities = similarities.max() / similarities * 100 similarities[np.isinf(similarities)] = 0 # Plot the edges start_idx, end_idx = np.where(pos) # a sequence of (*line0*, *line1*, *line2*), where:: # linen = (x0, y0), (x1, y1), ... (xm, ym) segments = [[X_true[i, :], X_true[j, :]] for i in range(len(pos)) for j in range(len(pos))] values = np.abs(similarities) lc = LineCollection(segments, zorder=0, cmap=plt.cm.Blues, norm=plt.Normalize(0, values.max())) lc.set_array(similarities.flatten()) lc.set_linewidths(0.5 * np.ones(len(segments))) ax.add_collection(lc) plt.show()
bsd-3-clause
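The MDS example above perturbs the true distance matrix with additive noise that is made symmetric (noise + noise.T) and given a zero diagonal, so the result remains a valid precomputed dissimilarity matrix. Just that preprocessing step is sketched below on a small random point set; np.fill_diagonal stands in for the fancy-indexing assignment used in the example.

import numpy as np
from sklearn.metrics import euclidean_distances

rng = np.random.RandomState(3)
X = rng.rand(6, 2)

similarities = euclidean_distances(X)

# Symmetric noise with a zero diagonal, as in the example above.
noise = rng.rand(6, 6)
noise = noise + noise.T
np.fill_diagonal(noise, 0.0)

similarities += noise
print(np.allclose(similarities, similarities.T))  # True: still symmetric
print(np.diag(similarities))                      # self-dissimilarities stay 0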
timpalpant/KaggleTSTextClassification
scripts/plot_feature_label_correlations.py
1
1976
#!/usr/bin/env python ''' Compute mutual information between individual features and labels ''' import argparse import matplotlib.pyplot as plt from matplotlib.backends.backend_pdf import PdfPages from common import * def opts(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('features', type=load_npz, help='Training data features (npz)') parser.add_argument('labels', type=load_npz, help='Training data labels (npz)') parser.add_argument('output', help='Output file with plots (pdf)') return parser if __name__ == "__main__": args = opts().parse_args() print "Loading labels" labels = args.labels['labels'] header = args.labels['header'] pdf = PdfPages(args.output) #print "Plotting boolean features conditioned on labels" #bf = args.features['bfeatures'] #n = bf.shape[1] #m = np.zeros((n,11)) #m[:,0] = np.sum(bf==-1, axis=0) #m[:,1] = np.sum(bf==0, axis=0) #m[:,2] = np.sum(bf==1, axis=0) #fig = plt.figure() #pdf.savefig(fig) #plt.close() print "Plotting float features conditioned on labels" ff = args.features['ffeatures'] n = ff.shape[1] x = np.arange(n) for i, l in enumerate(labels.T): print "label %d" % i for j, f in enumerate(ff.T): print "...ffeature %d" % j fig = plt.figure() plt.hist(f[l], normed=True, label='P(f | l)', color='blue', alpha=0.4, range=(f.min(),f.max()), bins=25) plt.hist(f[np.logical_not(l)], normed=True, label='P(f | ~l)', color='green', alpha=0.4, range=(f.min(),f.max()), bins=25) plt.xlim(f.min(), f.max()) plt.xlabel('f') plt.ylabel('P(f)') plt.title('FFeature %d, Label %s' % (j, header[i])) pdf.savefig(fig) plt.close() pdf.close()
gpl-3.0
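The docstring above promises mutual information between individual features and labels, while the body plots the conditional histograms P(f | l) and P(f | ~l). If the mutual information itself is wanted, it can be estimated from a joint histogram of the binned feature and the boolean label; the sketch below uses synthetic data and an arbitrary binning choice, neither taken from the original pipeline.

import numpy as np

def mutual_information(feature, label, bins=10):
    # MI in nats between a binned continuous feature and a boolean label.
    edges = np.histogram_bin_edges(feature, bins=bins)
    f_binned = np.digitize(feature, edges)            # bin index per sample
    joint = np.zeros((bins + 2, 2))
    for fb, lb in zip(f_binned, label.astype(int)):
        joint[fb, lb] += 1.0
    pxy = joint / joint.sum()
    px = pxy.sum(axis=1, keepdims=True)
    py = pxy.sum(axis=0, keepdims=True)
    independent = np.dot(px, py)
    nz = pxy > 0
    return float(np.sum(pxy[nz] * np.log(pxy[nz] / independent[nz])))

rng = np.random.RandomState(0)
label = rng.rand(2000) < 0.5
feature = rng.randn(2000) + 1.5 * label               # feature shifts with the label
print(mutual_information(feature, label))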
harlowja/networkx
examples/algorithms/blockmodel.py
32
3009
#!/usr/bin/env python # encoding: utf-8 """ Example of creating a block model using the blockmodel function in NX. Data used is the Hartford, CT drug users network: @article{, title = {Social Networks of Drug Users in {High-Risk} Sites: Finding the Connections}, volume = {6}, shorttitle = {Social Networks of Drug Users in {High-Risk} Sites}, url = {http://dx.doi.org/10.1023/A:1015457400897}, doi = {10.1023/A:1015457400897}, number = {2}, journal = {{AIDS} and Behavior}, author = {Margaret R. Weeks and Scott Clair and Stephen P. Borgatti and Kim Radda and Jean J. Schensul}, month = jun, year = {2002}, pages = {193--206} } """ __author__ = """\n""".join(['Drew Conway <drew.conway@nyu.edu>', 'Aric Hagberg <hagberg@lanl.gov>']) from collections import defaultdict import networkx as nx import numpy from scipy.cluster import hierarchy from scipy.spatial import distance import matplotlib.pyplot as plt def create_hc(G): """Creates hierarchical cluster of graph G from distance matrix""" path_length=nx.all_pairs_shortest_path_length(G) distances=numpy.zeros((len(G),len(G))) for u,p in path_length.items(): for v,d in p.items(): distances[u][v]=d # Create hierarchical cluster Y=distance.squareform(distances) Z=hierarchy.complete(Y) # Creates HC using farthest point linkage # This partition selection is arbitrary, for illustrive purposes membership=list(hierarchy.fcluster(Z,t=1.15)) # Create collection of lists for blockmodel partition=defaultdict(list) for n,p in zip(list(range(len(G))),membership): partition[p].append(n) return list(partition.values()) if __name__ == '__main__': G=nx.read_edgelist("hartford_drug.edgelist") # Extract largest connected component into graph H H=nx.connected_component_subgraphs(G)[0] # Makes life easier to have consecutively labeled integer nodes H=nx.convert_node_labels_to_integers(H) # Create parititions with hierarchical clustering partitions=create_hc(H) # Build blockmodel graph BM=nx.blockmodel(H,partitions) # Draw original graph pos=nx.spring_layout(H,iterations=100) fig=plt.figure(1,figsize=(6,10)) ax=fig.add_subplot(211) nx.draw(H,pos,with_labels=False,node_size=10) plt.xlim(0,1) plt.ylim(0,1) # Draw block model with weighted edges and nodes sized by number of internal nodes node_size=[BM.node[x]['nnodes']*10 for x in BM.nodes()] edge_width=[(2*d['weight']) for (u,v,d) in BM.edges(data=True)] # Set positions to mean of positions of internal nodes from original graph posBM={} for n in BM: xy=numpy.array([pos[u] for u in BM.node[n]['graph']]) posBM[n]=xy.mean(axis=0) ax=fig.add_subplot(212) nx.draw(BM,posBM,node_size=node_size,width=edge_width,with_labels=False) plt.xlim(0,1) plt.ylim(0,1) plt.axis('off') plt.savefig('hartford_drug_block_model.png')
bsd-3-clause
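create_hc above converts the flat membership vector returned by hierarchy.fcluster into the list-of-lists partition that nx.blockmodel expects by accumulating node indices in a defaultdict. That regrouping step on its own, with a made-up membership vector:

from collections import defaultdict

membership = [1, 2, 1, 3, 2, 1]   # cluster id per node, as fcluster returns

partition = defaultdict(list)
for node, cluster in enumerate(membership):
    partition[cluster].append(node)

print(list(partition.values()))   # e.g. [[0, 2, 5], [1, 4], [3]]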
MannyGrewal/Manny.CIFAR
Manny.CIFAR/CIFAR/CIFARPlotter.py
1
1321
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pylab

########################################################################
# 2017 - Manny Grewal
# Purpose of this class is to visualise a list of images from the CIFAR dataset

# How many columns to show in a grid
MAX_COLS = 5

# PlotImages takes a list of images and their respective labels in the second
# parameter, then renders them with matplotlib's imshow in a 5-column grid.
def PlotImages(arrayImages, arrayClassLabels, reShapeRequired=False):
    totalImages = len(arrayImages)
    if reShapeRequired == True:
        arrayImages = np.reshape(arrayImages, (totalImages, 32, 32, 3))
    totalRows = math.ceil(totalImages / MAX_COLS)

    fig = plt.figure(figsize=(5, 5))
    gs = gridspec.GridSpec(totalImages, MAX_COLS)
    # set the space between subplots and the position of the subplots in the figure
    gs.update(wspace=0.1, hspace=0.4, left=0.1, right=0.7, bottom=0.1, top=0.9)

    arrayIndex = 0
    for g in gs:
        if arrayIndex < totalImages:
            axes = plt.subplot(g)
            axes.set_axis_off()
            axes.set_title(arrayClassLabels[arrayIndex])
            axes.imshow(arrayImages[arrayIndex])
            arrayIndex += 1
    #plt.show()
mit
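PlotImages above lays thumbnails out on a MAX_COLS-wide grid, so the number of rows actually needed is ceil(total / MAX_COLS), which the class computes with math.ceil (it then allocates a GridSpec with totalImages rows, more than strictly necessary, but the result still renders correctly). A quick illustration of the row calculation, assuming Python 3 true division as the class does:

import math

MAX_COLS = 5
for total_images in (3, 5, 7, 12):
    rows = math.ceil(total_images / MAX_COLS)
    print(total_images, "images ->", rows, "row(s) of", MAX_COLS, "columns")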
mailhexu/pyDFTutils
pyDFTutils/vasp/procar_reader.py
2
3569
#!/usr/bin/env python from numpy import zeros,inner import numpy as np import re from pyDFTutils.ase_utils import symbol_number import matplotlib.pyplot as plt def fix_line(line): line=re.sub("(\d)-(\d)", r'\1 -\2',line) return line class procar_reader(): def __init__(self,fname='PROCAR'): self.read(fname=fname) def get_dos(self,iion,orb_name,iband): dos=inner(self.dos_array[iion,self.orb_dict[orb_name],iband,:],self.weight) return dos def filter_band(self,iion,orb_name,thr=0.01): """ return a energy array 2D. energy(iband,nkpt). """ band_ids=[] for iband in range(self.nbands): d=self.get_dos(iion,orb_name,iband) print(d) if d>thr: band_ids.append(iband) return self.energy[np.array(band_ids,dtype=int)] def plot_band(self,iion,orb_name,thr=0.01): earray=self.filter_band(iion,orb_name,thr=thr) for k_e in earray: plt.plot(k_e) plt.ylim(-3,2) #plt.show() def plot_band_alpha(self,iion,orb_name,color='k'): for iband in range(self.nbands): d=self.get_dos(iion,orb_name,iband) print(d) plt.plot(self.energy[iband],color,linewidth=d*50,alpha=0.5) def read(self,fname='PROCAR'): lines=open(fname).readlines() iline=0 self.has_phase=bool(lines[iline].rfind('phase')) iline=1 p=lines[iline].split() self.nkpts=int(p[3]) self.nbands=int(p[7]) self.nions=int(p[11]) self.dos_label=lines[7].split()[1:-1] self.norb=len(self.dos_label) self.orb_dict=dict(list(zip(self.dos_label,list(range(self.norb))))) print(self.orb_dict) self.dos_array=zeros((self.nions,self.norb,self.nbands,self.nkpts),dtype='float') self.weight=zeros(self.nkpts,dtype='float') self.energy=zeros((self.nbands,self.nkpts)) self.band_occ=zeros((self.nbands,self.nkpts)) self.kpts=zeros((self.nkpts,3)) iline+=1 for ikpts in range(self.nkpts): iline+=1 line_k=fix_line( lines[iline]).split() self.kpts[ikpts]=[float(x) for x in line_k[3:6]] self.weight[ikpts]=float(line_k[-1]) iline+=2 for iband in range(self.nbands): line_b=lines[iline].split() self.energy[iband,ikpts]=float(line_b[4]) self.band_occ[iband,ikpts]=float(line_b[7]) iline+=3 for iion in range(self.nions): #print iline line_dos=lines[iline].strip().split() #print iline #print line_dos self.dos_array[iion,:,iband,ikpts]=[float(x) for x in line_dos[1:-1]] iline+=1 #if self.has_phase: #iline+=1+self.nions*2 iline+=3 self.efermi=np.max(self.energy[self.band_occ>0.5]) print(self.efermi) self.energy=self.energy-self.efermi def test(iion=0,orb_name='dx2',thr=0.005): p=procar_reader() #for e in p.filter_band(0,orb_name,thr=thr): # plt.plot(e,'.',color='green') #for e in p.filter_band(1,'dx2',thr=thr): # plt.plot(e,'-',color='red') #plt.plot(p.filter_band(iion,'dz2',thr=thr)) p.plot_band_alpha(1,'dx2',color='r') p.plot_band_alpha(1,'dz2',color='g') plt.ylim(-5,5) plt.show() if __name__=='__main__': test()
lgpl-3.0
shiinoandra/wavegano
Program/Wavegano/Wavegano/Wavegano.py
1
21939
import operation as op
import random
import math
import Helper
import GRDEI
import RDE
import GDE
import Wave
import os
import numpy
import matplotlib.pyplot as plt

numpy.set_printoptions(threshold=numpy.nan)


#def encode(payload_path,cover_path,threshold,segment_size,partition_segment_size,method):
#    file_name = cover_path.split("\\")[len(cover_path.split("\\"))-1]
#    path = cover_path.replace(file_name,'')
#    print(" PROSES ENCODING ")
#    print(" METODE YANG DIGUNAKAN : " + method)
#    payload = Helper.payloadIO.open(payload_path)
#    bin_payload = op.operation.stringToBinary(payload)
#    print "besar payload : "
#    payload_size = len(bin_payload)
#    print(payload_size)
#    medium = Helper.WavIO.open(cover_path)
#    print "bitrate: "
#    print(medium.bitrate)
#    samples = op.operation.numToBinary(medium.samples)
#    (M1,M2,Partisi) = op.operation.intel_partition(samples,partition_segment_size)
#    intM1 = op.operation.binaryTonum(M1)
#    intM2 = op.operation.binaryTonum(M2)
#    if method == "GDE" :
#        kapasitas_M1 = GDE.checkCapacity(intM1,segment_size,threshold)
#        kapasitas_M2 = GDE.checkCapacity(intM2,segment_size,threshold)
#    elif method == "GRDEI":
#        kapasitas_M1 = GRDEI.checkCapacity(intM1,segment_size,threshold)
#        kapasitas_M2 = GRDEI.checkCapacity(intM2,segment_size,threshold)
#    print(" kapasitas segmen 1 : " + str(kapasitas_M1))
#    print(" kapasitas segmen 2 : " + str(kapasitas_M2))
#    capacity = kapasitas_M1+kapasitas_M2
#    print "Kapasitas penyimpanan :"
#    print(capacity)
#    if capacity >= payload_size:
#        print "stegano dapat dilakukan"
#        payload_seg1 = bin_payload[:kapasitas_M1]
#        payload_seg2 = bin_payload[kapasitas_M1:len(bin_payload)]
#        if method == "GDE" :
#            (encoded_1,locMap_1) = GDE.encode(intM1,payload_seg1,segment_size,threshold)
#            (encoded_2,locMap_2) = GDE.encode(intM2,payload_seg2,segment_size,threshold)
#        elif method == "GRDEI":
#            (encoded_1,locMap_1,reduceMap_1) = GRDEI.encode(intM1,payload_seg1,segment_size,threshold)
#            (encoded_2,locMap_2,reduceMap_2) = GRDEI.encode(intM2,payload_seg2,segment_size,threshold)
#        encoded_1 = op.operation.numToBinary(encoded_1)
#        encoded_2 = op.operation.numToBinary(encoded_2)
#        for i in range(len(encoded_1)):
#            encoded_1[i]=encoded_1[i][8:16]
#        for i in range(len(encoded_2)):
#            encoded_2[i]=encoded_2[i][8:16]
#        encoded_1_bin = numpy.array(encoded_1,dtype=int)
#        encoded_2_bin = numpy.array(encoded_2,dtype=int)
#        _M = op.operation.reconstructPartition(encoded_1_bin,encoded_2_bin,Partisi)
#        _M_int2= numpy.asarray(op.operation.binaryTonum(_M),dtype=numpy.uint16)
#        _M_int = numpy.asarray(op.operation.binaryTonum(_M),dtype=numpy.int16)
#        new_wav = Wave.Wave(method+"_encoded_"+str(file_name))
#        new_wav.samples = _M_int
#        new_wav.bitrate = medium.bitrate
#        time_axis = numpy.linspace(0,len(medium.samples)/medium.bitrate,num=len(medium.samples))
#        plt.subplot(3,1,1)
#        plt.title("perbandingan wav asli dan hasil encode")
#        plt.plot(time_axis,numpy.array(medium.samples,dtype= "int16"))
#        plt.ylabel("WAV asli")
#        plt.subplot(3,1,2)
#        plt.plot(time_axis,numpy.array(_M_int,dtype= "int16"))
#        plt.ylabel("Hasil Encode")
#        plt.subplot(3,1,3)
#        plt.plot(time_axis,numpy.array(_M_int,dtype= "int16"))
#        plt.subplot(3,1,3)
#        plt.plot(time_axis,numpy.array(medium.samples,dtype= "int16"))
#        plt.ylabel("perbandingan")
#        plt.xlabel("Waktu (s)")
#        plt.savefig("original-encoded.png")
#        #print(medium.samples)
#        #raw_input()
#        #print(new_wav.samples)
#        #new_wav.print_info()
#        #medium.print_info()
#        Helper.WavIO.write(path,new_wav)
#        if method == "GDE" :
#            return(locMap_1,locMap_2,Partisi)
#        elif method == "GRDEI":
#            return(locMap_1,locMap_2,reduceMap_1,reduceMap_2,Partisi)
#    else:
#        print "kapasitas tidak mencukupi"
#        return -1


def encode(intM1,intM2,payload_seg1,payload_seg2,threshold,segment_size,partition_segment_size,Partisi,method):
    if method == "GDE" :
        (encoded_1,locMap_1) = GDE.encode(intM1,payload_seg1,segment_size,threshold)
        (encoded_2,locMap_2) = GDE.encode(intM2,payload_seg2,segment_size,threshold)
    elif method == "GRDEI":
        (encoded_1,locMap_1,reduceMap_1) = GRDEI.encode(intM1,payload_seg1,segment_size,threshold)
        (encoded_2,locMap_2,reduceMap_2) = GRDEI.encode(intM2,payload_seg2,segment_size,threshold)
    encoded_1 = op.operation.numToBinary(encoded_1)
    encoded_2 = op.operation.numToBinary(encoded_2)
    for i in range(len(encoded_1)):
        encoded_1[i]=encoded_1[i][8:16]
    for i in range(len(encoded_2)):
        encoded_2[i]=encoded_2[i][8:16]
    encoded_1_bin = numpy.array(encoded_1,dtype=int)
    encoded_2_bin = numpy.array(encoded_2,dtype=int)
    _M = op.operation.reconstructPartition(encoded_1_bin,encoded_2_bin,Partisi)
    _M_int2= numpy.asarray(op.operation.binaryTonum(_M),dtype=numpy.uint16)
    if method == "GDE" :
        return(_M_int2,locMap_1,locMap_2,Partisi)
    elif method == "GRDEI":
        return(_M_int2,locMap_1,locMap_2,reduceMap_1,reduceMap_2,Partisi)


def decode(file_path,segment_size,payload_size,method,Partition,locMap_1,locMap_2,reduceMap_1 = None , reduceMap_2 = None):
    file_name = file_path.split("\\")[len(file_path.split("\\"))-1]
    path = file_path.replace(file_name,'')
    print(" PROSES DECODING ")
    print(" METODE YANG DIGUNAKAN : " )
    medium_encoded = Helper.WavIO.open(file_path)
    print "bitrate: "
    print(medium_encoded.bitrate)
    samples_decode = op.operation.numToBinary(medium_encoded.samples)
    (_M1,_M2,P) = op.operation.intel_partition(samples_decode,0,Partition)
    _intM1 = op.operation.binaryTonum(_M1)
    _intM2 = op.operation.binaryTonum(_M2)
    if method == "GDE" :
        (decoded_M1,message1) = GDE.decode(_intM1,segment_size,locMap_1)
        (decoded_M2,message2) = GDE.decode(_intM2,segment_size,locMap_2)
    elif method == "GRDEI":
        (decoded_M1,message1) = GRDEI.decode(_intM1,segment_size,locMap_1,reduceMap_1)
        (decoded_M2,message2) = GRDEI.decode(_intM2,segment_size,locMap_2,reduceMap_2)
    message_decoded = []
    message_decoded.extend(message1)
    message_decoded.extend(message2)
    message_decoded = message_decoded[:payload_size]
    print(len(message_decoded))
    message_write = op.operation.revStringToBinary(message_decoded)
    Helper.payloadIO.write(path+"payload_decoded.txt",message_write)
    decoded_1 = op.operation.numToBinary(decoded_M1)
    decoded_2 = op.operation.numToBinary(decoded_M2)
    for i in range(len(decoded_1)):
        decoded_1[i]=decoded_1[i][8:16]
    for i in range(len(decoded_2)):
        decoded_2[i]=decoded_2[i][8:16]
    decoded_1_bin = numpy.array(decoded_1,dtype=int)
    decoded_2_bin = numpy.array(decoded_2,dtype=int)
    M_awal = op.operation.reconstructPartition(decoded_1_bin,decoded_2_bin,Partition)
    M__awal = numpy.asarray(op.operation.binaryTonum(M_awal),dtype=numpy.int16)
    new_wav2 = Wave.Wave(file_name.replace("encoded","decoded"))
    new_wav2.samples = M__awal
    new_wav2.bitrate = medium_encoded.bitrate
    plt.clf()
    time_axis = numpy.linspace(0,len(medium_encoded.samples)/medium_encoded.bitrate,num=len(medium_encoded.samples))
    plt.subplot(2,1,1)
    plt.title("perbandingan hasil encode dan hasil decode")
    plt.plot(time_axis,numpy.array(medium_encoded.samples,dtype= "int16"))
    plt.ylabel("WAV encoded")
    plt.subplot(2,1,2)
    plt.plot(time_axis,numpy.array(M__awal,dtype= "int16"))
    plt.ylabel("WAV Hasil Dencode")
    plt.xlabel("Waktu (s)")
    plt.savefig("encoded-decoded.png")
    Helper.WavIO.write(path,new_wav2)


def multilayer_encode(payload_path,cover_path,threshold,segment_size,partition_segment_size,method,n_layer):
    locmap_list = []
    reducemap_list = []
    capacities = []
    payload_sizes = []
    total_capacity = 0
    file_name = cover_path.split("\\")[len(cover_path.split("\\"))-1]
    path = cover_path.replace(file_name,'')
    print(" PROSES ENCODING ")
    print(" METODE YANG DIGUNAKAN : " + str(method))
    payload = Helper.payloadIO.open(payload_path)
    bin_payload = op.operation.stringToBinary(payload)
    print "besar payload : "
    payload_size = len(bin_payload)
    print(payload_size)
    medium = Helper.WavIO.open(cover_path)
    print "bitrate: "
    print(medium.bitrate)
    audio_sample = medium.samples

    #for i in range(n_layer):
    #    samples = op.operation.numToBinary(audio_sample)
    #    (M1,M2,Partisi) = op.operation.intel_partition(samples,partition_segment_size)
    #    intM1 = op.operation.binaryTonum(M1)
    #    intM2 = op.operation.binaryTonum(M2)
    #    if method == "GDE" :
    #        kapasitas_M1 = GDE.checkCapacity(intM1,segment_size,threshold)
    #        kapasitas_M2 = GDE.checkCapacity(intM2,segment_size,threshold)
    #    elif method == "GRDEI":
    #        kapasitas_M1 = GRDEI.checkCapacity(intM1,segment_size,threshold)
    #        kapasitas_M2 = GRDEI.checkCapacity(intM2,segment_size,threshold)
    #    print(" kapasitas segmen 1 : " + str(kapasitas_M1))
    #    print(" kapasitas segmen 2 : " + str(kapasitas_M2))
    #    capacity = kapasitas_M1+kapasitas_M2
    #    capacities.append((kapasitas_M1,kapasitas_M2))
    #    print "Kapasitas penyimpanan layer ke "+str(i)+":"+str(capacity)
    #    payload_seg1 = [1 for i in range(kapasitas_M1)]
    #    payload_seg2 = [1 for i in range(kapasitas_M2)]
    #    total_capacity+=capacity
    #    audio_sample = encode(intM1,intM2,payload_seg1,payload_seg2,threshold,segment_size,partition_segment_size,Partisi,method)[0]
    #print("total kapasitas "+str(total_capacity))
    #if(total_capacity > payload_size):
    #    print("stegano dapat dilakukan")

    #time_axis = numpy.linspace(0,len(medium.samples)/medium.bitrate,num=len(medium.samples))
    #plt.subplot(n_layer+1,1,1)
    #plt.title("perbandingan wav asli dan hasil encode multi-layer")
    #plt.plot(time_axis,numpy.array(medium.samples,dtype= "int16"))
    #plt.ylabel("WAV asli")

    P = op.operation.intel_partition(op.operation.numToBinary(audio_sample),partition_segment_size)[2]
    payload_counter = 0
    for i in range(n_layer):
        print("layer ke " + str(i))
        samples = op.operation.numToBinary(audio_sample)
        (M1,M2,Partisi) = op.operation.intel_partition(samples,partition_segment_size,P)
        intM1 = op.operation.binaryTonum(M1)
        intM2 = op.operation.binaryTonum(M2)
        if method == "GDE" :
            kapasitas_M1 = GDE.checkCapacity(intM1,segment_size,threshold)
            kapasitas_M2 = GDE.checkCapacity(intM2,segment_size,threshold)
        elif method == "GRDEI":
            kapasitas_M1 = GRDEI.checkCapacity(intM1,segment_size,threshold)
            kapasitas_M2 = GRDEI.checkCapacity(intM2,segment_size,threshold)
        capacities.append((kapasitas_M1,kapasitas_M2))
        kapasitas_total_layer = kapasitas_M1+kapasitas_M2
        total_capacity += kapasitas_total_layer
        if((payload_size - payload_counter) > kapasitas_total_layer):
            payload_i = bin_payload[payload_counter:kapasitas_total_layer]
            payload_sizes.append(kapasitas_total_layer)
            payload_counter+=kapasitas_total_layer
            print(len(payload_i))
        else:
            payload_i = bin_payload[payload_counter:payload_size]
            payload_sizes.append((payload_size-payload_counter))
            payload_counter+=(payload_size-payload_counter)
            print(len(payload_i))
        payload_seg1 = payload_i[:kapasitas_M1]
        print(len(payload_seg1))
        payload_seg2 = payload_i[kapasitas_M1:len(payload_i)]
        print(len(payload_seg2))
        if method == "GDE" :
            (audio_sample,locMap_1,locMap_2,Partisi)= encode(intM1,intM2,payload_seg1,payload_seg2,threshold,segment_size,partition_segment_size,P,method)
            locmap_list.append((locMap_1,locMap_2))
        elif method == "GRDEI":
            (audio_sample,locMap_1,locMap_2,reduceMap_1,reduceMap_2,Partisi) = encode(intM1,intM2,payload_seg1,payload_seg2,threshold,segment_size,partition_segment_size,P,method)
            locmap_list.append((locMap_1,locMap_2))
            reducemap_list.append((reduceMap_1,reduceMap_2))
        #audio_sample = numpy.asarray(audio_sample2,dtype="uint16")
        #plt.subplot(n_layer+1,1,i+2)
        #plt.plot(time_axis,numpy.array(audio_sample,dtype= "int16"))
        #plt.ylabel("Hasil Encode layer ke "+str(i+1))
    #plt.xlabel("Waktu (s)")
    #plt.savefig("original-multiencode-"+str(n_layer)+".png")

    if(payload_counter>=payload_size):
        print("stegano berhasil dilakukan")
        print(total_capacity)
        _M_int = numpy.asarray(audio_sample,dtype=numpy.int16)
        new_wav = Wave.Wave(method+"_encoded_"+str(file_name))
        new_wav.samples = _M_int
        new_wav.bitrate = medium.bitrate
        Helper.WavIO.write(path,new_wav)
        if method == "GDE" :
            return(locmap_list,payload_sizes,P)
        elif method == "GRDEI":
            return(locmap_list,reducemap_list,payload_sizes,P)
    else:
        print("kapasitas tidak mencukupi")
    #else:
    #    print "kapasitas tidak mencukupi"
    #    return -1


def multilayer_decode(file_path,segment_size,payload_sizes,Partition,method,n_layer,locmap_list,reducemap_list = None,):
    full_messages=[]
    file_name = file_path.split("\\")[len(file_path.split("\\"))-1]
    path = file_path.replace(file_name,'')
    print(" PROSES DECODING ")
    print(" METODE YANG DIGUNAKAN : " )
    print(method)
    medium_encoded = Helper.WavIO.open(file_path)
    print "bitrate: "
    print(medium_encoded.bitrate)
    audio_samples = medium_encoded.samples
    for i in range (n_layer):
        print("layer ke " + str(i))
        (locmap_i_1,locmap_i_2) = locmap_list.pop()
        if(method == "GRDEI"):
            (reducemap_i_1,reducemap_i_2) = reducemap_list.pop()
        samples = op.operation.numToBinary(audio_samples)
        (_M1,_M2,P) = op.operation.intel_partition(samples,0,Partition)
        _intM1 = op.operation.binaryTonum(_M1)
        _intM2 = op.operation.binaryTonum(_M2)
        if method == "GDE" :
            (decoded_M1,message1) = GDE.decode(_intM1,segment_size,locmap_i_1)
            (decoded_M2,message2) = GDE.decode(_intM2,segment_size,locmap_i_2)
        elif method == "GRDEI":
            (decoded_M1,message1) = GRDEI.decode(_intM1,segment_size,locmap_i_1,reducemap_i_1)
            (decoded_M2,message2) = GRDEI.decode(_intM2,segment_size,locmap_i_2,reducemap_i_2)
        message_decoded = []
        message_decoded.extend(message1)
        message_decoded.extend(message2)
        payload_i_size = payload_sizes.pop()
        full_messages.insert(0,message_decoded[0:payload_i_size])
        decoded_1 = op.operation.numToBinary(decoded_M1)
        decoded_2 = op.operation.numToBinary(decoded_M2)
        for i in range(len(decoded_1)):
            decoded_1[i]=decoded_1[i][8:16]
        for i in range(len(decoded_2)):
            decoded_2[i]=decoded_2[i][8:16]
        decoded_1_bin = numpy.array(decoded_1,dtype=int)
        decoded_2_bin = numpy.array(decoded_2,dtype=int)
        M_awal = op.operation.reconstructPartition(decoded_1_bin,decoded_2_bin,Partition)
        audio_samples = numpy.asarray(op.operation.binaryTonum(M_awal),dtype=numpy.uint16)
        for i in audio_samples:
            if(i<0):
                print(i)
    message_write = []
    for i in range(len(full_messages)):
        message_write.extend(full_messages[i])
    message_write = op.operation.revStringToBinary(message_write)
    Helper.payloadIO.write(path+"payload_decoded.txt",message_write)
    new_wav = Wave.Wave(file_name.replace("encoded","decoded"))
    M__awal = numpy.asarray(audio_samples,dtype=numpy.int16)
    new_wav.samples = M__awal
    new_wav.bitrate = medium_encoded.bitrate
    Helper.WavIO.write(path,new_wav)


if __name__ == "__main__":
    #(map1,map2,rmap1,rmap2,p) = encode("D:\\payload.txt","D:\\coba16.wav",100,20,10,"GRDEI")
    (locmap_list,reducemap_list,payload_sizes,partisi) = multilayer_encode("D:\\payload.txt","D:\\coba16.wav",50,20,10,"GRDEI",20)
    #print(len(locmap_list))
    #print(len(reducemap_list))
    multilayer_decode("D:\\GRDEI_encoded_coba16.wav",20,payload_sizes,partisi,"GRDEI",20,locmap_list,reducemap_list)

    #locmap_list.pop()
    #(map1_1,map1_2) = locmap_list.pop()
    #reducemap_list.pop()
    #(rmap1_1,rmap1_2) = reducemap_list.pop()
    #decode("D:\\GRDEI_encoded2_coba16.wav",20,1187,"GRDEI",partisi,map1_1,map1_2,rmap1_1,rmap1_2)
    #print(len(locmap_list),len(reducemap_list))
    #
    #print "data payload : "
    ##print(payload)
    ##arr = [13954, 4369, 37385, 3995, 2556, 46896, 13816, 17865, 40433, 42503, 27740, 14980, 22323, 27920, 48381, 40456, 58866, 60412, 36991, 30730, 14601, 31475, 50583, 57144, 18332, 46140, 47181, 62996, 19071, 30753, 55953, 62831, 8814, 44566, 2191, 16703, 36414, 55831, 28696, 43850]
    ##samples = op.operation.numToBinary(arr)
    #besar_segmen = 2
    #threshold = 200
    # ########### DECODING ###############
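# --- Hedged usage sketch (added for illustration; not part of the original script). ---
# The GDE path returns no reduce maps, so the call pattern differs slightly from the
# GRDEI run above. The file paths and layer count below are illustrative assumptions.
#
#   (locmap_list, payload_sizes, partisi) = multilayer_encode(
#       "D:\\payload.txt", "D:\\coba16.wav", 50, 20, 10, "GDE", 20)
#   multilayer_decode("D:\\GDE_encoded_coba16.wav", 20, payload_sizes, partisi,
#                     "GDE", 20, locmap_list)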
mit
drandykass/fatiando
gallery/gravmag/eqlayer_transform.py
6
3046
""" Equivalent layer for griding and upward-continuing gravity data ------------------------------------------------------------------------- The equivalent layer is one of the best methods for griding and upward continuing gravity data and much more. The trade-off is that performing this requires an inversion and later forward modeling, which are more time consuming and more difficult to tune than the standard griding and FFT-based approaches. This example uses the equivalent layer in :mod:`fatiando.gravmag.eqlayer` to grid and upward continue some gravity data. There are more advanced methods in the module than the one we are showing here. They can be more efficient but usually require more configuration. """ from __future__ import division, print_function import matplotlib.pyplot as plt from fatiando.gravmag import prism, sphere from fatiando.gravmag.eqlayer import EQLGravity from fatiando.inversion import Damping from fatiando import gridder, utils, mesher # First thing to do is make some synthetic data to test the method. We'll use a # single prism to keep it simple props = {'density': 500} model = [mesher.Prism(-5000, 5000, -200, 200, 100, 4000, props)] # The synthetic data will be generated on a random scatter of points area = [-8000, 8000, -5000, 5000] x, y, z = gridder.scatter(area, 300, z=0, seed=42) # Generate some noisy data from our model gz = utils.contaminate(prism.gz(x, y, z, model), 0.2, seed=0) # Now for the equivalent layer. We must setup a layer of point masses where # we'll estimate a density distribution that fits our synthetic data layer = mesher.PointGrid(area, 500, (20, 20)) # Estimate the density using enough damping so that won't try to fit the error eql = EQLGravity(x, y, z, gz, layer) + 1e-22*Damping(layer.size) eql.fit() # Now we add the estimated densities to our layer layer.addprop('density', eql.estimate_) # and print some statistics of how well the estimated layer fits the data residuals = eql[0].residuals() print("Residuals:") print(" mean:", residuals.mean(), 'mGal') print(" stddev:", residuals.std(), 'mGal') # Now I can forward model gravity data anywhere we want. For interpolation, we # calculate it on a grid. For upward continuation, at a greater height. We can # even combine both into a single operation. x2, y2, z2 = gridder.regular(area, (50, 50), z=-1000) gz_up = sphere.gz(x2, y2, z2, layer) fig, axes = plt.subplots(1, 2, figsize=(8, 6)) ax = axes[0] ax.set_title('Original data') ax.set_aspect('equal') tmp = ax.tricontourf(y/1000, x/1000, gz, 30, cmap='viridis') fig.colorbar(tmp, ax=ax, pad=0.1, aspect=30, orientation='horizontal').set_label('mGal') ax.plot(y/1000, x/1000, 'xk') ax.set_xlabel('y (km)') ax.set_ylabel('x (km)') ax = axes[1] ax.set_title('Gridded and upward continued') ax.set_aspect('equal') tmp = ax.tricontourf(y2/1000, x2/1000, gz_up, 30, cmap='viridis') fig.colorbar(tmp, ax=ax, pad=0.1, aspect=30, orientation='horizontal').set_label('mGal') ax.set_xlabel('y (km)') plt.tight_layout() plt.show()
bsd-3-clause