Dataset columns:
  repo_name  stringlengths   7 .. 92
  path       stringlengths   5 .. 129
  copies     stringclasses   201 values
  size       stringlengths   4 .. 6
  content    stringlengths   1.03k .. 375k
  license    stringclasses   15 values
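The rows that follow pair each source file with this metadata. As a minimal sketch of how such a dump could be consumed, assuming the rows are exported as JSON Lines with exactly these field names (the filename "code_samples.jsonl" and the helper below are hypothetical, not part of the dataset):

import json

# Minimal sketch: iterate a JSON Lines dump whose records carry the columns
# listed above. Filename and helper are illustrative assumptions only.
def iter_rows(path="code_samples.jsonl", license_filter=None):
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)  # keys: repo_name, path, copies, size, content, license
            if license_filter is not None and row["license"] != license_filter:
                continue
            yield row

if __name__ == "__main__":
    # Example use: count bsd-3-clause rows and the total size of their content.
    n_rows = 0
    n_chars = 0
    for row in iter_rows(license_filter="bsd-3-clause"):
        n_rows += 1
        n_chars += len(row["content"])
    print("{} rows, {} characters of code".format(n_rows, n_chars))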
gsmaxwell/phase_offset_rx
gnuradio-core/src/examples/pfb/fmtest.py
17
7785
#!/usr/bin/env python # # Copyright 2009 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. # from gnuradio import gr, blks2 import sys, math, time try: import scipy from scipy import fftpack except ImportError: print "Error: Program requires scipy (see: www.scipy.org)." sys.exit(1) try: import pylab except ImportError: print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)." sys.exit(1) class fmtx(gr.hier_block2): def __init__(self, lo_freq, audio_rate, if_rate): gr.hier_block2.__init__(self, "build_fm", gr.io_signature(1, 1, gr.sizeof_float), # Input signature gr.io_signature(1, 1, gr.sizeof_gr_complex)) # Output signature fmtx = blks2.nbfm_tx (audio_rate, if_rate, max_dev=5e3, tau=75e-6) # Local oscillator lo = gr.sig_source_c (if_rate, # sample rate gr.GR_SIN_WAVE, # waveform type lo_freq, #frequency 1.0, # amplitude 0) # DC Offset mixer = gr.multiply_cc () self.connect (self, fmtx, (mixer, 0)) self.connect (lo, (mixer, 1)) self.connect (mixer, self) class fmtest(gr.top_block): def __init__(self): gr.top_block.__init__(self) self._nsamples = 1000000 self._audio_rate = 8000 # Set up N channels with their own baseband and IF frequencies self._N = 5 chspacing = 16000 freq = [10, 20, 30, 40, 50] f_lo = [0, 1*chspacing, -1*chspacing, 2*chspacing, -2*chspacing] self._if_rate = 4*self._N*self._audio_rate # Create a signal source and frequency modulate it self.sum = gr.add_cc () for n in xrange(self._N): sig = gr.sig_source_f(self._audio_rate, gr.GR_SIN_WAVE, freq[n], 0.5) fm = fmtx(f_lo[n], self._audio_rate, self._if_rate) self.connect(sig, fm) self.connect(fm, (self.sum, n)) self.head = gr.head(gr.sizeof_gr_complex, self._nsamples) self.snk_tx = gr.vector_sink_c() self.channel = blks2.channel_model(0.1) self.connect(self.sum, self.head, self.channel, self.snk_tx) # Design the channlizer self._M = 10 bw = chspacing/2.0 t_bw = chspacing/10.0 self._chan_rate = self._if_rate / self._M self._taps = gr.firdes.low_pass_2(1, self._if_rate, bw, t_bw, attenuation_dB=100, window=gr.firdes.WIN_BLACKMAN_hARRIS) tpc = math.ceil(float(len(self._taps)) / float(self._M)) print "Number of taps: ", len(self._taps) print "Number of channels: ", self._M print "Taps per channel: ", tpc self.pfb = blks2.pfb_channelizer_ccf(self._M, self._taps) self.connect(self.channel, self.pfb) # Create a file sink for each of M output channels of the filter and connect it self.fmdet = list() self.squelch = list() self.snks = list() for i in xrange(self._M): self.fmdet.append(blks2.nbfm_rx(self._audio_rate, self._chan_rate)) self.squelch.append(blks2.standard_squelch(self._audio_rate*10)) self.snks.append(gr.vector_sink_f()) self.connect((self.pfb, i), self.fmdet[i], self.squelch[i], self.snks[i]) def num_tx_channels(self): return self._N def num_rx_channels(self): return self._M def main(): fm = fmtest() tstart = 
time.time() fm.run() tend = time.time() if 1: fig1 = pylab.figure(1, figsize=(12,10), facecolor="w") fig2 = pylab.figure(2, figsize=(12,10), facecolor="w") fig3 = pylab.figure(3, figsize=(12,10), facecolor="w") Ns = 10000 Ne = 100000 fftlen = 8192 winfunc = scipy.blackman # Plot transmitted signal fs = fm._if_rate d = fm.snk_tx.data()[Ns:Ns+Ne] sp1_f = fig1.add_subplot(2, 1, 1) X,freq = sp1_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs, window = lambda d: d*winfunc(fftlen), visible=False) X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X))) f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size)) p1_f = sp1_f.plot(f_in, X_in, "b") sp1_f.set_xlim([min(f_in), max(f_in)+1]) sp1_f.set_ylim([-120.0, 20.0]) sp1_f.set_title("Input Signal", weight="bold") sp1_f.set_xlabel("Frequency (Hz)") sp1_f.set_ylabel("Power (dBW)") Ts = 1.0/fs Tmax = len(d)*Ts t_in = scipy.arange(0, Tmax, Ts) x_in = scipy.array(d) sp1_t = fig1.add_subplot(2, 1, 2) p1_t = sp1_t.plot(t_in, x_in.real, "b-o") #p1_t = sp1_t.plot(t_in, x_in.imag, "r-o") sp1_t.set_ylim([-5, 5]) # Set up the number of rows and columns for plotting the subfigures Ncols = int(scipy.floor(scipy.sqrt(fm.num_rx_channels()))) Nrows = int(scipy.floor(fm.num_rx_channels() / Ncols)) if(fm.num_rx_channels() % Ncols != 0): Nrows += 1 # Plot each of the channels outputs. Frequencies on Figure 2 and # time signals on Figure 3 fs_o = fm._audio_rate for i in xrange(len(fm.snks)): # remove issues with the transients at the beginning # also remove some corruption at the end of the stream # this is a bug, probably due to the corner cases d = fm.snks[i].data()[Ns:Ne] sp2_f = fig2.add_subplot(Nrows, Ncols, 1+i) X,freq = sp2_f.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o, window = lambda d: d*winfunc(fftlen), visible=False) #X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X))) X_o = 10.0*scipy.log10(abs(X)) #f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size)) f_o = scipy.arange(0, fs_o/2.0, fs_o/2.0/float(X_o.size)) p2_f = sp2_f.plot(f_o, X_o, "b") sp2_f.set_xlim([min(f_o), max(f_o)+0.1]) sp2_f.set_ylim([-120.0, 20.0]) sp2_f.grid(True) sp2_f.set_title(("Channel %d" % i), weight="bold") sp2_f.set_xlabel("Frequency (kHz)") sp2_f.set_ylabel("Power (dBW)") Ts = 1.0/fs_o Tmax = len(d)*Ts t_o = scipy.arange(0, Tmax, Ts) x_t = scipy.array(d) sp2_t = fig3.add_subplot(Nrows, Ncols, 1+i) p2_t = sp2_t.plot(t_o, x_t.real, "b") p2_t = sp2_t.plot(t_o, x_t.imag, "r") sp2_t.set_xlim([min(t_o), max(t_o)+1]) sp2_t.set_ylim([-1, 1]) sp2_t.set_xlabel("Time (s)") sp2_t.set_ylabel("Amplitude") pylab.show() if __name__ == "__main__": main()
gpl-3.0
smblance/ggplot
ggplot/tests/__init__.py
8
10135
from __future__ import (absolute_import, division, print_function, unicode_literals) import matplotlib as mpl import matplotlib.pyplot as plt from nose.tools import with_setup, make_decorator, assert_true import warnings figsize_orig = mpl.rcParams["figure.figsize"] def setup_package(): mpl.rcParams["figure.figsize"] = (11.0, 8.0) def teardown_package(): mpl.rcParams["figure.figsize"] = figsize_orig import os # Testing framework shamelessly stolen from matplotlib... # Tests which should be run with 'python tests.py' or via 'must be # included here. default_test_modules = [ 'ggplot.tests.test_basic', 'ggplot.tests.test_readme_examples', 'ggplot.tests.test_ggplot_internals', 'ggplot.tests.test_geom', 'ggplot.tests.test_stat', 'ggplot.tests.test_stat_calculate_methods', 'ggplot.tests.test_stat_summary', 'ggplot.tests.test_geom_rect', 'ggplot.tests.test_geom_dotplot', 'ggplot.tests.test_geom_bar', 'ggplot.tests.test_qplot', 'ggplot.tests.test_geom_lines', 'ggplot.tests.test_geom_linerange', 'ggplot.tests.test_geom_pointrange', 'ggplot.tests.test_faceting', 'ggplot.tests.test_stat_function', 'ggplot.tests.test_scale_facet_wrap', 'ggplot.tests.test_scale_log', 'ggplot.tests.test_reverse', 'ggplot.tests.test_ggsave', 'ggplot.tests.test_theme_mpl', 'ggplot.tests.test_colors', 'ggplot.tests.test_chart_components', 'ggplot.tests.test_legend', 'ggplot.tests.test_element_target', 'ggplot.tests.test_element_text', 'ggplot.tests.test_theme', 'ggplot.tests.test_theme_bw', 'ggplot.tests.test_theme_gray', 'ggplot.tests.test_theme_mpl', 'ggplot.tests.test_theme_seaborn' ] _multiprocess_can_split_ = True # Check that the test directories exist if not os.path.exists(os.path.join( os.path.dirname(__file__), 'baseline_images')): raise IOError( 'The baseline image directory does not exist. ' 'This is most likely because the test data is not installed. ' 'You may need to install ggplot from source to get the ' 'test data.') def _assert_same_ggplot_image(gg, name, test_file, tol=17): """Asserts that the ggplot object produces the right image""" fig = gg.draw() return _assert_same_figure_images(fig, name, test_file, tol=tol) class ImagesComparisonFailure(Exception): pass def _assert_same_figure_images(fig, name, test_file, tol=17): """Asserts that the figure object produces the right image""" import os import shutil from matplotlib import cbook from matplotlib.testing.compare import compare_images from nose.tools import assert_is_not_none if not ".png" in name: name = name+".png" basedir = os.path.abspath(os.path.dirname(test_file)) basename = os.path.basename(test_file) subdir = os.path.splitext(basename)[0] baseline_dir = os.path.join(basedir, 'baseline_images', subdir) result_dir = os.path.abspath(os.path.join('result_images', subdir)) if not os.path.exists(result_dir): cbook.mkdirs(result_dir) orig_expected_fname = os.path.join(baseline_dir, name) actual_fname = os.path.join(result_dir, name) def make_test_fn(fname, purpose): base, ext = os.path.splitext(fname) return '%s-%s%s' % (base, purpose, ext) expected_fname = make_test_fn(actual_fname, 'expected') # Save the figure before testing whether the original image # actually exists. This make creating new tests much easier, # as the result image can afterwards just be copied. 
fig.savefig(actual_fname) if os.path.exists(orig_expected_fname): shutil.copyfile(orig_expected_fname, expected_fname) else: raise Exception("Baseline image %s is missing" % orig_expected_fname) err = compare_images(expected_fname, actual_fname, tol, in_decorator=True) if err: msg = 'images not close: {actual:s} vs. {expected:s} (RMS {rms:.2f})'.format(**err) raise ImagesComparisonFailure(msg) return err def get_assert_same_ggplot(test_file): """Returns a "assert_same_ggplot" function for these test file call it like `assert_same_ggplot = get_assert_same_ggplot(__file__)` """ def curried(*args, **kwargs): kwargs["test_file"] = test_file return _assert_same_ggplot_image(*args, **kwargs) curried.__doc__ = _assert_same_ggplot_image.__doc__ return curried def assert_same_elements(first,second, msg=None): assert_true(len(first) == len(second), "different length") assert_true(all([a==b for a,b in zip(first,second)]), "Unequal: %s vs %s" % (first, second)) def image_comparison(baseline_images=None, tol=17, extensions=None): """ call signature:: image_comparison(baseline_images=['my_figure'], tol=17) Compare images generated by the test with those specified in *baseline_images*, which must correspond else an ImagesComparisonFailure exception will be raised. Keyword arguments: *baseline_images*: list A list of strings specifying the names of the images generated by calls to :meth:`matplotlib.figure.savefig`. *tol*: (default 13) The RMS threshold above which the test is considered failed. """ if baseline_images is None: raise ValueError('baseline_images must be specified') if extensions: # ignored, only for compatibility with matplotlibs decorator! pass def compare_images_decorator(func): import inspect _file = inspect.getfile(func) def decorated(): # make sure we don't carry over bad images from former tests. assert len(plt.get_fignums()) == 0, "no of open figs: %s -> find the last test with ' " \ "python tests.py -v' and add a '@cleanup' decorator." % \ str(plt.get_fignums()) func() assert len(plt.get_fignums()) == len(baseline_images), "different number of " \ "baseline_images and actuall " \ "plots." for fignum, baseline in zip(plt.get_fignums(), baseline_images): figure = plt.figure(fignum) _assert_same_figure_images(figure, baseline, _file, tol=tol) # also use the cleanup decorator to close any open figures! return make_decorator(cleanup(func))(decorated) return compare_images_decorator def cleanup(func): """Decorator to add cleanup to the testing function @cleanup def test_something(): " ... " Note that `@cleanup` is useful *only* for test functions, not for test methods or inside of TestCase subclasses. """ def _teardown(): plt.close('all') warnings.resetwarnings() #reset any warning filters set in tests return with_setup(setup=_setup, teardown=_teardown)(func) # This is called from the cleanup decorator def _setup(): # The baseline images are created in this locale, so we should use # it during all of the tests. import locale import warnings from matplotlib.backends import backend_agg, backend_pdf, backend_svg try: locale.setlocale(locale.LC_ALL, str('en_US.UTF-8')) except locale.Error: try: locale.setlocale(locale.LC_ALL, str('English_United States.1252')) except locale.Error: warnings.warn( "Could not set locale to English/United States. 
" "Some date-related tests may fail") mpl.use('Agg', warn=False) # use Agg backend for these tests if mpl.get_backend().lower() != "agg" and mpl.get_backend().lower() != "qt4agg": raise Exception(("Using a wrong matplotlib backend ({0}), which will not produce proper " "images").format(mpl.get_backend())) # These settings *must* be hardcoded for running the comparison # tests mpl.rcdefaults() # Start with all defaults mpl.rcParams['text.hinting'] = True mpl.rcParams['text.antialiased'] = True #mpl.rcParams['text.hinting_factor'] = 8 # Clear the font caches. Otherwise, the hinting mode can travel # from one test to another. backend_agg.RendererAgg._fontd.clear() backend_pdf.RendererPdf.truetype_font_cache.clear() backend_svg.RendererSVG.fontd.clear() # make sure we don't carry over bad plots from former tests assert len(plt.get_fignums()) == 0, "no of open figs: %s -> find the last test with ' " \ "python tests.py -v' and add a '@cleanup' decorator." % \ str(plt.get_fignums()) # This is here to run it like "from ggplot.tests import test; test()" def test(verbosity=1): """run the ggplot test suite""" old_backend = mpl.rcParams['backend'] try: mpl.use('agg') import nose import nose.plugins.builtin from matplotlib.testing.noseclasses import KnownFailure from nose.plugins.manager import PluginManager from nose.plugins import multiprocess # store the old values before overriding plugins = [] plugins.append( KnownFailure() ) plugins.extend( [plugin() for plugin in nose.plugins.builtin.plugins] ) manager = PluginManager(plugins=plugins) config = nose.config.Config(verbosity=verbosity, plugins=manager) # Nose doesn't automatically instantiate all of the plugins in the # child processes, so we have to provide the multiprocess plugin with # a list. multiprocess._instantiate_plugins = [KnownFailure] success = nose.run( defaultTest=default_test_modules, config=config, ) finally: if old_backend.lower() != 'agg': mpl.use(old_backend) return success test.__test__ = False # nose: this function is not a test
bsd-2-clause
will-iam/Variant
script/process/ergodicity_scaling.py
1
4083
#!/usr/bin/python3 # -*- coding:utf-8 -*- import __future__ import parser import sys import matplotlib.pyplot as plt #plt.style.use('ggplot') import numpy as np import operator from collections import * caseSize = (8192, 8192) if parser.args.res: maxAvailableNode = parser.args.res else: maxAvailableNode = 8 sizeDataDict = [] for p in range(0, int(np.log2(maxAvailableNode)) + 1): filterDict = {'nSizeX' : caseSize[0], 'nSizeY' : caseSize[1], 'R' : 64 * 2**p} print filterDict data = parser.getData(filterDict) if len(data): sizeDataDict.append(data) if len(sizeDataDict) == 0: print("No data found.") sys.exit(1) loopTimeDict = dict() for data in sizeDataDict: for key, value in data.items(): keyDict = parser.extractKey(key) Nt = keyDict['Nt'] R = keyDict['R'] if keyDict['Ny'] != caseSize[0] or keyDict['Nx'] != caseSize[1]: print("Error in collected data") sys.exit(1) for run in value: nSDD = run['point'][0] * run['point'][1] # On several nodes, select only pure SDD, which is the best result. if R > 64 and nSDD < R: continue # Don't remove HyperThreading. # We assume that hyperthreading with SDD leads to same results as with SDS. #if R > 64 and nSDD == R and Nt > 1.0: # continue # On a single node, select only pure SDS if R == 64 and nSDD > 1: continue loopT = run['loopTime'] * caseSize[0] * caseSize[1] * keyDict['Ni'] / 1000. if R not in loopTimeDict.keys(): loopTimeDict[R] = list() loopTimeDict[R].append(loopT) # And now, we must plot that fig = plt.figure(0, figsize=(9, 6)) ax = fig.add_subplot(111) #ax = fig.add_subplot(211) #ax.set_xscale('log', basex=2) #ax.set_yscale('log') maxSimulationNumber = 42 xArray = range(1, maxSimulationNumber + 1) ''' #Perfect Scale loopTimeDict[128] = [k / 2. for k in loopTimeDict[64]] loopTimeDict[256] = [k / 4. for k in loopTimeDict[64]] loopTimeDict[512] = [k / 8. for k in loopTimeDict[64]] ''' for r in sorted(loopTimeDict): nodeNeeded = r // 64 minT = np.min(loopTimeDict[r]) print("Min Time %s node(s) = %s" % (nodeNeeded, minT)) totalTimeArray = np.zeros(maxSimulationNumber) for i in xArray: totalTimeArray[i-1] = minT * (1 + (i * nodeNeeded - 1) // maxAvailableNode) ax.plot(xArray, totalTimeArray, '-', label="Batch Size %s" % (r // 64)) parser.outputCurve("ergodicity_scaling-%s.dat" % (r//64), xArray, totalTimeArray) ''' minSize = int(np.sqrt(np.min(syncTimeDict.keys()))) maxSize = int(np.sqrt(np.max(syncTimeDict.keys()))) nodeNumber = (caseSize[0] * caseSize[1] / (maxSize * maxSize)) ''' plt.title('%sx%s batch time with %s node(s) available at the same time.' 
% (caseSize[0], caseSize[1], maxAvailableNode)) plt.xlabel('Total number of simulation to run') plt.ylabel('Loop Time') plt.legend() ''' bx = fig.add_subplot(212) bx.set_xscale('log', basex=2) bx.plot(sorted(sdsWeakDict), [np.min(v) for k, v in sorted(sdsWeakDict.items(), key=operator.itemgetter(0))], 'g+-', label="SDS scaling") bx.plot(sorted(sddWeakDict), [np.min(v) for k, v in sorted(sddWeakDict.items())], 'b+-', label="SDD scaling") #bx.plot(sorted(hybridWeakDict), [np.min(v) for k, v in sorted(hybridWeakDict.items())], 'y+-', label="Hybrid scaling") bx.plot(sorted(sddWeakDict), [firstValueSDD for k in sorted(sddWeakDict.keys())], 'b--', label="SDD ideal") bx.plot(sorted(sdsWeakDict), [firstValueSDS for k in sorted(sdsWeakDict.keys())], 'g--', label="SDS ideal") for k in sdsWeakDict: bx.plot(np.full(len(sdsWeakDict[k]), k), sdsWeakDict[k], 'g+') for k in sddWeakDict: bx.plot(np.full(len(sddWeakDict[k]), k), sddWeakDict[k], 'b+') plt.title('Weak Scaling from %sx%s to %sx%s' % (initSize, initSize, initSize * 2**((maxPower-1) / 2), initSize * 2**((maxPower-1) / 2)) ) plt.xlabel('Core(s)') plt.ylabel('Loop Time / iteration') plt.legend() ''' plt.show()
mit
pv/scikit-learn
examples/neighbors/plot_nearest_centroid.py
264
1804
""" =============================== Nearest Centroid Classification =============================== Sample usage of Nearest Centroid classification. It will plot the decision boundaries for each class. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap from sklearn import datasets from sklearn.neighbors import NearestCentroid n_neighbors = 15 # import some data to play with iris = datasets.load_iris() X = iris.data[:, :2] # we only take the first two features. We could # avoid this ugly slicing by using a two-dim dataset y = iris.target h = .02 # step size in the mesh # Create color maps cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF']) cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF']) for shrinkage in [None, 0.1]: # we create an instance of Neighbours Classifier and fit the data. clf = NearestCentroid(shrink_threshold=shrinkage) clf.fit(X, y) y_pred = clf.predict(X) print(shrinkage, np.mean(y == y_pred)) # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.figure() plt.pcolormesh(xx, yy, Z, cmap=cmap_light) # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold) plt.title("3-Class classification (shrink_threshold=%r)" % shrinkage) plt.axis('tight') plt.show()
bsd-3-clause
xavierwu/scikit-learn
examples/linear_model/plot_ransac.py
250
1673
""" =========================================== Robust linear model estimation using RANSAC =========================================== In this example we see how to robustly fit a linear model to faulty data using the RANSAC algorithm. """ import numpy as np from matplotlib import pyplot as plt from sklearn import linear_model, datasets n_samples = 1000 n_outliers = 50 X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1, n_informative=1, noise=10, coef=True, random_state=0) # Add outlier data np.random.seed(0) X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1)) y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers) # Fit line using all data model = linear_model.LinearRegression() model.fit(X, y) # Robustly fit linear model with RANSAC algorithm model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression()) model_ransac.fit(X, y) inlier_mask = model_ransac.inlier_mask_ outlier_mask = np.logical_not(inlier_mask) # Predict data of estimated models line_X = np.arange(-5, 5) line_y = model.predict(line_X[:, np.newaxis]) line_y_ransac = model_ransac.predict(line_X[:, np.newaxis]) # Compare estimated coefficients print("Estimated coefficients (true, normal, RANSAC):") print(coef, model.coef_, model_ransac.estimator_.coef_) plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers') plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers') plt.plot(line_X, line_y, '-k', label='Linear regressor') plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor') plt.legend(loc='lower right') plt.show()
bsd-3-clause
giorgiop/scikit-learn
examples/ensemble/plot_adaboost_twoclass.py
347
3268
""" ================== Two-class AdaBoost ================== This example fits an AdaBoosted decision stump on a non-linearly separable classification dataset composed of two "Gaussian quantiles" clusters (see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision boundary and decision scores. The distributions of decision scores are shown separately for samples of class A and B. The predicted class label for each sample is determined by the sign of the decision score. Samples with decision scores greater than zero are classified as B, and are otherwise classified as A. The magnitude of a decision score determines the degree of likeness with the predicted class label. Additionally, a new dataset could be constructed containing a desired purity of class B, for example, by only selecting samples with a decision score above some value. """ print(__doc__) # Author: Noel Dawe <noel.dawe@gmail.com> # # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt from sklearn.ensemble import AdaBoostClassifier from sklearn.tree import DecisionTreeClassifier from sklearn.datasets import make_gaussian_quantiles # Construct dataset X1, y1 = make_gaussian_quantiles(cov=2., n_samples=200, n_features=2, n_classes=2, random_state=1) X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5, n_samples=300, n_features=2, n_classes=2, random_state=1) X = np.concatenate((X1, X2)) y = np.concatenate((y1, - y2 + 1)) # Create and fit an AdaBoosted decision tree bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1), algorithm="SAMME", n_estimators=200) bdt.fit(X, y) plot_colors = "br" plot_step = 0.02 class_names = "AB" plt.figure(figsize=(10, 5)) # Plot the decision boundaries plt.subplot(121) x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step), np.arange(y_min, y_max, plot_step)) Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired) plt.axis("tight") # Plot the training points for i, n, c in zip(range(2), class_names, plot_colors): idx = np.where(y == i) plt.scatter(X[idx, 0], X[idx, 1], c=c, cmap=plt.cm.Paired, label="Class %s" % n) plt.xlim(x_min, x_max) plt.ylim(y_min, y_max) plt.legend(loc='upper right') plt.xlabel('x') plt.ylabel('y') plt.title('Decision Boundary') # Plot the two-class decision scores twoclass_output = bdt.decision_function(X) plot_range = (twoclass_output.min(), twoclass_output.max()) plt.subplot(122) for i, n, c in zip(range(2), class_names, plot_colors): plt.hist(twoclass_output[y == i], bins=10, range=plot_range, facecolor=c, label='Class %s' % n, alpha=.5) x1, x2, y1, y2 = plt.axis() plt.axis((x1, x2, y1, y2 * 1.2)) plt.legend(loc='upper right') plt.ylabel('Samples') plt.xlabel('Score') plt.title('Decision Scores') plt.tight_layout() plt.subplots_adjust(wspace=0.35) plt.show()
bsd-3-clause
sinkpoint/dipy
scratch/very_scratch/simulation_comparisons_modified.py
20
13117
import nibabel import os import numpy as np import dipy as dp import dipy.core.generalized_q_sampling as dgqs import dipy.io.pickles as pkl import scipy as sp from matplotlib.mlab import find import dipy.core.sphere_plots as splots import dipy.core.sphere_stats as sphats import dipy.core.geometry as geometry import get_vertices as gv #old SimData files ''' results_SNR030_1fibre results_SNR030_1fibre+iso results_SNR030_2fibres_15deg results_SNR030_2fibres_30deg results_SNR030_2fibres_60deg results_SNR030_2fibres_90deg results_SNR030_2fibres+iso_15deg results_SNR030_2fibres+iso_30deg results_SNR030_2fibres+iso_60deg results_SNR030_2fibres+iso_90deg results_SNR030_isotropic ''' #fname='/home/ian/Data/SimData/results_SNR030_1fibre' ''' file has one row for every voxel, every voxel is repeating 1000 times with the same noise level , then we have 100 different directions. 1000 * 100 is the number of all rows. The 100 conditions are given by 10 polar angles (in degrees) 0, 20, 40, 60, 80, 80, 60, 40, 20 and 0, and each of these with longitude angle 0, 40, 80, 120, 160, 200, 240, 280, 320, 360. ''' #new complete SimVoxels files simdata = ['fibres_2_SNR_80_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00', 'fibres_2_SNR_60_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00', 'fibres_2_SNR_40_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00', 'fibres_2_SNR_40_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00', 'fibres_2_SNR_20_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7', 'fibres_2_SNR_100_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00', 'fibres_2_SNR_20_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7', 'fibres_2_SNR_40_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7', 'fibres_2_SNR_60_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7', 'fibres_2_SNR_100_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7', 'fibres_1_SNR_60_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7', 'fibres_2_SNR_80_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00', 'fibres_2_SNR_100_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00', 'fibres_2_SNR_100_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7', 'fibres_2_SNR_80_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00', 'fibres_2_SNR_60_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7', 'fibres_2_SNR_40_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7', 'fibres_2_SNR_80_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7', 'fibres_2_SNR_20_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00', 'fibres_2_SNR_60_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7', 'fibres_1_SNR_100_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7', 'fibres_1_SNR_100_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00', 'fibres_2_SNR_20_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00', 'fibres_1_SNR_20_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7', 'fibres_2_SNR_40_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00', 'fibres_2_SNR_20_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00', 'fibres_2_SNR_80_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7', 'fibres_1_SNR_80_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7', 'fibres_2_SNR_20_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7', 'fibres_2_SNR_60_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00', 'fibres_2_SNR_100_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00', 'fibres_2_SNR_80_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7', 'fibres_2_SNR_60_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00', 'fibres_2_SNR_20_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7', 'fibres_2_SNR_100_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7', 'fibres_1_SNR_20_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00', 
'fibres_2_SNR_80_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7', 'fibres_1_SNR_80_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00', 'fibres_2_SNR_100_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7', 'fibres_1_SNR_40_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7', 'fibres_1_SNR_60_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00', 'fibres_2_SNR_40_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7', 'fibres_2_SNR_60_angle_30_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00', 'fibres_2_SNR_40_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00', 'fibres_2_SNR_60_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7', 'fibres_2_SNR_80_angle_15_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00', 'fibres_1_SNR_40_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00', 'fibres_2_SNR_100_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00', 'fibres_2_SNR_40_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7', 'fibres_2_SNR_20_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00'] simdir = '/home/ian/Data/SimVoxels/' def gq_tn_calc_save(): for simfile in simdata: dataname = simfile print dataname sim_data=np.loadtxt(simdir+dataname) marta_table_fname='/home/ian/Data/SimData/Dir_and_bvals_DSI_marta.txt' b_vals_dirs=np.loadtxt(marta_table_fname) bvals=b_vals_dirs[:,0]*1000 gradients=b_vals_dirs[:,1:] gq = dp.GeneralizedQSampling(sim_data,bvals,gradients) gqfile = simdir+'gq/'+dataname+'.pkl' pkl.save_pickle(gqfile,gq) ''' gq.IN gq.__doc__ gq.glob_norm_param gq.QA gq.__init__ gq.odf gq.__class__ gq.__module__ gq.q2odf_params ''' tn = dp.Tensor(sim_data,bvals,gradients) tnfile = simdir+'tn/'+dataname+'.pkl' pkl.save_pickle(tnfile,tn) ''' tn.ADC tn.__init__ tn._getevals tn.B tn.__module__ tn._getevecs tn.D tn.__new__ tn._getndim tn.FA tn.__reduce__ tn._getshape tn.IN tn.__reduce_ex__ tn._setevals tn.MD tn.__repr__ tn._setevecs tn.__class__ tn.__setattr__ tn.adc tn.__delattr__ tn.__sizeof__ tn.evals tn.__dict__ tn.__str__ tn.evecs tn.__doc__ tn.__subclasshook__ tn.fa tn.__format__ tn.__weakref__ tn.md tn.__getattribute__ tn._evals tn.ndim tn.__getitem__ tn._evecs tn.shape tn.__hash__ tn._getD ''' ''' file has one row for every voxel, every voxel is repeating 1000 times with the same noise level , then we have 100 different directions. 100 * 1000 is the number of all rows. At the moment this module is hardwired to the use of the EDS362 spherical mesh. I am assumung (needs testing) that directions 181 to 361 are the antipodal partners of directions 0 to 180. So when counting the number of different vertices that occur as maximal directions we wll map the indices modulo 181. 
''' def analyze_maxima(indices, max_dirs, subsets): '''This calculates the eigenstats for each of the replicated batches of the simulation data ''' results = [] for direction in subsets: batch = max_dirs[direction,:,:] index_variety = np.array([len(set(np.remainder(indices[direction,:],181)))]) #normed_centroid, polar_centroid, centre, b1 = sphats.eigenstats(batch) centre, b1 = sphats.eigenstats(batch) # make azimuth be in range (0,360) rather than (-180,180) centre[1] += 360*(centre[1] < 0) #results.append(np.concatenate((normed_centroid, polar_centroid, centre, b1, index_variety))) results.append(np.concatenate((centre, b1, index_variety))) return results #dt_first_directions = tn.evecs[:,:,0].reshape((100,1000,3)) # these are the principal directions for the full set of simulations #gq_tn_calc_save() eds=np.load(os.path.join(os.path.dirname(dp.__file__),'core','matrices','evenly_distributed_sphere_362.npz')) odf_vertices=eds['vertices'] def run_comparisons(sample_data=35): for simfile in [simdata[sample_data]]: dataname = simfile print dataname sim_data=np.loadtxt(simdir+dataname) gqfile = simdir+'gq/'+dataname+'.pkl' gq = pkl.load_pickle(gqfile) tnfile = simdir+'tn/'+dataname+'.pkl' tn = pkl.load_pickle(tnfile) dt_first_directions_in=odf_vertices[tn.IN] dt_indices = tn.IN.reshape((100,1000)) dt_results = analyze_maxima(dt_indices, dt_first_directions_in.reshape((100,1000,3)),range(10,90)) gq_indices = np.array(gq.IN[:,0],dtype='int').reshape((100,1000)) gq_first_directions_in=odf_vertices[np.array(gq.IN[:,0],dtype='int')] #print gq_first_directions_in.shape gq_results = analyze_maxima(gq_indices, gq_first_directions_in.reshape((100,1000,3)),range(10,90)) #for gqi see example dicoms_2_tracks gq.IN[:,0] np.set_printoptions(precision=3, suppress=True, linewidth=200, threshold=5000) out = open('/home/ian/Data/SimVoxels/Out/'+'***_'+dataname,'w') #print np.vstack(dt_results).shape, np.vstack(gq_results).shape results = np.hstack((np.vstack(dt_results), np.vstack(gq_results))) #print results.shape #results = np.vstack(dt_results) print >> out, results[:,:] out.close() #up = dt_batch[:,2]>= 0 #splots.plot_sphere(dt_batch[up], 'batch '+str(direction)) #splots.plot_lambert(dt_batch[up],'batch '+str(direction), centre) #spread = gq.q2odf_params e,v = np.linalg.eigh(np.dot(spread,spread.transpose())) effective_dimension = len(find(np.cumsum(e) > 0.05*np.sum(e))) #95% #rotated = np.dot(dt_batch,evecs) #rot_evals, rot_evecs = np.linalg.eig(np.dot(rotated.T,rotated)/rotated.shape[0]) #eval_order = np.argsort(rot_evals) #rotated = rotated[:,eval_order] #up = rotated[:,2]>= 0 #splot.plot_sphere(rotated[up],'first1000') #splot.plot_lambert(rotated[up],'batch '+str(direction)) def run_gq_sims(sample_data=[35,23,46,39,40,10,37,27,21,20]): results = [] out = open('/home/ian/Data/SimVoxels/Out/'+'npa+fa','w') for j in range(len(sample_data)): sample = sample_data[j] simfile = simdata[sample] dataname = simfile print dataname sim_data=np.loadtxt(simdir+dataname) marta_table_fname='/home/ian/Data/SimData/Dir_and_bvals_DSI_marta.txt' b_vals_dirs=np.loadtxt(marta_table_fname) bvals=b_vals_dirs[:,0]*1000 gradients=b_vals_dirs[:,1:] for j in np.vstack((np.arange(100)*1000,np.arange(100)*1000+1)).T.ravel(): # 0,1,1000,1001,2000,2001,... 
s = sim_data[j,:] gqs = dp.GeneralizedQSampling(s.reshape((1,102)),bvals,gradients,Lambda=3.5) tn = dp.Tensor(s.reshape((1,102)),bvals,gradients,fit_method='LS') t0, t1, t2, npa = gqs.npa(s, width = 5) print >> out, dataname, j, npa, tn.fa()[0] ''' for (i,o) in enumerate(gqs.odf(s)): print i,o for (i,o) in enumerate(gqs.odf_vertices): print i,o ''' #o = gqs.odf(s) #v = gqs.odf_vertices #pole = v[t0[0]] #eqv = dgqs.equatorial_zone_vertices(v, pole, 5) #print 'Number of equatorial vertices: ', len(eqv) #print np.max(o[eqv]),np.min(o[eqv]) #cos_e_pole = [np.dot(pole.T, v[i]) for i in eqv] #print np.min(cos1), np.max(cos1) #print 'equatorial max in equatorial vertices:', t1[0] in eqv #x = np.cross(v[t0[0]],v[t1[0]]) #x = x/np.sqrt(np.sum(x**2)) #print x #ptchv = dgqs.patch_vertices(v, x, 5) #print len(ptchv) #eqp = eqv[np.argmin([np.abs(np.dot(v[t1[0]].T,v[p])) for p in eqv])] #print (eqp, o[eqp]) #print t2[0] in ptchv, t2[0] in eqv #print np.dot(pole.T, v[t1[0]]), np.dot(pole.T, v[t2[0]]) #print ptchv[np.argmin([o[v] for v in ptchv])] #gq_indices = np.array(gq.IN[:,0],dtype='int').reshape((100,1000)) #gq_first_directions_in=odf_vertices[np.array(gq.IN[:,0],dtype='int')] #print gq_first_directions_in.shape #gq_results = analyze_maxima(gq_indices, gq_first_directions_in.reshape((100,1000,3)),range(100)) #for gqi see example dicoms_2_tracks gq.IN[:,0] #np.set_printoptions(precision=6, suppress=True, linewidth=200, threshold=5000) #out = open('/home/ian/Data/SimVoxels/Out/'+'+++_'+dataname,'w') #results = np.hstack((np.vstack(dt_results), np.vstack(gq_results))) #results = np.vstack(dt_results) #print >> out, results[:,:] out.close() run_comparisons() #run_gq_sims()
bsd-3-clause
NunoEdgarGub1/scikit-learn
examples/classification/plot_digits_classification.py
289
2397
""" ================================ Recognizing hand-written digits ================================ An example showing how the scikit-learn can be used to recognize images of hand-written digits. This example is commented in the :ref:`tutorial section of the user manual <introduction>`. """ print(__doc__) # Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org> # License: BSD 3 clause # Standard scientific Python imports import matplotlib.pyplot as plt # Import datasets, classifiers and performance metrics from sklearn import datasets, svm, metrics # The digits dataset digits = datasets.load_digits() # The data that we are interested in is made of 8x8 images of digits, let's # have a look at the first 3 images, stored in the `images` attribute of the # dataset. If we were working from image files, we could load them using # pylab.imread. Note that each image must have the same size. For these # images, we know which digit they represent: it is given in the 'target' of # the dataset. images_and_labels = list(zip(digits.images, digits.target)) for index, (image, label) in enumerate(images_and_labels[:4]): plt.subplot(2, 4, index + 1) plt.axis('off') plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest') plt.title('Training: %i' % label) # To apply a classifier on this data, we need to flatten the image, to # turn the data in a (samples, feature) matrix: n_samples = len(digits.images) data = digits.images.reshape((n_samples, -1)) # Create a classifier: a support vector classifier classifier = svm.SVC(gamma=0.001) # We learn the digits on the first half of the digits classifier.fit(data[:n_samples / 2], digits.target[:n_samples / 2]) # Now predict the value of the digit on the second half: expected = digits.target[n_samples / 2:] predicted = classifier.predict(data[n_samples / 2:]) print("Classification report for classifier %s:\n%s\n" % (classifier, metrics.classification_report(expected, predicted))) print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted)) images_and_predictions = list(zip(digits.images[n_samples / 2:], predicted)) for index, (image, prediction) in enumerate(images_and_predictions[:4]): plt.subplot(2, 4, index + 5) plt.axis('off') plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest') plt.title('Prediction: %i' % prediction) plt.show()
bsd-3-clause
abalckin/cwavenet
examples/WNvsPWN/show_snr.py
2
2454
#! /usr/bin/python3 import pylab as plb import numpy as np from matplotlib import rc rc('text', usetex=True) rc('text.latex', unicode=True) rc('text.latex', preamble=r'\usepackage[russian]{babel}') #rc('font',**{'family':'serif'}) rc('font',**{'size':'19'}) res = np.loadtxt('result.txt', delimiter=', ')[0:7] #import pdb; pdb.set_trace() #plb.barh(y_pos, performance, xerr=error, align='center', alpha=0.4) #plb.yscale('linear') plb.errorbar(res[:, 1], res[:, 5], yerr=res[:, 6], label='Традиционная вейвлет-сеть', linestyle='--', marker='*', color='black') plb.errorbar(res[:, 1], res[:, 11], yerr=res[:, 12], label='Полиморфная вейвлет-сеть', marker='o', color='green') plb.errorbar(res[:, 1], res[:, 1], yerr=res[:, 2], label='Отношение сигнал/шум для временного ряда $d(t), S$', color='blue') #import pdb; pdb.set_trace() plb.fill_between(res[:, 1], res[:, 1], res[:, 1]-np.max(res[:, 1]), res[:, 1], alpha=0.1, color='blue') plb.xscale('log') plb.legend(loc=0) plb.xlim(res[-1, 1]-0.1, res[0, 1]+20) plb.ylim(0, 670) plb.gca().set_xticks(res[:, 1]) #plb.gca().xaxis.set_major_locator(plb.LogLocator(numticks=50)) plb.gca().xaxis.set_major_formatter(plb.ScalarFormatter()) plb.ylabel('Отношение сигнал/шум для временного ряда $\hat{y}(t), M$') plb.xlabel('Отношение сигнал/шум для временного ряда $d(t), S$') plb.annotate('Область применения вейвлет-сетей', [7, 310]) plb.show() polym_higest=res[:, 11]>res[:, 1] polym_avg=res[polym_higest, 11][1:-2] std_higest=res[:, 5]>res[:, 1] std_avg=res[std_higest, 5][:-2] inp_avg=res[std_higest, 1][:-2] polym_min=res[polym_higest, 11][1:-2]-res[polym_higest, 12][1:-2] polym_max=res[polym_higest, 11][1:-2]+res[polym_higest, 12][1:-2] std_min=res[std_higest, 5][:-2]-res[std_higest, 6][:-2] std_max=res[std_higest, 5][:-2]+res[std_higest, 6][:-2] print('Улучшение в среднем на {}%'.format(np.average((polym_avg-std_avg)/std_avg*100))) print('Улучшение в по диапазону на {0}-{1}%'.format(np.average((polym_min-std_min)/std_min*100), np.average((polym_max-std_max)/std_max*100))) polym_avg_db=10*np.log10(polym_avg-inp_avg) std_avg_db=10*np.log10(std_avg-inp_avg) print('Улучшение в среднем на {}дб'.format(np.average(polym_avg_db-std_avg_db)))
gpl-2.0
linsalrob/EdwardsLab
phage_protein_blast_genera/tax_violin_plots.py
1
2239
""" """ import os import sys import argparse import matplotlib #matplotlib.use('Agg') import matplotlib.pyplot as plt if __name__ == '__main__': parser = argparse.ArgumentParser(description="") parser.add_argument('-f', help='Genome average output file (from genera_per_phage_protein.py', default='/home/redwards/Desktop/gav_all_host.out') parser.add_argument('-n', help='taxonomy name one of: kingdom / phylum / genus / species', default='genus') parser.add_argument('-v', help='verbose output', action="store_true") args = parser.parse_args() ynames = {'kingdom' : 'kingdoms', 'phylum' : 'phyla', 'genus' : 'genera', 'species' : 'species'} col = None colkey = {'kingdom' : 3, 'phylum' : 4, 'genus' : 5, 'species' : 6} if args.n not in colkey: sys.stderr.write("Sorry, taxonomy name must be one of {}\n".format("|".join(list(colkey.keys())))) sys.exit(-1) col = colkey[args.n] want = {'Gut', 'Mouth', 'Nose', 'Skin', 'Lungs'} data = {} with open(args.f, 'r') as fin: for l in fin: p=l.strip().split("\t") if p[2] not in want: p[2] = 'All phages' #continue ## comment or uncomment this to include/exclude all data if p[2] not in data: data[p[2]] = [] data[p[2]].append(float(p[col])) labels = sorted(data.keys()) scores = [] count = 1 ticks = [] for l in labels: scores.append(data[l]) ticks.append(count) count += 1 fig = plt.figure() ax = fig.add_subplot(111) # ax.boxplot(alldata) vp = ax.violinplot(scores, showmeans=True) for i, j in enumerate(vp['bodies']): if i == 0: j.set_color('gray') elif i == 1: j.set_color('sandybrown') else: j.set_color('lightpink') ax.set_xlabel("Body Site") ax.set_ylabel("Average number of {}".format(ynames[args.n])) ax.set_xticks(ticks) ax.set_xticklabels(labels, rotation='vertical') ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() fig.set_facecolor('white') plt.tight_layout() #plt.show() fig.savefig("/home/redwards/Desktop/bodysites.png")
mit
bmazin/ARCONS-pipeline
fluxcal/fluxCal.py
1
29931
#!/bin/python ''' fluxCal.py Created by Seth Meeker on 11-21-2012 Modified on 02-16-2015 to perform absolute fluxCal with point sources Opens ARCONS observation of a spectrophotometric standard star and associated wavelength cal file, reads in all photons and converts to energies. Bins photons to generate a spectrum, then divides this into the known spectrum of the object to create a Sensitivity curve. This curve is then written out to h5 file. Flags are associated with each pixel - see headers/pipelineFlags for descriptions. Note some flags are set here, others are set later on when creating photon lists. ''' import sys,os import tables import numpy as np from scipy import interpolate from scipy.optimize.minpack import curve_fit import matplotlib.pyplot as plt from photometry import LightCurve from util.FileName import FileName from util.ObsFile import ObsFile from util import MKIDStd from util.readDict import readDict from util.utils import rebin from util.utils import gaussianConvolution from util.utils import makeMovie from util.utils import fitBlackbody import hotpix.hotPixels as hp from scipy.optimize.minpack import curve_fit from scipy import interpolate import matplotlib from matplotlib.backends.backend_pdf import PdfPages from headers import pipelineFlags import figureHeader class FluxCal: def __init__(self,paramFile,plots=False,verbose=False): """ Opens flux file, prepares standard spectrum, and calculates flux factors for the file. Method is provided in param file. If 'relative' is selected, an obs file with standard star defocused over the entire array is expected, with accompanying sky file to do sky subtraction. If any other method is provided, 'absolute' will be done by default, wherein a point source is assumed to be present. The obs file is then broken into spectral frames with photometry (psf or aper) performed on each frame to generate the ARCONS observed spectrum. 
""" self.verbose=verbose self.plots = plots self.params = readDict() self.params.read_from_file(paramFile) run = self.params['run'] sunsetDate = self.params['fluxSunsetLocalDate'] self.fluxTstamp = self.params['fluxTimestamp'] skyTstamp = self.params['skyTimestamp'] wvlSunsetDate = self.params['wvlCalSunsetLocalDate'] wvlTimestamp = self.params['wvlCalTimestamp'] flatCalFileName = self.params['flatCalFileName'] needTimeAdjust = self.params['needTimeAdjust'] self.deadtime = float(self.params['deadtime']) #from firmware pulse detection self.timeSpacingCut = self.params['timeSpacingCut'] bLoadBeammap = self.params.get('bLoadBeammap',False) self.method = self.params['method'] self.objectName = self.params['object'] self.r = float(self.params['energyResolution']) self.photometry = self.params['photometry'] self.centroidRow = self.params['centroidRow'] self.centroidCol = self.params['centroidCol'] self.aperture = self.params['apertureRad'] self.annulusInner = self.params['annulusInner'] self.annulusOuter = self.params['annulusOuter'] self.collectingArea = self.params['collectingArea'] self.startTime = self.params['startTime'] self.intTime = self.params['integrationTime'] fluxFN = FileName(run=run,date=sunsetDate,tstamp=self.fluxTstamp) self.fluxFileName = fluxFN.obs() self.fluxFile = ObsFile(self.fluxFileName) if self.plots: self.plotSavePath = os.environ['MKID_PROC_PATH']+os.sep+'fluxCalSolnFiles'+os.sep+run+os.sep+sunsetDate+os.sep+'plots'+os.sep if not os.path.exists(self.plotSavePath): os.mkdir(self.plotSavePath) if self.verbose: print "Created directory %s"%self.plotSavePath obsFNs = [fluxFN] self.obsList = [self.fluxFile] if self.startTime in ['',None]: self.startTime=0 if self.intTime in ['',None]: self.intTime=-1 if self.method=="relative": try: print "performing Relative Flux Calibration" skyFN = FileName(run=run,date=sunsetDate,tstamp=skyTstamp) self.skyFileName = skyFN.obs() self.skyFile = ObsFile(self.skyFileName) obsFNs.append(skyFN) self.obsList.append(self.skyFile) except: print "For relative flux calibration a sky file must be provided in param file" self.__del__() else: self.method='absolute' print "performing Absolute Flux Calibration" if self.photometry not in ['aperture','PSF']: self.photometry='PSF' #default to PSF fitting if no valid photometry selected timeMaskFileNames = [fn.timeMask() for fn in obsFNs] timeAdjustFileName = FileName(run=run).timeAdjustments() #make filename for output fluxCalSoln file self.fluxCalFileName = FileName(run=run,date=sunsetDate,tstamp=self.fluxTstamp).fluxSoln() print "Creating flux cal: %s"%self.fluxCalFileName if wvlSunsetDate != '': wvlCalFileName = FileName(run=run,date=wvlSunsetDate,tstamp=wvlTimestamp).calSoln() if flatCalFileName =='': flatCalFileName=FileName(obsFile=self.fluxFile).flatSoln() #load cal files for flux file and, if necessary, sky file for iObs,obs in enumerate(self.obsList): if bLoadBeammap: print 'loading beammap',os.environ['MKID_BEAMMAP_PATH'] obs.loadBeammapFile(os.environ['MKID_BEAMMAP_PATH']) if wvlSunsetDate != '': obs.loadWvlCalFile(wvlCalFileName) else: obs.loadBestWvlCalFile() obs.loadFlatCalFile(flatCalFileName) obs.setWvlCutoffs(-1,-1) if needTimeAdjust: obs.loadTimeAdjustmentFile(timeAdjustFileName) timeMaskFileName = timeMaskFileNames[iObs] print timeMaskFileName if not os.path.exists(timeMaskFileName): print 'Running hotpix for ',obs hp.findHotPixels(obsFile=obs,outputFileName=timeMaskFileName,fwhm=np.inf,useLocalStdDev=True) print "Flux cal/sky file pixel mask saved to %s"%(timeMaskFileName) 
obs.loadHotPixCalFile(timeMaskFileName) if self.verbose: print "Loaded hot pixel file %s"%timeMaskFileName #get flat cal binning information since flux cal will need to match it self.wvlBinEdges = self.fluxFile.flatCalFile.root.flatcal.wavelengthBins.read() self.nWvlBins = self.fluxFile.flatWeights.shape[2] self.binWidths = np.empty((self.nWvlBins),dtype=float) self.binCenters = np.empty((self.nWvlBins),dtype=float) for i in xrange(self.nWvlBins): self.binWidths[i] = self.wvlBinEdges[i+1]-self.wvlBinEdges[i] self.binCenters[i] = (self.wvlBinEdges[i]+(self.binWidths[i]/2.0)) if self.method=='relative': print "Extracting ARCONS flux and sky spectra" self.loadRelativeSpectrum() print "Flux Spectrum loaded" self.loadSkySpectrum() print "Sky Spectrum loaded" elif self.method=='absolute': print "Extracting ARCONS point source spectrum" self.loadAbsoluteSpectrum() print "Loading standard spectrum" try: self.loadStdSpectrum(self.objectName) except KeyError: print "Invalid spectrum object name" self.__del__() sys.exit() print "Generating sensitivity curve" self.calculateFactors() print "Sensitivity Curve calculated" print "Writing fluxCal to file %s"%self.fluxCalFileName self.writeFactors(self.fluxCalFileName) if self.plots: self.makePlots() print "Done" def __del__(self): try: self.fluxFile.close() self.calFile.close() except AttributeError:#fluxFile was never defined pass def getDeadTimeCorrection(self, obs): #WRONG RIGHT NOW. NEEDS TO HAVE RAW COUNTS SUMMED, NOT CUBE WHICH EXCLUDES NOISE TAIL if self.verbose: print "Making raw cube to get dead time correction" cubeDict = obs.getSpectralCube(firstSec=self.startTime, integrationTime=self.intTime, weighted=False, fluxWeighted=False) cube= np.array(cubeDict['cube'], dtype=np.double) wvlBinEdges= cubeDict['wvlBinEdges'] effIntTime= cubeDict['effIntTime'] if self.verbose: print "median effective integration time = ", np.median(effIntTime) nWvlBins=len(wvlBinEdges)-1 if self.verbose: print "cube shape ", np.shape(cube) if self.verbose: print "effIntTime shape ", np.shape(effIntTime) #add third dimension to effIntTime for broadcasting effIntTime = np.reshape(effIntTime,np.shape(effIntTime)+(1,)) #put cube into counts/s in each pixel cube /= effIntTime #CALCULATE DEADTIME CORRECTION #NEED TOTAL COUNTS PER SECOND FOR EACH PIXEL TO DO PROPERLY #ASSUMES SAME CORRECTION FACTOR APPLIED FOR EACH WAVELENGTH, MEANING NO WL DEPENDANCE ON DEAD TIME EFFECT DTCorr = np.zeros((np.shape(cube)[0],np.shape(cube)[1]),dtype=float) for f in range(0,np.shape(cube)[2]): #if self.verbose: print cube[:,:,f] #if self.verbose: print '-----------------------' DTCorr += cube[:,:,f] #if self.verbose: print DTCorr #if self.verbose: print '\n=====================\n' #Correct for firmware dead time (100us in 2012 ARCONS firmware) DTCorrNew=DTCorr/(1-DTCorr*self.deadtime) CorrFactors = DTCorrNew/DTCorr #This is what the frames need to be multiplied by to get their true values if self.verbose: print "Dead time correction factors: ", CorrFactors #add third dimension to CorrFactors for broadcasting CorrFactors = np.reshape(CorrFactors,np.shape(CorrFactors)+(1,)) return CorrFactors def loadAbsoluteSpectrum(self): ''' extract the ARCONS measured spectrum of the spectrophotometric standard by breaking data into spectral cube and performing photometry (aper or psf) on each spectral frame ''' if self.verbose:print "Making spectral cube" cubeDict = self.fluxFile.getSpectralCube(firstSec=self.startTime, integrationTime=self.intTime, weighted=True, fluxWeighted=False) cube= 
np.array(cubeDict['cube'], dtype=np.double) effIntTime= cubeDict['effIntTime'] if self.verbose: print "median effective integration time in flux file cube = ", np.median(effIntTime) if self.verbose: print "cube shape ", np.shape(cube) if self.verbose: print "effIntTime shape ", np.shape(effIntTime) #add third dimension to effIntTime for broadcasting effIntTime = np.reshape(effIntTime,np.shape(effIntTime)+(1,)) #put cube into counts/s in each pixel cube /= effIntTime #get dead time correction factors DTCorr = self.getDeadTimeCorrection(self.fluxFile) cube*=DTCorr #cube now in units of counts/s and corrected for dead time if self.plots and not 'figureHeader' in sys.modules: if self.verbose: print "Saving spectral frames as movie..." movieCube = np.zeros((self.nWvlBins,np.shape(cube)[0],np.shape(cube)[1]),dtype=float) for i in xrange(self.nWvlBins): movieCube[i,:,:] = cube[:,:,i] makeMovie(movieCube,frameTitles=self.binCenters,cbar=True,outName=self.plotSavePath+'FluxCal_Cube_%s.gif'%(self.objectName), normMin=0, normMax=50) if self.verbose: print "Movie saved in %s"%self.plotSavePath LCplot=False #light curve pop-ups not compatible with FLuxCal plotting 2/18/15 #if self.photometry=='PSF': LCplot = False LC = LightCurve.LightCurve(verbose=self.verbose, showPlot=LCplot) self.fluxSpectrum=np.empty((self.nWvlBins),dtype=float) self.skySpectrum=np.zeros((self.nWvlBins),dtype=float) for i in xrange(self.nWvlBins): frame = cube[:,:,i] if self.verbose: print "%s photometry on frame %i of cube, central wvl = %f Angstroms"%(self.photometry,i,self.binCenters[i]) if self.photometry == 'aperture': fDict = LC.performPhotometry(self.photometry,frame,[[self.centroidCol,self.centroidRow]],expTime=None,aper_radius = self.aperture, annulus_inner = self.annulusInner, annulus_outer = self.annulusOuter, interpolation="linear") self.fluxSpectrum[i] = fDict['flux'] self.skySpectrum[i] = fDict['skyFlux'] print "Sky estimate = ", fDict['skyFlux'] else: fDict = LC.performPhotometry(self.photometry,frame,[[self.centroidCol,self.centroidRow]],expTime=None,aper_radius = self.aperture) self.fluxSpectrum[i] = fDict['flux'] self.fluxSpectrum=self.fluxSpectrum/self.binWidths/self.collectingArea #spectrum now in counts/s/Angs/cm^2 self.skySpectrum=self.skySpectrum/self.binWidths/self.collectingArea return self.fluxSpectrum, self.skySpectrum def loadRelativeSpectrum(self): self.fluxSpectra = [[[] for i in xrange(self.nCol)] for j in xrange(self.nRow)] self.fluxEffTime = [[[] for i in xrange(self.nCol)] for j in xrange(self.nRow)] for iRow in xrange(self.nRow): for iCol in xrange(self.nCol): count = self.fluxFile.getPixelCount(iRow,iCol) fluxDict = self.fluxFile.getPixelSpectrum(iRow,iCol,weighted=True,firstSec=0,integrationTime=-1) self.fluxSpectra[iRow][iCol],self.fluxEffTime[iRow][iCol] = fluxDict['spectrum'],fluxDict['effIntTime'] self.fluxSpectra = np.array(self.fluxSpectra) self.fluxEffTime = np.array(self.fluxEffTime) DTCorr = self.getDeadTimeCorrection(self.fluxFile) #print "Bin widths = ",self.binWidths self.fluxSpectra = self.fluxSpectra/self.binWidths/self.fluxEffTime*DTCorr self.fluxSpectrum = self.calculateMedian(self.fluxSpectra) #find median of subtracted spectra across whole array return self.fluxSpectrum def loadSkySpectrum(self): self.skySpectra = [[[] for i in xrange(self.nCol)] for j in xrange(self.nRow)] self.skyEffTime = [[[] for i in xrange(self.nCol)] for j in xrange(self.nRow)] for iRow in xrange(self.nRow): for iCol in xrange(self.nCol): count = self.skyFile.getPixelCount(iRow,iCol) skyDict = 
self.skyFile.getPixelSpectrum(iRow,iCol,weighted=True,firstSec=0,integrationTime=-1) self.skySpectra[iRow][iCol],self.skyEffTime[iRow][iCol] = skyDict['spectrum'],skyDict['effIntTime'] self.skySpectra = np.array(self.skySpectra) self.skyEffTime = np.array(self.skyEffTime) DTCorr = self.getDeadTimeCorrection(self.skyFile) self.skySpectra = self.skySpectra/self.binWidths/self.skyEffTime*DTCorr self.skySpectrum = self.calculateMedian(self.skySpectra) #find median of subtracted spectra across whole array return self.skySpectrum def loadStdSpectrum(self, objectName="G158-100"): #import the known spectrum of the calibrator and rebin to the histogram parameters given #must be imported into array with dtype float so division later does not have error std = MKIDStd.MKIDStd() a = std.load(objectName) a = std.countsToErgs(a) #convert std spectrum to ergs/s/Angs/cm^2 for BB fitting and cleaning self.stdWvls = np.array(a[:,0]) self.stdFlux = np.array(a[:,1]) #std object spectrum in ergs/s/Angs/cm^2 if self.plots: #create figure for plotting standard spectrum modifications self.stdFig = plt.figure() self.stdAx = self.stdFig.add_subplot(111) plt.xlim(3500,12000) plt.plot(self.stdWvls,self.stdFlux*1E15,linewidth=1,color='grey',alpha=0.75) convX_rev,convY_rev = self.cleanSpectrum(self.stdWvls,self.stdFlux) convX = convX_rev[::-1] #convolved spectrum comes back sorted backwards, from long wvls to low which screws up rebinning convY = convY_rev[::-1] #rebin cleaned spectrum to flat cal's wvlBinEdges newa = rebin(convX,convY,self.wvlBinEdges) rebinnedWvl = np.array(newa[:,0]) rebinnedFlux = np.array(newa[:,1]) if self.plots: #plot final resampled spectrum plt.plot(convX,convY*1E15,color='blue') plt.step(rebinnedWvl,rebinnedFlux*1E15,color = 'black',where='mid') plt.legend(['%s Spectrum'%self.objectName,'Blackbody Fit','Gaussian Convolved Spectrum','Rebinned Spectrum'],'upper right', numpoints=1) plt.xlabel(ur"Wavelength (\r{A})") plt.ylabel(ur"Flux (10$^{-15}$ ergs s$^{-1}$ cm$^{-2}$ \r{A}$^{-1}$)") plt.ylim(0.9*min(rebinnedFlux)*1E15, 1.1*max(rebinnedFlux)*1E15) plt.savefig(self.plotSavePath+'FluxCal_StdSpectrum_%s.eps'%self.objectName,format='eps') #convert standard spectrum back into counts/s/angstrom/cm^2 newa = std.ergsToCounts(newa) self.binnedSpectrum = np.array(newa[:,1]) def cleanSpectrum(self,x,y): ##=============== BB Fit to extend spectrum beyond 11000 Angstroms ================== fraction = 1.0/3.0 nirX = np.arange(int(x[(1.0-fraction)*len(x)]),20000) T, nirY = fitBlackbody(x,y,fraction=fraction,newWvls=nirX,tempGuess=5600) if self.plots: plt.plot(nirX,nirY*1E15,linestyle='--',linewidth=2, color="black",alpha=0.5) extendedWvl = np.concatenate((x,nirX[nirX>max(x)])) extendedFlux = np.concatenate((y,nirY[nirX>max(x)])) ##======= Gaussian convolution to smooth std spectrum to MKIDs median resolution ======== newX, newY = gaussianConvolution(extendedWvl,extendedFlux,xEnMin=0.005,xEnMax=6.0,xdE=0.001,fluxUnits = "lambda",r=self.r,plots=False) return newX, newY def calculateFactors(self): """ Calculate the sensitivity spectrum: the weighting factors that correct the flat calibrated spectra to the real spectra For relative calibration: First subtract sky spectrum from ARCONS observed spectrum. Then take median of this spectrum as it should be identical across the array, assuming the flat cal has done its job. Then divide this into the known spectrum of the object. For absolute calibration: self.fluxSpectra already has sky subtraction included. 
Simply divide this spectrum into the known standard spectrum. """ self.subtractedSpectrum = self.fluxSpectrum - self.skySpectrum self.subtractedSpectrum = np.array(self.subtractedSpectrum,dtype=float) #cast as floats so division does not fail later if self.method=='relative': normWvl = 5500 #Angstroms. Choose an arbitrary wvl to normalize the relative correction at ind = np.where(self.wvlBinEdges >= normWvl)[0][0]-1 self.subtractedSpectrum = self.subtractedSpectrum/(self.subtractedSpectrum[ind]) #normalize self.binnedSpectrum = self.binnedSpectrum/(self.binnedSpectrum[ind]) #normalize treated Std spectrum while we are at it #Calculate FluxCal factors self.fluxFactors = self.binnedSpectrum/self.subtractedSpectrum #self.fluxFlags = np.zeros(np.shape(self.fluxFactors),dtype='int') self.fluxFlags = np.empty(np.shape(self.fluxFactors),dtype='int') self.fluxFlags.fill(pipelineFlags.fluxCal['good']) #Initialise flag array filled with 'good' flags. JvE 5/1/2013. #set factors that will cause trouble to 1 #self.fluxFlags[self.fluxFactors == np.inf] = 1 self.fluxFlags[self.fluxFactors == np.inf] = pipelineFlags.fluxCal['infWeight'] #Modified to use flag dictionary - JvE 5/1/2013 self.fluxFactors[self.fluxFactors == np.inf]=1.0 self.fluxFlags[np.isnan(self.fluxFactors)] = pipelineFlags.fluxCal['nanWeight'] #Modified to use flag dictionary - JvE 5/1/2013 self.fluxFactors[np.isnan(self.fluxFactors)]=1.0 self.fluxFlags[self.fluxFactors <= 0]=pipelineFlags.fluxCal['LEzeroWeight'] #Modified to use flag dictionary - JvE 5/1/2013 self.fluxFactors[self.fluxFactors <= 0]=1.0 def calculateMedian(self, spectra): spectra2d = np.reshape(spectra,[self.nRow*self.nCol,self.nWvlBins]) wvlMedian = np.empty(self.nWvlBins,dtype=float) for iWvl in xrange(self.nWvlBins): spectrum = spectra2d[:,iWvl] goodSpectrum = spectrum[spectrum != 0]#dead pixels need to be taken out before calculating medians wvlMedian[iWvl] = np.median(goodSpectrum) return wvlMedian def makePlots(self): """ Output all debugging plots of ARCONS sky and object spectra, known calibrator spectrum, and sensitivity curve """ scratchDir = os.getenv('MKID_PROC_PATH') fluxDir = self.plotSavePath fluxCalBase = 'FluxCal_%s'%self.objectName plotFileName = fluxCalBase+".pdf" fullFluxPlotFileName = os.path.join(fluxDir,plotFileName) #uncomment to make some plots for the paper. 
Proper formatting Will also require figureheader to be imported and for movie making to be turned off self.paperFig = plt.figure() self.paperAx = self.paperFig.add_subplot(111) plt.xlim(4000,11000) plt.plot(self.binCenters,self.fluxFactors,linewidth=3,color='black') plt.xlabel(ur"Wavelength (\r{A})") plt.ylabel(ur"Spectral Calibration Curve") plt.ylim(0,150) plt.savefig(self.plotSavePath+'FluxCal_Sensitivity_%s.eps'%self.objectName,format='eps') #save throughput as a .npz file that other code uses when making paper plots np.savez(self.plotSavePath+'%s_%s_throughput.npz'%(self.objectName.strip(),self.fluxTstamp),throughput=1.0/self.fluxFactors,wvls=self.binCenters) pp = PdfPages(fullFluxPlotFileName) #plt.rcParams['font.size'] = 2 wvls = self.binCenters plt.figure() ax1 = plt.subplot(111) ax1.set_title('ARCONS median flat cal\'d flux in counts') plt.plot(wvls,self.fluxSpectrum) pp.savefig() plt.figure() ax2 = plt.subplot(111) ax2.set_title('ARCONS median flat cal\'d sky in counts') plt.plot(wvls,self.skySpectrum) pp.savefig() plt.figure() ax3 = plt.subplot(111) ax3.set_title('Flux data minus sky in counts') plt.plot(wvls,self.subtractedSpectrum) pp.savefig() plt.figure() ax4 = plt.subplot(111) ax4.set_title('Std Spectrum of %s'%(self.objectName)) plt.plot(self.stdWvls,self.stdFlux) pp.savefig() plt.figure() ax5 = plt.subplot(111) ax5.set_title('Binned Std Spectrum') plt.plot(wvls,self.binnedSpectrum) pp.savefig() plt.figure() ax6 = plt.subplot(111) ax6.set_title('Median Sensitivity Spectrum') ax6.set_xlim((3500,12000)) #ax6.set_ylim((0,5)) plt.plot(wvls,self.fluxFactors) pp.savefig() plt.figure() ax7 = plt.subplot(111) ax7.set_title('1/Sensitivity (Throughput)') ax7.set_xlim((3500,12000)) ax7.set_ylim((0,.04)) plt.plot(wvls,1.0/self.fluxFactors) pp.savefig() plt.figure() ax8 = plt.subplot(111) ax8.set_title('Flux Cal\'d ARCONS Spectrum of Std') plt.plot(wvls,self.fluxFactors*self.subtractedSpectrum) pp.savefig() pp.close() print "Saved Flux Cal plots to %s"%(fullFluxPlotFileName) def writeFactors(self,fluxCalFileName): """ Write flux cal weights to h5 file """ if os.path.isabs(fluxCalFileName) == True: fullFluxCalFileName = fluxCalFileName else: scratchDir = os.getenv('MKID_PROC_PATH') fluxDir = os.path.join(scratchDir,'fluxCalSolnFiles') fullFluxCalFileName = os.path.join(fluxDir,fluxCalFileName) try: fluxCalFile = tables.openFile(fullFluxCalFileName,mode='w') except: print 'Error: Couldn\'t create flux cal file, ',fullFluxCalFileName return calgroup = fluxCalFile.createGroup(fluxCalFile.root,'fluxcal','Table of flux calibration weights by wavelength') caltable = tables.Array(calgroup,'weights',object=self.fluxFactors,title='Flux calibration Weights indexed by wavelengthBin') flagtable = tables.Array(calgroup,'flags',object=self.fluxFlags,title='Flux cal flags indexed by wavelengthBin. 0 is Good') bintable = tables.Array(calgroup,'wavelengthBins',object=self.wvlBinEdges,title='Wavelength bin edges corresponding to third dimension of weights array') fluxCalFile.flush() fluxCalFile.close() print "Finished Flux Cal, written to %s"%(fullFluxCalFileName) def cleanSpectrum_old(self,x,y,objectName): ''' function to take high resolution spectrum of standard star, extend IR coverage with an exponential tail, then rebin down to ARCONS resolution. This function has since been deprecated with the current cleanSpectrum which uses a BB fit to extend IR coverage, and does the rebinning using a gaussian convolution. This is left in for reference. 
''' #locations and widths of absorption features in Angstroms #features = [3890,3970,4099,4340,4860,6564,6883,7619] #widths = [50,50,50,50,50,50,50,50] #for i in xrange(len(features)): # #check for absorption feature in std spectrum # ind = np.where((x<(features[i]+15)) & (x>(features[i]-15)))[0] # if len(ind)!=0: # ind = ind[len(ind)/2] # #if feature is found (flux is higher on both sides of the specified wavelength where the feature should be) # if y[ind]<y[ind+1] and y[ind]<y[ind-1]: # #cut out width[i] around feature[i] # inds = np.where((x >= features[i]+widths[i]) | (x <= features[i]-widths[i])) # x = x[inds] # y = y[inds] #fit a tail to the end of the spectrum to interpolate out to desired wavelength in angstroms fraction = 3.0/4.0 newx = np.arange(int(x[fraction*len(x)]),20000) slopeguess = (np.log(y[-1])-np.log(y[fraction*len(x)]))/(x[-1]-x[fraction*len(x)]) print "Guess at exponential slope is %f"%(slopeguess) guess_a, guess_b, guess_c = float(y[fraction*len(x)]), x[fraction*len(x)], slopeguess guess = [guess_a, guess_b, guess_c] fitx = x[fraction*len(x):] fity = y[fraction*len(x):] exp_decay = lambda fx, A, x0, t: A * np.exp((fx-x0) * t) params, cov = curve_fit(exp_decay, fitx, fity, p0=guess, maxfev=2000) A, x0, t= params print "A = %s\nx0 = %s\nt = %s\n"%(A, x0, t) best_fit = lambda fx: A * np.exp((fx-x0)*t) calcx = np.array(newx,dtype=float) newy = best_fit(calcx) #func = interpolate.splrep(x[fration*len(x):],y[fraction*len(x):],s=smooth) #newx = np.arange(int(x[fraction*len(x)]),self.wvlBinEdges[-1]) #newy = interpolate.splev(newx,func) wl = np.concatenate((x,newx[newx>max(x)])) flux = np.concatenate((y,newy[newx>max(x)])) #new method, rebin data to grid of wavelengths generated from a grid of evenly spaced energy bins #R=7.0 at 4500 #R=E/dE -> dE = R/E dE = 0.3936 #eV start = 1000 #Angs stop = 20000 #Angs enBins = ObsFile.makeWvlBins(dE,start,stop) rebinned = rebin(wl,flux,enBins) re_wl = rebinned[:,0] re_flux = rebinned[:,1] #plt.plot(re_wl,re_flux,color='r') re_wl = re_wl[np.isnan(re_flux)==False] re_flux = re_flux[np.isnan(re_flux)==False] start1 = self.wvlBinEdges[0] stop1 = self.wvlBinEdges[-1] #regrid downsampled data new_wl = np.arange(start1,stop1) #print re_wl #print re_flux #print new_wl #weight=1.0/(re_flux)**(2/1.00) print len(re_flux) weight = np.ones(len(re_flux)) #decrease weights near peak ind = np.where(re_flux == max(re_flux))[0] weight[ind] = 0.3 for p in [1,2,3]: if p==1: wt = 0.3 elif p==2: wt = 0.6 elif p==3: wt = 0.7 try: weight[ind+p] = wt except IndexError: pass try: if ind-p >= 0: weight[ind-p] = wt except IndexError: pass weight[-4:] = 1.0 #weight = [0.7,1,0.3,0.3,0.5,0.7,1,1,1] #print len(weight) #weight = re_flux/min(re_flux) #weight = 1.0/weight #weight = weight/max(weight) #print weight f = interpolate.splrep(re_wl,re_flux,w=weight,k=3,s=max(re_flux)**1.71) new_flux = interpolate.splev(new_wl,f,der=0) return new_wl, new_flux if __name__ == '__main__': try: paramFile = sys.argv[1] except: paramFile = '/home/srmeeker/ARCONS-pipeline/params/fluxCal.dict' fc = FluxCal(paramFile, plots=True, verbose=True)
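The weighting step described in calculateFactors above boils down to a per-bin division guarded by flags. Below is a minimal standalone sketch of that arithmetic, using made-up five-bin spectra in place of the class attributes (the variable names are illustrative, not part of the pipeline):

import numpy as np

# Stand-ins for self.fluxSpectrum, self.skySpectrum and self.binnedSpectrum,
# all in counts/s/Angs/cm^2 per wavelength bin (values are made up)
fluxSpectrum = np.array([12.0, 30.0, 45.0, 0.0, 25.0])
skySpectrum  = np.array([ 2.0,  5.0,  5.0, 0.0,  4.0])
binnedStd    = np.array([10.0, 20.0, 35.0, 8.0, 20.0])

# Sky subtraction, cast to float so the division below cannot fail on ints
subtracted = np.array(fluxSpectrum - skySpectrum, dtype=float)

# Sensitivity factors: known standard spectrum divided by the observed,
# sky-subtracted spectrum
with np.errstate(divide='ignore', invalid='ignore'):
    fluxFactors = binnedStd / subtracted

# Flag bins that would cause trouble (inf, nan, <= 0) and neutralise them,
# mirroring the pipelineFlags bookkeeping in calculateFactors
badBins = ~np.isfinite(fluxFactors) | (fluxFactors <= 0)
fluxFactors[badBins] = 1.0
print(fluxFactors)
print(badBins)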
gpl-2.0
nomadcube/scikit-learn
examples/neighbors/plot_nearest_centroid.py
264
1804
""" =============================== Nearest Centroid Classification =============================== Sample usage of Nearest Centroid classification. It will plot the decision boundaries for each class. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from matplotlib.colors import ListedColormap from sklearn import datasets from sklearn.neighbors import NearestCentroid n_neighbors = 15 # import some data to play with iris = datasets.load_iris() X = iris.data[:, :2] # we only take the first two features. We could # avoid this ugly slicing by using a two-dim dataset y = iris.target h = .02 # step size in the mesh # Create color maps cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF']) cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF']) for shrinkage in [None, 0.1]: # we create an instance of Neighbours Classifier and fit the data. clf = NearestCentroid(shrink_threshold=shrinkage) clf.fit(X, y) y_pred = clf.predict(X) print(shrinkage, np.mean(y == y_pred)) # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, m_max]x[y_min, y_max]. x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.figure() plt.pcolormesh(xx, yy, Z, cmap=cmap_light) # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold) plt.title("3-Class classification (shrink_threshold=%r)" % shrinkage) plt.axis('tight') plt.show()
bsd-3-clause
cgheller/splotch
labeltool/splotchColormap.py
4
5402
#!/usr/bin/env python # Generate overlay images in PNG format with transparancy which can be # used to label Splotch frames. This script can be called as a # standalone program, see below for details. To label an entire # directory of Splotch frames, use the driver script <splotchLabelFrames.sh>. # # (Klaus Reuter, RZG, Sep 2011) def splotchColormap(time=-1.0, # for time>0, a time stamp is printed in the upper left corner redshift=-1.0, # for redshift>0, a redshift stamp is printed valMin=0.1, # minimum value for the log colorscale valMax=1.e4, # maximum value for the log colorscale outfile="overlay.png", # default file name of the overlay to be created xinches=12, # width of the image | at 100 DPI, this corresponds to yinches=8, # height of the image | the dimensions 1200x800 myFontSize="large", myFontColor="white", putMinerva=False): # place the MPG minerva logo in the top right corner # import necessary modules import numpy as np from matplotlib import pyplot import matplotlib as mpl from subprocess import call from math import pow # *** set font properties for annotations *** fprops=mpl.font_manager.FontProperties() fprops.set_size(myFontSize) #fprops.set_weight("bold") # *** set up the matplotlib colormap based on a Splotch colormap *** #$ cat OldSplotch.pal #OldSplotch #0100 #3 # 0 0 255 #128 255 128 #255 0 0 # See <http://matplotlib.sourceforge.net/api/colors_api.html> # to understand what's going on ... # <OldSplotch.pal> corresponds to: OldSplotch = {'red': ((0.0, 0.0, 0.0), (0.5, 0.5, 0.5), (1.0, 1.0, 1.0)), 'green': ((0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 0.0, 0.0)), 'blue': ((0.0, 1.0, 1.0), (0.5, 0.5, 0.5), (1.0, 0.0, 0.0))} colormap = mpl.colors.LinearSegmentedColormap('colormap', OldSplotch) # TODO implement a reader for Splotch palette files # *** set up the figure *** fig = pyplot.figure(figsize=(xinches,yinches)) # *** set up the colorbar *** ax1 = fig.add_axes([0.90, 0.05, 0.02, 0.5]) norm = mpl.colors.LogNorm(vmin=valMin, vmax=valMax) form = mpl.ticker.LogFormatterMathtext() cb1 = mpl.colorbar.ColorbarBase(ax1, cmap=colormap, norm=norm, format=form, orientation='vertical') # manipulate the style of the ticklabels, which requires a loop for tl in cb1.ax.get_yticklabels(): tl.set_fontsize(myFontSize) tl.set_color(myFontColor) cb1.set_label('Temperature [K]', fontproperties=fprops, color=myFontColor) # *** set up the time/redshift variable *** if (time>=0.0): timeString="age of universe=%.3f" % (time, ) timeString=timeString+" Gyr" pyplot.figtext(x=0.025, y=0.950, s=timeString, fontdict=None, fontproperties=fprops, color=myFontColor) # if (redshift>0): timeString="redshift=%.3f" % (redshift, ) pyplot.figtext(x=0.025, y=0.910, s=timeString, fontdict=None, fontproperties=fprops, color=myFontColor) # Minerva needs an intermediate call of the ImageMagick tools if putMinerva: plotFile="./splotchColormapTmp.png" else: plotFile=outfile # *** finally, plot the image and write it to a png file *** pyplot.plot() F=pyplot.gcf() myDPI=100 F.savefig(plotFile, transparent=True, dpi=myDPI) # *** put a logo (e.g. 
MPG Minerva) on top using ImageMagick convert *** if putMinerva: minervaFile="__INSERT_VALID_PATH__/minerva-white-96.png" xoffset=str(int( (xinches*myDPI)*0.895 )) yoffset=str(int( (yinches*myDPI)*0.005 )) #print (xoffset, yoffset) convertCommand="/usr/bin/env convert "+plotFile+" "+minervaFile+" -geometry +"+xoffset+"+"+yoffset+" -composite -format png "+outfile call(convertCommand, shell=True) # *** END SplotchColormap() *** # # *** Allow this Python module to be run as a standalone script. *** # if __name__ == "__main__": import sys import getopt # try: opts, args = getopt.getopt(sys.argv[1:], "t:r:c:d:o:", # the "-" options, below are the "--" options ["time=", "redshift=", "colormin=", "colormax=", "outfile="]) except getopt.GetoptError, err: print str(err) sys.exit(2) # myOutFile = "overlay.png" myTime = -1.0 myRedshift = -1.0 myMinVal = 1 myMaxVal = 100 # for o, a in opts: # print (o,a) if o in ("-t", "--time"): myTime = float(a) elif o in ("-r", "--redshift"): myRedshift = float(a) elif o in ("-c", "--colormin"): myMinVal = pow(10.0, float(a)) elif o in ("-d", "--colormax"): myMaxVal = pow(10.0, float(a)) elif o in ("-o", "--outfile"): myOutFile = a else: assert False, "unhandled option" # splotchColormap(outfile=myOutFile, time=myTime, redshift=myRedshift, valMin=myMinVal, valMax=myMaxVal) # EOF
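The OldSplotch segment dictionary above is written out by hand; the palette reader left as a TODO could build the same colormap directly from a .pal file. A rough sketch, assuming the simple layout shown in the comments (a name line, a version line, the entry count, then one "R G B" line per entry with values 0-255) and using matplotlib's from_list helper; the function name is illustrative:

import matplotlib as mpl

def read_splotch_palette(fname):
    # expected layout: name / version / count / N lines of "R G B"
    with open(fname) as f:
        lines = [ln.strip() for ln in f if ln.strip()]
    n = int(lines[2])
    rgb = [tuple(int(v) / 255.0 for v in ln.split()) for ln in lines[3:3 + n]]
    # entries are spaced evenly, like the 0.0 / 0.5 / 1.0 anchors of OldSplotch
    return mpl.colors.LinearSegmentedColormap.from_list(lines[0], rgb)

# colormap = read_splotch_palette("OldSplotch.pal")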
gpl-2.0
APMonitor/arduino
2_Regression/2nd_order_MIMO/GEKKO/tclab_2nd_order_linear.py
1
3283
import numpy as np
import time
import matplotlib.pyplot as plt
import random
# get gekko package with:
#   pip install gekko
from gekko import GEKKO
import pandas as pd

# import data
data = pd.read_csv('data.txt')
tm = data['Time (sec)'].values
Q1s = data[' Heater 1'].values
Q2s = data[' Heater 2'].values
T1s = data[' Temperature 1'].values
T2s = data[' Temperature 2'].values

#########################################################
# Initialize Model as Estimator
#########################################################
m = GEKKO(name='tclab-mhe')
#m.server = 'http://127.0.0.1' # if local server is installed

# 120 second time horizon, 40 steps
m.time = tm

# Parameters to Estimate
K1 = m.FV(value=0.5)
K1.STATUS = 1
K1.FSTATUS = 0
K1.LOWER = 0.1
K1.UPPER = 1.0

K2 = m.FV(value=0.3)
K2.STATUS = 1
K2.FSTATUS = 0
K2.LOWER = 0.1
K2.UPPER = 1.0

K3 = m.FV(value=0.1)
K3.STATUS = 1
K3.FSTATUS = 0
K3.LOWER = 0.0001
K3.UPPER = 1.0

tau12 = m.FV(value=150)
tau12.STATUS = 1
tau12.FSTATUS = 0
tau12.LOWER = 50.0
tau12.UPPER = 250

tau3 = m.FV(value=15)
tau3.STATUS = 0
tau3.FSTATUS = 0
tau3.LOWER = 10
tau3.UPPER = 20

# Measured inputs
Q1 = m.MV(value=0)
Q1.FSTATUS = 1 # measured
Q1.value = Q1s

Q2 = m.MV(value=0)
Q2.FSTATUS = 1 # measured
Q2.value = Q2s

# Ambient temperature
Ta = m.Param(value=23.0) # degC

# State variables
TH1 = m.SV(value=T1s[0])
TH2 = m.SV(value=T2s[0])

# Measurements for model alignment
TC1 = m.CV(value=T1s)
TC1.STATUS = 1     # minimize error between simulation and measurement
TC1.FSTATUS = 1    # receive measurement
TC1.MEAS_GAP = 0.1 # measurement deadband gap

TC2 = m.CV(value=T1s[0])
TC2.STATUS = 1     # minimize error between simulation and measurement
TC2.FSTATUS = 1    # receive measurement
TC2.MEAS_GAP = 0.1 # measurement deadband gap
TC2.value = T2s

# Heat transfer between two heaters
DT = m.Intermediate(TH2-TH1)

# Empirical correlations
m.Equation(tau12 * TH1.dt() + (TH1-Ta) == K1*Q1 + K3*DT)
m.Equation(tau12 * TH2.dt() + (TH2-Ta) == K2*Q2 - K3*DT)
m.Equation(tau3 * TC1.dt() + TC1 == TH1)
m.Equation(tau3 * TC2.dt() + TC2 == TH2)

# Global Options
m.options.IMODE = 5     # MHE
m.options.EV_TYPE = 2   # Objective type
m.options.NODES = 3     # Collocation nodes
m.options.SOLVER = 3    # IPOPT
m.options.COLDSTART = 0 # COLDSTART on first cycle

# Predict Parameters and Temperatures
# use remote=False for local solve
m.solve()

# Create plot
plt.figure(figsize=(10,7))

ax = plt.subplot(2,1,1)
ax.grid()
plt.plot(tm,T1s,'ro',label=r'$T_1$ measured')
plt.plot(tm,TC1.value,'k-',label=r'$T_1$ predicted')
plt.plot(tm,T2s,'bx',label=r'$T_2$ measured')
plt.plot(tm,TC2.value,'k--',label=r'$T_2$ predicted')
plt.ylabel('Temperature (degC)')
plt.legend(loc=2)

ax = plt.subplot(2,1,2)
ax.grid()
plt.plot(tm,Q1s,'r-',label=r'$Q_1$')
plt.plot(tm,Q2s,'b:',label=r'$Q_2$')
plt.ylabel('Heaters')
plt.xlabel('Time (sec)')
plt.legend(loc='best')

# Print optimal values
print('K1: ' + str(K1.newval))
print('K2: ' + str(K2.newval))
print('K3: ' + str(K3.newval))
print('tau12: ' + str(tau12.newval))
print('tau3: ' + str(tau3.newval))

# Save figure
plt.savefig('tclab_estimation.png')
plt.show()
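The four m.Equation lines define a linear two-heater energy balance. A quick forward-Euler sketch of the same model, meant to be run separately from the script above, shows how the heater and sensor temperatures respond to constant inputs; the parameter values below are placeholders, not the estimated results:

# Placeholder gains, time constants and ambient temperature (not estimates)
K1, K2, K3 = 0.6, 0.4, 0.2
tau12, tau3, Ta = 160.0, 15.0, 23.0
Q1c, Q2c = 80.0, 40.0               # constant heater inputs (%)
TH1 = TH2 = TC1 = TC2 = Ta          # start at ambient
dt = 1.0                            # seconds
for _ in range(600):
    DT = TH2 - TH1
    TH1 += dt * (-(TH1 - Ta) + K1 * Q1c + K3 * DT) / tau12
    TH2 += dt * (-(TH2 - Ta) + K2 * Q2c - K3 * DT) / tau12
    TC1 += dt * (TH1 - TC1) / tau3
    TC2 += dt * (TH2 - TC2) / tau3
print(TC1, TC2)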
apache-2.0
DistrictDataLabs/yellowbrick
yellowbrick/contrib/scatter.py
1
11862
# yellowbrick.contrib.scatter # Implements a 2d scatter plot for feature analysis. # # Author: Nathan Danielsen # Created: Fri Feb 26 19:40:00 2017 -0400 # # Copyright (C) 2017 The scikit-yb developers # For license information, see LICENSE.txt # # ID: scatter.py [a89633e] benjamin@bengfort.com $ """ Implements a 2D scatter plot for feature analysis. """ ########################################################################## # Imports ########################################################################## import itertools import numpy as np from yellowbrick.features.base import DataVisualizer from yellowbrick.utils import is_dataframe, is_structured_array from yellowbrick.utils import has_ndarray_int_columns from yellowbrick.exceptions import YellowbrickValueError from yellowbrick.style.colors import resolve_colors ########################################################################## # Quick Methods ########################################################################## def scatterviz( X, y=None, ax=None, features=None, classes=None, color=None, colormap=None, markers=None, alpha=1.0, **kwargs ): """Displays a bivariate scatter plot. This helper function is a quick wrapper to utilize the ScatterVisualizer (Transformer) for one-off analysis. Parameters ---------- X : ndarray or DataFrame of shape n x m A matrix of n instances with m features y : ndarray or Series of length n, default: None An array or series of target or class values ax : matplotlib axes, default: None The axes to plot the figure on. features : list of strings, default: None The names of two features or columns. More than that will raise an error. classes : list of strings, default: None The names of the classes in the target color : list or tuple of colors, default: None Specify the colors for each individual class colormap : string or matplotlib cmap, default: None Sequential colormap for continuous target markers : iterable of strings, default: ,+o*vhd Matplotlib style markers for points on the scatter plot points alpha : float, default: 1.0 Specify a transparency where 1 is completely opaque and 0 is completely transparent. This property makes densely clustered points more visible. Returns ------- viz : ScatterVisualizer Returns the fitted, finalized visualizer """ # Instantiate the visualizer visualizer = ScatterVisualizer( ax=ax, features=features, classes=classes, color=color, colormap=colormap, markers=markers, alpha=alpha, **kwargs ) # Fit and transform the visualizer (calls draw) visualizer.fit(X, y, **kwargs) visualizer.transform(X) # Return the visualizer object return visualizer ########################################################################## # Static ScatterVisualizer Visualizer ########################################################################## class ScatterVisualizer(DataVisualizer): """ ScatterVisualizer is a bivariate feature data visualization algorithm that plots using the Cartesian coordinates of each point. Parameters ---------- ax : a matplotlib plot, default: None The axis to plot the figure on. x : string, default: None The feature name that corresponds to a column name or index postion in the matrix that will be plotted against the x-axis y : string, default: None The feature name that corresponds to a column name or index postion in the matrix that will be plotted against the y-axis features : a list of two feature names to use, default: None List of two features that correspond to the columns in the array. 
The order of the two features correspond to X and Y axes on the graph. More than two feature names or columns will raise an error. If a DataFrame is passed to fit and features is None, feature names are selected that are the columns of the DataFrame. classes : a list of class names for the legend, default: None If classes is None and a y value is passed to fit then the classes are selected from the target vector. color : optional list or tuple of colors to colorize points, default: None Use either color to colorize the points on a per class basis or colormap to color them on a continuous scale. colormap : optional string or matplotlib cmap to colorize points, default: None Use either color to colorize the points on a per class basis or colormap to color them on a continuous scale. markers : iterable of strings, default: ,+o*vhd Matplotlib style markers for points on the scatter plot points alpha : float, default: 1.0 Specify a transparency where 1 is completely opaque and 0 is completely transparent. This property makes densely clustered points more visible. kwargs : keyword arguments passed to the super class. These parameters can be influenced later on in the visualization process, but can and should be set as early as possible. """ def __init__( self, ax=None, x=None, y=None, features=None, classes=None, color=None, colormap=None, markers=None, alpha=1.0, **kwargs ): """ Initialize the base scatter with many of the options required in order to make the visualization work. """ super(ScatterVisualizer, self).__init__( ax=ax, features=features, classes=classes, color=color, colormap=colormap, **kwargs ) self.x = x self.y = y self.alpha = alpha self.markers = itertools.cycle( kwargs.pop("markers", (",", "+", "o", "*", "v", "h", "d")) ) self.color = color self.colormap = colormap if self.x is not None and self.y is not None and self.features is not None: raise YellowbrickValueError("Please specify x,y or features, not both.") if self.x is not None and self.y is not None and self.features is None: self.features = [self.x, self.y] # Ensure with init that features doesn't have more than two features if features is not None: if len(features) != 2: raise YellowbrickValueError( "ScatterVisualizer only accepts two features." ) def fit(self, X, y=None, **kwargs): """ The fit method is the primary drawing input for the parallel coords visualization since it has both the X and y data required for the viz and the transform method does not. Parameters ---------- X : ndarray or DataFrame of shape n x m A matrix of n instances with 2 features y : ndarray or Series of length n An array or series of target or class values kwargs : dict Pass generic arguments to the drawing method Returns ------- self : instance Returns the instance of the transformer/visualizer """ _, ncols = X.shape # NOTE: Do not call super for this class, it conflicts with the fit. # Setting these variables is similar to the old behavior of DataVisualizer. # TODO: refactor to make use of the new DataVisualizer functionality self.features_ = self.features self.classes_ = self.classes if ncols == 2: X_two_cols = X if self.features_ is None: self.features_ = ["Feature One", "Feature Two"] # Handle the feature names if they're None. 
elif self.features_ is not None and is_dataframe(X): X_two_cols = X[self.features_].values # handle numpy named/ structured array elif self.features_ is not None and is_structured_array(X): X_selected = X[self.features_] X_two_cols = X_selected.copy().view( (np.float64, len(X_selected.dtype.names)) ) # handle features that are numeric columns in ndarray matrix elif self.features_ is not None and has_ndarray_int_columns(self.features_, X): f_one, f_two = self.features_ X_two_cols = X[:, [int(f_one), int(f_two)]] else: raise YellowbrickValueError( """ ScatterVisualizer only accepts two features, please explicitly set these two features in the init kwargs or pass a matrix/ dataframe in with only two columns.""" ) # Store the classes for the legend if they're None. if self.classes_ is None: # TODO: Is this the most efficient method? self.classes_ = [str(label) for label in np.unique(y)] # Draw the instances self.draw(X_two_cols, y, **kwargs) # Fit always returns self. return self def draw(self, X, y, **kwargs): """Called from the fit method, this method creates a scatter plot that draws each instance as a class or target colored point, whose location is determined by the feature data set. """ # Set the axes limits self.ax.set_xlim([-1, 1]) self.ax.set_ylim([-1, 1]) # set the colors color_values = resolve_colors( n_colors=len(self.classes_), colormap=self.colormap, colors=self.color ) colors = dict(zip(self.classes_, color_values)) # Create a data structure to hold the scatter plot representations to_plot = {} for kls in self.classes_: to_plot[kls] = [[], []] # Add each row of the data set to to_plot for plotting # TODO: make this an independent function for override for i, row in enumerate(X): row_ = np.repeat(np.expand_dims(row, axis=1), 2, axis=1) x_, y_ = row_[0], row_[1] kls = self.classes_[y[i]] to_plot[kls][0].append(x_) to_plot[kls][1].append(y_) # Add the scatter plots from the to_plot function # TODO: store these plots to add more instances to later # TODO: make this a separate function for i, kls in enumerate(self.classes_): self.ax.scatter( to_plot[kls][0], to_plot[kls][1], marker=next(self.markers), color=colors[kls], label=str(kls), alpha=self.alpha, **kwargs ) self.ax.axis("equal") def finalize(self, **kwargs): """ Adds a title and a legend and ensures that the axis labels are set as the feature names being visualized. Parameters ---------- kwargs: generic keyword arguments. Notes ----- Generally this method is called from show and not directly by the user. """ # Divide out the two features feature_one, feature_two = self.features_ # Set the title self.set_title( "Scatter Plot: {0} vs {1}".format(str(feature_one), str(feature_two)) ) # Add the legend self.ax.legend(loc="best") self.ax.set_xlabel(str(feature_one)) self.ax.set_ylabel(str(feature_two)) # Alias for ScatterViz ScatterViz = ScatterVisualizer
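A minimal usage sketch of the scatterviz quick method defined at the top of this module, run on two iris columns; the feature names passed are just axis labels. Note that draw() fixes the axis limits to [-1, 1], so unscaled data like this may fall outside the visible window unless it is scaled first:

from sklearn import datasets
from yellowbrick.contrib.scatter import scatterviz

iris = datasets.load_iris()
X = iris.data[:, :2]  # exactly two feature columns
viz = scatterviz(X, iris.target,
                 features=["sepal length (cm)", "sepal width (cm)"],
                 classes=list(iris.target_names))
viz.show()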
apache-2.0
santiago-salas-v/walas
node_images.py
1
1746
import matplotlib
import matplotlib.pyplot as plt

# First figure: node (circle) inside a dashed control volume, with inlet and
# outlet arrows and stream labels
fig = plt.figure()
ax = fig.add_subplot(111)
patch1 = matplotlib.patches.Circle([0.5, 0.5], 0.05)
patch2 = matplotlib.patches.Rectangle([0.3, 0.3], 0.4, 0.4, alpha=0.5,
                                      fill=False, edgecolor='black',
                                      linestyle='--')
arrow1 = matplotlib.patches.Arrow(0, 0.5, 0.45, 0, width=0.05, color='black')
arrow2 = matplotlib.patches.Arrow(0.55, 0.5, 0.45, 0, width=0.05, color='black')
line1 = matplotlib.lines.Line2D([0.5, 0.5], [0, 0.45], linestyle='--',
                                color='black')
text1 = matplotlib.text.Text(0, 0.45, '$n_{A0}$\n$V_0$\n$U_A=0$')
text2 = matplotlib.text.Text(0.8, 0.45, '$n_{A1}$\n$V_1$\n$U_{A1}$')
for artist in [patch1, patch2, arrow1, arrow2, line1, text1, text2]:
    ax.add_artist(artist)
ax.set_frame_on(False)
ax.set_axis_off()
ax.set_aspect(1.0)

# Second figure: same node with an additional arrow entering from below,
# labelled n_Ar / V_r
fig = plt.figure()
ax = fig.add_subplot(111)
patch1 = matplotlib.patches.Circle([0.5, 0.5], 0.05)
patch2 = matplotlib.patches.Rectangle([0.3, 0.3], 0.4, 0.4, alpha=0.5,
                                      fill=False, edgecolor='black',
                                      linestyle='--')
arrow1 = matplotlib.patches.Arrow(0, 0.5, 0.45, 0, width=0.05, color='black')
arrow2 = matplotlib.patches.Arrow(0.55, 0.5, 0.45, 0, width=0.05, color='black')
arrow3 = matplotlib.patches.Arrow(0.5, 0.0, 0, 0.45, width=0.05, color='black')
text1 = matplotlib.text.Text(0, 0.45, '$n_{A0}$\n$V_0$\n$U_A=0$')
text2 = matplotlib.text.Text(0.8, 0.45, '$n_{A1}$\n$V_1$\n$U_{A1}$')
text3 = matplotlib.text.Text(0.55, 0.1, '$n_{Ar}$\n$V_r$')
for artist in [patch1, patch2, arrow1, arrow2, arrow3, text1, text2, text3]:
    ax.add_artist(artist)
ax.set_frame_on(False)
ax.set_axis_off()
ax.set_aspect(1.0)
mit
eric-haibin-lin/mxnet
python/mxnet/ndarray/numpy/_op.py
2
252233
# pylint: disable=C0302 # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # pylint: disable=unused-argument """Namespace for numpy operators used in Gluon dispatched by F=ndarray.""" import numpy as _np from ...base import numeric_types, integer_types from ...util import _sanity_check_params, set_module from ...util import wrap_np_unary_func, wrap_np_binary_func from ...context import current_context from . import _internal as _npi from ..ndarray import NDArray __all__ = ['shape', 'zeros', 'zeros_like', 'ones', 'ones_like', 'full', 'full_like', 'empty_like', 'invert', 'delete', 'add', 'broadcast_to', 'subtract', 'multiply', 'divide', 'mod', 'remainder', 'power', 'bitwise_not', 'arctan2', 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10', 'sqrt', 'cbrt', 'abs', 'insert', 'absolute', 'exp', 'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log', 'degrees', 'log2', 'matmul', 'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative', 'fix', 'ceil', 'floor', 'histogram', 'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'argsort', 'sort', 'tensordot', 'eye', 'linspace', 'logspace', 'expand_dims', 'tile', 'arange', 'array_split', 'split', 'hsplit', 'vsplit', 'dsplit', 'concatenate', 'append', 'stack', 'vstack', 'row_stack', 'column_stack', 'hstack', 'dstack', 'average', 'mean', 'maximum', 'minimum', 'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'indices', 'copysign', 'ravel', 'unravel_index', 'diag_indices_from', 'hanning', 'hamming', 'blackman', 'flip', 'flipud', 'fliplr', 'around', 'round', 'hypot', 'bitwise_and', 'bitwise_xor', 'bitwise_or', 'rad2deg', 'deg2rad', 'unique', 'lcm', 'tril', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer', 'equal', 'not_equal', 'greater', 'less', 'greater_equal', 'less_equal', 'rot90', 'einsum', 'true_divide', 'nonzero', 'quantile', 'percentile', 'shares_memory', 'may_share_memory', 'diff', 'resize', 'polyval', 'nan_to_num', 'isnan', 'isinf', 'isposinf', 'isneginf', 'isfinite', 'where', 'bincount', 'pad'] @set_module('mxnet.ndarray.numpy') def shape(a): """ Return the shape of an array. Parameters ---------- a : array_like Input array. Returns ------- shape : tuple of ints The elements of the shape tuple give the lengths of the corresponding array dimensions. See Also -------- ndarray.shape : Equivalent array method. Examples -------- >>> np.shape(np.eye(3)) (3, 3) >>> np.shape([[1, 2]]) (1, 2) >>> np.shape([0]) (1,) >>> np.shape(0) () """ return a.shape @set_module('mxnet.ndarray.numpy') def zeros(shape, dtype=_np.float32, order='C', ctx=None): # pylint: disable=redefined-outer-name """Return a new array of given shape and type, filled with zeros. This function currently only supports storing multi-dimensional data in row-major (C-style). Parameters ---------- shape : int or tuple of int The shape of the empty array. 
dtype : str or numpy.dtype, optional An optional value type. Default is `numpy.float32`. Note that this behavior is different from NumPy's `zeros` function where `float64` is the default value, because `float32` is considered as the default data type in deep learning. order : {'C'}, optional, default: 'C' How to store multi-dimensional data in memory, currently only row-major (C-style) is supported. ctx : Context, optional An optional device context (default is the current default context). Returns ------- out : ndarray Array of zeros with the given shape, dtype, and ctx. """ if order != 'C': raise NotImplementedError if ctx is None: ctx = current_context() dtype = _np.float32 if dtype is None else dtype return _npi.zeros(shape=shape, ctx=ctx, dtype=dtype) @set_module('mxnet.ndarray.numpy') def ones(shape, dtype=_np.float32, order='C', ctx=None): # pylint: disable=redefined-outer-name """Return a new array of given shape and type, filled with ones. This function currently only supports storing multi-dimensional data in row-major (C-style). Parameters ---------- shape : int or tuple of int The shape of the empty array. dtype : str or numpy.dtype, optional An optional value type. Default is `numpy.float32`. Note that this behavior is different from NumPy's `ones` function where `float64` is the default value, because `float32` is considered as the default data type in deep learning. order : {'C'}, optional, default: 'C' How to store multi-dimensional data in memory, currently only row-major (C-style) is supported. ctx : Context, optional An optional device context (default is the current default context). Returns ------- out : ndarray Array of ones with the given shape, dtype, and ctx. """ if order != 'C': raise NotImplementedError if ctx is None: ctx = current_context() dtype = _np.float32 if dtype is None else dtype return _npi.ones(shape=shape, ctx=ctx, dtype=dtype) # pylint: disable=too-many-arguments, redefined-outer-name @set_module('mxnet.ndarray.numpy') def zeros_like(a, dtype=None, order='C', ctx=None, out=None): """ Return an array of zeros with the same shape and type as a given array. Parameters ---------- a : ndarray The shape and data-type of `a` define these same attributes of the returned array. dtype : data-type, optional Overrides the data type of the result. Temporarily do not support boolean type. order : {'C'}, optional Whether to store multidimensional data in C- or Fortran-contiguous (row- or column-wise) order in memory. Currently only supports C order. ctx: to specify the device, e.g. the i-th GPU. out : ndarray or None, optional A location into which the result is stored. If provided, it must have the same shape and dtype as input ndarray. If not provided or `None`, a freshly-allocated array is returned. Returns ------- out : ndarray Array of zeros with the same shape and type as a. See Also -------- empty_like : Return an empty array with shape and type of input. ones_like : Return an array of ones with shape and type of input. zeros_like : Return an array of zeros with shape and type of input. full : Return a new array of given shape filled with value. 
Examples -------- >>> x = np.arange(6) >>> x = x.reshape((2, 3)) >>> x array([[0., 1., 2.], [3., 4., 5.]]) >>> np.zeros_like(x) array([[0., 0., 0.], [0., 0., 0.]]) >>> np.zeros_like(x, int) array([[0, 0, 0], [0, 0, 0]], dtype=int64) >>> y = np.arange(3, dtype=float) >>> y array([0., 1., 2.], dtype=float64) >>> np.zeros_like(y) array([0., 0., 0.], dtype=float64) """ if order != 'C': raise NotImplementedError if ctx is None: ctx = current_context() return _npi.full_like(a, fill_value=0, dtype=dtype, ctx=ctx, out=out) @set_module('mxnet.ndarray.numpy') def ones_like(a, dtype=None, order='C', ctx=None, out=None): """ Return an array of ones with the same shape and type as a given array. Parameters ---------- a : ndarray The shape and data-type of `a` define these same attributes of the returned array. dtype : data-type, optional Overrides the data type of the result. Temporarily do not support boolean type. order : {'C'}, optional Whether to store multidimensional data in C- or Fortran-contiguous (row- or column-wise) order in memory. Currently only supports C order. ctx: to specify the device, e.g. the i-th GPU. out : ndarray or None, optional A location into which the result is stored. If provided, it must have the same shape and dtype as input ndarray. If not provided or `None`, a freshly-allocated array is returned. Returns ------- out : ndarray Array of ones with the same shape and type as a. See Also -------- empty_like : Return an empty array with shape and type of input. zeros_like : Return an array of zeros with shape and type of input. full_like : Return a new array with shape of input filled with value. ones : Return a new array setting values to one. Examples -------- >>> x = np.arange(6) >>> x = x.reshape((2, 3)) >>> x array([[0., 1., 2.], [3., 4., 5.]]) >>> np.ones_like(x) array([[1., 1., 1.], [1., 1., 1.]]) >>> np.ones_like(x, int) array([[1, 1, 1], [1, 1, 1]], dtype=int64) >>> y = np.arange(3, dtype=float) >>> y array([0., 1., 2.], dtype=float64) >>> np.ones_like(y) array([1., 1., 1.], dtype=float64) """ if order != 'C': raise NotImplementedError if ctx is None: ctx = current_context() return _npi.full_like(a, fill_value=1, dtype=dtype, ctx=ctx, out=out) @set_module('mxnet.ndarray.numpy') def broadcast_to(array, shape): """ Broadcast an array to a new shape. Parameters ---------- array : ndarray or scalar The array to broadcast. shape : tuple The shape of the desired array. Returns ------- broadcast : array A readonly view on the original array with the given shape. It is typically not contiguous. Furthermore, more than one element of a broadcasted array may refer to a single memory location. Raises ------ MXNetError If the array is not compatible with the new shape according to NumPy's broadcasting rules. """ if _np.isscalar(array): return full(shape, array) return _npi.broadcast_to(array, shape) @set_module('mxnet.ndarray.numpy') def full(shape, fill_value, dtype=None, order='C', ctx=None, out=None): # pylint: disable=too-many-arguments """ Return a new array of given shape and type, filled with `fill_value`. Parameters ---------- shape : int or sequence of ints Shape of the new array, e.g., ``(2, 3)`` or ``2``. fill_value : scalar or ndarray Fill value. dtype : data-type, optional The desired data-type for the array. The default, `None`, means `np.array(fill_value).dtype`. order : {'C'}, optional Whether to store multidimensional data in C- or Fortran-contiguous (row- or column-wise) order in memory. Currently only supports C order. ctx: to specify the device, e.g. 
the i-th GPU. out : ndarray or None, optional A location into which the result is stored. If provided, it must have the same shape and dtype as input ndarray. If not provided or `None`, a freshly-allocated array is returned. Returns ------- out : ndarray Array of `fill_value` with the given shape, dtype, and order. If `fill_value` is an ndarray, out will have the same context as `fill_value` regardless of the provided `ctx`. Notes ----- This function differs from the original `numpy.full https://docs.scipy.org/doc/numpy/reference/generated/numpy.full.html`_ in the following way(s): - Have an additional `ctx` argument to specify the device - Have an additional `out` argument - Currently does not support `order` selection See Also -------- empty : Return a new uninitialized array. ones : Return a new array setting values to one. zeros : Return a new array setting values to zero. Examples -------- >>> np.full((2, 2), 10) array([[10., 10.], [10., 10.]]) >>> np.full((2, 2), 2, dtype=np.int32, ctx=mx.cpu(0)) array([[2, 2], [2, 2]], dtype=int32) """ if order != 'C': raise NotImplementedError if ctx is None: ctx = current_context() if isinstance(fill_value, NDArray): if dtype is None: ret = broadcast_to(fill_value, shape) else: ret = broadcast_to(fill_value, shape).astype(dtype) return ret dtype = _np.float32 if dtype is None else dtype return _npi.full(shape=shape, value=fill_value, ctx=ctx, dtype=dtype, out=out) # pylint: enable=too-many-arguments, redefined-outer-name @set_module('mxnet.ndarray.numpy') def full_like(a, fill_value, dtype=None, order='C', ctx=None, out=None): # pylint: disable=too-many-arguments """ Return a full array with the same shape and type as a given array. Parameters ---------- a : ndarray The shape and data-type of `a` define these same attributes of the returned array. fill_value : scalar Fill value. dtype : data-type, optional Overrides the data type of the result. Temporarily do not support boolean type. order : {'C'}, optional Whether to store multidimensional data in C- or Fortran-contiguous (row- or column-wise) order in memory. Currently only supports C order. ctx: to specify the device, e.g. the i-th GPU. out : ndarray or None, optional A location into which the result is stored. If provided, it must have the same shape and dtype as input ndarray. If not provided or `None`, a freshly-allocated array is returned. Returns ------- out : ndarray Array of `fill_value` with the same shape and type as `a`. See Also -------- empty_like : Return an empty array with shape and type of input. ones_like : Return an array of ones with shape and type of input. zeros_like : Return an array of zeros with shape and type of input. full : Return a new array of given shape filled with value. 
Examples -------- >>> x = np.arange(6, dtype=int) >>> np.full_like(x, 1) array([1, 1, 1, 1, 1, 1], dtype=int64) >>> np.full_like(x, 0.1) array([0, 0, 0, 0, 0, 0], dtype=int64) >>> np.full_like(x, 0.1, dtype=np.float64) array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1], dtype=float64) >>> np.full_like(x, np.nan, dtype=np.double) array([nan, nan, nan, nan, nan, nan], dtype=float64) >>> y = np.arange(6, dtype=np.float32) >>> np.full_like(y, 0.1) array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) """ if order != 'C': raise NotImplementedError if ctx is None: ctx = current_context() return _npi.full_like(a, fill_value=fill_value, dtype=dtype, ctx=ctx, out=out) @set_module('mxnet.ndarray.numpy') def empty_like(prototype, dtype=None, order='C', subok=False, shape=None): # pylint: disable=W0621 """ Return a new array with the same shape and type as a given array. Parameters ---------- prototype : ndarray The shape and data-type of `prototype` define these same attributes of the returned array. dtype : data-type, optional Overrides the data type of the result. order : {'C'}, optional Whether to store multidimensional data in C- or Fortran-contiguous (row- or column-wise) order in memory. Currently only supports C order. subok : {False}, optional If True, then the newly created array will use the sub-class type of 'a', otherwise it will be a base-class array. Defaults to False. (Only support False at this moment) shape : int or sequence of ints, optional. Overrides the shape of the result. If order='K' and the number of dimensions is unchanged, will try to keep order, otherwise, order='C' is implied. (Not supported at this moment) Returns ------- out : ndarray Array of uninitialized (arbitrary) data with the same shape and type as `prototype`. See Also -------- ones_like : Return an array of ones with shape and type of input. zeros_like : Return an array of zeros with shape and type of input. full_like : Return a new array with shape of input filled with value. empty : Return a new uninitialized array. Notes ----- This function does *not* initialize the returned array; to do that use `zeros_like` or `ones_like` instead. It may be marginally faster than the functions that do set the array values. Examples -------- >>> a = np.array([[1,2,3], [4,5,6]]) >>> np.empty_like(a) array([[-5764607523034234880, -2305834244544065442, 4563075075], # uninitialized [ 4567052944, -5764607523034234880, 844424930131968]]) >>> a = np.array([[1., 2., 3.],[4.,5.,6.]]) >>> np.empty_like(a) array([[4.9e-324, 9.9e-324, 1.5e-323], # uninitialized [2.0e-323, 2.5e-323, 3.0e-323]]) """ dtype_list = {None:'None', _np.int8:'int8', _np.uint8:'uint8', _np.int32:'int32', _np.int64:'int64', _np.float16:'float16', _np.float32:'float32', _np.float64:'float64', _np.bool_:'bool_', bool:'bool', int:'int64', float:'float64'} if order != 'C': raise NotImplementedError("Only support C-order at this moment") if subok: raise NotImplementedError("Creating array by using sub-class is not supported at this moment") if shape is not None: raise NotImplementedError("Assigning new shape is not supported at this moment") try: dtype = dtype if isinstance(dtype, str) else dtype_list[dtype] except: raise NotImplementedError("Do not support this dtype at this moment") return _npi.empty_like_fallback(prototype, dtype=dtype, order=order, subok=subok, shape=shape) @set_module('mxnet.ndarray.numpy') def arange(start, stop=None, step=1, dtype=None, ctx=None): """Return evenly spaced values within a given interval. 
Values are generated within the half-open interval ``[start, stop)`` (in other words, the interval including `start` but excluding `stop`). For integer arguments the function is equivalent to the Python built-in `range` function, but returns an ndarray rather than a list. Parameters ---------- start : number, optional Start of interval. The interval includes this value. The default start value is 0. stop : number End of interval. The interval does not include this value, except in some cases where `step` is not an integer and floating point round-off affects the length of `out`. step : number, optional Spacing between values. For any output `out`, this is the distance between two adjacent values, ``out[i+1] - out[i]``. The default step size is 1. If `step` is specified as a position argument, `start` must also be given. dtype : dtype The type of the output array. The default is `float32`. Returns ------- arange : ndarray Array of evenly spaced values. For floating point arguments, the length of the result is ``ceil((stop - start)/step)``. Because of floating point overflow, this rule may result in the last element of `out` being greater than `stop`. """ if dtype is None: dtype = 'float32' if ctx is None: ctx = current_context() if stop is None: stop = start start = 0 if step is None: step = 1 if start is None and stop is None: raise ValueError('start and stop cannot be both None') if step == 0: raise ZeroDivisionError('step cannot be 0') return _npi.arange(start=start, stop=stop, step=step, dtype=dtype, ctx=ctx) @set_module('mxnet.ndarray.numpy') def identity(n, dtype=None, ctx=None): """ Return the identity array. The identity array is a square array with ones on the main diagonal. Parameters ---------- n : int Number of rows (and columns) in `n` x `n` output. dtype : data-type, optional Data-type of the output. Defaults to ``numpy.float32``. ctx : Context, optional An optional device context (default is the current default context). Returns ------- out : ndarray `n` x `n` array with its main diagonal set to one, and all other elements 0. Examples -------- >>> np.identity(3) >>> np.identity(3) array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]) """ if not isinstance(n, int): raise TypeError("Input 'n' should be an integer") if n < 0: raise ValueError("Input 'n' cannot be negative") if ctx is None: ctx = current_context() dtype = _np.float32 if dtype is None else dtype return _npi.identity(shape=(n, n), ctx=ctx, dtype=dtype) # pylint: disable=redefined-outer-name @set_module('mxnet.ndarray.numpy') def take(a, indices, axis=None, mode='raise', out=None): r""" Take elements from an array along an axis. When axis is not None, this function does the same thing as "fancy" indexing (indexing arrays using arrays); however, it can be easier to use if you need elements along a given axis. A call such as ``np.take(arr, indices, axis=3)`` is equivalent to ``arr[:,:,:,indices,...]``. Explained without fancy indexing, this is equivalent to the following use of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of indices:: Ni, Nk = a.shape[:axis], a.shape[axis+1:] Nj = indices.shape for ii in ndindex(Ni): for jj in ndindex(Nj): for kk in ndindex(Nk): out[ii + jj + kk] = a[ii + (indices[jj],) + kk] Parameters ---------- a : ndarray The source array. indices : ndarray The indices of the values to extract. Also allow scalars for indices. axis : int, optional The axis over which to select values. By default, the flattened input array is used. 
out : ndarray, optional If provided, the result will be placed in this array. It should be of the appropriate shape and dtype. mode : {'clip', 'wrap'}, optional Specifies how out-of-bounds indices will behave. * 'clip' -- clip to the range (default) * 'wrap' -- wrap around 'clip' mode means that all indices that are too large are replaced by the index that addresses the last element along that axis. Note that this disables indexing with negative numbers. Returns ------- out : ndarray The returned array has the same type as `a`. Notes ----- This function differs from the original `numpy.take <https://docs.scipy.org/doc/numpy/reference/generated/numpy.take.html>`_ in the following way(s): - Only ndarray or scalar ndarray is accepted as valid input. Examples -------- >>> a = np.array([4, 3, 5, 7, 6, 8]) >>> indices = np.array([0, 1, 4]) >>> np.take(a, indices) array([4., 3., 6.]) In this example for `a` is an ndarray, "fancy" indexing can be used. >>> a[indices] array([4., 3., 6.]) If `indices` is not one dimensional, the output also has these dimensions. >>> np.take(a, np.array([[0, 1], [2, 3]])) array([[4., 3.], [5., 7.]]) """ if mode not in ('wrap', 'clip', 'raise'): raise NotImplementedError( "function take does not support mode '{}'".format(mode)) if axis is None: return _npi.take(_npi.reshape(a, -1), indices, 0, mode, out) else: return _npi.take(a, indices, axis, mode, out) # pylint: enable=redefined-outer-name @set_module('mxnet.ndarray.numpy') def insert(arr, obj, values, axis=None): """ Insert values along the given axis before the given indices. Parameters ---------- arr : ndarray Input array. obj : int, slice or ndarray of int64 Object that defines the index or indices before which `values` is inserted. Support for multiple insertions when `obj` is a single scalar or a sequence with one element (only support int32 and int64 element). values : ndarray Values to insert into `arr`. If the type of values is different from that of arr, values is converted to the type of arr. axis : int, optional Axis along which to insert `values`. If `axis` is None then `arr` is flattened first. Returns ------- out : ndarray A copy of `arr` with `values` inserted. Note that `insert` does not occur in-place: a new array is returned. If `axis` is None, `out` is a flattened array. Notes ----- - Note that for higher dimensional inserts `obj=0` behaves very different from `obj=[0]` just like `arr[:,0,:] = values` is different from `arr[:,[0],:] = values`. 
- If obj is a ndarray, it's dtype only supports int64 Examples -------- >>> a = np.array([[1, 1], [2, 2], [3, 3]]) >>> a array([[1., 1.], [2., 2.], [3., 3.]]) >>> np.insert(a, 1, np.array(5)) array([1., 5., 1., 2., 2., 3., 3.]) >>> np.insert(a, 1, np.array(5), axis=1) array([[1., 5., 1.], [2., 5., 2.], [3., 5., 3.]]) Difference between sequence and scalars: >>> np.insert(a, np.array([1], dtype=np.int64), np.array([[1],[2],[3]]), axis=1) array([[1., 1., 1.], [2., 2., 2.], [3., 3., 3.]]) >>> np.insert(a, 1, np.array([1, 2, 3]), axis=1) array([[1., 1., 1.], [2., 2., 2.], [3., 3., 3.]]) >>> b = a.flatten() >>> b array([1., 1., 2., 2., 3., 3.]) >>> np.insert(b, np.array([2, 2], dtype=np.int64), np.array([5, 6])) array([1., 1., 5., 6., 2., 2., 3., 3.]) >>> np.insert(b, slice(2, 4), np.array([5, 6])) array([1., 1., 5., 2., 6., 2., 3., 3.]) # type casting >>> np.insert(b.astype(np.int32), np.array([2, 2],dtype='int64'), np.array([7.13, False])) array([1, 1, 7, 0, 2, 2, 3, 3], dtype=int32) >>> x = np.arange(8).reshape(2, 4) >>> idx = np.array([1, 3], dtype=np.int64) >>> np.insert(x, idx, np.array([999]), axis=1) array([[ 0., 999., 1., 2., 999., 3.], [ 4., 999., 5., 6., 999., 7.]]) """ if isinstance(values, numeric_types): if isinstance(obj, slice): start = obj.start stop = obj.stop step = 1 if obj.step is None else obj.step return _npi.insert_slice(arr, val=values, start=start, stop=stop, step=step, axis=axis) elif isinstance(obj, integer_types): return _npi.insert_scalar(arr, val=values, int_ind=obj, axis=axis) elif isinstance(obj, NDArray): return _npi.insert_tensor(arr, obj, val=values, axis=axis) if not isinstance(arr, NDArray): raise TypeError("'arr' can not support type {}".format(str(type(arr)))) if not isinstance(values, NDArray): raise TypeError("'values' can not support type {}".format(str(type(values)))) if isinstance(obj, slice): start = obj.start stop = obj.stop step = 1 if obj.step is None else obj.step return _npi.insert_slice(arr, values, start=start, stop=stop, step=step, axis=axis) elif isinstance(obj, integer_types): return _npi.insert_scalar(arr, values, int_ind=obj, axis=axis) elif isinstance(obj, NDArray): return _npi.insert_tensor(arr, values, obj, axis=axis) else: raise TypeError("'obj' can not support type {}".format(str(type(obj)))) #pylint: disable= too-many-arguments, no-member, protected-access def _ufunc_helper(lhs, rhs, fn_array, fn_scalar, lfn_scalar, rfn_scalar=None, out=None): """ Helper function for element-wise operation. The function will perform numpy-like broadcasting if needed and call different functions. Parameters -------- lhs : ndarray or numeric value Left-hand side operand. rhs : ndarray or numeric value Right-hand operand, fn_array : function Function to be called if both lhs and rhs are of ``ndarray`` type. fn_scalar : function Function to be called if both lhs and rhs are numeric values. 
lfn_scalar : function Function to be called if lhs is ``ndarray`` while rhs is numeric value rfn_scalar : function Function to be called if lhs is numeric value while rhs is ``ndarray``; if none is provided, then the function is commutative, so rfn_scalar is equal to lfn_scalar Returns -------- mxnet.numpy.ndarray or scalar result array or scalar """ from ...numpy import ndarray from ..ndarray import from_numpy # pylint: disable=unused-import if isinstance(lhs, numeric_types): if isinstance(rhs, numeric_types): return fn_scalar(lhs, rhs, out=out) else: if rfn_scalar is None: # commutative function return lfn_scalar(rhs, float(lhs), out=out) else: return rfn_scalar(rhs, float(lhs), out=out) elif isinstance(rhs, numeric_types): return lfn_scalar(lhs, float(rhs), out=out) elif isinstance(lhs, ndarray) and isinstance(rhs, ndarray): return fn_array(lhs, rhs, out=out) else: raise TypeError('type {} not supported'.format(str(type(rhs)))) #pylint: enable= too-many-arguments, no-member, protected-access @set_module('mxnet.ndarray.numpy') def unique(ar, return_index=False, return_inverse=False, return_counts=False, axis=None): """ Find the unique elements of an array. Returns the sorted unique elements of an array. There are three optional outputs in addition to the unique elements: * the indices of the input array that give the unique values * the indices of the unique array that reconstruct the input array * the number of times each unique value comes up in the input array Parameters ---------- ar : ndarray Input array. Unless `axis` is specified, this will be flattened if it is not already 1-D. return_index : bool, optional If True, also return the indices of `ar` (along the specified axis, if provided, or in the flattened array) that result in the unique array. return_inverse : bool, optional If True, also return the indices of the unique array (for the specified axis, if provided) that can be used to reconstruct `ar`. return_counts : bool, optional If True, also return the number of times each unique item appears in `ar`. axis : int or None, optional The axis to operate on. If None, `ar` will be flattened. If an integer, the subarrays indexed by the given axis will be flattened and treated as the elements of a 1-D array with the dimension of the given axis, see the notes for more details. The default is None. Returns ------- unique : ndarray The sorted unique values. unique_indices : ndarray, optional The indices of the first occurrences of the unique values in the original array. Only provided if `return_index` is True. unique_inverse : ndarray, optional The indices to reconstruct the original array from the unique array. Only provided if `return_inverse` is True. unique_counts : ndarray, optional The number of times each of the unique values comes up in the original array. Only provided if `return_counts` is True. Notes ----- When an axis is specified the subarrays indexed by the axis are sorted. This is done by making the specified axis the first dimension of the array and then flattening the subarrays in C order. The flattened subarrays are then viewed as a structured type with each element given a label, with the effect that we end up with a 1-D array of structured types that can be treated in the same way as any other 1-D array. The result is that the flattened subarrays are sorted in lexicographic order starting with the first element. 
This function differs from the original `numpy.unique <https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html>`_ in the following aspects: - Only support ndarray as input. - Object arrays or structured arrays are not supported. Examples -------- >>> np.unique(np.array([1, 1, 2, 2, 3, 3])) array([1., 2., 3.]) >>> a = np.array([[1, 1], [2, 3]]) >>> np.unique(a) array([1., 2., 3.]) Return the unique rows of a 2D array >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]]) >>> np.unique(a, axis=0) array([[1., 0., 0.], [2., 3., 4.]]) Return the indices of the original array that give the unique values: >>> a = np.array([1, 2, 6, 4, 2, 3, 2]) >>> u, indices = np.unique(a, return_index=True) >>> u array([1., 2., 3., 4., 6.]) >>> indices array([0, 1, 5, 3, 2], dtype=int64) >>> a[indices] array([1., 2., 3., 4., 6.]) Reconstruct the input array from the unique values: >>> a = np.array([1, 2, 6, 4, 2, 3, 2]) >>> u, indices = np.unique(a, return_inverse=True) >>> u array([1., 2., 3., 4., 6.]) >>> indices array([0, 1, 4, 3, 1, 2, 1], dtype=int64) >>> u[indices] array([1., 2., 6., 4., 2., 3., 2.]) """ ret = _npi.unique(ar, return_index, return_inverse, return_counts, axis) if isinstance(ret, list): return tuple(ret) else: return ret @set_module('mxnet.ndarray.numpy') @wrap_np_binary_func def add(x1, x2, out=None, **kwargs): """ Add arguments element-wise. Parameters ---------- x1, x2 : ndarrays or scalar values The arrays to be added. If x1.shape != x2.shape, they must be broadcastable to a common shape (which may be the shape of one or the other). out : ndarray A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. Returns ------- add : ndarray or scalar The sum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars. Notes ----- This operator now supports automatic type promotion. The resulting type will be determined according to the following rules: * If both inputs are of floating number types, the output is the more precise type. * If only one of the inputs is floating number type, the result is that type. * If both inputs are of integer types (including boolean), not supported yet. """ return _ufunc_helper(x1, x2, _npi.add, _np.add, _npi.add_scalar, None, out) @set_module('mxnet.ndarray.numpy') @wrap_np_binary_func def subtract(x1, x2, out=None, **kwargs): """ Subtract arguments element-wise. Parameters ---------- x1, x2 : ndarrays or scalar values The arrays to be subtracted from each other. If x1.shape != x2.shape, they must be broadcastable to a common shape (which may be the shape of one or the other). out : ndarray A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. Returns ------- subtract : ndarray or scalar The difference of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars. Notes ----- This operator now supports automatic type promotion. The resulting type will be determined according to the following rules: * If both inputs are of floating number types, the output is the more precise type. * If only one of the inputs is floating number type, the result is that type. * If both inputs are of integer types (including boolean), not supported yet. 
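For example, under these rules (a minimal sketch assuming MXNet's numpy front end, ``from mxnet import np``; the printed dtypes may differ slightly across versions):

>>> from mxnet import np
>>> np.subtract(np.ones((2,), dtype='float64'), np.ones((2,), dtype='float32')).dtype
dtype('float64')
>>> np.subtract(np.ones((2,), dtype='float32'), 1).dtype
dtype('float32')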
""" return _ufunc_helper(x1, x2, _npi.subtract, _np.subtract, _npi.subtract_scalar, _npi.rsubtract_scalar, out) @set_module('mxnet.ndarray.numpy') @wrap_np_binary_func def multiply(x1, x2, out=None, **kwargs): """ Multiply arguments element-wise. Parameters ---------- x1, x2 : ndarrays or scalar values The arrays to be multiplied. If x1.shape != x2.shape, they must be broadcastable to a common shape (which may be the shape of one or the other). out : ndarray A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. Returns ------- out : ndarray or scalar The multiplication of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars. Notes ----- This operator now supports automatic type promotion. The resulting type will be determined according to the following rules: * If both inputs are of floating number types, the output is the more precise type. * If only one of the inputs is floating number type, the result is that type. * If both inputs are of integer types (including boolean), not supported yet. """ return _ufunc_helper(x1, x2, _npi.multiply, _np.multiply, _npi.multiply_scalar, None, out) @set_module('mxnet.ndarray.numpy') @wrap_np_binary_func def divide(x1, x2, out=None, **kwargs): """ Returns a true division of the inputs, element-wise. Parameters ---------- x1 : ndarray or scalar Dividend array. x2 : ndarray or scalar Divisor array. out : ndarray A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. Returns ------- out : ndarray or scalar This is a scalar if both x1 and x2 are scalars. Notes ----- This operator now supports automatic type promotion. The resulting type will be determined according to the following rules: * If both inputs are of floating number types, the output is the more precise type. * If only one of the inputs is floating number type, the result is that type. * If both inputs are of integer types (including boolean), the output is of float32 type. """ return _ufunc_helper(x1, x2, _npi.true_divide, _np.divide, _npi.true_divide_scalar, _npi.rtrue_divide_scalar, out) @set_module('mxnet.ndarray.numpy') def true_divide(x1, x2, out=None): """Returns a true division of the inputs, element-wise. Instead of the Python traditional 'floor division', this returns a true division. True division adjusts the output type to present the best answer, regardless of input types. Parameters ---------- x1 : ndarray or scalar Dividend array. x2 : ndarray or scalar Divisor array. out : ndarray A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. Returns ------- out : ndarray or scalar This is a scalar if both x1 and x2 are scalars. Notes ----- This operator now supports automatic type promotion. The resulting type will be determined according to the following rules: * If both inputs are of floating number types, the output is the more precise type. * If only one of the inputs is floating number type, the result is that type. * If both inputs are of integer types (including boolean), the output is of float32 type. 
""" return _ufunc_helper(x1, x2, _npi.true_divide, _np.divide, _npi.true_divide_scalar, _npi.rtrue_divide_scalar, out) @set_module('mxnet.ndarray.numpy') @wrap_np_binary_func def mod(x1, x2, out=None, **kwargs): """ Return element-wise remainder of division. Parameters ---------- x1 : ndarray or scalar Dividend array. x2 : ndarray or scalar Divisor array. out : ndarray A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. Returns ------- out : ndarray or scalar This is a scalar if both x1 and x2 are scalars. """ return _ufunc_helper(x1, x2, _npi.mod, _np.mod, _npi.mod_scalar, _npi.rmod_scalar, out) @set_module('mxnet.ndarray.numpy') def delete(arr, obj, axis=None): """ Return a new array with sub-arrays along an axis deleted. For a one dimensional array, this returns those entries not returned by `arr[obj]`. Parameters ---------- arr : ndarray Input array. obj : slice, int or ndarray of ints Indicate indices of sub-arrays to remove along the specified axis. axis : int, optional The axis along which to delete the subarray defined by `obj`. If `axis` is None, `obj` is applied to the flattened array. Returns ------- out : ndarray A copy of `arr` with the elements specified by `obj` removed. Note that `delete` does not occur in-place. If `axis` is None, `out` is a flattened array. Examples -------- >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) >>> arr array([[ 1., 2., 3., 4.], [ 5., 6., 7., 8.], [ 9., 10., 11., 12.]]) >>> np.delete(arr, 1, 0) array([[ 1., 2., 3., 4.], [ 9., 10., 11., 12.]]) >>> np.delete(arr, slice(None, None, 2), 1) array([[ 2., 4.], [ 6., 8.], [10., 12.]]) >>> np.delete(arr, np.array([1,3,5]), None) array([ 1., 3., 5., 7., 8., 9., 10., 11., 12.]) >>> np.delete(arr, np.array([1,1,5]), None) array([ 1., 3., 4., 5., 7., 8., 9., 10., 11., 12.]) """ if not isinstance(arr, NDArray): raise TypeError("'arr' can not support type {}".format(str(type(arr)))) if isinstance(obj, slice): start = obj.start stop = obj.stop step = 1 if obj.step is None else obj.step return _npi.delete(arr, start=start, stop=stop, step=step, axis=axis) elif isinstance(obj, integer_types): return _npi.delete(arr, int_ind=obj, axis=axis) elif isinstance(obj, NDArray): return _npi.delete(arr, obj, axis=axis) else: raise TypeError("'obj' can not support type {}".format(str(type(obj)))) @set_module('mxnet.ndarray.numpy') @wrap_np_binary_func def matmul(a, b, out=None): """ Matrix product of two arrays. Parameters ---------- a, b : ndarray Input arrays, scalars not allowed. out : ndarray, optional A location into which the result is stored. If provided, it must have a shape that matches the signature (n,k),(k,m)->(n,m). If not provided or None, a freshly-allocated array is returned. Returns ------- y : ndarray The matrix product of the inputs. This is a scalar only when both x1, x2 are 1-d vectors. Raises ------ MXNetError If the last dimension of a is not the same size as the second-to-last dimension of b. If a scalar value is passed in. See Also -------- tensordot : Sum products over arbitrary axes. dot : alternative matrix product with different broadcasting rules. einsum : Einstein summation convention. Notes ----- The behavior depends on the arguments in the following way. - If both arguments are 2-D they are multiplied like conventional matrices. - If either argument is N-D, N > 2, it is treated as a stack of matrices residing in the last two indexes and broadcast accordingly. 
- If the first argument is 1-D, it is promoted to a matrix by prepending a 1 to its dimensions. After matrix multiplication the prepended 1 is removed. - If the second argument is 1-D, it is promoted to a matrix by appending a 1 to its dimensions. After matrix multiplication the appended 1 is removed. matmul differs from dot in two important ways: - Multiplication by scalars is not allowed, use multiply instead. - Stacks of matrices are broadcast together as if the matrices were elements, respecting the signature (n,k),(k,m)->(n,m): >>> a = np.ones([9, 5, 7, 4]) >>> c = np.ones([9, 5, 4, 3]) >>> np.dot(a, c).shape (9, 5, 7, 9, 5, 3) >>> np.matmul(a, c).shape (9, 5, 7, 3) >>> # n is 7, k is 4, m is 3 Examples -------- For 2-D arrays it is the matrix product: >>> a = np.array([[1, 0], ... [0, 1]]) >>> b = np.array([[4, 1], ... [2, 2]]) >>> np.matmul(a, b) array([[4., 1.], [2., 2.]]) For 2-D mixed with 1-D, the result is the usual. >>> a = np.array([[1, 0], ... [0, 1]]) >>> b = np.array([1, 2]) >>> np.matmul(a, b) array([1., 2.]) >>> np.matmul(b, a) array([1., 2.]) Broadcasting is conventional for stacks of arrays >>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4)) >>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2)) >>> np.matmul(a, b).shape (2, 2, 2) >>> np.matmul(a, b)[0, 1, 1] array(98.) >>> sum(a[0, 1, :] * b[0, :, 1]) array(98.) Scalar multiplication raises an error. >>> np.matmul([1, 2], 3) Traceback (most recent call last): ... mxnet.base.MXNetError: ... : Multiplication by scalars is not allowed. """ return _npi.matmul(a, b, out=out) @set_module('mxnet.ndarray.numpy') @wrap_np_binary_func def remainder(x1, x2, out=None): """ Return element-wise remainder of division. Parameters ---------- x1 : ndarray or scalar Dividend array. x2 : ndarray or scalar Divisor array. out : ndarray A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. Returns ------- out : ndarray or scalar This is a scalar if both x1 and x2 are scalars. """ return _ufunc_helper(x1, x2, _npi.mod, _np.mod, _npi.mod_scalar, _npi.rmod_scalar, out) @set_module('mxnet.ndarray.numpy') @wrap_np_binary_func def power(x1, x2, out=None, **kwargs): """ First array elements raised to powers from second array, element-wise. Parameters ---------- x1 : ndarray or scalar The bases. x2 : ndarray or scalar The exponent. out : ndarray A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. Returns ------- out : ndarray or scalar The bases in x1 raised to the exponents in x2. This is a scalar if both x1 and x2 are scalars. """ return _ufunc_helper(x1, x2, _npi.power, _np.power, _npi.power_scalar, _npi.rpower_scalar, out) @set_module('mxnet.ndarray.numpy') def argsort(a, axis=-1, kind=None, order=None): """ Returns the indices that would sort an array. Perform an indirect sort along the given axis using the algorithm specified by the `kind` keyword. It returns an array of indices of the same shape as `a` that index data along the given axis in sorted order. Parameters ---------- a : ndarray Array to sort. axis : int or None, optional Axis along which to sort. The default is -1 (the last axis). If None, the flattened array is used. kind : string, optional This argument can take any string, but it does not have any effect on the final result. 
order : str or list of str, optional Not supported yet, will raise NotImplementedError if not None. Returns ------- index_array : ndarray, int Array of indices that sort `a` along the specified `axis`. If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`. More generally, ``np.take_along_axis(a, index_array, axis=axis)`` always yields the sorted `a`, irrespective of dimensionality. Notes ----- This operator does not support different sorting algorithms. Examples -------- One dimensional array: >>> x = np.array([3, 1, 2]) >>> np.argsort(x) array([1, 2, 0]) Two-dimensional array: >>> x = np.array([[0, 3], [2, 2]]) >>> x array([[0, 3], [2, 2]]) >>> ind = np.argsort(x, axis=0) # sorts along first axis (down) >>> ind array([[0, 1], [1, 0]]) >>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0) array([[0, 2], [2, 3]]) >>> ind = np.argsort(x, axis=1) # sorts along last axis (across) >>> ind array([[0, 1], [0, 1]]) >>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1) array([[0, 3], [2, 2]]) Indices of the sorted elements of a N-dimensional array: >>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape) >>> ind (array([0, 1, 1, 0]), array([0, 0, 1, 1])) >>> x[ind] # same as np.sort(x, axis=None) array([0, 2, 2, 3]) """ if order is not None: raise NotImplementedError("order not supported here") return _npi.argsort(data=a, axis=axis, is_ascend=True, dtype='int64') @set_module('mxnet.ndarray.numpy') def sort(a, axis=-1, kind=None, order=None): """ Return a sorted copy of an array. Parameters ---------- a : ndarray Array to be sorted. axis : int or None, optional Axis along which to sort. The default is -1 (the last axis). If None, the flattened array is used. kind : string, optional This argument can take any string, but it does not have any effect on the final result. order : str or list of str, optional Not supported yet, will raise NotImplementedError if not None. Returns ------- sorted_array : ndarray Array of the same type and shape as `a`. Notes ----- This operator does not support different sorting algorithms. Examples -------- >>> a = np.array([[1,4],[3,1]]) >>> np.sort(a) # sort along the last axis array([[1, 4], [1, 3]]) >>> np.sort(a, axis=None) # sort the flattened array array([1, 1, 3, 4]) >>> np.sort(a, axis=0) # sort along the first axis array([[1, 1], [3, 4]]) """ if order is not None: raise NotImplementedError("order not supported here") return _npi.sort(data=a, axis=axis, is_ascend=True) @set_module('mxnet.ndarray.numpy') def tensordot(a, b, axes=2): r""" tensordot(a, b, axes=2) Compute tensor dot product along specified axes for arrays >= 1-D. Given two tensors (arrays of dimension greater than or equal to one), `a` and `b`, and an ndarray object containing two ndarray objects, ``(a_axes, b_axes)``, sum the products of `a`'s and `b`'s elements (components) over the axes specified by ``a_axes`` and ``b_axes``. The third argument can be a single non-negative integer_like scalar, ``N``; if it is such, then the last ``N`` dimensions of `a` and the first ``N`` dimensions of `b` are summed over. Parameters ---------- a, b : ndarray, len(shape) >= 1 Tensors to "dot". axes : int or (2,) ndarray * integer_like If an int N, sum over the last N axes of `a` and the first N axes of `b` in order. The sizes of the corresponding axes must match. * (2,) ndarray Or, a list of axes to be summed over, first sequence applying to `a`, second to `b`. Both elements ndarray must be of the same length. 
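For instance, the integer form contracts the last ``N`` axes of `a` with the first ``N`` axes of `b` (a brief sketch assuming ``np`` is MXNet's numpy module):

>>> from mxnet import np
>>> a = np.ones((3, 4))
>>> b = np.ones((4, 5))
>>> np.tensordot(a, b, axes=1).shape
(3, 5)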
See Also -------- dot, einsum Notes ----- Three common use cases are: * ``axes = 0`` : tensor product :math:`a\otimes b` * ``axes = 1`` : tensor dot product :math:`a\cdot b` * ``axes = 2`` : (default) tensor double contraction :math:`a:b` When `axes` is integer_like, the sequence for evaluation will be: first the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and Nth axis in `b` last. When there is more than one axis to sum over - and they are not the last (first) axes of `a` (`b`) - the argument `axes` should consist of two sequences of the same length, with the first axis to sum over given first in both sequences, the second axis second, and so forth. Examples -------- >>> a = np.arange(60.).reshape(3,4,5) >>> b = np.arange(24.).reshape(4,3,2) >>> c = np.tensordot(a,b, axes=([1,0],[0,1])) >>> c.shape (5, 2) >>> c array([[ 4400., 4730.], [ 4532., 4874.], [ 4664., 5018.], [ 4796., 5162.], [ 4928., 5306.]]) """ if _np.isscalar(axes): return _npi.tensordot_int_axes(a, b, axes) if len(axes) != 2: raise ValueError('Axes must consist of two arrays.') a_axes_summed, b_axes_summed = axes if _np.isscalar(a_axes_summed): a_axes_summed = (a_axes_summed,) if _np.isscalar(b_axes_summed): b_axes_summed = (b_axes_summed,) if len(a_axes_summed) != len(b_axes_summed): raise ValueError('Axes length mismatch') return _npi.tensordot(a, b, a_axes_summed, b_axes_summed) @set_module('mxnet.ndarray.numpy') def histogram(a, bins=10, range=None, normed=None, weights=None, density=None): # pylint: disable=too-many-arguments """ Compute the histogram of a set of data. Parameters ---------- a : ndarray Input data. The histogram is computed over the flattened array. bins : int or NDArray If `bins` is an int, it defines the number of equal-width bins in the given range (10, by default). If `bins` is a sequence, it defines a monotonically increasing array of bin edges, including the rightmost edge, allowing for non-uniform bin widths. .. versionadded:: 1.11.0 If `bins` is a string, it defines the method used to calculate the optimal bin width, as defined by `histogram_bin_edges`. range : (float, float) The lower and upper range of the bins. Required when `bins` is an integer. Values outside the range are ignored. The first element of the range must be less than or equal to the second. normed : bool, optional Not supported yet, coming soon. weights : array_like, optional Not supported yet, coming soon. density : bool, optional Not supported yet, coming soon. """ if normed is True: raise NotImplementedError("normed is not supported yet...") if weights is not None: raise NotImplementedError("weights is not supported yet...") if density is True: raise NotImplementedError("density is not supported yet...") if isinstance(bins, numeric_types): if range is None: raise NotImplementedError("automatic range is not supported yet...") return _npi.histogram(a, bin_cnt=bins, range=range) if isinstance(bins, (list, tuple)): raise NotImplementedError("array_like bins is not supported yet...") if isinstance(bins, str): raise NotImplementedError("string bins is not supported yet...") if isinstance(bins, NDArray): return _npi.histogram(a, bins=bins) raise ValueError("np.histogram fails with", locals()) @set_module('mxnet.ndarray.numpy') def eye(N, M=None, k=0, dtype=_np.float32, **kwargs): """ Return a 2-D array with ones on the diagonal and zeros elsewhere. Parameters ---------- N : int Number of rows in the output. M : int, optional Number of columns in the output. If None, defaults to N. 
k : int, optional Index of the diagonal: 0 (the default) refers to the main diagonal, a positive value refers to an upper diagonal, and a negative value to a lower diagonal. dtype : data-type, optional Data-type of the returned array. Returns ------- I : ndarray of shape (N,M) An array where all elements are equal to zero, except for the k-th diagonal, whose values are equal to one. """ _sanity_check_params('eye', ['order'], kwargs) ctx = kwargs.pop('ctx', current_context()) if ctx is None: ctx = current_context() return _npi.eye(N, M, k, ctx, dtype) @set_module('mxnet.ndarray.numpy') def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0, ctx=None): # pylint: disable=too-many-arguments r""" Return evenly spaced numbers over a specified interval. Returns num evenly spaced samples, calculated over the interval [start, stop]. The endpoint of the interval can optionally be excluded. Parameters ---------- start : real number The starting value of the sequence. stop : real number The end value of the sequence, unless endpoint is set to False. In that case, the sequence consists of all but the last of num + 1 evenly spaced samples, so that stop is excluded. Note that the step size changes when endpoint is False. num : int, optional Number of samples to generate. Default is 50. Must be non-negative. endpoint : bool, optional If True, stop is the last sample. Otherwise, it is not included. Default is True. retstep : bool, optional If True, return (samples, step), where step is the spacing between samples. dtype : dtype, optional The type of the output array. If dtype is not given, infer the data type from the other input arguments. axis : int, optional The axis in the result to store the samples. Relevant only if start or stop are array-like. By default (0), the samples will be along a new axis inserted at the beginning. Use -1 to get an axis at the end. Returns ------- samples : ndarray There are num equally spaced samples in the closed interval `[start, stop]` or the half-open interval `[start, stop)` (depending on whether endpoint is True or False). step : float, optional Only returned if retstep is True Size of spacing between samples. See Also -------- arange : Similar to `linspace`, but uses a step size (instead of the number of samples). Examples -------- >>> np.linspace(2.0, 3.0, num=5) array([2. , 2.25, 2.5 , 2.75, 3. ]) >>> np.linspace(2.0, 3.0, num=5, endpoint=False) array([2. , 2.2, 2.4, 2.6, 2.8]) >>> np.linspace(2.0, 3.0, num=5, retstep=True) (array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25) Graphical illustration: >>> import matplotlib.pyplot as plt >>> N = 8 >>> y = np.zeros(N) >>> x1 = np.linspace(0, 10, N, endpoint=True) >>> x2 = np.linspace(0, 10, N, endpoint=False) >>> plt.plot(x1.asnumpy(), y.asnumpy(), 'o') [<matplotlib.lines.Line2D object at 0x...>] >>> plt.plot(x2.asnumpy(), (y + 0.5).asnumpy(), 'o') [<matplotlib.lines.Line2D object at 0x...>] >>> plt.ylim([-0.5, 1]) (-0.5, 1) >>> plt.show() Notes ----- This function differs from the original `numpy.linspace <https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html>`_ in the following aspects: - `start` and `stop` do not support list, numpy ndarray and mxnet ndarray - axis could only be 0 - There could be an additional `ctx` argument to specify the device, e.g. the i-th GPU. 
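A short illustration under those constraints (a sketch assuming ``np`` is MXNet's numpy module; exact output formatting may vary):

>>> from mxnet import np
>>> np.linspace(0, 1, 5)
array([0.  , 0.25, 0.5 , 0.75, 1.  ])
>>> np.linspace(0, 1, 5, dtype='float64').dtype
dtype('float64')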
""" if isinstance(start, (list, _np.ndarray, NDArray)) or \ isinstance(stop, (list, _np.ndarray, NDArray)): raise NotImplementedError('start and stop only support int') if axis != 0: raise NotImplementedError("the function only support axis 0") if ctx is None: ctx = current_context() if retstep: step = (stop - start) / (num - 1) return _npi.linspace(start=start, stop=stop, num=num, endpoint=endpoint, ctx=ctx, dtype=dtype), step else: return _npi.linspace(start=start, stop=stop, num=num, endpoint=endpoint, ctx=ctx, dtype=dtype) @set_module('mxnet.ndarray.numpy') def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0, ctx=None): # pylint: disable=too-many-arguments r"""Return numbers spaced evenly on a log scale. In linear space, the sequence starts at ``base ** start`` (`base` to the power of `start`) and ends with ``base ** stop`` (see `endpoint` below). Non-scalar `start` and `stop` are now supported. Parameters ---------- start : int or float ``base ** start`` is the starting value of the sequence. stop : int or float ``base ** stop`` is the final value of the sequence, unless `endpoint` is False. In that case, ``num + 1`` values are spaced over the interval in log-space, of which all but the last (a sequence of length `num`) are returned. num : integer, optional Number of samples to generate. Default is 50. endpoint : boolean, optional If true, `stop` is the last sample. Otherwise, it is not included. Default is True. base : float, optional The base of the log space. The step size between the elements in ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform. Default is 10.0. dtype : dtype The type of the output array. If `dtype` is not given, infer the data type from the other input arguments. axis : int, optional The axis in the result to store the samples. Relevant only if start or stop are array-like. By default (0), the samples will be along a new axis inserted at the beginning. Now, axis only support axis = 0. ctx : Context, optional An optional device context (default is the current default context). Returns ------- samples : ndarray `num` samples, equally spaced on a log scale. See Also -------- arange : Similar to linspace, with the step size specified instead of the number of samples. Note that, when used with a float endpoint, the endpoint may or may not be included. linspace : Similar to logspace, but with the samples uniformly distributed in linear space, instead of log space. Notes ----- Logspace is equivalent to the code. Now wo only support axis = 0. >>> y = np.linspace(start, stop, num=num, endpoint=endpoint) ... >>> power(base, y).astype(dtype) ... Examples -------- >>> np.logspace(2.0, 3.0, num=4) array([ 100. , 215.44347, 464.15887, 1000. ]) >>> np.logspace(2.0, 3.0, num=4, endpoint=False) array([100. , 177.82794, 316.22775, 562.3413 ]) >>> np.logspace(2.0, 3.0, num=4, base=2.0) array([4. , 5.0396843, 6.349604 , 8. ]) >>> np.logspace(2.0, 3.0, num=4, base=2.0, dtype=np.int32) array([4, 5, 6, 8], dtype=int32) >>> np.logspace(2.0, 3.0, num=4, ctx=npx.gpu(0)) array([ 100. , 215.44347, 464.15887, 1000. 
], ctx=gpu(0)) """ if isinstance(start, (list, tuple, _np.ndarray, NDArray)) or \ isinstance(stop, (list, tuple, _np.ndarray, NDArray)): raise NotImplementedError('start and stop only support int and float') if axis != 0: raise NotImplementedError("the function only support axis 0") if ctx is None: ctx = current_context() return _npi.logspace(start=start, stop=stop, num=num, endpoint=endpoint, base=base, ctx=ctx, dtype=dtype) @set_module('mxnet.ndarray.numpy') def expand_dims(a, axis): """Expand the shape of an array. Insert a new axis that will appear at the `axis` position in the expanded Parameters ---------- a : ndarray Input array. axis : int Position in the expanded axes where the new axis is placed. Returns ------- res : ndarray Output array. The number of dimensions is one greater than that of the input array. """ return _npi.expand_dims(a, axis) @set_module('mxnet.ndarray.numpy') @wrap_np_binary_func def lcm(x1, x2, out=None, **kwargs): """ Returns the lowest common multiple of ``|x1|`` and ``|x2|`` Parameters ---------- x1, x2 : ndarrays or scalar values The arrays for computing lowest common multiple. If x1.shape != x2.shape, they must be broadcastable to a common shape (which may be the shape of one or the other). out : ndarray or None, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. Returns ------- y : ndarray or scalar The lowest common multiple of the absolute value of the inputs This is a scalar if both `x1` and `x2` are scalars. See Also -------- gcd : The greatest common divisor Examples -------- >>> np.lcm(12, 20) 60 >>> np.lcm(np.arange(6, dtype=int), 20) array([ 0, 20, 20, 60, 20, 20], dtype=int64) """ return _ufunc_helper(x1, x2, _npi.lcm, _np.lcm, _npi.lcm_scalar, None, out) @set_module('mxnet.ndarray.numpy') def tril(m, k=0): r""" Lower triangle of an array. Return a copy of an array with elements above the `k`-th diagonal zeroed. Parameters ---------- m : ndarray, shape (M, N) Input array. k : int, optional Diagonal above which to zero elements. `k = 0` (the default) is the main diagonal, `k < 0` is below it and `k > 0` is above. Returns ------- tril : ndarray, shape (M, N) Lower triangle of `m`, of same shape and data-type as `m`. See Also -------- triu : same thing, only for the upper triangle Examples -------- >>> a = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]]) >>> np.tril(a, -1) array([[ 0., 0., 0.], [ 4., 0., 0.], [ 7., 8., 0.], [10., 11., 12.]]) """ return _npi.tril(m, k) def _unary_func_helper(x, fn_array, fn_scalar, out=None, **kwargs): """Helper function for unary operators. Parameters ---------- x : ndarray or scalar Input of the unary operator. fn_array : function Function to be called if x is of ``ndarray`` type. fn_scalar : function Function to be called if x is a Python scalar. out : ndarray The buffer ndarray for storing the result of the unary function. Returns ------- out : mxnet.numpy.ndarray or scalar Result array or scalar. """ if isinstance(x, numeric_types): return fn_scalar(x, **kwargs) elif isinstance(x, NDArray): return fn_array(x, out=out, **kwargs) else: raise TypeError('type {} not supported'.format(str(type(x)))) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def sin(x, out=None, **kwargs): r""" Trigonometric sine, element-wise. Parameters ---------- x : ndarray or scalar Angle, in radians (:math:`2 \pi` rad equals 360 degrees). out : ndarray or None A location into which the result is stored. 
If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. The dtype of the output is the same as that of the input if the input is an ndarray. Returns ------- y : ndarray or scalar The sine of each element of x. This is a scalar if `x` is a scalar. Notes ---- This function only supports input type of float. Examples -------- >>> np.sin(np.pi/2.) 1.0 >>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180.) array([0. , 0.5 , 0.70710677, 0.86602545, 1. ]) """ return _unary_func_helper(x, _npi.sin, _np.sin, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def cos(x, out=None, **kwargs): r""" Cosine, element-wise. Parameters ---------- x : ndarray or scalar Angle, in radians (:math:`2 \pi` rad equals 360 degrees). out : ndarray or None A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. The dtype of the output is the same as that of the input if the input is an ndarray. Returns ------- y : ndarray or scalar The corresponding cosine values. This is a scalar if x is a scalar. Notes ---- This function only supports input type of float. Examples -------- >>> np.cos(np.array([0, np.pi/2, np.pi])) array([ 1.000000e+00, -4.371139e-08, -1.000000e+00]) >>> # Example of providing the optional output parameter >>> out1 = np.array([0], dtype='f') >>> out2 = np.cos(np.array([0.1]), out1) >>> out2 is out1 True """ return _unary_func_helper(x, _npi.cos, _np.cos, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def sinh(x, out=None, **kwargs): """ Hyperbolic sine, element-wise. Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or ``-1j * np.sin(1j*x)``. Parameters ---------- x : ndarray or scalar Input array or scalar. out : ndarray or None A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. The dtype of the output is the same as that of the input if the input is an ndarray. Returns ------- y : ndarray or scalar The corresponding hyperbolic sine values. This is a scalar if `x` is a scalar. Notes ---- This function only supports input type of float. Examples -------- >>> np.sinh(0) 0.0 >>> # Example of providing the optional output parameter >>> out1 = np.array([0], dtype='f') >>> out2 = np.sinh(np.array([0.1]), out1) >>> out2 is out1 True """ return _unary_func_helper(x, _npi.sinh, _np.sinh, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def cosh(x, out=None, **kwargs): """ Hyperbolic cosine, element-wise. Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``. Parameters ---------- x : ndarray or scalar Input array or scalar. out : ndarray or None A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. The dtype of the output is the same as that of the input if the input is an ndarray. Returns ------- y : ndarray or scalar The corresponding hyperbolic cosine values. This is a scalar if `x` is a scalar. Notes ---- This function only supports input type of float. 
Examples -------- >>> np.cosh(0) 1.0 """ return _unary_func_helper(x, _npi.cosh, _np.cosh, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def tanh(x, out=None, **kwargs): """ Compute hyperbolic tangent element-wise. Equivalent to ``np.sinh(x)/np.cosh(x)``. Parameters ---------- x : ndarray or scalar. Input array. out : ndarray or None A location into which the result is stored. If provided, it must have a shape that the inputs fill into. If not provided or None, a freshly-allocated array is returned. The dtype of the output and input must be the same. Returns ------- y : ndarray or scalar The corresponding hyperbolic tangent values. Notes ----- If `out` is provided, the function writes the result into it, and returns a reference to `out`. (See Examples) - input x does not support complex computation (like imaginary number) >>> np.tanh(np.pi*1j) TypeError: type <type 'complex'> not supported Examples -------- >>> np.tanh(np.array([0, np.pi])) array([0. , 0.9962721]) >>> np.tanh(np.pi) 0.99627207622075 >>> # Example of providing the optional output parameter illustrating >>> # that what is returned is a reference to said parameter >>> out1 = np.array(1) >>> out2 = np.tanh(np.array(0.1), out1) >>> out2 is out1 True """ return _unary_func_helper(x, _npi.tanh, _np.tanh, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def log10(x, out=None, **kwargs): """ Return the base 10 logarithm of the input array, element-wise. Parameters ---------- x : ndarray or scalar Input array or scalar. out : ndarray or None A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. The dtype of the output is the same as that of the input if the input is an ndarray. Returns ------- y : ndarray or scalar The logarithm to the base 10 of `x`, element-wise. NaNs are returned where x is negative. This is a scalar if `x` is a scalar. Notes ---- This function only supports input type of float. Examples -------- >>> np.log10(np.array([1e-15, -3.])) array([-15., nan]) """ return _unary_func_helper(x, _npi.log10, _np.log10, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def sqrt(x, out=None, **kwargs): """ Return the non-negative square-root of an array, element-wise. Parameters ---------- x : ndarray or scalar The values whose square-roots are required. out : ndarray, or None, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or `None`, a freshly-allocated array is returned. Returns ------- y : ndarray or scalar An array of the same shape as `x`, containing the positive square-root of each element in `x`. This is a scalar if `x` is a scalar. Notes ---- This function only supports input type of float. Examples -------- >>> np.sqrt(np.array([1,4,9])) array([1., 2., 3.]) >>> np.sqrt(np.array([4, -1, _np.inf])) array([ 2., nan, inf]) """ return _unary_func_helper(x, _npi.sqrt, _np.sqrt, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def cbrt(x, out=None, **kwargs): r""" Return the cube-root of an array, element-wise. Parameters ---------- x : ndarray The values whose cube-roots are required. out : ndarray, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
A tuple (possible only as a keyword argument) must have length equal to the number of outputs. Returns ---------- y : ndarray An array of the same shape as x, containing the cube cube-root of each element in x. If out was provided, y is a reference to it. This is a scalar if x is a scalar. Examples ---------- >>> np.cbrt([1,8,27]) array([ 1., 2., 3.]) """ return _unary_func_helper(x, _npi.cbrt, _np.cbrt, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def abs(x, out=None, **kwargs): r""" Calculate the absolute value element-wise. Parameters ---------- x : ndarray or scalar Input array. out : ndarray or None, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or `None`, a freshly-allocated array is returned. Returns ------- absolute : ndarray An ndarray containing the absolute value of each element in `x`. This is a scalar if `x` is a scalar. Examples -------- >>> x = np.array([-1.2, 1.2]) >>> np.abs(x) array([1.2, 1.2]) """ return _unary_func_helper(x, _npi.abs, _np.abs, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def absolute(x, out=None, **kwargs): r""" Calculate the absolute value element-wise. np.abs is a shorthand for this function. Parameters ---------- x : ndarray Input array. out : ndarray, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. A tuple (possible only as a keyword argument) must have length equal to the number of outputs. Returns ---------- absolute : ndarray An ndarray containing the absolute value of each element in x. Examples ---------- >>> x = np.array([-1.2, 1.2]) >>> np.absolute(x) array([ 1.2, 1.2]) """ return _unary_func_helper(x, _npi.absolute, _np.absolute, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def sign(x, out=None, **kwargs): r""" Returns an element-wise indication of the sign of a number. The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. Only supports real number. Parameters ---------- x : ndarray or a scalar Input values. out : ndarray or None, optional A location into which the result is stored. If provided, it must have the same shape and dtype as input ndarray. If not provided or `None`, a freshly-allocated array is returned. Returns ------- y : ndarray The sign of `x`. This is a scalar if `x` is a scalar. Note ------- - Only supports real number as input elements. - Input type does not support Python native iterables(list, tuple, ...). - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output. - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output. - ``out`` param does not support scalar input case. Examples -------- >>> a = np.array([-5., 4.5]) >>> np.sign(a) array([-1., 1.]) >>> # Use scalars as inputs: >>> np.sign(4.0) 1.0 >>> np.sign(0) 0 >>> # Use ``out`` parameter: >>> b = np.zeros((2, )) >>> np.sign(a, out=b) array([-1., 1.]) >>> b array([-1., 1.]) """ return _unary_func_helper(x, _npi.sign, _np.sign, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def exp(x, out=None, **kwargs): r""" Calculate the exponential of all elements in the input array. Parameters ---------- x : ndarray or scalar Input values. out : ndarray or None, optional A location into which the result is stored. 
If provided, it must have a shape that the inputs broadcast to. If not provided or `None`, a freshly-allocated array is returned. Returns ------- out : ndarray or scalar Output array, element-wise exponential of `x`. This is a scalar if `x` is a scalar. Examples -------- >>> np.exp(1) 2.718281828459045 >>> x = np.array([-1, 1, -2, 2]) >>> np.exp(x) array([0.36787945, 2.7182817 , 0.13533528, 7.389056 ]) """ return _unary_func_helper(x, _npi.exp, _np.exp, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def expm1(x, out=None, **kwargs): r""" Calculate `exp(x) - 1` of all elements in the input array. Parameters ---------- x : ndarray or scalar Input values. out : ndarray or None, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or `None`, a freshly-allocated array is returned. Returns ------- out : ndarray or scalar Output array, element-wise exponential minus one: `out = exp(x) - 1`. This is a scalar if `x` is a scalar. Examples -------- >>> np.expm1(1) 1.718281828459045 >>> x = np.array([-1, 1, -2, 2]) >>> np.expm1(x) array([-0.63212056, 1.71828183, -0.86466472, 6.3890561]) """ return _unary_func_helper(x, _npi.expm1, _np.expm1, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def arcsin(x, out=None, **kwargs): r""" Inverse sine, element-wise. Parameters ---------- x : ndarray or scalar `y`-coordinate on the unit circle. out : ndarray or None, optional A location into which the result is stored. If provided, it must have the same shape as the input. If not provided or None, a freshly-allocated array is returned. Returns ------- angle : ndarray or scalar Output array is same shape and type as x. This is a scalar if x is a scalar. The inverse sine of each element in `x`, in radians and in the closed interval ``[-pi/2, pi/2]``. Examples -------- >>> np.arcsin(1) # pi/2 1.5707963267948966 >>> np.arcsin(-1) # -pi/2 -1.5707963267948966 >>> np.arcsin(0) 0.0 Notes ----- `arcsin` is a multivalued function: for each `x` there are infinitely many numbers `z` such that :math:`sin(z) = x`. The convention is to return the angle `z` whose real part lies in [-pi/2, pi/2]. For real-valued input data types, *arcsin* always returns real output. For each value that cannot be expressed as a real number or infinity, it yields ``nan`` and sets the `invalid` floating point error flag. The inverse sine is also known as `asin` or sin^{-1}. The output `ndarray` has the same `ctx` as the input `ndarray`. This function differs from the original `numpy.arcsin <https://docs.scipy.org/doc/numpy/reference/generated/numpy.arcsin.html>`_ in the following aspects: - Only support ndarray or scalar now. - `where` argument is not supported. - Complex input is not supported. References ---------- Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*, 10th printing, New York: Dover, 1964, pp. 79ff. http://www.math.sfu.ca/~cbm/aands/ """ return _unary_func_helper(x, _npi.arcsin, _np.arcsin, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def arccos(x, out=None, **kwargs): r""" Trigonometric inverse cosine, element-wise. The inverse of cos so that, if y = cos(x), then x = arccos(y). Parameters ---------- x : ndarray x-coordinate on the unit circle. For real arguments, the domain is [-1, 1]. out : ndarray, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. 
If not provided or None, a freshly-allocated array is returned. A tuple (possible only as a keyword argument) must have length equal to the number of outputs. Returns ---------- angle : ndarray The angle of the ray intersecting the unit circle at the given x-coordinate in radians [0, pi]. This is a scalar if x is a scalar. See also ---------- cos, arctan, arcsin Notes ---------- arccos is a multivalued function: for each x there are infinitely many numbers z such that cos(z) = x. The convention is to return the angle z whose real part lies in [0, pi]. For real-valued input data types, arccos always returns real output. For each value that cannot be expressed as a real number or infinity, it yields nan and sets the invalid floating point error flag. The inverse cos is also known as acos or cos^-1. Examples ---------- >>> np.arccos([1, -1]) array([ 0. , 3.14159265]) """ return _unary_func_helper(x, _npi.arccos, _np.arccos, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def arctan(x, out=None, **kwargs): r""" Trigonometric inverse tangent, element-wise. The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``. Parameters ---------- x : ndarray or scalar Input values. out : ndarray or None, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or `None`, a freshly-allocated array is returned. Returns ------- out : ndarray or scalar Out has the same shape as `x`. It lies is in ``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``). This is a scalar if `x` is a scalar. Notes ----- `arctan` is a multi-valued function: for each `x` there are infinitely many numbers `z` such that tan(`z`) = `x`. The convention is to return the angle `z` whose real part lies in [-pi/2, pi/2]. For real-valued input data types, `arctan` always returns real output. For each value that cannot be expressed as a real number or infinity, it yields ``nan`` and sets the `invalid` floating point error flag. For complex-valued input, we do not have support for them yet. The inverse tangent is also known as `atan` or tan^{-1}. Examples -------- >>> x = np.array([0, 1]) >>> np.arctan(x) array([0. , 0.7853982]) >>> np.pi/4 0.7853981633974483 """ return _unary_func_helper(x, _npi.arctan, _np.arctan, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def log(x, out=None, **kwargs): """ Natural logarithm, element-wise. The natural logarithm `log` is the inverse of the exponential function, so that `log(exp(x)) = x`. The natural logarithm is logarithm in base `e`. Parameters ---------- x : ndarray Input value. Elements must be of real value. out : ndarray or None, optional A location into which the result is stored. If provided, it must have the same shape and dtype as input ndarray. If not provided or `None`, a freshly-allocated array is returned. Returns ------- y : ndarray The natural logarithm of `x`, element-wise. This is a scalar if `x` is a scalar. Notes ----- Currently only supports data of real values and ``inf`` as input. Returns data of real value, ``inf``, ``-inf`` and ``nan`` according to the input. This function differs from the original `numpy.log <https://docs.scipy.org/doc/numpy/reference/generated/numpy.log.html>`_ in the following aspects: - Does not support complex number for now - Input type does not support Python native iterables(list, tuple, ...). - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output. 
- ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output. - ``out`` param does not support scalar input case. Examples -------- >>> a = np.array([1, np.exp(1), np.exp(2), 0], dtype=np.float64) >>> np.log(a) array([ 0., 1., 2., -inf], dtype=float64) >>> # Using default float32 dtype may lead to slightly different behavior: >>> a = np.array([1, np.exp(1), np.exp(2), 0], dtype=np.float32) >>> np.log(a) array([ 0., 0.99999994, 2., -inf]) >>> np.log(1) 0.0 """ return _unary_func_helper(x, _npi.log, _np.log, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def degrees(x, out=None, **kwargs): """ Convert angles from radians to degrees. Parameters ---------- x : ndarray Input value. Elements must be of real value. out : ndarray or None, optional A location into which the result is stored. If provided, it must have the same shape and dtype as input ndarray. If not provided or `None`, a freshly-allocated array is returned. Returns ------- y : ndarray The corresponding degree values; if `out` was supplied this is a reference to it. This is a scalar if `x` is a scalar. Notes ------- This function differs from the original `numpy.degrees <https://docs.scipy.org/doc/numpy/reference/generated/numpy.degrees.html>`_ in the following aspects: - Input type does not support Python native iterables(list, tuple, ...). Only ndarray is supported. - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output. - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output. - ``out`` param does not support scalar input case. Examples -------- >>> rad = np.arange(12.) * np.pi / 6 >>> np.degrees(rad) array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.]) >>> # Use specified ``out`` ndarray: >>> out = np.zeros((rad.shape)) >>> np.degrees(rad, out) array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.]) >>> out array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.]) """ return _unary_func_helper(x, _npi.degrees, _np.degrees, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def rad2deg(x, out=None, **kwargs): r""" Convert angles from radians to degrees. Parameters ---------- x : ndarray or scalar Angles in degrees. out : ndarray or None, optional A location into which the result is stored. If not provided or `None`, a freshly-allocated array is returned. Returns ------- y : ndarray or scalar The corresponding angle in radians. This is a scalar if `x` is a scalar. Notes ----- "rad2deg(x)" is "x *180 / pi". This function differs from the original numpy.arange in the following aspects: - Only support float32 and float64. - `out` must be in the same size of input. Examples -------- >>> np.rad2deg(np.pi/2) 90.0 """ return _unary_func_helper(x, _npi.rad2deg, _np.rad2deg, out=out) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def rint(x, out=None, **kwargs): """ Round elements of the array to the nearest integer. Parameters ---------- x : ndarray or scalar Input array. out : ndarray or None A location into which the result is stored. If provided, it must have the same shape and type as the input. If not provided or None, a freshly-allocated array is returned. Returns ------- out : ndarray or scalar Output array is same shape and type as x. This is a scalar if x is a scalar. 
Notes ----- This function differs from the original `numpy.rint <https://docs.scipy.org/doc/numpy/reference/generated/numpy.rint.html>`_ in the following way(s): - only ndarray or scalar is accpted as valid input, tuple of ndarray is not supported - broadcasting to `out` of different shape is currently not supported - when input is plain python numerics, the result will not be stored in the `out` param Examples -------- >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) >>> np.rint(a) array([-2., -2., -0., 0., 1., 2., 2.]) """ return _unary_func_helper(x, _npi.rint, _np.rint, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def log2(x, out=None, **kwargs): """ Base-2 logarithm of x. Parameters ---------- x : ndarray or scalar Input values. out : ndarray or None A location into which the result is stored. If provided, it must have the same shape and type as the input. If not provided or None, a freshly-allocated array is returned. Returns ------- y : ndarray The logarithm base two of `x`, element-wise. This is a scalar if `x` is a scalar. Notes ----- This function differs from the original `numpy.log2 <https://www.google.com/search?q=numpy+log2>`_ in the following way(s): - only ndarray or scalar is accpted as valid input, tuple of ndarray is not supported - broadcasting to `out` of different shape is currently not supported - when input is plain python numerics, the result will not be stored in the `out` param Examples -------- >>> x = np.array([0, 1, 2, 2**4]) >>> np.log2(x) array([-inf, 0., 1., 4.]) """ return _unary_func_helper(x, _npi.log2, _np.log2, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def log1p(x, out=None, **kwargs): """ Return the natural logarithm of one plus the input array, element-wise. Calculates ``log(1 + x)``. Parameters ---------- x : ndarray or scalar Input array. out : ndarray or None A location into which the result is stored. If provided, it must have a shape that the inputs fill into. If not provided or None, a freshly-allocated array is returned. The dtype of the output and input must be the same. Returns ------- y : ndarray or scalar Natural logarithm of 1 + x, element-wise. This is a scalar if x is a scalar. Notes ----- For real-valued input, `log1p` is accurate also for `x` so small that `1 + x == 1` in floating-point accuracy. Logarithm is a multivalued function: for each `x` there is an infinite number of `z` such that `exp(z) = 1 + x`. The convention is to return the `z` whose imaginary part lies in `[-pi, pi]`. For real-valued input data types, `log1p` always returns real output. For each value that cannot be expressed as a real number or infinity, it yields ``nan`` and sets the `invalid` floating point error flag. cannot support complex-valued input. Examples -------- >>> np.log1p(1e-99) 1e-99 >>> a = np.array([3, 4, 5]) >>> np.log1p(a) array([1.3862944, 1.609438 , 1.7917595]) """ return _unary_func_helper(x, _npi.log1p, _np.log1p, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def radians(x, out=None, **kwargs): """ Convert angles from degrees to radians. Parameters ---------- x : ndarray or scalar Input array in degrees. out : ndarray or None A location into which the result is stored. If provided, it must have the same shape and type as the input. If not provided or None, a freshly-allocated array is returned. Returns ------- y : ndarray The corresponding radian values. This is a scalar if x is a scalar. 
Notes ----- This function differs from the original `numpy.radians <https://docs.scipy.org/doc/numpy/reference/generated/numpy.radians.html>`_ in the following way(s): - only ndarray or scalar is accpted as valid input, tuple of ndarray is not supported - broadcasting to `out` of different shape is currently not supported - when input is plain python numerics, the result will not be stored in the `out` param Examples -------- >>> deg = np.arange(12.) * 30. >>> np.radians(deg) array([0. , 0.5235988, 1.0471976, 1.5707964, 2.0943952, 2.6179938, 3.1415927, 3.6651914, 4.1887903, 4.712389 , 5.2359877, 5.7595863], dtype=float32) """ return _unary_func_helper(x, _npi.radians, _np.radians, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def deg2rad(x, out=None, **kwargs): r""" Convert angles from degrees to radians. Parameters ---------- x : ndarray or scalar Angles in degrees. out : ndarray or None, optional A location into which the result is stored. If not provided or `None`, a freshly-allocated array is returned. Returns ------- y : ndarray or scalar The corresponding angle in radians. This is a scalar if `x` is a scalar. Notes ----- "deg2rad(x)" is "x * pi / 180". This function differs from the original numpy.arange in the following aspects: - Only support float32 and float64. - `out` must be in the same size of input. Examples -------- >>> np.deg2rad(180) 3.1415927 """ return _unary_func_helper(x, _npi.deg2rad, _np.deg2rad, out=out) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def reciprocal(x, out=None, **kwargs): r""" Return the reciprocal of the argument, element-wise. Calculates ``1/x``. Parameters ---------- x : ndarray or scalar The values whose reciprocals are required. out : ndarray or None, optional A location into which the result is stored. If provided, it must have the same shape as the input. If not provided or None, a freshly-allocated array is returned. Returns ------- y : ndarray or scalar Output array is same shape and type as x. This is a scalar if x is a scalar. Examples -------- >>> np.reciprocal(2.) 0.5 >>> x = np.array([1, 2., 3.33]) >>> np.reciprocal(x) array([1. , 0.5 , 0.3003003]) Notes ----- .. note:: This function is not designed to work with integers. For integer arguments with absolute value larger than 1 the result is always zero because of the way Python handles integer division. For integer zero the result is an overflow. The output `ndarray` has the same `ctx` as the input `ndarray`. This function differs from the original `numpy.reciprocal <https://docs.scipy.org/doc/numpy/reference/generated/numpy.reciprocal.html>`_ in the following aspects: - Only support ndarray and scalar now. - `where` argument is not supported. """ return _unary_func_helper(x, _npi.reciprocal, _np.reciprocal, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def square(x, out=None, **kwargs): r""" Return the element-wise square of the input. Parameters ---------- x : ndarray or scalar The values whose squares are required. out : ndarray or None, optional A location into which the result is stored. If provided, it must have the same shape as the input. If not provided or None, a freshly-allocated array is returned. Returns ------- y : ndarray or scalar Output array is same shape and type as x. This is a scalar if x is a scalar. Examples -------- >>> np.square(2.) 4.0 >>> x = np.array([1, 2., -1]) >>> np.square(x) array([1., 4., 1.]) Notes ----- The output `ndarray` has the same `ctx` as the input `ndarray`. 
This function differs from the original `numpy.square <https://docs.scipy.org/doc/numpy/reference/generated/numpy.square.html>`_ in the following aspects: - Only support ndarray and scalar now. - `where` argument is not supported. - Complex input is not supported. """ return _unary_func_helper(x, _npi.square, _np.square, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def negative(x, out=None, **kwargs): r""" Numerical negative, element-wise. Parameters: ------------ x : ndarray or scalar Input array. out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. Returns: --------- y : ndarray or scalar Returned array or scalar: y = -x. This is a scalar if x is a scalar. Examples: --------- >>> np.negative(1) -1 """ return _unary_func_helper(x, _npi.negative, _np.negative, out=out) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def fix(x, out=None, **kwargs): r""" Round an array of floats element-wise to nearest integer towards zero. The rounded values are returned as floats. Parameters: ---------- x : ndarray An array of floats to be rounded out : ndarray, optional Output array Returns: ------- y : ndarray of floats Examples --------- >>> np.fix(3.14) 3 """ return _unary_func_helper(x, _npi.fix, _np.fix, out=out) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def tan(x, out=None, **kwargs): r""" Compute tangent element-wise. Equivalent to np.sin(x)/np.cos(x) element-wise. Parameters: ---------- x : ndarray Input array. out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. A tuple (possible only as a keyword argument) must have length equal to the number of outputs. where : ndarray, optional Values of True indicate to calculate the ufunc at that position, values of False indicate to leave the value in the output alone. Returns: ------- y : ndarray The corresponding tangent values. This is a scalar if x is a scalar. Examples: --------- >>> np.tan(0.5) 0.5463024898437905 """ return _unary_func_helper(x, _npi.tan, _np.tan, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def ceil(x, out=None, **kwargs): r""" Return the ceiling of the input, element-wise. The ceil of the ndarray `x` is the smallest integer `i`, such that `i >= x`. It is often denoted as :math:`\lceil x \rceil`. Parameters ---------- x : ndarray or scalar Input array. out : ndarray or None A location into which the result is stored. If provided, it must have a same shape that the inputs fill into. If not provided or None, a freshly-allocated array is returned. The dtype of the output and input must be the same. Returns ------- y : ndarray or scalar The ceiling of each element in `x`, with `float` dtype. This is a scalar if `x` is a scalar. Examples -------- >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) >>> np.ceil(a) array([-1., -1., -0., 1., 2., 2., 2.]) >>> #if you use parameter out, x and out must be ndarray. >>> a = np.array(1) >>> np.ceil(np.array(3.5), a) array(4.) >>> a array(4.) """ return _unary_func_helper(x, _npi.ceil, _np.ceil, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def floor(x, out=None, **kwargs): r""" Return the floor of the input, element-wise. The floor of the ndarray `x` is the largest integer `i`, such that `i <= x`. It is often denoted as :math:`\lfloor x \rfloor`. 
Parameters ---------- x : ndarray or scalar Input array. out : ndarray or None A location into which the result is stored. If provided, it must have a same shape that the inputs fill into. If not provided or None, a freshly-allocated array is returned. The dtype of the output and input must be the same. Returns ------- y : ndarray or scalar The floor of each element in `x`, with `float` dtype. This is a scalar if `x` is a scalar. Examples -------- >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) >>> np.floor(a) array([-2., -2., -1., 0., 1., 1., 2.]) >>> #if you use parameter out, x and out must be ndarray. >>> a = np.array(1) >>> np.floor(np.array(3.5), a) array(3.) >>> a array(3.) """ return _unary_func_helper(x, _npi.floor, _np.floor, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def bitwise_not(x, out=None, **kwargs): r""" Compute bit-wise inversion, or bit-wise NOT, element-wise. Computes the bit-wise NOT of the underlying binary representation of the integers in the input arrays. This ufunc implements the C/Python operator ``~``. Parameters ---------- x : array_like Only integer and boolean types are handled. out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or `None`, a freshly-allocated array is returned. A tuple (possible only as a keyword argument) must have length equal to the number of outputs. Returns ------- out : ndarray or scalar Result. This is a scalar if `x` is a scalar. See Also -------- bitwise_and, bitwise_or, bitwise_xor logical_not binary_repr : Return the binary representation of the input number as a string. Examples -------- We've seen that 13 is represented by ``00001101``. The invert or bit-wise NOT of 13 is then: >>> x = np.invert(np.array(13, dtype=np.uint8)) >>> x 242 >>> np.binary_repr(x, width=8) '11110010' Notes ----- `bitwise_not` is an alias for `invert`: >>> np.bitwise_not is np.invert True """ return _unary_func_helper(x, _npi.bitwise_not, _np.bitwise_not, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def invert(x, out=None, **kwargs): r""" Compute bit-wise inversion, or bit-wise NOT, element-wise. Computes the bit-wise NOT of the underlying binary representation of the integers in the input arrays. This ufunc implements the C/Python operator ``~``. Parameters ---------- x : array_like Only integer and boolean types are handled. out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or `None`, a freshly-allocated array is returned. A tuple (possible only as a keyword argument) must have length equal to the number of outputs. Returns ------- out : ndarray or scalar Result. This is a scalar if `x` is a scalar. See Also -------- bitwise_and, bitwise_or, bitwise_xor logical_not binary_repr : Return the binary representation of the input number as a string. Examples -------- We've seen that 13 is represented by ``00001101``. 
The invert or bit-wise NOT of 13 is then: >>> x = np.invert(np.array(13, dtype=np.uint8)) >>> x 242 >>> np.binary_repr(x, width=8) '11110010' Notes ----- `bitwise_not` is an alias for `invert`: >>> np.bitwise_not is np.invert True """ return _unary_func_helper(x, _npi.bitwise_not, _np.bitwise_not, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def trunc(x, out=None, **kwargs): r""" Return the truncated value of the input, element-wise. The truncated value of the scalar `x` is the nearest integer `i` which is closer to zero than `x` is. In short, the fractional part of the signed number `x` is discarded. Parameters ---------- x : ndarray or scalar Input data. out : ndarray or None, optional A location into which the result is stored. Returns ------- y : ndarray or scalar The truncated value of each element in `x`. This is a scalar if `x` is a scalar. Notes ----- This function differs from the original numpy.trunc in the following aspects: - Do not support `where`, a parameter in numpy which indicates where to calculate. - Cannot cast type automatically. Dtype of `out` must be same as the expected one. - Cannot broadcast automatically. Shape of `out` must be same as the expected one. - If `x` is plain python numeric, the result won't be stored in out. Examples -------- >>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) >>> np.trunc(a) array([-1., -1., -0., 0., 1., 1., 2.]) """ return _unary_func_helper(x, _npi.trunc, _np.trunc, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def logical_not(x, out=None, **kwargs): r""" Compute the truth value of NOT x element-wise. Parameters ---------- x : ndarray or scalar Logical NOT is applied to the elements of `x`. out : ndarray or None, optional A location into which the result is stored. Returns ------- y : bool or ndarray of bool Boolean result with the same shape as `x` of the NOT operation on elements of `x`. This is a scalar if `x` is a scalar. Notes ----- This function differs from the original numpy.logical_not in the following aspects: - Do not support `where`, a parameter in numpy which indicates where to calculate. - Cannot cast type automatically. Dtype of `out` must be same as the expected one. - Cannot broadcast automatically. Shape of `out` must be same as the expected one. - If `x` is plain python numeric, the result won't be stored in out. Examples -------- >>> x= np.array([True, False, 0, 1]) >>> np.logical_not(x) array([False, True, True, False]) >>> x = np.arange(5) >>> np.logical_not(x<3) array([False, False, False, True, True]) """ return _unary_func_helper(x, _npi.logical_not, _np.logical_not, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def arcsinh(x, out=None, **kwargs): r""" Inverse hyperbolic sine, element-wise. Parameters ---------- x : ndarray or scalar Input array. out : ndarray or None, optional A location into which the result is stored. Returns ------- arcsinh : ndarray Array of the same shape as `x`. This is a scalar if `x` is a scalar. Notes ----- `arcsinh` is a multivalued function: for each `x` there are infinitely many numbers `z` such that `sinh(z) = x`. For real-valued input data types, `arcsinh` always returns real output. For each value that cannot be expressed as a real number or infinity, it yields ``nan`` and sets the `invalid` floating point error flag. This function differs from the original numpy.arcsinh in the following aspects: - Do not support `where`, a parameter in numpy which indicates where to calculate. 
- Do not support complex-valued input. - Cannot cast type automatically. DType of `out` must be same as the expected one. - Cannot broadcast automatically. Shape of `out` must be same as the expected one. - If `x` is plain python numeric, the result won't be stored in out. Examples -------- >>> a = np.array([3.2, 5.0]) >>> np.arcsinh(a) array([1.8309381, 2.2924316]) >>> np.arcsinh(1) 0.0 """ return _unary_func_helper(x, _npi.arcsinh, _np.arcsinh, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def arccosh(x, out=None, **kwargs): r""" Inverse hyperbolic cosine, element-wise. Parameters ---------- x : ndarray or scalar Input array. out : ndarray or None, optional A location into which the result is stored. Returns ------- arccosh : ndarray Array of the same shape as `x`. This is a scalar if `x` is a scalar. Notes ----- `arccosh` is a multivalued function: for each `x` there are infinitely many numbers `z` such that `cosh(z) = x`. For real-valued input data types, `arccosh` always returns real output. For each value that cannot be expressed as a real number or infinity, it yields ``nan`` and sets the `invalid` floating point error flag. This function differs from the original numpy.arccosh in the following aspects: - Do not support `where`, a parameter in numpy which indicates where to calculate. - Do not support complex-valued input. - Cannot cast type automatically. Dtype of `out` must be same as the expected one. - Cannot broadcast automatically. Shape of `out` must be same as the expected one. - If `x` is plain python numeric, the result won't be stored in out. Examples -------- >>> a = np.array([3.2, 5.0]) >>> np.arccosh(a) array([1.8309381, 2.2924316]) >>> np.arccosh(1) 0.0 """ return _unary_func_helper(x, _npi.arccosh, _np.arccosh, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def arctanh(x, out=None, **kwargs): r""" Inverse hyperbolic tangent, element-wise. Parameters ---------- x : ndarray or scalar Input array. out : ndarray or None, optional A location into which the result is stored. Returns ------- arctanh : ndarray Array of the same shape as `x`. This is a scalar if `x` is a scalar. Notes ----- `arctanh` is a multivalued function: for each `x` there are infinitely many numbers `z` such that `tanh(z) = x`. For real-valued input data types, `arctanh` always returns real output. For each value that cannot be expressed as a real number or infinity, it yields ``nan`` and sets the `invalid` floating point error flag. This function differs from the original numpy.arctanh in the following aspects: - Do not support `where`, a parameter in numpy which indicates where to calculate. - Do not support complex-valued input. - Cannot cast type automatically. Dtype of `out` must be same as the expected one. - Cannot broadcast automatically. Shape of `out` must be same as the expected one. - If `x` is plain python numeric, the result won't be stored in out. Examples -------- >>> a = np.array([0.0, -0.5]) >>> np.arctanh(a) array([0., -0.54930615]) >>> np.arctanh(0.0) 0.0 """ return _unary_func_helper(x, _npi.arctanh, _np.arctanh, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') def tile(A, reps): r""" Construct an array by repeating A the number of times given by reps. If `reps` has length ``d``, the result will have dimension of ``max(d, A.ndim)``. If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication, or shape (1, 1, 3) for 3-D replication. 
If this is not the desired behavior, promote `A` to d-dimensions manually before calling this function. If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it. Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as (1, 1, 2, 2). Parameters ---------- A : ndarray or scalar An input array or a scalar to repeat. reps : a single integer or tuple of integers The number of repetitions of `A` along each axis. Returns ------- c : ndarray The tiled output array. Examples -------- >>> a = np.array([0, 1, 2]) >>> np.tile(a, 2) array([0., 1., 2., 0., 1., 2.]) >>> np.tile(a, (2, 2)) array([[0., 1., 2., 0., 1., 2.], [0., 1., 2., 0., 1., 2.]]) >>> np.tile(a, (2, 1, 2)) array([[[0., 1., 2., 0., 1., 2.]], [[0., 1., 2., 0., 1., 2.]]]) >>> b = np.array([[1, 2], [3, 4]]) >>> np.tile(b, 2) array([[1., 2., 1., 2.], [3., 4., 3., 4.]]) >>> np.tile(b, (2, 1)) array([[1., 2.], [3., 4.], [1., 2.], [3., 4.]]) >>> c = np.array([1,2,3,4]) >>> np.tile(c,(4,1)) array([[1., 2., 3., 4.], [1., 2., 3., 4.], [1., 2., 3., 4.], [1., 2., 3., 4.]]) Scalar as input: >>> np.tile(2, 3) array([2, 2, 2]) # repeating integer `2` """ return _unary_func_helper(A, _npi.tile, _np.tile, reps=reps) # pylint: disable=redefined-outer-name @set_module('mxnet.ndarray.numpy') def split(ary, indices_or_sections, axis=0): """ Split an array into multiple sub-arrays. Parameters ---------- ary : ndarray Array to be divided into sub-arrays. indices_or_sections : int or 1-D python tuple, list or set. If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays along `axis`. If such a split is not possible, an error is raised. If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where along `axis` the array is split. For example, ``[2, 3]`` would, for ``axis=0``, result in - ary[:2] - ary[2:3] - ary[3:] If an index exceeds the dimension of the array along `axis`, an empty sub-array is returned correspondingly. axis : int, optional The axis along which to split, default is 0. Returns ------- sub-arrays : list of ndarrays A list of sub-arrays. Raises ------ ValueError If `indices_or_sections` is given as an integer, but a split does not result in equal division. """ axis_size = ary.shape[axis] if isinstance(indices_or_sections, integer_types): sections = indices_or_sections if axis_size % sections: raise ValueError('array split does not result in an equal division') section_size = int(axis_size / sections) indices = [i * section_size for i in range(sections)] elif isinstance(indices_or_sections, (list, set, tuple)): indices = [0] + list(indices_or_sections) else: raise ValueError('indices_or_sections must be either int, or tuple / list / set of ints') ret = _npi.split(ary, indices, axis, False) assert isinstance(ret, list), 'Output of split should be list,' \ ' got a return type {}'.format(type(ret)) return ret # pylint: enable=redefined-outer-name # pylint: disable=redefined-outer-name @set_module('mxnet.ndarray.numpy') def array_split(ary, indices_or_sections, axis=0): """Split an array into multiple sub-arrays. If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays along `axis`. If such a split is not possible, then for an array of length l that should be split into n sections, it returns l % n sub-arrays of size l//n + 1 and the rest of size l//n. If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where along `axis` the array is split.
For example, ``[2, 3]`` would, for ``axis=0``, result in - ary[:2] - ary[2:3] - ary[3:] If an index exceeds the dimension of the array along `axis`, an empty sub-array is returned correspondingly. Parameters ---------- ary : ndarray Array to be divided into sub-arrays. indices_or_sections : int or 1-D Python tuple, list or set. Parameter used to determine the number and size of the sub-arrays. axis : int, optional The axis along which to split, default is 0. Returns ------- sub-arrays : list of ndarrays A list of sub-arrays. Examples -------- >>> x = np.arange(9.0) >>> np.array_split(x, 3) [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])] >>> np.array_split(x, [3, 5, 6, 8]) [array([0., 1., 2.]), array([3., 4.]), array([5.]), array([6., 7.]), array([])] >>> x = np.arange(8.0) >>> np.array_split(x, 3) [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])] >>> x = np.arange(7.0) >>> np.array_split(x, 3) [array([0., 1., 2.]), array([3., 4.]), array([5., 6.])] """ indices = [] sections = 0 if isinstance(indices_or_sections, integer_types): sections = indices_or_sections elif isinstance(indices_or_sections, (list, set, tuple)): indices = [0] + list(indices_or_sections) else: raise ValueError('indices_or_sections must be either int, or tuple / list / set of ints') ret = _npi.split(ary, indices, axis, False, sections) if not isinstance(ret, list): return [ret] return ret # pylint: enable=redefined-outer-name # pylint: disable=redefined-outer-name @set_module('mxnet.ndarray.numpy') def hsplit(ary, indices_or_sections): """Split an array into multiple sub-arrays horizontally (column-wise). This is equivalent to ``split`` with ``axis=0`` when ``ary`` has one dimension, and with ``axis=1`` otherwise. Parameters ---------- ary : ndarray Array to be divided into sub-arrays. indices_or_sections : int, list of ints or tuple of ints. If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays along `axis`. If such a split is not possible, an error is raised. If `indices_or_sections` is a list of sorted integers, the entries indicate where along `axis` the array is split. If an index exceeds the dimension of the array along `axis`, an error is raised, so each index must be less than or equal to the dimension of the array along `axis`. Returns ------- sub-arrays : list of ndarrays A list of sub-arrays. Notes ------ - If `indices_or_sections` is given as an integer but a split does not result in equal division, a ValueError is raised. - If `indices_or_sections` is the integer 1, an error is raised, because a single output from split is not supported yet. See Also -------- split : Split an array into multiple sub-arrays of equal size. Examples -------- >>> x = np.arange(16.0).reshape(4, 4) >>> x array([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.], [12., 13., 14., 15.]]) >>> np.hsplit(x, 2) [array([[ 0., 1.], [ 4., 5.], [ 8., 9.], [12., 13.]]), array([[ 2., 3.], [ 6., 7.], [10., 11.], [14., 15.]])] >>> np.hsplit(x, [3, 6]) [array([[ 0., 1., 2.], [ 4., 5., 6.], [ 8., 9., 10.], [12., 13., 14.]]), array([[ 3.], [ 7.], [11.], [15.]]), array([], shape=(4, 0), dtype=float32)] With a higher dimensional array the split is still along the second axis. >>> x = np.arange(8.0).reshape(2, 2, 2) >>> x array([[[ 0., 1.], [ 2., 3.]], [[ 4., 5.], [ 6., 7.]]]) >>> np.hsplit(x, 2) [array([[[ 0., 1.]], [[ 4., 5.]]]), array([[[ 2., 3.]], [[ 6., 7.]]])] If ``ary`` has one dimension, 'axis' = 0.
>>> x = np.arange(4) array([0., 1., 2., 3.]) >>> np.hsplit(x, 2) [array([0., 1.]), array([2., 3.])] If you want to produce an empty sub-array, you can see an example. >>> np.hsplit(x, [2, 2]) [array([0., 1.]), array([], dtype=float32), array([2., 3.])] """ if len(ary.shape) < 1: raise ValueError('hsplit only works on arrays of 1 or more dimensions') indices = [] sections = 0 if isinstance(indices_or_sections, integer_types): sections = indices_or_sections elif isinstance(indices_or_sections, (list, set, tuple)): indices = [0] + list(indices_or_sections) else: raise ValueError('indices_or_sections must be either int, or tuple / list / set of ints') ret = _npi.hsplit(ary, indices, 1, False, sections) if not isinstance(ret, list): return [ret] return ret # pylint: enable=redefined-outer-name @set_module('mxnet.ndarray.numpy') def vsplit(ary, indices_or_sections): r""" vsplit(ary, indices_or_sections) Split an array into multiple sub-arrays vertically (row-wise). ``vsplit`` is equivalent to ``split`` with `axis=0` (default): the array is always split along the first axis regardless of the array dimension. Parameters ---------- ary : ndarray Array to be divided into sub-arrays. indices_or_sections : int or 1 - D Python tuple, list or set. If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays along axis 0. If such a split is not possible, an error is raised. If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where along axis 0 the array is split. For example, ``[2, 3]`` would result in - ary[:2] - ary[2:3] - ary[3:] If an index exceeds the dimension of the array along axis 0, an error will be thrown. Returns ------- sub-arrays : list of ndarrays A list of sub-arrays. See Also -------- split : Split an array into multiple sub-arrays of equal size. Notes ------- This function differs from the original `numpy.degrees <https://docs.scipy.org/doc/numpy/reference/generated/numpy.degrees.html>`_ in the following aspects: - Currently parameter ``indices_or_sections`` does not support ndarray, but supports scalar, tuple and list. - In ``indices_or_sections``, if an index exceeds the dimension of the array along axis 0, an error will be thrown. Examples -------- >>> x = np.arange(16.0).reshape(4, 4) >>> x array([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.], [ 12., 13., 14., 15.]]) >>> np.vsplit(x, 2) [array([[0., 1., 2., 3.], [4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.], [12., 13., 14., 15.]])] With a higher dimensional array the split is still along the first axis. >>> x = np.arange(8.0).reshape(2, 2, 2) >>> x array([[[ 0., 1.], [ 2., 3.]], [[ 4., 5.], [ 6., 7.]]]) >>> np.vsplit(x, 2) [array([[[0., 1.], [2., 3.]]]), array([[[4., 5.], [6., 7.]]])] """ if len(ary.shape) < 2: raise ValueError("vsplit only works on arrays of 2 or more dimensions") return split(ary, indices_or_sections, 0) # pylint: disable=redefined-outer-name @set_module('mxnet.ndarray.numpy') def dsplit(ary, indices_or_sections): """ Split array into multiple sub-arrays along the 3rd axis (depth). Please refer to the `split` documentation. `dsplit` is equivalent to `split` with ``axis=2``, the array is always split along the third axis provided the array dimension is greater than or equal to 3. Parameters ---------- ary : ndarray Array to be divided into sub-arrays. indices_or_sections : int or 1 - D Python tuple, list or set. If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays along axis 2. 
If such a split is not possible, an error is raised. If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where along axis 2 the array is split. For example, ``[2, 3]`` would result in - ary[:, :, :2] - ary[:, :, 2:3] - ary[:, :, 3:] If an index exceeds the dimension of the array along axis 2, an error will be thrown. Examples -------- >>> x = np.arange(16.0).reshape(2, 2, 4) >>> x array([[[ 0., 1., 2., 3.], [ 4., 5., 6., 7.]], [[ 8., 9., 10., 11.], [12., 13., 14., 15.]]]) >>> np.dsplit(x, 2) [array([[[ 0., 1.], [ 4., 5.]], [[ 8., 9.], [12., 13.]]]), array([[[ 2., 3.], [ 6., 7.]], [[10., 11.], [14., 15.]]])] >>> np.dsplit(x, np.array([3, 6])) [array([[[ 0., 1., 2.], [ 4., 5., 6.]], [[ 8., 9., 10.], [12., 13., 14.]]]), array([[[ 3.], [ 7.]], [[11.], [15.]]]), array([], shape=(2, 2, 0), dtype=float64)] """ if len(ary.shape) < 3: raise ValueError('dsplit only works on arrays of 3 or more dimensions') return split(ary, indices_or_sections, 2) # pylint: enable=redefined-outer-name @set_module('mxnet.ndarray.numpy') def concatenate(seq, axis=0, out=None): """ Join a sequence of arrays along an existing axis. Parameters ---------- a1, a2, ... : sequence of ndarray The arrays must have the same shape, except in the dimension corresponding to `axis` (the first, by default). axis : int, optional The axis along which the arrays will be joined. If axis is None, arrays are flattened before use. Default is 0. out : ndarray, optional If provided, the destination to place the result. The shape must be correct, matching that of what concatenate would have returned if no out argument were specified. Returns ------- res : ndarray The concatenated array. Examples -------- >>> a = np.array([[1, 2], [3, 4]]) >>> b = np.array([[5, 6]]) >>> np.concatenate((a, b), axis=0) array([[1., 2.], [3., 4.], [5., 6.]]) >>> np.concatenate((a, b), axis=None) array([1., 2., 3., 4., 5., 6.]) >>> np.concatenate((a, b.T), axis=1) array([[1., 2., 5.], [3., 4., 6.]]) """ return _npi.concatenate(*seq, axis=axis, out=out) @set_module('mxnet.ndarray.numpy') def append(arr, values, axis=None): # pylint: disable=redefined-outer-name """ Append values to the end of an array. Parameters ---------- arr : ndarray Values are appended to a copy of this array. values : ndarray These values are appended to a copy of `arr`. It must be of the correct shape (the same shape as `arr`, excluding `axis`). If `axis` is not specified, `values` can be any shape and will be flattened before use. axis : int, optional The axis along which `values` are appended. If `axis` is not given, both `arr` and `values` are flattened before use. Returns ------- append : ndarray A copy of `arr` with `values` appended to `axis`. Note that `append` does not occur in-place: a new array is allocated and filled. If `axis` is None, `out` is a flattened array. Examples -------- >>> np.append(np.array([1, 2, 3]), np.array([[4, 5, 6],[7, 8, 9]])) array([1., 2., 3., 4., 5., 6., 7., 8., 9.]) When `axis` is specified, `values` must have the correct shape. >>> np.append(np.array([[1, 2, 3], [4, 5, 6]]), np.array([[7, 8, 9]]), axis=0) array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]) """ return _npi.concatenate(arr, values, axis=axis, out=None) @set_module('mxnet.ndarray.numpy') def stack(arrays, axis=0, out=None): """Join a sequence of arrays along a new axis. The axis parameter specifies the index of the new axis in the dimensions of the result. For example, if `axis=0` it will be the first dimension and if `axis=-1` it will be the last dimension. 
Parameters ---------- arrays : sequence of ndarray Each array must have the same shape. axis : int, optional The axis in the result array along which the input arrays are stacked. out : ndarray, optional If provided, the destination to place the result. The shape must be correct, matching that of what stack would have returned if no out argument were specified. Returns ------- stacked : ndarray The stacked array has one more dimension than the input arrays.""" def get_list(arrays): if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'): raise ValueError("expected iterable for arrays but got {}".format(type(arrays))) return [arr for arr in arrays] arrays = get_list(arrays) return _npi.stack(*arrays, axis=axis, out=out) @set_module('mxnet.ndarray.numpy') def vstack(arrays, out=None): r"""Stack arrays in sequence vertically (row wise). This is equivalent to concatenation along the first axis after 1-D arrays of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by `vsplit`. This function makes most sense for arrays with up to 3 dimensions. For instance, for pixel-data with a height (first axis), width (second axis), and r/g/b channels (third axis). The functions `concatenate` and `stack` provide more general stacking and concatenation operations. Parameters ---------- tup : sequence of ndarrays The arrays must have the same shape along all but the first axis. 1-D arrays must have the same length. Returns ------- stacked : ndarray The array formed by stacking the given arrays, will be at least 2-D. Examples -------- >>> a = np.array([1, 2, 3]) >>> b = np.array([2, 3, 4]) >>> np.vstack((a, b)) array([[1., 2., 3.], [2., 3., 4.]]) >>> a = np.array([[1], [2], [3]]) >>> b = np.array([[2], [3], [4]]) >>> np.vstack((a, b)) array([[1.], [2.], [3.], [2.], [3.], [4.]]) """ def get_list(arrays): if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'): raise ValueError("expected iterable for arrays but got {}".format(type(arrays))) return [arr for arr in arrays] arrays = get_list(arrays) return _npi.vstack(*arrays) @set_module('mxnet.ndarray.numpy') def row_stack(arrays): r"""Stack arrays in sequence vertically (row wise). This is equivalent to concatenation along the first axis after 1-D arrays of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by `vsplit`. This function makes most sense for arrays with up to 3 dimensions. For instance, for pixel-data with a height (first axis), width (second axis), and r/g/b channels (third axis). The functions `concatenate` and `stack` provide more general stacking and concatenation operations. Parameters ---------- tup : sequence of ndarrays The arrays must have the same shape along all but the first axis. 1-D arrays must have the same length. Returns ------- stacked : ndarray The array formed by stacking the given arrays, will be at least 2-D. Examples -------- >>> a = np.array([1, 2, 3]) >>> b = np.array([2, 3, 4]) >>> np.vstack((a, b)) array([[1., 2., 3.], [2., 3., 4.]]) >>> a = np.array([[1], [2], [3]]) >>> b = np.array([[2], [3], [4]]) >>> np.vstack((a, b)) array([[1.], [2.], [3.], [2.], [3.], [4.]]) """ def get_list(arrays): if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'): raise ValueError("expected iterable for arrays but got {}".format(type(arrays))) return [arr for arr in arrays] arrays = get_list(arrays) return _npi.vstack(*arrays) @set_module('mxnet.ndarray.numpy') def column_stack(tup): """ Stack 1-D arrays as columns into a 2-D array. 
Take a sequence of 1-D arrays and stack them as columns to make a single 2-D array. 2-D arrays are stacked as-is, just like with `hstack`. 1-D arrays are turned into 2-D columns first. Returns -------- stacked : 2-D array The array formed by stacking the given arrays. See Also -------- stack, hstack, vstack, concatenate Examples -------- >>> a = np.array((1,2,3)) >>> b = np.array((2,3,4)) >>> np.column_stack((a,b)) array([[1., 2.], [2., 3.], [3., 4.]]) """ return _npi.column_stack(*tup) @set_module('mxnet.ndarray.numpy') def hstack(arrays): """ Stack arrays in sequence horizontally (column wise). This is equivalent to concatenation along the second axis, except for 1-D arrays where it concatenates along the first axis. Rebuilds arrays divided by hsplit. This function makes most sense for arrays with up to 3 dimensions. For instance, for pixel-data with a height (first axis), width (second axis), and r/g/b channels (third axis). The functions concatenate, stack and block provide more general stacking and concatenation operations. Parameters ---------- tup : sequence of ndarrays The arrays must have the same shape along all but the second axis, except 1-D arrays which can be any length. Returns ------- stacked : ndarray The array formed by stacking the given arrays. Examples -------- >>> from mxnet import np,npx >>> a = np.array((1,2,3)) >>> b = np.array((2,3,4)) >>> np.hstack((a,b)) array([1., 2., 3., 2., 3., 4.]) >>> a = np.array([[1],[2],[3]]) >>> b = np.array([[2],[3],[4]]) >>> np.hstack((a,b)) array([[1., 2.], [2., 3.], [3., 4.]]) """ return _npi.hstack(*arrays) @set_module('mxnet.ndarray.numpy') def dstack(arrays): """ Stack arrays in sequence depth wise (along third axis). This is equivalent to concatenation along the third axis after 2-D arrays of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by `dsplit`. This function makes most sense for arrays with up to 3 dimensions. For instance, for pixel-data with a height (first axis), width (second axis), and r/g/b channels (third axis). The functions `concatenate`, `stack` and `block` provide more general stacking and concatenation operations. Parameters ---------- tup : sequence of arrays The arrays must have the same shape along all but the third axis. 1-D or 2-D arrays must have the same shape. Returns ------- stacked : ndarray The array formed by stacking the given arrays, will be at least 3-D. Examples -------- >>> a = np.array((1,2,3)) >>> b = np.array((2,3,4)) >>> np.dstack((a,b)) array([[[1, 2], [2, 3], [3, 4]]]) >>> a = np.array([[1],[2],[3]]) >>> b = np.array([[2],[3],[4]]) >>> np.dstack((a,b)) array([[[1, 2]], [[2, 3]], [[3, 4]]]) """ return _npi.dstack(*arrays) @set_module('mxnet.ndarray.numpy') @wrap_np_binary_func def maximum(x1, x2, out=None, **kwargs): """ Returns element-wise maximum of the input arrays with broadcasting. Parameters ---------- x1, x2 : scalar or mxnet.numpy.ndarray The arrays holding the elements to be compared. They must have the same shape, or shapes that can be broadcast to a single shape. Returns ------- out : mxnet.numpy.ndarray or scalar The maximum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.""" return _ufunc_helper(x1, x2, _npi.maximum, _np.maximum, _npi.maximum_scalar, None, out) @set_module('mxnet.ndarray.numpy') @wrap_np_binary_func def minimum(x1, x2, out=None, **kwargs): """ Returns element-wise minimum of the input arrays with broadcasting. 
Parameters ---------- x1, x2 : scalar or mxnet.numpy.ndarray The arrays holding the elements to be compared. They must have the same shape, or shapes that can be broadcast to a single shape. Returns ------- out : mxnet.numpy.ndarray or scalar The minimum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.""" return _ufunc_helper(x1, x2, _npi.minimum, _np.minimum, _npi.minimum_scalar, None, out) @set_module('mxnet.ndarray.numpy') def swapaxes(a, axis1, axis2): """Interchange two axes of an array. Parameters ---------- a : ndarray Input array. axis1 : int First axis. axis2 : int Second axis. Returns ------- a_swapped : ndarray Swapped array. This is always a copy of the input array. """ return _npi.swapaxes(a, dim1=axis1, dim2=axis2) @set_module('mxnet.ndarray.numpy') def clip(a, a_min, a_max, out=None): """clip(a, a_min, a_max, out=None) Clip (limit) the values in an array. Given an interval, values outside the interval are clipped to the interval edges. For example, if an interval of ``[0, 1]`` is specified, values smaller than 0 become 0, and values larger than 1 become 1. Parameters ---------- a : ndarray Array containing elements to clip. a_min : scalar or `None` Minimum value. If `None`, clipping is not performed on lower interval edge. Not more than one of `a_min` and `a_max` may be `None`. a_max : scalar or `None` Maximum value. If `None`, clipping is not performed on upper interval edge. Not more than one of `a_min` and `a_max` may be `None`. out : ndarray, optional The results will be placed in this array. It may be the input array for in-place clipping. `out` must be of the right shape to hold the output. Its type is preserved. Returns ------- clipped_array : ndarray An array with the elements of `a`, but where values < `a_min` are replaced with `a_min`, and those > `a_max` with `a_max`. Notes ----- ndarray `a_min` and `a_max` are not supported. Examples -------- >>> a = np.arange(10) >>> np.clip(a, 1, 8) array([1., 1., 2., 3., 4., 5., 6., 7., 8., 8.], dtype=float32) >>> a array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.], dtype=float32) >>> np.clip(a, 3, 6, out=a) array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.], dtype=float32) """ if a_min is None and a_max is None: raise ValueError('array_clip: must set either max or min') if a_min is None: a_min = float('-inf') if a_max is None: a_max = float('inf') return _npi.clip(a, a_min, a_max, out=out) @set_module('mxnet.ndarray.numpy') def argmax(a, axis=None, out=None): r""" Returns the indices of the maximum values along an axis. Parameters ---------- a : ndarray Input array. Only support ndarrays of dtype `float16`, `float32`, and `float64`. axis : int, optional By default, the index is into the flattened array, otherwise along the specified axis. out : ndarray or None, optional A location into which the result is stored. If provided, it must have the same shape and dtype as input ndarray. If not provided or `None`, a freshly-allocated array is returned. Returns ------- index_array : ndarray of indices whose dtype is same as the input ndarray. Array of indices into the array. It has the same shape as `a.shape` with the dimension along `axis` removed. Notes ----- In case of multiple occurrences of the maximum values, the indices corresponding to the first occurrence are returned. This function differs from the original `numpy.argmax <https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html>`_ in the following aspects: - Input type does not support Python native iterables(list, tuple, ...). 
- ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output. - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output. - ``out`` param does not support scalar input case. Examples -------- >>> a = np.arange(6).reshape(2,3) + 10 >>> a array([[10., 11., 12.], [13., 14., 15.]]) >>> np.argmax(a) array(5.) >>> np.argmax(a, axis=0) array([1., 1., 1.]) >>> np.argmax(a, axis=1) array([2., 2.]) >>> b = np.arange(6) >>> b[1] = 5 >>> b array([0., 5., 2., 3., 4., 5.]) >>> np.argmax(b) # Only the first occurrence is returned. array(1.) Specify ``out`` ndarray: >>> a = np.arange(6).reshape(2,3) + 10 >>> b = np.zeros((2,)) >>> np.argmax(a, axis=1, out=b) array([2., 2.]) >>> b array([2., 2.]) """ return _npi.argmax(a, axis=axis, keepdims=False, out=out) @set_module('mxnet.ndarray.numpy') def argmin(a, axis=None, out=None): r""" Returns the indices of the minimum values along an axis. Parameters ---------- a : ndarray Input array. Only support ndarrays of dtype `float16`, `float32`, and `float64`. axis : int, optional By default, the index is into the flattened array, otherwise along the specified axis. out : ndarray or None, optional If provided, the result will be inserted into this array. It should be of the appropriate shape and dtype. Returns ------- index_array : ndarray of indices whose dtype is same as the input ndarray. Array of indices into the array. It has the same shape as `a.shape` with the dimension along `axis` removed. Notes ----- In case of multiple occurrences of the minimum values, the indices corresponding to the first occurrence are returned. This function differs from the original `numpy.argmin <https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmin.html>`_ in the following aspects: - Input type does not support Python native iterables(list, tuple, ...). - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output. - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output. - ``out`` param does not support scalar input case. Examples -------- >>> a = np.arange(6).reshape(2,3) + 10 >>> a array([[10., 11., 12.], [13., 14., 15.]]) >>> np.argmin(a) array(0.) >>> np.argmin(a, axis=0) array([0., 0., 0.]) >>> np.argmin(a, axis=1) array([0., 0.]) >>> b = np.arange(6) >>> b[2] = 0 >>> b array([0., 1., 0., 3., 4., 5.]) >>> np.argmin(b) # Only the first occurrence is returned. array(0.) Specify ``out`` ndarray: >>> a = np.arange(6).reshape(2,3) + 10 >>> b = np.zeros((2,)) >>> np.argmin(a, axis=1, out=b) array([0., 0.]) >>> b array([0., 0.]) """ return _npi.argmin(a, axis=axis, keepdims=False, out=out) @set_module('mxnet.ndarray.numpy') def average(a, axis=None, weights=None, returned=False, out=None): """ Compute the weighted average along the specified axis. Parameters -------- a : ndarray Array containing data to be averaged. axis : None or int or tuple of ints, optional Axis or axes along which to average a. The default, axis=None, will average over all of the elements of the input array. If axis is negative it counts from the last to the first axis. New in version 1.7.0. If axis is a tuple of ints, averaging is performed on all of the axes specified in the tuple instead of a single axis or all the axes as before. weights : ndarray, optional An array of weights associated with the values in a, must have the same dtype as a.
Each value in a contributes to the average according to its associated weight. The weights array can either be 1-D (in which case its length must be the size of a along the given axis) or of the same shape as a. If weights=None, then all data in a are assumed to have a weight equal to one. The 1-D calculation is: avg = sum(a * weights) / sum(weights) The only constraint on weights is that sum(weights) must not be 0. returned : bool, optional Default is False. If True, the tuple (average, sum_of_weights) is returned, otherwise only the average is returned. If weights=None, sum_of_weights is equivalent to the number of elements over which the average is taken. out : ndarray, optional If provided, the calculation is done into this array. Returns -------- retval, [sum_of_weights] : ndarray Return the average along the specified axis. When returned is True, return a tuple with the average as the first element and the sum of the weights as the second element. sum_of_weights is of the same type as retval. If a is integral, the result dtype will be float32, otherwise it will be the same as dtype of a. Raises -------- MXNetError - When all weights along axis sum to zero. - When the length of 1D weights is not the same as the shape of a along axis. - When given 1D weights, the axis is not specified or is not int. - When the shape of weights and a differ, but weights are not 1D. See also -------- mean Notes -------- This function differs from the original `numpy.average` <https://numpy.org/devdocs/reference/generated/numpy.average.html>`_ in the following way(s): - Does not guarantee the same behavior with numpy when given float16 dtype and overflow happens - Does not support complex dtype - The dtypes of a and weights must be the same - Integral a results in float32 returned dtype, not float64 Examples -------- >>> data = np.arange(1, 5) >>> data array([1., 2., 3., 4.]) >>> np.average(data) array(2.5) >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1)) array(4.) >>> data = np.arange(6).reshape((3,2)) >>> data array([[0., 1.], [2., 3.], [4., 5.]]) >>> weights = np.array([0.25, 0.75]) array([0.25, 0.75]) >>> np.average(data, axis=1, weights=weights) array([0.75, 2.75, 4.75]) """ if weights is None: return _npi.average(a, axis=axis, weights=None, returned=returned, weighted=False, out=out) else: return _npi.average(a, axis=axis, weights=weights, returned=returned, out=out) @set_module('mxnet.ndarray.numpy') def mean(a, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ """ mean(a, axis=None, dtype=None, out=None, keepdims=None) Compute the arithmetic mean along the specified axis. Returns the average of the array elements. The average is taken over the flattened array by default, otherwise over the specified axis. Parameters ---------- a : ndarray ndarray containing numbers whose mean is desired. axis : None or int or tuple of ints, optional Axis or axes along which the means are computed. The default is to compute the mean of the flattened array. If this is a tuple of ints, a mean is performed over multiple axes, instead of a single axis or all the axes as before. dtype : data-type, optional Type to use in computing the mean. For integer inputs, the default is float32; for floating point inputs, it is the same as the input dtype. out : ndarray, optional Alternate output array in which to place the result. 
The default is None; if provided, it must have the same shape and type as the expected output keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array. If the default value is passed, then keepdims will not be passed through to the mean method of sub-classes of ndarray, however any non-default value will be. If the sub-class method does not implement keepdims any exceptions will be raised. Returns ------- m : ndarray, see dtype parameter above If out=None, returns a new array containing the mean values, otherwise a reference to the output array is returned. Notes ----- This function differs from the original `numpy.mean <https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html>`_ in the following way(s): - only ndarray is accepted as valid input, python iterables or scalar is not supported - default data type for integer input is float32 Examples -------- >>> a = np.array([[1, 2], [3, 4]]) >>> np.mean(a) array(2.5) >>> a = np.zeros((2, 512*512), dtype=np.float32) >>> a[0,:] = 1.0 >>> a[1,:] = 0.1 >>> np.mean(a) array(0.55) >>> np.mean(a, dtype=np.float64) array(0.55) """ return _npi.mean(a, axis=axis, dtype=dtype, keepdims=keepdims, out=out) @set_module('mxnet.ndarray.numpy') def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=too-many-arguments """ Compute the standard deviation along the specified axis. Returns the standard deviation, a measure of the spread of a distribution, of the array elements. The standard deviation is computed for the flattened array by default, otherwise over the specified axis. Parameters ---------- a : ndarray Calculate the standard deviation of these values. axis : None or int or tuple of ints, optional Axis or axes along which the standard deviation is computed. The default is to compute the standard deviation of the flattened array. .. versionadded:: 1.7.0 If this is a tuple of ints, a standard deviation is performed over multiple axes, instead of a single axis or all the axes as before. dtype : dtype, optional Type to use in computing the standard deviation. For arrays of integer type the default is float64, for arrays of float types it is the same as the array type. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape as the expected output but the type (of the calculated values) will be cast if necessary. ddof : int, optional Means Delta Degrees of Freedom. The divisor used in calculations is ``N - ddof``, where ``N`` represents the number of elements. By default `ddof` is zero. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array. If the default value is passed, then `keepdims` will not be passed through to the `std` method of sub-classes of `ndarray`, however any non-default value will be. If the sub-class' method does not implement `keepdims` any exceptions will be raised. Returns ------- standard_deviation : ndarray, see dtype parameter above. If `out` is None, return a new array containing the standard deviation, otherwise return a reference to the output array. 
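Notes ----- The divisor used in the calculation is ``N - ddof``, as described above. A brief worked illustration of that formula (assuming only the behaviour already documented here): for ``a = np.array([1., 2., 3., 4.])`` the mean is 2.5 and the sum of squared deviations is 5.0, so the default ``ddof=0`` gives ``sqrt(5.0 / 4) ≈ 1.118``, while ``ddof=1`` gives the unbiased estimate ``sqrt(5.0 / 3) ≈ 1.291``.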
Examples -------- >>> a = np.array([[1, 2], [3, 4]]) >>> np.std(a) 1.1180339887498949 # may vary >>> np.std(a, axis=0) array([1., 1.]) >>> np.std(a, axis=1) array([0.5, 0.5]) In single precision, std() can be inaccurate: >>> a = np.zeros((2, 512*512), dtype=np.float32) >>> a[0, :] = 1.0 >>> a[1, :] = 0.1 >>> np.std(a) array(0.45) >>> np.std(a, dtype=np.float64) array(0.45, dtype=float64) """ return _npi.std(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out) @set_module('mxnet.ndarray.numpy') def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=too-many-arguments """ Compute the variance along the specified axis. Returns the variance of the array elements, a measure of the spread of a distribution. The variance is computed for the flattened array by default, otherwise over the specified axis. Parameters ---------- a : ndarray Array containing numbers whose variance is desired. If `a` is not an array, a conversion is attempted. axis : None or int or tuple of ints, optional Axis or axes along which the variance is computed. The default is to compute the variance of the flattened array. .. versionadded:: 1.7.0 If this is a tuple of ints, a variance is performed over multiple axes, instead of a single axis or all the axes as before. dtype : data-type, optional Type to use in computing the variance. For arrays of integer type the default is `float32`; for arrays of float types it is the same as the array type. out : ndarray, optional Alternate output array in which to place the result. It must have the same shape as the expected output, but the type is cast if necessary. ddof : int, optional "Delta Degrees of Freedom": the divisor used in the calculation is ``N - ddof``, where ``N`` represents the number of elements. By default `ddof` is zero. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the input array. If the default value is passed, then `keepdims` will not be passed through to the `var` method of sub-classes of `ndarray`, however any non-default value will be. If the sub-class' method does not implement `keepdims` any exceptions will be raised. Returns ------- variance : ndarray, see dtype parameter above If ``out=None``, returns a new array containing the variance; otherwise, a reference to the output array is returned. Examples -------- >>> a = np.array([[1, 2], [3, 4]]) >>> np.var(a) array(1.25) >>> np.var(a, axis=0) array([1., 1.]) >>> np.var(a, axis=1) array([0.25, 0.25]) >>> a = np.zeros((2, 512*512), dtype=np.float32) >>> a[0, :] = 1.0 >>> a[1, :] = 0.1 >>> np.var(a) array(0.2025) >>> np.var(a, dtype=np.float64) array(0.2025, dtype=float64) >>> ((1-0.55)**2 + (0.1-0.55)**2)/2 0.2025 """ return _npi.var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out) # pylint: disable=redefined-outer-name @set_module('mxnet.ndarray.numpy') def indices(dimensions, dtype=_np.int32, ctx=None): """Return an array representing the indices of a grid. Compute an array where the subarrays contain index values 0,1,... varying only along the corresponding axis. Parameters ---------- dimensions : sequence of ints The shape of the grid. dtype : data-type, optional The desired data-type for the array. Default is `float32`. ctx : device context, optional Device context on which the memory is allocated. Default is `mxnet.context.current_context()`. 
Returns ------- grid : ndarray The array of grid indices, ``grid.shape = (len(dimensions),) + tuple(dimensions)``. Notes ----- The output shape is obtained by prepending the number of dimensions in front of the tuple of dimensions, i.e. if `dimensions` is a tuple ``(r0, ..., rN-1)`` of length ``N``, the output shape is ``(N,r0,...,rN-1)``. The subarrays ``grid[k]`` contains the N-D array of indices along the ``k-th`` axis. Explicitly:: grid[k,i0,i1,...,iN-1] = ik Examples -------- >>> grid = np.indices((2, 3)) >>> grid.shape (2, 2, 3) >>> grid[0] # row indices array([[0, 0, 0], [1, 1, 1]]) >>> grid[1] # column indices array([[0, 0, 0], [1, 1, 1]], dtype=int32) The indices can be used as an index into an array. >>> x = np.arange(20).reshape(5, 4) >>> row, col = np.indices((2, 3)) >>> x[row, col] array([[0., 1., 2.], [4., 5., 6.]]) Note that it would be more straightforward in the above example to extract the required elements directly with ``x[:2, :3]``. """ if isinstance(dimensions, (tuple, list)): if ctx is None: ctx = current_context() return _npi.indices(dimensions=dimensions, dtype=dtype, ctx=ctx) else: raise ValueError("The dimensions must be sequence of ints") # pylint: enable=redefined-outer-name @set_module('mxnet.ndarray.numpy') @wrap_np_binary_func def copysign(x1, x2, out=None, **kwargs): r""" Change the sign of x1 to that of x2, element-wise. If `x2` is a scalar, its sign will be copied to all elements of `x1`. Parameters ---------- x1 : ndarray or scalar Values to change the sign of. x2 : ndarray or scalar The sign of `x2` is copied to `x1`. out : ndarray or None, optional A location into which the result is stored. It must be of the right shape and right type to hold the output. If not provided or `None`,a freshly-allocated array is returned. Returns ------- out : ndarray or scalar The values of `x1` with the sign of `x2`. This is a scalar if both `x1` and `x2` are scalars. Notes ------- This function differs from the original `numpy.copysign <https://docs.scipy.org/doc/numpy/reference/generated/numpy.copysign.html>`_ in the following aspects: - ``where`` param is not supported. Examples -------- >>> np.copysign(1.3, -1) -1.3 >>> 1/np.copysign(0, 1) inf >>> 1/np.copysign(0, -1) -inf >>> a = np.array([-1, 0, 1]) >>> np.copysign(a, -1.1) array([-1., -0., -1.]) >>> np.copysign(a, np.arange(3)-1) array([-1., 0., 1.]) """ return _ufunc_helper(x1, x2, _npi.copysign, _np.copysign, _npi.copysign_scalar, _npi.rcopysign_scalar, out) @set_module('mxnet.ndarray.numpy') def ravel(x, order='C'): r""" ravel(x) Return a contiguous flattened array. A 1-D array, containing the elements of the input, is returned. A copy is made only if needed. Parameters ---------- x : ndarray Input array. The elements in `x` are read in row-major, C-style order and packed as a 1-D array. order : `C`, optional Only support row-major, C-style order. Returns ------- y : ndarray y is an array of the same subtype as `x`, with shape ``(x.size,)``. Note that matrices are special cased for backward compatibility, if `x` is a matrix, then y is a 1-D ndarray. Notes ----- This function differs from the original numpy.arange in the following aspects: - Only support row-major, C-style order. Examples -------- It is equivalent to ``reshape(x, -1)``. >>> x = np.array([[1, 2, 3], [4, 5, 6]]) >>> print(np.ravel(x)) [1. 2. 3. 4. 5. 6.] >>> print(x.reshape(-1)) [1. 2. 3. 4. 5. 6.] >>> print(np.ravel(x.T)) [1. 4. 2. 5. 3. 6.] 
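A higher-dimensional input is flattened in the same row-major order; the output shown below is a sketch that assumes the default float32 printing used in the examples above: >>> print(np.ravel(np.array([[[1, 2], [3, 4]]]))) [1. 2. 3. 4.]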
""" if order == 'F': raise NotImplementedError('order {} is not supported'.format(order)) if isinstance(x, numeric_types): return _np.reshape(x, -1) elif isinstance(x, NDArray): return _npi.reshape(x, -1) else: raise TypeError('type {} not supported'.format(str(type(x)))) def unravel_index(indices, shape, order='C'): # pylint: disable=redefined-outer-name """ Converts a flat index or array of flat indices into a tuple of coordinate arrays. Parameters: ------------- indices : array_like An integer array whose elements are indices into the flattened version of an array of dimensions shape. Before version 1.6.0, this function accepted just one index value. shape : tuple of ints The shape of the array to use for unraveling indices. Returns: ------------- unraveled_coords : ndarray Each row in the ndarray has the same shape as the indices array. Each column in the ndarray represents the unravelled index Examples: ------------- >>> np.unravel_index([22, 41, 37], (7,6)) ([3. 6. 6.] [4. 5. 1.]) >>> np.unravel_index(1621, (6,7,8,9)) (3, 1, 4, 1) """ if order == 'C': if isinstance(indices, numeric_types): return _np.unravel_index(indices, shape) ret = _npi.unravel_index_fallback(indices, shape=shape) ret_list = [] for item in ret: ret_list += [item] return tuple(ret_list) else: raise NotImplementedError('Do not support column-major (Fortran-style) order at this moment') def diag_indices_from(arr): """ This returns a tuple of indices that can be used to access the main diagonal of an array a with a.ndim >= 2 dimensions and shape (n, n, ..., n). For a.ndim = 2 this is the usual diagonal, for a.ndim > 2 this is the set of indices to access a[i, i, ..., i] for i = [0..n-1]. Parameters: ------------- arr : ndarray Input array for acessing the main diagonal. All dimensions should have equal length. Return: ------------- diag: tuple of ndarray indices of the main diagonal. Examples: ------------- >>> a = np.arange(16).reshape(4, 4) >>> a array([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11], [12, 13, 14, 15]]) >>> idx = np.diag_indices_from(a) >>> idx (array([0, 1, 2, 3]), array([0, 1, 2, 3])) >>> a[idx] = 100 >>> a array([[100, 1, 2, 3], [ 4, 100, 6, 7], [ 8, 9, 100, 11], [ 12, 13, 14, 100]]) """ return tuple(_npi.diag_indices_from(arr)) @set_module('mxnet.ndarray.numpy') def hanning(M, dtype=_np.float32, ctx=None): r"""Return the Hanning window. The Hanning window is a taper formed by using a weighted cosine. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. dtype : str or numpy.dtype, optional An optional value type. Default is `float32`. Note that you need select numpy.float32 or float64 in this operator. ctx : Context, optional An optional device context (default is the current default context). Returns ------- out : ndarray, shape(M,) The window, with the maximum value normalized to one (the value one appears only if `M` is odd). See Also -------- blackman, hamming Notes ----- The Hanning window is defined as .. math:: w(n) = 0.5 - 0.5cos\left(\frac{2\pi{n}}{M-1}\right) \qquad 0 \leq n \leq M-1 The Hanning was named for Julius von Hann, an Austrian meteorologist. It is also known as the Cosine Bell. Some authors prefer that it be called a Hann window, to help avoid confusion with the very similar Hamming window. Most references to the Hanning window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. 
smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 106-108. .. [3] Wikipedia, "Window function", http://en.wikipedia.org/wiki/Window_function .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 425. Examples -------- >>> np.hanning(12) array([0. , 0.07937324, 0.29229254, 0.5711574 , 0.8274304 , 0.9797465 , 0.97974646, 0.82743025, 0.5711573 , 0.29229245, 0.07937312, 0. ]) Plot the window and its frequency response: >>> import matplotlib.pyplot as plt >>> window = np.hanning(51) >>> plt.plot(window.asnumpy()) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Hann window") Text(0.5, 1.0, 'Hann window') >>> plt.ylabel("Amplitude") Text(0, 0.5, 'Amplitude') >>> plt.xlabel("Sample") Text(0.5, 0, 'Sample') >>> plt.show() """ if ctx is None: ctx = current_context() return _npi.hanning(M, dtype=dtype, ctx=ctx) @set_module('mxnet.ndarray.numpy') def hamming(M, dtype=_np.float32, ctx=None): r"""Return the hamming window. The hamming window is a taper formed by using a weighted cosine. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. dtype : str or numpy.dtype, optional An optional value type. Default is `float32`. Note that you need select numpy.float32 or float64 in this operator. ctx : Context, optional An optional device context (default is the current default context). Returns ------- out : ndarray, shape(M,) The window, with the maximum value normalized to one (the value one appears only if `M` is odd). See Also -------- blackman, hanning Notes ----- The Hamming window is defined as .. math:: w(n) = 0.54 - 0.46cos\left(\frac{2\pi{n}}{M-1}\right) \qquad 0 \leq n \leq M-1 The Hamming was named for R. W. Hamming, an associate of J. W. Tukey and is described in Blackman and Tukey. It was recommended for smoothing the truncated autocovariance function in the time domain. Most references to the Hamming window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. References ---------- .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The University of Alberta Press, 1975, pp. 109-110. .. [3] Wikipedia, "Window function", https://en.wikipedia.org/wiki/Window_function .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, "Numerical Recipes", Cambridge University Press, 1986, page 425. 
Examples -------- >>> np.hamming(12) array([0.08000001, 0.15302339, 0.34890914, 0.6054648 , 0.841236 , 0.9813669 , 0.9813668 , 0.8412359 , 0.6054647 , 0.34890908, 0.15302327, 0.08000001]) Plot the window and its frequency response: >>> import matplotlib.pyplot as plt >>> window = np.hamming(51) >>> plt.plot(window.asnumpy()) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("hamming window") Text(0.5, 1.0, 'hamming window') >>> plt.ylabel("Amplitude") Text(0, 0.5, 'Amplitude') >>> plt.xlabel("Sample") Text(0.5, 0, 'Sample') >>> plt.show() """ if ctx is None: ctx = current_context() return _npi.hamming(M, dtype=dtype, ctx=ctx) @set_module('mxnet.ndarray.numpy') def blackman(M, dtype=_np.float32, ctx=None): r"""Return the Blackman window. The Blackman window is a taper formed by using the first three terms of a summation of cosines. It was designed to have close to the minimal leakage possible. It is close to optimal, only slightly worse than a Kaiser window. Parameters ---------- M : int Number of points in the output window. If zero or less, an empty array is returned. dtype : str or numpy.dtype, optional An optional value type. Default is `float32`. Note that you need select numpy.float32 or float64 in this operator. ctx : Context, optional An optional device context (default is the current default context). Returns ------- out : ndarray The window, with the maximum value normalized to one (the value one appears only if the number of samples is odd). See Also -------- hamming, hanning Notes ----- The Blackman window is defined as .. math:: w(n) = 0.42 - 0.5 \cos(2\pi n/{M-1}) + 0.08 \cos(4\pi n/{M-1}) Most references to the Blackman window come from the signal processing literature, where it is used as one of many windowing functions for smoothing values. It is also known as an apodization (which means "removing the foot", i.e. smoothing discontinuities at the beginning and end of the sampled signal) or tapering function. It is known as a "near optimal" tapering function, almost as good (by some measures) as the kaiser window. References ---------- Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, Dover Publications, New York. Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. Examples -------- >>> np.blackman(12) array([-1.4901161e-08, 3.2606423e-02, 1.5990365e-01, 4.1439798e-01, 7.3604530e-01, 9.6704686e-01, 9.6704674e-01, 7.3604506e-01, 4.1439781e-01, 1.5990359e-01, 3.2606363e-02, -1.4901161e-08]) Plot the window and its frequency response: >>> import matplotlib.pyplot as plt >>> window = np.blackman(51) >>> plt.plot(window.asnumpy()) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("blackman window") Text(0.5, 1.0, 'blackman window') >>> plt.ylabel("Amplitude") Text(0, 0.5, 'Amplitude') >>> plt.xlabel("Sample") Text(0.5, 0, 'Sample') >>> plt.show() """ if ctx is None: ctx = current_context() return _npi.blackman(M, dtype=dtype, ctx=ctx) @set_module('mxnet.ndarray.numpy') def flip(m, axis=None, out=None): r""" flip(m, axis=None, out=None) Reverse the order of elements in an array along the given axis. The shape of the array is preserved, but the elements are reordered. Parameters ---------- m : ndarray or scalar Input array. axis : None or int or tuple of ints, optional Axis or axes along which to flip over. The default, axis=None, will flip over all of the axes of the input array. If axis is negative it counts from the last to the first axis. 
If axis is a tuple of ints, flipping is performed on all of the axes specified in the tuple. out : ndarray or scalar, optional Alternative output array in which to place the result. It must have the same shape and type as the expected output. Returns ------- out : ndarray or scalar A view of `m` with the entries of axis reversed. Since a view is returned, this operation is done in constant time. Examples -------- >>> A = np.arange(8).reshape((2,2,2)) >>> A array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) >>> np.flip(A, 0) array([[[4, 5], [6, 7]], [[0, 1], [2, 3]]]) >>> np.flip(A, 1) array([[[2, 3], [0, 1]], [[6, 7], [4, 5]]]) >>> np.flip(A) array([[[7, 6], [5, 4]], [[3, 2], [1, 0]]]) >>> np.flip(A, (0, 2)) array([[[5, 4], [7, 6]], [[1, 0], [3, 2]]]) """ from ...numpy import ndarray if isinstance(m, numeric_types): return _np.flip(m, axis) elif isinstance(m, ndarray): return _npi.flip(m, axis, out=out) else: raise TypeError('type {} not supported'.format(str(type(m)))) @set_module('mxnet.ndarray.numpy') def flipud(m): r""" flipud(*args, **kwargs) Flip array in the up/down direction. Flip the entries in each column in the up/down direction. Rows are preserved, but appear in a different order than before. Parameters ---------- m : array_like Input array. Returns ------- out : array_like A view of `m` with the rows reversed. Since a view is returned, this operation is :math:`\mathcal O(1)`. See Also -------- fliplr : Flip array in the left/right direction. rot90 : Rotate array counterclockwise. Notes ----- Equivalent to ``m[::-1,...]``. Does not require the array to be two-dimensional. Examples -------- >>> A = np.diag(np.array([1.0, 2, 3])) >>> A array([[1., 0., 0.], [0., 2., 0.], [0., 0., 3.]]) >>> np.flipud(A) array([[0., 0., 3.], [0., 2., 0.], [1., 0., 0.]]) >>> A = np.random.randn(2,3,5) >>> np.all(np.flipud(A) == A[::-1,...]) array(True) >>> np.flipud(np.array([1,2])) array([2., 1.]) """ return flip(m, 0) @set_module('mxnet.ndarray.numpy') def fliplr(m): r""" fliplr(*args, **kwargs) Flip array in the left/right direction. Flip the entries in each row in the left/right direction. Columns are preserved, but appear in a different order than before. Parameters ---------- m : array_like Input array, must be at least 2-D. Returns ------- f : ndarray A view of `m` with the columns reversed. Since a view is returned, this operation is :math:`\mathcal O(1)`. See Also -------- flipud : Flip array in the up/down direction. rot90 : Rotate array counterclockwise. Notes ----- Equivalent to m[:,::-1]. Requires the array to be at least 2-D. Examples -------- >>> A = np.diag(np.array([1.,2.,3.])) >>> A array([[1., 0., 0.], [0., 2., 0.], [0., 0., 3.]]) >>> np.fliplr(A) array([[0., 0., 1.], [0., 2., 0.], [3., 0., 0.]]) >>> A = np.random.randn(2,3,5) >>> np.all(np.fliplr(A) == A[:,::-1,...]) array(True) """ return flip(m, 1) @set_module('mxnet.ndarray.numpy') def around(x, decimals=0, out=None, **kwargs): r""" around(x, decimals=0, out=None) Evenly round to the given number of decimals. Parameters ---------- x : ndarray or scalar Input data. decimals : int, optional Number of decimal places to round to (default: 0). If decimals is negative, it specifies the number of positions to the left of the decimal point. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and type as the expected output. Returns ------- rounded_array : ndarray or scalar An array of the same type as `x`, containing the rounded values. A reference to the result is returned. 
Notes ----- For values exactly halfway between rounded decimal values, NumPy rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0, -0.5 and 0.5 round to 0.0, etc. This function differs from the original numpy.prod in the following aspects: - Cannot cast type automatically. Dtype of `out` must be same as the expected one. - Cannot support complex-valued number. Examples -------- >>> np.around([0.37, 1.64]) array([ 0., 2.]) >>> np.around([0.37, 1.64], decimals=1) array([ 0.4, 1.6]) >>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value array([ 0., 2., 2., 4., 4.]) >>> np.around([1, 2, 3, 11], decimals=1) # ndarray of ints is returned array([ 1, 2, 3, 11]) >>> np.around([1, 2, 3, 11], decimals=-1) array([ 0, 0, 0, 10]) """ from ...numpy import ndarray if isinstance(x, numeric_types): return _np.around(x, decimals, **kwargs) elif isinstance(x, ndarray): return _npi.around(x, decimals, out=out, **kwargs) else: raise TypeError('type {} not supported'.format(str(type(x)))) @set_module('mxnet.ndarray.numpy') def round(x, decimals=0, out=None, **kwargs): r""" round_(a, decimals=0, out=None) Round an array to the given number of decimals. See Also -------- around : equivalent function; see for details. """ from ...numpy import ndarray if isinstance(x, numeric_types): return _np.around(x, decimals, **kwargs) elif isinstance(x, ndarray): return _npi.around(x, decimals, out=out, **kwargs) else: raise TypeError('type {} not supported'.format(str(type(x)))) @set_module('mxnet.ndarray.numpy') @wrap_np_binary_func def arctan2(x1, x2, out=None, **kwargs): r""" Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly. The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is the signed angle in radians between the ray ending at the origin and passing through the point (1,0), and the ray ending at the origin and passing through the point (`x2`, `x1`). (Note the role reversal: the "`y`-coordinate" is the first function parameter, the "`x`-coordinate" is the second.) By IEEE convention, this function is defined for `x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see Notes for specific values). This function is not defined for complex-valued arguments; for the so-called argument of complex values, use `angle`. Parameters ---------- x1 : ndarray or scalar `y`-coordinates. x2 : ndarray or scalar `x`-coordinates. `x2` must be broadcastable to match the shape of `x1` or vice versa. out : ndarray or None, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or `None`, a freshly-allocated array is returned. Returns ------- out : ndarray or scalar Array of angles in radians, in the range ``[-pi, pi]``. This is a scalar if `x1` and `x2` are scalars. Notes ----- *arctan2* is identical to the `atan2` function of the underlying C library. The following special values are defined in the C standard: [1]_ ====== ====== ================ `x1` `x2` `arctan2(x1,x2)` ====== ====== ================ +/- 0 +0 +/- 0 +/- 0 -0 +/- pi > 0 +/-inf +0 / +pi < 0 +/-inf -0 / -pi +/-inf +inf +/- (pi/4) +/-inf -inf +/- (3*pi/4) ====== ====== ================ Note that +0 and -0 are distinct floating point numbers, as are +inf and -inf. This function differs from the original numpy.arange in the following aspects: - Only support float16, float32 and float64. References ---------- .. [1] ISO/IEC standard 9899:1999, "Programming language C." 
Examples -------- Consider four points in different quadrants: >>> x = np.array([-1, +1, +1, -1]) >>> y = np.array([-1, -1, +1, +1]) >>> np.arctan2(y, x) * 180 / np.pi array([-135., -45., 45., 135.]) Note the order of the parameters. `arctan2` is defined also when `x2` = 0 and at several other special points, obtaining values in the range ``[-pi, pi]``: >>> x = np.array([1, -1]) >>> y = np.array([0, 0]) >>> np.arctan2(x, y) array([ 1.5707964, -1.5707964]) """ return _ufunc_helper(x1, x2, _npi.arctan2, _np.arctan2, _npi.arctan2_scalar, _npi.rarctan2_scalar, out=out) @set_module('mxnet.ndarray.numpy') @wrap_np_binary_func def hypot(x1, x2, out=None, **kwargs): r""" Given the "legs" of a right triangle, return its hypotenuse. Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or `x2` is scalar_like (i.e., unambiguously cast-able to a scalar type), it is broadcast for use with each element of the other argument. Parameters ---------- x1, x2 : ndarray Leg of the triangle(s). out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or `None`, a freshly-allocated array is returned. A tuple (possible only as a keyword argument) must have length equal to the number of outputs. Returns ------- z : ndarray The hypotenuse of the triangle(s). This is a scalar if both `x1` and `x2` are scalars. Notes ----- This function differs from the original numpy.arange in the following aspects: - Only support float16, float32 and float64. Examples -------- >>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3))) array([[ 5., 5., 5.], [ 5., 5., 5.], [ 5., 5., 5.]]) Example showing broadcast of scalar_like argument: >>> np.hypot(3*np.ones((3, 3)), [4]) array([[ 5., 5., 5.], [ 5., 5., 5.], [ 5., 5., 5.]]) """ return _ufunc_helper(x1, x2, _npi.hypot, _np.hypot, _npi.hypot_scalar, None, out) @set_module('mxnet.ndarray.numpy') @wrap_np_binary_func def bitwise_and(x1, x2, out=None, **kwargs): r""" Compute the bit-wise XOR of two arrays element-wise. Parameters ---------- x1, x2 : ndarray or scalar Only integer and boolean types are handled. If x1.shape != x2.shape, they must be broadcastable to a common shape (which becomes the shape of the output). out : ndarray, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. Returns ------- out : ndarray Result. Examples -------- >>> np.bitwise_and(13, 17) 1 >>> np.bitwise_and(14, 13) 12 >>> np.bitwise_and(np.array([14,3], dtype='int32'), 13) array([12, 1], dtype=int32) >>> np.bitwise_and(np.array([11,7], dtype='int32'), np.array([4,25], dtype='int32')) array([0, 1], dtype=int32) >>> np.bitwise_and(np.array([2,5,255], dtype='int32'), np.array([3,14,16], dtype='int32')) array([ 2, 4, 16], dtype=int32) >>> np.bitwise_and(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool')) array([False, True]) """ return _ufunc_helper(x1, x2, _npi.bitwise_and, _np.bitwise_and, _npi.bitwise_and_scalar, None, out) @set_module('mxnet.ndarray.numpy') @wrap_np_binary_func def bitwise_xor(x1, x2, out=None, **kwargs): r""" Compute the bit-wise XOR of two arrays element-wise. Parameters ---------- x1, x2 : ndarray or scalar Only integer and boolean types are handled. If x1.shape != x2.shape, they must be broadcastable to a common shape (which becomes the shape of the output). 
out : ndarray, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. Returns ------- out : ndarray Result. Examples -------- >>> np.bitwise_xor(13, 17) 28 >>> np.bitwise_xor(31, 5) 26 >>> np.bitwise_xor(np.array([31,3], dtype='int32'), 5) array([26, 6]) >>> np.bitwise_xor(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32')) array([26, 5]) >>> np.bitwise_xor(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool')) array([ True, False]) """ return _ufunc_helper(x1, x2, _npi.bitwise_xor, _np.bitwise_xor, _npi.bitwise_xor_scalar, None, out) @set_module('mxnet.ndarray.numpy') @wrap_np_binary_func def bitwise_or(x1, x2, out=None, **kwargs): r""" Compute the bit-wise OR of two arrays element-wise. Parameters ---------- x1, x2 : ndarray or scalar Only integer and boolean types are handled. If x1.shape != x2.shape, they must be broadcastable to a common shape (which becomes the shape of the output). out : ndarray, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned. Returns ------- out : ndarray Result. Examples -------- >>> np.bitwise_or(13, 17) 29 >>> np.bitwise_or(31, 5) 31 >>> np.bitwise_or(np.array([31,3], dtype='int32'), 5) array([31, 7]) >>> np.bitwise_or(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32')) array([31, 7]) >>> np.bitwise_or(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool')) array([ True, True]) """ return _ufunc_helper(x1, x2, _npi.bitwise_or, _np.bitwise_or, _npi.bitwise_or_scalar, None, out) @set_module('mxnet.ndarray.numpy') @wrap_np_binary_func def ldexp(x1, x2, out=None, **kwargs): """ Returns x1 * 2**x2, element-wise. The mantissas `x1` and twos exponents `x2` are used to construct floating point numbers ``x1 * 2**x2``. Parameters ---------- x1 : ndarray or scalar Array of multipliers. x2 : ndarray or scalar, int Array of twos exponents. out : ndarray, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not, a freshly-allocated array is returned. Returns ------- y : ndarray or scalar The result of ``x1 * 2**x2``. This is a scalar if both `x1` and `x2` are scalars. Notes ----- Complex dtypes are not supported, they will raise a TypeError. Different from numpy, we allow x2 to be float besides int. `ldexp` is useful as the inverse of `frexp`, if used by itself it is more clear to simply use the expression ``x1 * 2**x2``. Examples -------- >>> np.ldexp(5, np.arange(4)) array([ 5., 10., 20., 40.]) """ return _ufunc_helper(x1, x2, _npi.ldexp, _np.ldexp, _npi.ldexp_scalar, _npi.rldexp_scalar, out) @set_module('mxnet.ndarray.numpy') def inner(a, b): r""" Inner product of two arrays. Ordinary inner product of vectors for 1-D arrays (without complex conjugation), in higher dimensions a sum product over the last axes. Parameters ---------- a, b : ndarray If `a` and `b` are nonscalar, their last dimensions must match. Returns ------- out : ndarray `out.shape = a.shape[:-1] + b.shape[:-1]` Raises ------ ValueError If the last dimension of `a` and `b` has different size. See Also -------- tensordot : Sum products over arbitrary axes. dot : Generalised matrix product, using second last dimension of `b`. einsum : Einstein summation convention. 
Notes ----- For vectors (1-D arrays) it computes the ordinary inner-product:: np.inner(a, b) = sum(a[:]*b[:]) More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`:: np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1)) or explicitly:: np.inner(a, b)[i0,...,ir-1,j0,...,js-1] = sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:]) In addition `a` or `b` may be scalars, in which case:: np.inner(a,b) = a*b Examples -------- Ordinary inner product for vectors: >>> a = np.array([1,2,3]) >>> b = np.array([0,1,0]) >>> np.inner(a, b) 2 A multidimensional example: >>> a = np.arange(24).reshape((2,3,4)) >>> b = np.arange(4) >>> np.inner(a, b) array([[ 14, 38, 62], [ 86, 110, 134]]) """ return tensordot(a, b, [-1, -1]) @set_module('mxnet.ndarray.numpy') def outer(a, b): r""" Compute the outer product of two vectors. Given two vectors, ``a = [a0, a1, ..., aM]`` and ``b = [b0, b1, ..., bN]``, the outer product [1]_ is:: [[a0*b0 a0*b1 ... a0*bN ] [a1*b0 . [ ... . [aM*b0 aM*bN ]] Parameters ---------- a : (M,) ndarray First input vector. Input is flattened if not already 1-dimensional. b : (N,) ndarray Second input vector. Input is flattened if not already 1-dimensional. Returns ------- out : (M, N) ndarray ``out[i, j] = a[i] * b[j]`` See also -------- inner einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent. ufunc.outer : A generalization to N dimensions and other operations. ``np.multiply.outer(a.ravel(), b.ravel())`` is the equivalent. References ---------- .. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd ed., Baltimore, MD, Johns Hopkins University Press, 1996, pg. 8. Examples -------- Make a (*very* coarse) grid for computing a Mandelbrot set: >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5)) >>> rl array([[-2., -1., 0., 1., 2.], [-2., -1., 0., 1., 2.], [-2., -1., 0., 1., 2.], [-2., -1., 0., 1., 2.], [-2., -1., 0., 1., 2.]]) """ return tensordot(a.flatten(), b.flatten(), 0) @set_module('mxnet.ndarray.numpy') def vdot(a, b): r""" Return the dot product of two vectors. Note that `vdot` handles multidimensional arrays differently than `dot`: it does *not* perform a matrix product, but flattens input arguments to 1-D vectors first. Consequently, it should only be used for vectors. Parameters ---------- a : ndarray First argument to the dot product. b : ndarray Second argument to the dot product. Returns ------- output : ndarray Dot product of `a` and `b`. See Also -------- dot : Return the dot product without using the complex conjugate of the first argument. Examples -------- Note that higher-dimensional arrays are flattened! >>> a = np.array([[1, 4], [5, 6]]) >>> b = np.array([[4, 1], [2, 2]]) >>> np.vdot(a, b) 30 >>> np.vdot(b, a) 30 >>> 1*4 + 4*1 + 5*2 + 6*2 30 """ return tensordot(a.flatten(), b.flatten(), 1) @set_module('mxnet.ndarray.numpy') def equal(x1, x2, out=None): """ Return (x1 == x2) element-wise. Parameters ---------- x1, x2 : ndarrays or scalars Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to a common shape (which becomes the shape of the output). out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or `None`, a freshly-allocated array is returned. Returns ------- out : ndarray or scalar Output array of type bool, element-wise comparison of `x1` and `x2`. This is a scalar if both `x1` and `x2` are scalars. 
See Also -------- not_equal, greater_equal, less_equal, greater, less Examples -------- >>> np.equal(np.ones(2, 1)), np.zeros(1, 3)) array([[False, False, False], [False, False, False]]) >>> np.equal(1, np.ones(1)) array([ True]) """ return _ufunc_helper(x1, x2, _npi.equal, _np.equal, _npi.equal_scalar, None, out) @set_module('mxnet.ndarray.numpy') def not_equal(x1, x2, out=None): """ Return (x1 != x2) element-wise. Parameters ---------- x1, x2 : ndarrays or scalars Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to a common shape (which becomes the shape of the output). out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or `None`, a freshly-allocated array is returned. Returns ------- out : ndarray or scalar Output array of type bool, element-wise comparison of `x1` and `x2`. This is a scalar if both `x1` and `x2` are scalars. See Also -------- equal, greater, greater_equal, less, less_equal Examples -------- >>> np.not_equal(np.ones(2, 1)), np.zeros(1, 3)) array([[ True, True, True], [ True, True, True]]) >>> np.not_equal(1, np.ones(1)) array([False]) """ return _ufunc_helper(x1, x2, _npi.not_equal, _np.not_equal, _npi.not_equal_scalar, None, out) @set_module('mxnet.ndarray.numpy') def greater(x1, x2, out=None): """ Return the truth value of (x1 > x2) element-wise. Parameters ---------- x1, x2 : ndarrays or scalars Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to a common shape (which becomes the shape of the output). out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or `None`, a freshly-allocated array is returned. Returns ------- out : ndarray or scalar Output array of type bool, element-wise comparison of `x1` and `x2`. This is a scalar if both `x1` and `x2` are scalars. See Also -------- equal, greater, greater_equal, less, less_equal Examples -------- >>> np.greater(np.ones(2, 1)), np.zeros(1, 3)) array([[ True, True, True], [ True, True, True]]) >>> np.greater(1, np.ones(1)) array([False]) """ return _ufunc_helper(x1, x2, _npi.greater, _np.greater, _npi.greater_scalar, _npi.less_scalar, out) @set_module('mxnet.ndarray.numpy') def less(x1, x2, out=None): """ Return the truth value of (x1 < x2) element-wise. Parameters ---------- x1, x2 : ndarrays or scalars Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to a common shape (which becomes the shape of the output). out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or `None`, a freshly-allocated array is returned. Returns ------- out : ndarray or scalar Output array of type bool, element-wise comparison of `x1` and `x2`. This is a scalar if both `x1` and `x2` are scalars. See Also -------- equal, greater, greater_equal, less, less_equal Examples -------- >>> np.less(np.ones(2, 1)), np.zeros(1, 3)) array([[ True, True, True], [ True, True, True]]) >>> np.less(1, np.ones(1)) array([False]) """ return _ufunc_helper(x1, x2, _npi.less, _np.less, _npi.less_scalar, _npi.greater_scalar, out) @set_module('mxnet.ndarray.numpy') def greater_equal(x1, x2, out=None): """ Return the truth value of (x1 >= x2) element-wise. Parameters ---------- x1, x2 : ndarrays or scalars Input arrays. 
If ``x1.shape != x2.shape``, they must be broadcastable to a common shape (which becomes the shape of the output). out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or `None`, a freshly-allocated array is returned. Returns ------- out : ndarray or scalar Output array of type bool, element-wise comparison of `x1` and `x2`. This is a scalar if both `x1` and `x2` are scalars. See Also -------- equal, greater, greater_equal, less, less_equal Examples -------- >>> np.greater_equal(np.ones(2, 1)), np.zeros(1, 3)) array([[ True, True, True], [ True, True, True]]) >>> np.greater_equal(1, np.ones(1)) array([True]) """ return _ufunc_helper(x1, x2, _npi.greater_equal, _np.greater_equal, _npi.greater_equal_scalar, _npi.less_equal_scalar, out) @set_module('mxnet.ndarray.numpy') def less_equal(x1, x2, out=None): """ Return the truth value of (x1 <= x2) element-wise. Parameters ---------- x1, x2 : ndarrays or scalars Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to a common shape (which becomes the shape of the output). out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If provided, it must have a shape that the inputs broadcast to. If not provided or `None`, a freshly-allocated array is returned. Returns ------- out : ndarray or scalar Output array of type bool, element-wise comparison of `x1` and `x2`. This is a scalar if both `x1` and `x2` are scalars. See Also -------- equal, greater, greater_equal, less, less_equal Examples -------- >>> np.less_equal(np.ones(2, 1)), np.zeros(1, 3)) array([[False, False, False], [False, False, False]]) >>> np.less_equal(1, np.ones(1)) array([True]) """ return _ufunc_helper(x1, x2, _npi.less_equal, _np.less_equal, _npi.less_equal_scalar, _npi.greater_equal_scalar, out) @set_module('mxnet.ndarray.numpy') def rot90(m, k=1, axes=(0, 1)): """ Rotate an array by 90 degrees in the plane specified by axes. Rotation direction is from the first towards the second axis. Parameters ---------- m : ndarray Array of two or more dimensions. k : integer Number of times the array is rotated by 90 degrees. axes: (2,) array_like The array is rotated in the plane defined by the axes. Axes must be different. Returns ------- y : ndarray A rotated view of `m`. ----- rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1)) rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1)) Examples -------- >>> m = np.array([[1,2],[3,4]], 'int') >>> m array([[1, 2], [3, 4]], dtype=int64) >>> np.rot90(m) array([[2, 4], [1, 3]], dtype=int64) >>> np.rot90(m, 2) array([[4, 3], [2, 1]], dtype=int64) >>> m = np.arange(8).reshape((2,2,2)) >>> np.rot90(m, 1, (1,2)) array([[[1., 3.], [0., 2.]], [[5., 7.], [4., 6.]]]) """ return _npi.rot90(m, k=k, axes=axes) @set_module('mxnet.ndarray.numpy') def einsum(*operands, **kwargs): r""" einsum(subscripts, *operands, out=None, optimize=False) Evaluates the Einstein summation convention on the operands. Using the Einstein summation convention, many common multi-dimensional, linear algebraic array operations can be represented in a simple fashion. In *implicit* mode `einsum` computes these values. In *explicit* mode, `einsum` provides further flexibility to compute other array operations that might not be considered classical Einstein summation operations, by disabling, or forcing summation over specified subscript labels. 
See the notes and examples for clarification. Parameters ---------- subscripts : str Specifies the subscripts for summation as comma separated list of subscript labels. An implicit (classical Einstein summation) calculation is performed unless the explicit indicator '->' is included as well as subscript labels of the precise output form. operands : list of ndarray These are the arrays for the operation. out : ndarray, optional If provided, the calculation is done into this array. optimize : {False, True}, optional Controls if intermediate optimization should occur. No optimization will occur if False. Defaults to False. Returns ------- output : ndarray The calculation based on the Einstein summation convention. Notes ----- The Einstein summation convention can be used to compute many multi-dimensional, linear algebraic array operations. `einsum` provides a succinct way of representing these. A non-exhaustive list of these operations, which can be computed by `einsum`, is shown below along with examples: * Trace of an array, :py:func:`np.trace`. * Return a diagonal, :py:func:`np.diag`. * Array axis summations, :py:func:`np.sum`. * Transpositions and permutations, :py:func:`np.transpose`. * Matrix multiplication and dot product, :py:func:`np.matmul` :py:func:`np.dot`. * Vector inner and outer products, :py:func:`np.inner` :py:func:`np.outer`. * Broadcasting, element-wise and scalar multiplication, :py:func:`np.multiply`. * Tensor contractions, :py:func:`np.tensordot`. The subscripts string is a comma-separated list of subscript labels, where each label refers to a dimension of the corresponding operand. Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)`` is equivalent to :py:func:`np.inner(a,b) <np.inner>`. If a label appears only once, it is not summed, so ``np.einsum('i', a)`` produces a view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)`` describes traditional matrix multiplication and is equivalent to :py:func:`np.matmul(a,b) <np.matmul>`. Repeated subscript labels in one operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent to :py:func:`np.trace(a) <np.trace>`. In *implicit mode*, the chosen subscripts are important since the axes of the output are reordered alphabetically. This means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while ``np.einsum('ji', a)`` takes its transpose. Additionally, ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while, ``np.einsum('ij,jh', a, b)`` returns the transpose of the multiplication since subscript 'h' precedes subscript 'i'. In *explicit mode* the output can be directly controlled by specifying output subscript labels. This requires the identifier '->' as well as the list of output subscript labels. This feature increases the flexibility of the function since summing can be disabled or forced when required. The call ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <np.sum>`, and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <np.diag>`. The difference is that `einsum` does not allow broadcasting by default. Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the order of the output subscript labels and therefore returns matrix multiplication, unlike the example above in implicit mode. To enable and control broadcasting, use an ellipsis. Default NumPy-style broadcasting is done by adding an ellipsis to the left of each term, like ``np.einsum('...ii->...i', a)``. 
To take the trace along the first and last axes, you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix product with the left-most indices instead of rightmost, one can do ``np.einsum('ij...,jk...->ik...', a, b)``. When there is only one operand, no axes are summed, and no output parameter is provided, a view into the operand is returned instead of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)`` produces a view. The ``optimize`` argument which will optimize the contraction order of an einsum expression. For a contraction with three or more operands this can greatly increase the computational efficiency at the cost of a larger memory footprint during computation. Typically a 'greedy' algorithm is applied which empirical tests have shown returns the optimal path in the majority of cases. 'optimal' is not supported for now. This function differs from the original `numpy.einsum <https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html>`_ in the following way(s): - Does not support 'optimal' strategy - Does not support the alternative subscript like `einsum(op0, sublist0, op1, sublist1, ..., [sublistout])` - Does not produce view in any cases Examples -------- >>> a = np.arange(25).reshape(5,5) >>> b = np.arange(5) >>> c = np.arange(6).reshape(2,3) Trace of a matrix: >>> np.einsum('ii', a) array(60.) Extract the diagonal (requires explicit form): >>> np.einsum('ii->i', a) array([ 0., 6., 12., 18., 24.]) Sum over an axis (requires explicit form): >>> np.einsum('ij->i', a) array([ 10., 35., 60., 85., 110.]) >>> np.sum(a, axis=1) array([ 10., 35., 60., 85., 110.]) For higher dimensional arrays summing a single axis can be done with ellipsis: >>> np.einsum('...j->...', a) array([ 10., 35., 60., 85., 110.]) Compute a matrix transpose, or reorder any number of axes: >>> np.einsum('ji', c) array([[0., 3.], [1., 4.], [2., 5.]]) >>> np.einsum('ij->ji', c) array([[0., 3.], [1., 4.], [2., 5.]]) >>> np.transpose(c) array([[0., 3.], [1., 4.], [2., 5.]]) Vector inner products: >>> np.einsum('i,i', b, b) array(30.) Matrix vector multiplication: >>> np.einsum('ij,j', a, b) array([ 30., 80., 130., 180., 230.]) >>> np.dot(a, b) array([ 30., 80., 130., 180., 230.]) >>> np.einsum('...j,j', a, b) array([ 30., 80., 130., 180., 230.]) Broadcasting and scalar multiplication: >>> np.einsum('..., ...', np.array(3), c) array([[ 0., 3., 6.], [ 9., 12., 15.]]) >>> np.einsum(',ij', np.array(3), c) array([[ 0., 3., 6.], [ 9., 12., 15.]]) >>> np.multiply(3, c) array([[ 0., 3., 6.], [ 9., 12., 15.]]) Vector outer product: >>> np.einsum('i,j', np.arange(2)+1, b) array([[0., 1., 2., 3., 4.], [0., 2., 4., 6., 8.]]) Tensor contraction: >>> a = np.arange(60.).reshape(3,4,5) >>> b = np.arange(24.).reshape(4,3,2) >>> np.einsum('ijk,jil->kl', a, b) array([[4400., 4730.], [4532., 4874.], [4664., 5018.], [4796., 5162.], [4928., 5306.]]) Example of ellipsis use: >>> a = np.arange(6).reshape((3,2)) >>> b = np.arange(12).reshape((4,3)) >>> np.einsum('ki,jk->ij', a, b) array([[10., 28., 46., 64.], [13., 40., 67., 94.]]) >>> np.einsum('ki,...k->i...', a, b) array([[10., 28., 46., 64.], [13., 40., 67., 94.]]) >>> np.einsum('k...,jk', a, b) array([[10., 28., 46., 64.], [13., 40., 67., 94.]]) Chained array operations. For more complicated contractions, speed ups might be achieved by repeatedly computing a 'greedy' path. Performance improvements can be particularly significant with larger arrays: >>> a = np.ones(64).reshape(2,4,8) # Basic `einsum`: ~42.22ms (benchmarked on 3.4GHz Intel Xeon.) 
>>> for iteration in range(500): ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a) # Greedy `einsum` (faster optimal path approximation): ~0.117ms >>> for iteration in range(500): ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=True) """ # Grab non-einsum kwargs; do not optimize by default. optimize_arg = kwargs.pop('optimize', False) out = kwargs.pop('out', None) subscripts = operands[0] operands = operands[1:] return _npi.einsum(*operands, subscripts=subscripts, out=out, optimize=int(optimize_arg)) @set_module('mxnet.ndarray.numpy') def nonzero(a): """ Return the indices of the elements that are non-zero. Returns a tuple of arrays, one for each dimension of `a`, containing the indices of the non-zero elements in that dimension. The values in `a` are always returned in row-major, C-style order. To group the indices by element, rather than dimension, use `argwhere`, which returns a row for each non-zero element. Parameters ---------- a : ndarray Input array. Returns ------- tuple_of_arrays : tuple Indices of elements that are non-zero. See Also -------- ndarray.nonzero : Equivalent ndarray method. Notes ----- While the nonzero values can be obtained with ``a[nonzero(a)]``, it is recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which will correctly handle 0-d arrays. Examples -------- >>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]]) >>> x array([[3, 0, 0], [0, 4, 0], [5, 6, 0]], dtype=int32) >>> np.nonzero(x) (array([0, 1, 2, 2], dtype=int64), array([0, 1, 0, 1], dtype=int64)) >>> x[np.nonzero(x)] array([3, 4, 5, 6]) >>> np.transpose(np.stack(np.nonzero(x))) array([[0, 0], [1, 1], [2, 0], [2, 1]], dtype=int64) A common use for ``nonzero`` is to find the indices of an array, where a condition is True. Given an array `a`, the condition `a` > 3 is a boolean array and since False is interpreted as 0, np.nonzero(a > 3) yields the indices of the `a` where the condition is true. >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32) >>> a > 3 array([[False, False, False], [ True, True, True], [ True, True, True]]) >>> np.nonzero(a > 3) (array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64)) Using this result to index `a` is equivalent to using the mask directly: >>> a[np.nonzero(a > 3)] array([4, 5, 6, 7, 8, 9], dtype=int32) >>> a[a > 3] array([4, 5, 6, 7, 8, 9], dtype=int32) ``nonzero`` can also be called as a method of the array. >>> (a > 3).nonzero() (array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64)) """ out = _npi.nonzero(a).transpose() return tuple([out[i] for i in range(len(out))]) @set_module('mxnet.ndarray.numpy') def percentile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False): # pylint: disable=too-many-arguments """ Compute the q-th percentile of the data along the specified axis. Returns the q-th percentile(s) of the array elements. Parameters ---------- a : ndarray Input array q : ndarray Percentile or sequence of percentiles to compute. axis : {int, tuple of int, None}, optional Axis or axes along which the percentiles are computed. The default is to compute the percentile(s) along a flattened version of the array. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. 
overwrite_input : bool, optional (Not supported yet) If True, then allow the input array a to be modified by intermediate calculations, to save memory. In this case, the contents of the input a after this function completes is undefined. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use when the desired percentile lies between two data points i < j: 'linear': i + (j - i) * fraction, where fraction is the fractional part of the index surrounded by i and j. 'lower': i. 'higher': j. 'nearest': i or j, whichever is nearest. 'midpoint': (i + j) / 2. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original array a. Returns ------- percentile : scalar or ndarray Output array. Examples -------- >>> a = np.array([[10, 7, 4], [3, 2, 1]]) >>> a array([[10, 7, 4], [ 3, 2, 1]]) >>> np.percentile(a, np.array(50)) array(3.5) >>> np.percentile(a, np.array(50), axis=0) array([6.5, 4.5, 2.5]) >>> np.percentile(a, np.array(50), axis=1) array([7., 2.]) >>> np.percentile(a, np.array(50), axis=1, keepdims=True) array([[7.], [2.]]) >>> m = np.percentile(a, np.array(50), axis=0) >>> out = np.zeros_like(m) >>> np.percentile(a, np.array(50), axis=0, out=out) array([6.5, 4.5, 2.5]) >>> m array([6.5, 4.5, 2.5]) """ if overwrite_input is not None: raise NotImplementedError('overwrite_input is not supported yet') if isinstance(q, numeric_types): return _npi.percentile(a, axis=axis, interpolation=interpolation, keepdims=keepdims, q_scalar=q, out=out) return _npi.percentile(a, q, axis=axis, interpolation=interpolation, keepdims=keepdims, q_scalar=None, out=out) @set_module('mxnet.ndarray.numpy') def quantile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False): # pylint: disable=too-many-arguments """ Compute the q-th quantile of the data along the specified axis. New in version 1.15.0. Parameters ---------- a : ndarray Input array or object that can be converted to an array. q : ndarray Quantile or sequence of quantiles to compute, which must be between 0 and 1 inclusive. axis : {int, tuple of int, None}, optional Axis or axes along which the quantiles are computed. The default is to compute the quantile(s) along a flattened version of the array. out : ndarray, optional Alternative output array in which to place the result. It must have the same shape and buffer length as the expected output, but the type (of the output) will be cast if necessary. interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} This optional parameter specifies the interpolation method to use when the desired quantile lies between two data points i < j: linear: i + (j - i) * fraction, where fraction is the fractional part of the index surrounded by i and j. lower: i. higher: j. nearest: i or j, whichever is nearest. midpoint: (i + j) / 2. keepdims : bool, optional If this is set to True, the axes which are reduced are left in the result as dimensions with size one. With this option, the result will broadcast correctly against the original array a. Returns ------- quantile : ndarray If q is a single quantile and axis=None, then the result is a scalar. If multiple quantiles are given, first axis of the result corresponds to the quantiles. The other axes are the axes that remain after the reduction of a. If out is specified, that array is returned instead. 
See also -------- mean Notes ----- Given a vector V of length N, the q-th quantile of V is the value q of the way from the minimum to the maximum in a sorted copy of V. The values and distances of the two nearest neighbors as well as the interpolation parameter will determine the quantile if the normalized ranking does not match the location of q exactly. This function is the same as the median if q=0.5, the same as the minimum if q=0.0 and the same as the maximum if q=1.0. This function differs from the original `numpy.quantile <https://numpy.org/devdocs/reference/generated/numpy.quantile.html>`_ in the following aspects: - q must be ndarray type even if it is a scalar - do not support overwrite_input Examples -------- >>> a = np.array([[10, 7, 4], [3, 2, 1]]) >>> a array([[10., 7., 4.], [3., 2., 1.]]) >>> q = np.array(0.5) >>> q array(0.5) >>> np.quantile(a, q) array(3.5) >>> np.quantile(a, q, axis=0) array([6.5, 4.5, 2.5]) >>> np.quantile(a, q, axis=1) array([7., 2.]) >>> np.quantile(a, q, axis=1, keepdims=True) array([[7.], [2.]]) >>> m = np.quantile(a, q, axis=0) >>> out = np.zeros_like(m) >>> np.quantile(a, q, axis=0, out=out) array([6.5, 4.5, 2.5]) >>> out array([6.5, 4.5, 2.5]) """ if overwrite_input is not None: raise NotImplementedError('overwrite_input is not supported yet') if isinstance(q, numeric_types): return _npi.percentile(a, axis=axis, interpolation=interpolation, keepdims=keepdims, q_scalar=q * 100, out=out) return _npi.percentile(a, q * 100, axis=axis, interpolation=interpolation, keepdims=keepdims, q_scalar=None, out=out) @set_module('mxnet.ndarray.numpy') def shares_memory(a, b, max_work=None): """ Determine if two arrays share memory Parameters ---------- a, b : ndarray Input arrays Returns ------- out : bool See Also -------- may_share_memory Examples -------- >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) False This function differs from the original `numpy.shares_memory <https://docs.scipy.org/doc/numpy/reference/generated/numpy.shares_memory.html>`_ in the following way(s): - Does not support `max_work`, it is a dummy argument - Actually it is same as `may_share_memory` in MXNet DeepNumPy """ return _npi.share_memory(a, b).item() @set_module('mxnet.ndarray.numpy') def may_share_memory(a, b, max_work=None): """ Determine if two arrays might share memory A return of True does not necessarily mean that the two arrays share any element. It just means that they *might*. Only the memory bounds of a and b are checked by default. Parameters ---------- a, b : ndarray Input arrays Returns ------- out : bool See Also -------- shares_memory Examples -------- >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9])) False >>> x = np.zeros([3, 4]) >>> np.may_share_memory(x[:,0], x[:,1]) True This function differs from the original `numpy.may_share_memory <https://docs.scipy.org/doc/numpy/reference/generated/numpy.may_share_memory.html>`_ in the following way(s): - Does not support `max_work`, it is a dummy argument - Actually it is same as `shares_memory` in MXNet DeepNumPy """ return _npi.share_memory(a, b).item() @set_module('mxnet.ndarray.numpy') def diff(a, n=1, axis=-1, prepend=None, append=None): # pylint: disable=redefined-outer-name r""" Calculate the n-th discrete difference along the given axis. Parameters ---------- a : ndarray Input array n : int, optional The number of times values are differenced. If zero, the input is returned as-is. axis : int, optional The axis along which the difference is taken, default is the last axis. 
prepend, append : ndarray, optional Not supported yet Returns ------- diff : ndarray The n-th differences. The shape of the output is the same as a except along axis where the dimension is smaller by n. The type of the output is the same as the type of the difference between any two elements of a. Examples -------- >>> x = np.array([1, 2, 4, 7, 0]) >>> np.diff(x) array([ 1, 2, 3, -7]) >>> np.diff(x, n=2) array([ 1, 1, -10]) >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) >>> np.diff(x) array([[2, 3, 4], [5, 1, 2]]) >>> np.diff(x, axis=0) array([[-1, 2, 0, -2]]) Notes ----- Optional inputs `prepend` and `append` are not supported yet """ if (prepend or append): raise NotImplementedError('prepend and append options are not supported yet') return _npi.diff(a, n=n, axis=axis) @set_module('mxnet.ndarray.numpy') def resize(a, new_shape): """ Return a new array with the specified shape. If the new array is larger than the original array, then the new array is filled with repeated copies of `a`. Note that this behavior is different from a.resize(new_shape) which fills with zeros instead of repeated copies of `a`. Parameters ---------- a : ndarray Array to be resized. new_shape : int or tuple of int Shape of resized array. Returns ------- reshaped_array : ndarray The new array is formed from the data in the old array, repeated if necessary to fill out the required number of elements. The data are repeated in the order that they are stored in memory. See Also -------- ndarray.resize : resize an array in-place. Notes ----- Warning: This functionality does **not** consider axes separately, i.e. it does not apply interpolation/extrapolation. It fills the return array with the required number of elements, taken from `a` as they are laid out in memory, disregarding strides and axes. (This is in case the new shape is smaller. For larger, see above.) This functionality is therefore not suitable to resize images, or data where each axis represents a separate and distinct entity. Examples -------- >>> a = np.array([[0, 1], [2, 3]]) >>> np.resize(a, (2, 3)) array([[0., 1., 2.], [3., 0., 1.]]) >>> np.resize(a, (1, 4)) array([[0., 1., 2., 3.]]) >>> np.resize(a,(2, 4)) array([[0., 1., 2., 3.], [0., 1., 2., 3.]]) """ return _npi.resize_fallback(a, new_shape=new_shape) @set_module('mxnet.ndarray.numpy') def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None, **kwargs): """ Replace NaN with zero and infinity with large finite numbers (default behaviour) or with the numbers defined by the user using the `nan`, `posinf` and/or `neginf` keywords. If `x` is inexact, NaN is replaced by zero or by the user defined value in `nan` keyword, infinity is replaced by the largest finite floating point values representable by ``x.dtype`` or by the user defined value in `posinf` keyword and -infinity is replaced by the most negative finite floating point values representable by ``x.dtype`` or by the user defined value in `neginf` keyword. For complex dtypes, the above is applied to each of the real and imaginary components of `x` separately. If `x` is not inexact, then no replacements are made. Parameters ---------- x : ndarray Input data. copy : bool, optional Whether to create a copy of `x` (True) or to replace values in-place (False). The in-place operation only occurs if casting to an array does not require a copy. Default is True. nan : int, float, optional Value to be used to fill NaN values. If no value is passed then NaN values will be replaced with 0.0. 
posinf : int, float, optional Value to be used to fill positive infinity values. If no value is passed then positive infinity values will be replaced with a very large number. neginf : int, float, optional Value to be used to fill negative infinity values. If no value is passed then negative infinity values will be replaced with a very small (or negative) number. .. versionadded:: 1.13 Returns ------- out : ndarray `x`, with the non-finite values replaced. If `copy` is False, this may be `x` itself. Notes ----- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). This means that Not a Number is not equivalent to infinity. Examples -------- >>> np.nan_to_num(np.inf) 1.7976931348623157e+308 >>> np.nan_to_num(-np.inf) -1.7976931348623157e+308 >>> np.nan_to_num(np.nan) 0.0 >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128]) >>> np.nan_to_num(x) array([ 3.4028235e+38, -3.4028235e+38, 0.0000000e+00, -1.2800000e+02, 1.2800000e+02]) >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333) array([ 3.3333332e+07, 3.3333332e+07, -9.9990000e+03, -1.2800000e+02, 1.2800000e+02]) >>> y = np.array([[-1, 0, 1],[9999,234,-14222]],dtype="float64")/0 array([[-inf, nan, inf], [ inf, inf, -inf]], dtype=float64) >>> np.nan_to_num(y) array([[-1.79769313e+308, 0.00000000e+000, 1.79769313e+308], [ 1.79769313e+308, 1.79769313e+308, -1.79769313e+308]], dtype=float64) >>> np.nan_to_num(y, nan=111111, posinf=222222) array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005], [ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64) >>> y array([[-inf, nan, inf], [ inf, inf, -inf]], dtype=float64) >>> np.nan_to_num(y, copy=False, nan=111111, posinf=222222) array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005], [ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64) >>> y array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005], [ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64) """ if isinstance(x, numeric_types): return _np.nan_to_num(x, copy, nan, posinf, neginf) elif isinstance(x, NDArray): if x.dtype in ['int8', 'uint8', 'int32', 'int64']: return x if not copy: return _npi.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf, out=x) return _npi.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf, out=None) else: raise TypeError('type {} not supported'.format(str(type(x)))) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def isnan(x, out=None, **kwargs): """ Test element-wise for NaN and return result as a boolean array. Parameters ---------- x : ndarray Input array. out : ndarray or None, optional A location into which the result is stored. If provided, it must have the same shape and dtype as input ndarray. If not provided or `None`, a freshly-allocated array is returned. Returns ------- y : ndarray or bool True where x is NaN, false otherwise. This is a scalar if x is a scalar. Notes ----- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). This function differs from the original `numpy.isinf <https://docs.scipy.org/doc/numpy/reference/generated/numpy.isnan.html>`_ in the following aspects: - Does not support complex number for now - Input type does not support Python native iterables(list, tuple, ...). - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output. - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output. 
- ``out`` param does not support scalar input case. Examples -------- >>> np.isnan(np.nan) True >>> np.isnan(np.inf) False >>> np.isnan(np.array([np.log(-1.),1.,np.log(0)])) array([ True, False, False]) """ return _unary_func_helper(x, _npi.isnan, _np.isnan, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def isinf(x, out=None, **kwargs): """ Test element-wise for positive or negative infinity. Parameters ---------- x : ndarray Input array. out : ndarray or None, optional A location into which the result is stored. If provided, it must have the same shape and dtype as input ndarray. If not provided or `None`, a freshly-allocated array is returned. Returns ------- y : ndarray or bool True where x is positive or negative infinity, false otherwise. This is a scalar if x is a scalar. Notes ----- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). This means that Not a Number is not equivalent to infinity. This function differs from the original `numpy.isnan <https://docs.scipy.org/doc/numpy/reference/generated/numpy.isnan.html>`_ in the following aspects: - Does not support complex number for now - Input type does not support Python native iterables(list, tuple, ...). - ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be the same as the expected output. - ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the same as the expected output. - ``out`` param does not support scalar input case. Examples -------- >>> np.isinf(np.inf) True >>> np.isinf(np.nan) False >>> np.isinf(np.array([np.inf, -np.inf, 1.0, np.nan])) array([ True, True, False, False]) >>> x = np.array([-np.inf, 0., np.inf]) >>> y = np.array([True, True, True], dtype=np.bool_) >>> np.isinf(x, y) array([ True, False, True]) >>> y array([ True, False, True]) """ return _unary_func_helper(x, _npi.isinf, _np.isinf, out=out, **kwargs) @wrap_np_unary_func def isposinf(x, out=None, **kwargs): """ Test element-wise for positive infinity, return result as bool array. Parameters ---------- x : ndarray Input array. out : ndarray or None, optional A location into which the result is stored. If provided, it must have the same shape and dtype as input ndarray. If not provided or `None`, a freshly-allocated array is returned. Returns ------- y : ndarray or bool True where x is positive infinity, false otherwise. This is a scalar if x is a scalar. Notes ----- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). This means that Not a Number is not equivalent to infinity. Examples -------- >>> np.isposinf(np.inf) True >>> np.isposinf(-np.inf) False >>> np.isposinf(np.nan) False >>> np.isposinf(np.array([-np.inf, 0., np.inf])) array([False, False, True]) >>> x = np.array([-np.inf, 0., np.inf]) >>> y = np.array([True, True, True], dtype=np.bool) >>> np.isposinf(x, y) array([False, False, True]) >>> y array([False, False, True]) """ return _unary_func_helper(x, _npi.isposinf, _np.isposinf, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def isneginf(x, out=None, **kwargs): """ Test element-wise for negative infinity, return result as bool array. Parameters ---------- x : ndarray Input array. out : ndarray or None, optional A location into which the result is stored. If provided, it must have the same shape and dtype as input ndarray. If not provided or `None`, a freshly-allocated array is returned. Returns ------- y : ndarray or bool True where x is negative infinity, false otherwise. 
This is a scalar if x is a scalar. Notes ----- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). This means that Not a Number is not equivalent to infinity. Examples -------- >>> np.isneginf(-np.inf) True >>> np.isneginf(np.inf) False >>> np.isneginf(float('-inf')) True >>> np.isneginf(np.array([-np.inf, 0., np.inf])) array([ True, False, False]) >>> x = np.array([-np.inf, 0., np.inf]) >>> y = np.array([True, True, True], dtype=np.bool) >>> np.isneginf(x, y) array([ True, False, False]) >>> y array([ True, False, False]) """ return _unary_func_helper(x, _npi.isneginf, _np.isneginf, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') @wrap_np_unary_func def isfinite(x, out=None, **kwargs): """ Test element-wise for finiteness (not infinity or not Not a Number). Parameters ---------- x : ndarray Input array. out : ndarray or None, optional A location into which the result is stored. If provided, it must have the same shape and dtype as input ndarray. If not provided or `None`, a freshly-allocated array is returned. Returns ------- y : ndarray or bool True where x is negative infinity, false otherwise. This is a scalar if x is a scalar. Notes ----- Not a Number, positive infinity and negative infinity are considered to be non-finite. NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754). This means that Not a Number is not equivalent to infinity. Also that positive infinity is not equivalent to negative infinity. But infinity is equivalent to positive infinity. Errors result if the second argument is also supplied when x is a scalar input, or if first and second arguments have different shapes. Examples -------- >>> np.isfinite(1) True >>> np.isfinite(0) True >>> np.isfinite(np.nan) False >>> np.isfinite(np.inf) False >>> np.isfinite(-np.inf) False >>> np.isfinite(np.array([np.log(-1.),1.,np.log(0)])) array([False, True, False]) >>> x = np.array([-np.inf, 0., np.inf]) >>> y = np.array([True, True, True], dtype=np.bool) >>> np.isfinite(x, y) array([False, True, False]) >>> y array([False, True, False]) """ return _unary_func_helper(x, _npi.isfinite, _np.isfinite, out=out, **kwargs) @set_module('mxnet.ndarray.numpy') def where(condition, x=None, y=None): # pylint: disable=too-many-return-statements """where(condition, [x, y]) Return elements chosen from `x` or `y` depending on `condition`. .. note:: When only `condition` is provided, this function is a shorthand for ``np.asarray(condition).nonzero()``. The rest of this documentation covers only the case where all three arguments are provided. Parameters ---------- condition : ndarray Where True, yield `x`, otherwise yield `y`. x, y : ndarray Values from which to choose. `x`, `y` and `condition` need to be broadcastable to some shape. `x` and `y` must have the same dtype. Returns ------- out : ndarray An array with elements from `x` where `condition` is True, and elements from `y` elsewhere. Notes ----- If all the arrays are 1-D, `where` is equivalent to:: [xv if c else yv for c, xv, yv in zip(condition, x, y)] This function differs from the original `numpy.where <https://docs.scipy.org/doc/numpy/reference/generated/numpy.where.html>`_ in the following way(s): - If `condition` is a scalar, this operator returns x or y directly without broadcasting. - If `condition` is ndarray, while both `x` and `y` are scalars, the output dtype will be `float32`. 
Examples -------- >>> a = np.arange(10) >>> a array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.]) >>> np.where(a < 5, a, 10*a) array([ 0., 1., 2., 3., 4., 50., 60., 70., 80., 90.]) This can be used on multidimensional arrays too: >>> cond = np.array([[True, False], [True, True]]) >>> x = np.array([[1, 2], [3, 4]]) >>> y = np.array([[9, 8], [7, 6]]) >>> np.where(cond, x, y) array([[1., 8.], [3., 4.]]) The shapes of x, y, and the condition are broadcast together: >>> x, y = onp.ogrid[:3, :4] >>> x = np.array(x) >>> y = np.array(y) >>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast array([[10, 0, 0, 0], [10, 11, 1, 1], [10, 11, 12, 2]], dtype=int64) >>> a = np.array([[0, 1, 2], ... [0, 2, 4], ... [0, 3, 6]]) >>> np.where(a < 4, a, -1) # -1 is broadcast array([[ 0., 1., 2.], [ 0., 2., -1.], [ 0., 3., -1.]]) """ if x is None and y is None: return nonzero(condition) else: if isinstance(condition, numeric_types): if condition != 0: return x else: return y else: if isinstance(x, numeric_types) and isinstance(y, numeric_types): return _npi.where_scalar2(condition, float(x), float(y), out=None) elif isinstance(x, NDArray) and isinstance(y, NDArray): return _npi.where(condition, x, y, out=None) elif isinstance(y, NDArray): return _npi.where_lscalar(condition, y, float(x), out=None) elif isinstance(x, NDArray): return _npi.where_rscalar(condition, x, float(y), out=None) else: raise TypeError('type {0} and {1} not supported'.format(str(type(x)), str(type(y)))) @set_module('mxnet.ndarray.numpy') def polyval(p, x): """ Evaluate a polynomial at specific values. If p is of length N, this function returns the value: p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1] If x is a sequence, then p(x) is returned for each element of x. If x is another polynomial then the composite polynomial p(x(t)) is returned. Parameters ---------- p : ndarray 1D array of polynomial coefficients (including coefficients equal to zero) from highest degree to the constant term. x : ndarray An array of numbers, at which to evaluate p. Returns ------- values : ndarray Result array of polynomials Notes ----- This function differs from the original `numpy.polyval <https://numpy.org/devdocs/reference/generated/numpy.polyval.html>`_ in the following way(s): - Does not support poly1d. - X should be ndarray type even if it contains only one element. Examples -------- >>> p = np.array([3, 0, 1]) array([3., 0., 1.]) >>> x = np.array([5]) array([5.]) >>> np.polyval(p, x) # 3 * 5**2 + 0 * 5**1 + 1 array([76.]) >>> x = np.array([5, 4]) array([5., 4.]) >>> np.polyval(p, x) array([76., 49.]) """ from ...numpy import ndarray if isinstance(p, ndarray) and isinstance(x, ndarray): return _npi.polyval(p, x) elif not isinstance(p, ndarray) and not isinstance(x, ndarray): return _np.polyval(p, x) else: raise TypeError('type not supported') @set_module('mxnet.ndarray.numpy') def bincount(x, weights=None, minlength=0): """ Count number of occurrences of each value in array of non-negative ints. Parameters ---------- x : ndarray input array, 1 dimension, nonnegative ints. weights: ndarray input weigths same shape as x. (Optional) minlength: int A minimum number of bins for the output. (Optional) Returns -------- out : ndarray the result of binning the input array. The length of out is equal to amax(x)+1. Raises -------- Value Error If the input is not 1-dimensional, or contains elements with negative values, or if minlength is negative TypeError If the type of the input is float or complex. 
Examples -------- >>> np.bincount(np.arange(5)) array([1, 1, 1, 1, 1]) >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7])) array([1, 3, 1, 1, 0, 0, 0, 1]) >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23]) >>> np.bincount(x).size == np.amax(x)+1 True >>> np.bincount(np.arange(5, dtype=float)) Traceback (most recent call last): File "<stdin>", line 1, in <module> TypeError: array cannot be safely cast to required type >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights >>> x = np.array([0, 1, 1, 2, 2, 2]) >>> np.bincount(x, weights=w) array([ 0.3, 0.7, 1.1]) """ if not isinstance(x, NDArray): raise TypeError("Input data should be NDarray") if minlength < 0: raise ValueError("Minlength value should greater than 0") if weights is None: return _npi.bincount(x, minlength=minlength, has_weights=False) return _npi.bincount(x, weights=weights, minlength=minlength, has_weights=True) @set_module('mxnet.ndarray.numpy') def pad(x, pad_width, mode='constant', **kwargs): # pylint: disable=too-many-arguments """ Pad an array. Parameters ---------- array : array_like of rank N The array to pad. pad_width : {sequence, array_like, int} Number of values padded to the edges of each axis. ((before_1, after_1), ... (before_N, after_N)) unique pad widths for each axis. ((before, after),) yields same before and after pad for each axis. (pad,) or int is a shortcut for before = after = pad width for all axes. mode : str or function, optional One of the following string values or a user supplied function. 'constant' (default) Pads with a constant value. 'edge' Pads with the edge values of array. 'linear_ramp' not supported yet 'maximum' Pads with the maximum value of all of the vector along each axis. 'mean' not supported yet 'median' not supported yet 'minimum' Pads with the minimum value of all of the vector along each axis. 'reflect' Pads with the reflection of the vector mirrored on the first and last values of the vector along each axis. 'symmetric' Pads with the reflection of the vector mirrored along the edge of the array. 'wrap' not supported yet. 'empty' not supported yet. <function> not supported yet. stat_length : not supported yet constant_values : scalar, optional Used in 'constant'. The values to set the padded values for each axis. Default is 0. end_values : not supported yet reflect_type : {'even', 'odd'}, optional only support even now Returns ------- pad : ndarray Padded array of rank equal to `array` with shape increased according to `pad_width`. 
""" # pylint: disable = too-many-return-statements, inconsistent-return-statements if not _np.asarray(pad_width).dtype.kind == 'i': raise TypeError('`pad_width` must be of integral type.') if not isinstance(pad_width, tuple): raise TypeError("`pad_width` must be tuple.") if mode == "linear_ramp": raise ValueError("mode {'linear_ramp'} is not supported.") if mode == "wrap": raise ValueError("mode {'wrap'} is not supported.") if mode == "median": raise ValueError("mode {'median'} is not supported.") if mode == "mean": raise ValueError("mode {'mean'} is not supported.") if mode == "empty": raise ValueError("mode {'empty'} is not supported.") if callable(mode): raise ValueError("mode {'<function>'} is not supported.") allowedkwargs = { 'constant': ['constant_values'], 'edge': [], 'linear_ramp': ['end_values'], 'maximum': ['stat_length'], 'mean': ['stat_length'], 'median': ['stat_length'], 'minimum': ['stat_length'], 'reflect': ['reflect_type'], 'symmetric': ['reflect_type'], 'wrap': [], } if isinstance(mode, _np.compat.basestring): # Make sure have allowed kwargs appropriate for mode for key in kwargs: if key not in allowedkwargs[mode]: raise ValueError('%s keyword not in allowed keywords %s' %(key, allowedkwargs[mode])) unsupported_kwargs = set(kwargs) - set(allowedkwargs[mode]) if unsupported_kwargs: raise ValueError("unsupported keyword arguments for mode '{}': {}" .format(mode, unsupported_kwargs)) if mode == "constant": values = kwargs.get("constant_values", 0) if isinstance(values, tuple): raise TypeError("unsupported constant_values type: {'tuple'}.") _npi.pad(x, pad_width, mode='constant', constant_value=values) elif mode == "symmetric": values = kwargs.get("reflect_type", "even") if values != "even" and values is not None: raise ValueError("unsupported reflect_type '{}'".format(values)) return _npi.pad(x, pad_width, mode='symmetric', reflect_type="even") elif mode == "edge": return _npi.pad(x, pad_width, mode='edge') elif mode == "reflect": values = kwargs.get("reflect_type", "even") if values != "even" and values is not None: raise ValueError("unsupported reflect_type '{}'".format(values)) return _npi.pad(x, pad_width, mode='reflect', reflect_type="even") elif mode == "maximum": values = kwargs.get("stat_length", None) if values is not None: raise ValueError("unsupported stat_length '{}'".format(values)) return _npi.pad(x, pad_width, mode='maximum') elif mode == "minimum": values = kwargs.get("stat_length", None) if values is not None: raise ValueError("unsupported stat_length '{}'".format(values)) return _npi.pad(x, pad_width, mode='minimum') return _npi.pad(x, pad_width, mode='constant', constant_value=0)
apache-2.0
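The operators documented in the entry above (nan_to_num, the isnan/isinf/isfinite family, where) follow the NumPy-style signatures shown in their docstrings. A minimal sketch of how a few of them combine, assuming an MXNet build that ships the NumPy-compatible interface (mxnet.numpy / mxnet.npx); the array values are arbitrary:

from mxnet import np, npx

npx.set_np()  # enable NumPy-compatible array semantics

x = np.array([np.inf, -np.inf, np.nan, -128.0, 128.0])

print(np.isnan(x))     # [False False  True False False]
print(np.isinf(x))     # [ True  True False False False]
print(np.isfinite(x))  # [False False False  True  True]

# Replace non-finite entries with explicit sentinels (see nan_to_num above).
cleaned = np.nan_to_num(x, nan=0.0, posinf=1e6, neginf=-1e6)

# Element-wise selection between two same-dtype arrays (see where above).
fallback = np.zeros((5,))
y = np.where(np.isfinite(x), x, fallback)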
ptitjano/bokeh
examples/compat/mpl_contour.py
7
1028
# demo inspired by: http://matplotlib.org/examples/pylab_examples/contour_demo.html

from bokeh import mpl
from bokeh.plotting import output_file, show

import matplotlib
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import numpy as np

matplotlib.rcParams['xtick.direction'] = 'out'
matplotlib.rcParams['ytick.direction'] = 'out'

delta = 0.025
x = np.arange(-3.0, 3.0, delta)
y = np.arange(-2.0, 2.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)

# difference of Gaussians
Z = 10.0 * (Z2 - Z1)

# Create a simple contour plot with labels using default colors. The
# inline argument to clabel will control whether the labels are drawn
# over the line segments of the contour, removing the lines beneath
# the label
plt.figure()
CS = plt.contour(X, Y, Z)
plt.clabel(CS, inline=1, fontsize=10)
plt.title('Simplest default with labels')

output_file("mpl_contour.html", title="mpl_contour.py example")

show(mpl.to_bokeh())
bsd-3-clause
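matplotlib.mlab.bivariate_normal, used in the example above, was removed from later matplotlib releases. If the example is run against a newer matplotlib, a small NumPy stand-in with the same call signature can be dropped in; this is a sketch, not part of the original example:

import numpy as np

def bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0, mux=0.0, muy=0.0, sigmaxy=0.0):
    """Bivariate Gaussian density evaluated on the coordinate grids X and Y."""
    Xmu = X - mux
    Ymu = Y - muy
    rho = sigmaxy / (sigmax * sigmay)
    z = (Xmu**2 / sigmax**2 + Ymu**2 / sigmay**2
         - 2 * rho * Xmu * Ymu / (sigmax * sigmay))
    denom = 2 * np.pi * sigmax * sigmay * np.sqrt(1 - rho**2)
    return np.exp(-z / (2 * (1 - rho**2))) / denom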
bd-j/magellanic
magellanic/sfhs/prediction_scripts/predicted_total.py
1
5894
import sys, pickle, copy import numpy as np import matplotlib.pyplot as pl import astropy.io.fits as pyfits import magellanic.regionsed as rsed import magellanic.mcutils as utils from magellanic.lfutils import * try: import fsps from sedpy import observate except ImportError: #you wont be able to predict the integrated spectrum or magnitudes # filterlist must be set to None in calls to total_cloud_data sps = None wlengths = {'2': '{4.5\mu m}', '4': '{8\mu m}'} dmod = {'smc':18.9, 'lmc':18.5} cloud_info = {} cloud_info['smc'] = [utils.smc_regions(), 20, 23, [7, 13, 16], [3,5,6]] cloud_info['lmc'] = [utils.lmc_regions(), 48, 38, [7, 11, 13, 16], [3,4,5,6]] def total_cloud_data(cloud, filternames = None, basti=False, lfstring=None, agb_dust=1.0, one_metal=None): ######### # SPS ######### # if filternames is not None: sps = fsps.StellarPopulation(add_agb_dust_model=True) sps.params['sfh'] = 0 sps.params['agb_dust'] = agb_dust dust = ['nodust', 'agbdust'] sps.params['imf_type'] = 0.0 #salpeter filterlist = observate.load_filters(filternames) else: filterlist = None ########## # SFHs ########## regions, nx, ny, zlist, zlist_basti = cloud_info[cloud.lower()] if basti: zlist = basti_zlist if 'header' in regions.keys(): rheader = regions.pop('header') #dump the header info from the reg. dict total_sfhs = None for n, dat in regions.iteritems(): total_sfhs = sum_sfhs(total_sfhs, dat['sfhs']) total_zmet = dat['zmet'] #collapse SFHs to one metallicity if one_metal is not None: ts = None for sfh in total_sfhs: ts = sum_sfhs(ts, sfh) total_sfh = ts zlist = [zlist[one_metal]] total_zmet = [total_zmet[one_metal]] ############# # LFs ############ bins = rsed.lfbins if lfstring is not None: # these are stored as a list of different metallicities lffiles = [lfstring.format(z) for z in zlist] lf_base = [read_villaume_lfs(f) for f in lffiles] #get LFs broken out by age and metallicity as well as the total lfs_zt, lf, logages = rsed.one_region_lfs(copy.deepcopy(total_sfhs), lf_base) else: lfs_zt, lf, logages = None, None, None ########### # SED ############ if filterlist is not None: spec, wave, mass = rsed.one_region_sed(copy.deepcopy(total_sfhs), total_zmet, sps) mags = observate.getSED(wave, spec*rsed.to_cgs, filterlist=filterlist) maggies = 10**(-0.4 * np.atleast_1d(mags)) else: maggies, mass = None, None ############# # Write output ############ total_values = {} total_values['agb_clf'] = lf total_values['agb_clfs_zt'] = lfs_zt total_values['clf_mags'] = bins total_values['logages'] = logages total_values['sed_ab_maggies'] = maggies total_values['sed_filters'] = filternames total_values['lffile'] = lfstring total_values['mstar'] = mass total_values['zlist'] = zlist return total_values, total_sfhs def sum_sfhs(sfhs1, sfhs2): """ Accumulate individual sets of SFHs into a total set of SFHs. This assumes that the individual SFH sets all have the same number and order of metallicities, and the same time binning. 
""" if sfhs1 is None: return copy.deepcopy(sfhs2) elif sfhs2 is None: return copy.deepcopy(sfhs1) else: out = copy.deepcopy(sfhs1) for s1, s2 in zip(out, sfhs2): s1['sfr'] += s2['sfr'] return out if __name__ == '__main__': filters = ['galex_NUV', 'spitzer_irac_ch2', 'spitzer_irac_ch4', 'spitzer_mips_24'] #filters = None ldir, cdir = 'lf_data/', 'composite_lfs/' outst = '{0}_n2teffcut.p' # total_cloud_data will loop over the appropriate (for the # isochrone) metallicities for a given lfst filename template lfst = '{0}z{{0:02.0f}}_tau{1:2.1f}_vega_irac{2}_n2_teffcut_lf.txt' basti = False agb_dust=1.0 agebins = np.arange(9)*0.3 + 7.4 #loop over clouds (and bands and agb_dust) to produce clfs for cloud in ['smc']: rdir = '{0}cclf_{1}_'.format(cdir, cloud) for band in ['2','4']: lfstring = lfst.format(ldir, agb_dust, band) dat, sfhs = total_cloud_data(cloud, filternames=filters, agb_dust=agb_dust, lfstring=lfstring, basti=basti) agebins = sfhs[0]['t1'][3:-1] outfile = lfstring.replace(ldir, rdir).replace('z{0:02.0f}_','').replace('.txt','.dat') write_clf_many([dat['clf_mags'], dat['agb_clf']], outfile, lfstring) #fig, ax = plot_weighted_lfs(dat, agebins = agebins, dm=dmod[cloud]) #fig.suptitle('{0} @ IRAC{1}'.format(cloud.upper(), band)) #fig.savefig('byage_clfs/{0}_clfs_by_age_and_Z_irac{1}'.format(cloud, band)) #pl.close(fig) colheads = (len(agebins)-1) * ' N<m(t={})' colheads = colheads.format(*(agebins[:-1]+agebins[1:])/2.) tbin_lfs = np.array([rebin_lfs(lf, ages, agebins) for lf, ages in zip(dat['agb_clfs_zt'], dat['logages'])]) write_clf_many([dat['clf_mags'], tbin_lfs.sum(axis=0)], outfile.replace(cdir,'byage_clfs/'), lfstring, colheads=colheads) pl.figure() for s, z in zip(sfhs, dat['zlist']): pl.step(s['t1'], s['sfr'], where='post', label='zind={0}'.format(z), linewidth=3) pl.legend(loc=0) pl.title(cloud.upper()) print(cloud, dat['mstar'])
gpl-2.0
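For reference, a minimal invocation of total_cloud_data from the script above, with photometry disabled (filternames=None) so that fsps and sedpy are not required. The luminosity-function template path is hypothetical and must point at real lf_data files:

# hypothetical LF template path, matching the pattern built in __main__ above
lfstring = 'lf_data/z{0:02.0f}_tau1.0_vega_irac4_n2_teffcut_lf.txt'

dat, sfhs = total_cloud_data('smc', filternames=None,
                             lfstring=lfstring, agb_dust=1.0)

# one SFH set per metallicity, plus the cumulative LF and its magnitude bins
print(len(sfhs), dat['zlist'])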
bgris/ODL_bgris
lib/python3.5/site-packages/odl/util/graphics.py
1
15419
# Copyright 2014-2016 The ODL development group # # This file is part of ODL. # # ODL is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ODL is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with ODL. If not, see <http://www.gnu.org/licenses/>. """Functions for graphical output.""" # Imports for common Python 2/3 codebase from __future__ import print_function, division, absolute_import from future import standard_library standard_library.install_aliases() import numpy as np from odl.util.testutils import run_doctests from odl.util.utility import is_real_dtype __all__ = ('show_discrete_data',) def _safe_minmax(values): """Calculate min and max of array with guards for nan and inf.""" # Nan and inf guarded min and max minval = np.min(values[np.isfinite(values)]) maxval = np.max(values[np.isfinite(values)]) return minval, maxval def _colorbar_ticks(minval, maxval): """Return the ticks (values show) in the colorbar.""" return [minval, (maxval + minval) / 2., maxval] def _digits(minval, maxval): """Digits needed to comforatbly display values in [minval, maxval]""" if minval == maxval: return 3 else: return min(10, max(2, int(1 + abs(np.log10(maxval - minval))))) def _colorbar_format(minval, maxval): """Return the format string for the colorbar.""" return '%.{}f'.format(_digits(minval, maxval)) def _axes_info(grid, npoints=5): result = [] min_pt = grid.min() max_pt = grid.max() for axis in range(grid.ndim): xmin = min_pt[axis] xmax = max_pt[axis] points = np.linspace(xmin, xmax, npoints) indices = np.linspace(0, grid.shape[axis] - 1, npoints, dtype=int) tick_values = grid.coord_vectors[axis][indices] # Do not use corner point in case of a partition, use outer corner tick_values[[0, -1]] = xmin, xmax format_str = '{:.' + str(_digits(xmin, xmax)) + 'f}' tick_labels = [format_str.format(f) for f in tick_values] result += [(points, tick_labels)] return result def show_discrete_data(values, grid, title=None, method='', force_show=False, fig=None, **kwargs): """Display a discrete 1d or 2d function. Parameters ---------- values : `numpy.ndarray` The values to visualize grid : `TensorGrid` or `RectPartition` Grid of the values title : string, optional Set the title of the figure method : string, optional 1d methods: 'plot' : graph plot 'scatter' : scattered 2d points (2nd axis <-> value) 2d methods: 'imshow' : image plot with coloring according to value, including a colorbar. 'scatter' : cloud of scattered 3d points (3rd axis <-> value) 'wireframe', 'plot_wireframe' : surface plot force_show : bool, optional Whether the plot should be forced to be shown now or deferred until later. Note that some backends always displays the plot, regardless of this value. fig : `matplotlib.figure.Figure`, optional The figure to show in. Expected to be of same "style", as the figure given by this function. The most common usecase is that fig is the return value from an earlier call to this function. Default: New figure interp : {'nearest', 'linear'}, optional Interpolation method to use. 
Default: 'nearest' axis_labels : string, optional Axis labels, default: ['x', 'y'] update_in_place : bool, optional Update the content of the figure in place. Intended for faster real time plotting, typically ~5 times faster. This is only performed for ``method == 'imshow'`` with real data and ``fig != None``. Otherwise this parameter is treated as False. Default: False axis_fontsize : int, optional Fontsize for the axes. Default: 16 kwargs : {'figsize', 'saveto', ...} Extra keyword arguments passed on to display method See the Matplotlib functions for documentation of extra options. Returns ------- fig : `matplotlib.figure.Figure` The resulting figure. It is also shown to the user. See Also -------- matplotlib.pyplot.plot : Show graph plot matplotlib.pyplot.imshow : Show data as image matplotlib.pyplot.scatter : Show scattered 3d points """ # Importing pyplot takes ~2 sec, only import when needed. import matplotlib.pyplot as plt args_re = [] args_im = [] dsp_kwargs = {} sub_kwargs = {} arrange_subplots = (121, 122) # horzontal arrangement # Create axis labels which remember their original meaning axis_labels = kwargs.pop('axis_labels', ['x', 'y']) values_are_complex = not is_real_dtype(values.dtype) figsize = kwargs.pop('figsize', None) saveto = kwargs.pop('saveto', None) interp = kwargs.pop('interp', 'nearest') axis_fontsize = kwargs.pop('axis_fontsize', 16) # Check if we should and can update the plot in place update_in_place = kwargs.pop('update_in_place', False) if (update_in_place and (fig is None or values_are_complex or values.ndim != 2 or (values.ndim == 2 and method not in ('', 'imshow')))): update_in_place = False if values.ndim == 1: # TODO: maybe a plotter class would be better if not method: if interp == 'nearest': method = 'step' dsp_kwargs['where'] = 'mid' elif interp == 'linear': method = 'plot' else: method = 'plot' if method == 'plot' or method == 'step' or method == 'scatter': args_re += [grid.coord_vectors[0], values.real] args_im += [grid.coord_vectors[0], values.imag] else: raise ValueError('`method` {!r} not supported' ''.format(method)) elif values.ndim == 2: if not method: method = 'imshow' if method == 'imshow': args_re = [np.rot90(values.real)] args_im = [np.rot90(values.imag)] if values_are_complex else [] extent = [grid.min()[0], grid.max()[0], grid.min()[1], grid.max()[1]] if interp == 'nearest': interpolation = 'nearest' elif interp == 'linear': interpolation = 'bilinear' else: interpolation = 'none' dsp_kwargs.update({'interpolation': interpolation, 'cmap': 'bone', 'extent': extent, 'aspect': 'auto'}) elif method == 'scatter': pts = grid.points() args_re = [pts[:, 0], pts[:, 1], values.ravel().real] args_im = ([pts[:, 0], pts[:, 1], values.ravel().imag] if values_are_complex else []) sub_kwargs.update({'projection': '3d'}) elif method in ('wireframe', 'plot_wireframe'): method = 'plot_wireframe' x, y = grid.meshgrid args_re = [x, y, np.rot90(values.real)] args_im = ([x, y, np.rot90(values.imag)] if values_are_complex else []) sub_kwargs.update({'projection': '3d'}) else: raise ValueError('`method` {!r} not supported' ''.format(method)) else: raise NotImplementedError('no method for {}d display implemented' ''.format(values.ndim)) # Additional keyword args are passed on to the display method dsp_kwargs.update(**kwargs) if fig is not None: # Reuse figure if given as input if not isinstance(fig, plt.Figure): raise TypeError('`fig` {} not a matplotlib figure'.format(fig)) if not plt.fignum_exists(fig.number): # If figure does not exist, user either closed the 
figure or # is using IPython, in this case we need a new figure. fig = plt.figure(figsize=figsize) updatefig = False else: # Set current figure to given input fig = plt.figure(fig.number) updatefig = True if values.ndim > 1 and not update_in_place: # If the figure is larger than 1d, we can clear it since we # dont reuse anything. Keeping it causes performance problems. fig.clf() else: fig = plt.figure(figsize=figsize) updatefig = False if values_are_complex: # Real if len(fig.axes) == 0: # Create new axis if needed sub_re = plt.subplot(arrange_subplots[0], **sub_kwargs) sub_re.set_title('Real part') sub_re.set_xlabel(axis_labels[0], fontsize=axis_fontsize) if values.ndim == 2: sub_re.set_ylabel(axis_labels[1], fontsize=axis_fontsize) else: sub_re.set_ylabel('value') else: sub_re = fig.axes[0] display_re = getattr(sub_re, method) csub_re = display_re(*args_re, **dsp_kwargs) # Axis ticks if method == 'imshow' and not grid.is_uniform: (xpts, xlabels), (ypts, ylabels) = _axes_info(grid) plt.xticks(xpts, xlabels) plt.yticks(ypts, ylabels) if method == 'imshow' and len(fig.axes) < 2: # Create colorbar if none seems to exist # Use clim from kwargs if given if 'clim' not in kwargs: minval_re, maxval_re = _safe_minmax(values.real) else: minval_re, maxval_re = kwargs['clim'] ticks_re = _colorbar_ticks(minval_re, maxval_re) format_re = _colorbar_format(minval_re, maxval_re) plt.colorbar(csub_re, orientation='horizontal', ticks=ticks_re, format=format_re) # Imaginary if len(fig.axes) < 3: sub_im = plt.subplot(arrange_subplots[1], **sub_kwargs) sub_im.set_title('Imaginary part') sub_im.set_xlabel(axis_labels[0], fontsize=axis_fontsize) if values.ndim == 2: sub_im.set_ylabel(axis_labels[1], fontsize=axis_fontsize) else: sub_im.set_ylabel('value') else: sub_im = fig.axes[2] display_im = getattr(sub_im, method) csub_im = display_im(*args_im, **dsp_kwargs) # Axis ticks if method == 'imshow' and not grid.is_uniform: (xpts, xlabels), (ypts, ylabels) = _axes_info(grid) plt.xticks(xpts, xlabels) plt.yticks(ypts, ylabels) if method == 'imshow' and len(fig.axes) < 4: # Create colorbar if none seems to exist # Use clim from kwargs if given if 'clim' not in kwargs: minval_im, maxval_im = _safe_minmax(values.imag) else: minval_im, maxval_im = kwargs['clim'] ticks_im = _colorbar_ticks(minval_im, maxval_im) format_im = _colorbar_format(minval_im, maxval_im) plt.colorbar(csub_im, orientation='horizontal', ticks=ticks_im, format=format_im) else: if len(fig.axes) == 0: # Create new axis object if needed sub = plt.subplot(111, **sub_kwargs) sub.set_xlabel(axis_labels[0], fontsize=axis_fontsize) if values.ndim == 2: sub.set_ylabel(axis_labels[1], fontsize=axis_fontsize) else: sub.set_ylabel('value') try: # For 3d plots sub.set_zlabel('z') except AttributeError: pass else: sub = fig.axes[0] if update_in_place: import matplotlib as mpl imgs = [obj for obj in sub.get_children() if isinstance(obj, mpl.image.AxesImage)] if len(imgs) > 0 and updatefig: imgs[0].set_data(args_re[0]) csub = imgs[0] # Update min-max if 'clim' not in kwargs: minval, maxval = _safe_minmax(values) else: minval, maxval = kwargs['clim'] csub.set_clim(minval, maxval) else: display = getattr(sub, method) csub = display(*args_re, **dsp_kwargs) else: display = getattr(sub, method) csub = display(*args_re, **dsp_kwargs) # Axis ticks if method == 'imshow' and not grid.is_uniform: (xpts, xlabels), (ypts, ylabels) = _axes_info(grid) plt.xticks(xpts, xlabels) plt.yticks(ypts, ylabels) if method == 'imshow': # Add colorbar # Use clim from kwargs if given if 
'clim' not in kwargs: minval, maxval = _safe_minmax(values) else: minval, maxval = kwargs['clim'] ticks = _colorbar_ticks(minval, maxval) format = _colorbar_format(minval, maxval) if len(fig.axes) < 2: # Create colorbar if none seems to exist plt.colorbar(mappable=csub, ticks=ticks, format=format) elif update_in_place: # If it exists and we should update it csub.colorbar.set_clim(minval, maxval) csub.colorbar.set_ticks(ticks) csub.colorbar.set_ticklabels([format % tick for tick in ticks]) csub.colorbar.draw_all() # Fixes overlapping stuff at the expense of potentially squashed subplots if not update_in_place: fig.tight_layout() if title is not None: if not values_are_complex: # Do not overwrite title for complex values plt.title(title) fig.canvas.manager.set_window_title(title) if updatefig or plt.isinteractive(): # If we are running in interactive mode, we can always show the fig # This causes an artifact, where users of `CallbackShow` without # interactive mode only shows the figure after the second iteration. plt.show(block=False) if not update_in_place: plt.draw() plt.pause(0.0001) else: try: sub.draw_artist(csub) fig.canvas.blit(fig.bbox) fig.canvas.update() fig.canvas.flush_events() except AttributeError: plt.draw() plt.pause(0.0001) if force_show: plt.show() if saveto is not None: fig.savefig(saveto) return fig if __name__ == '__main__': run_doctests()
gpl-3.0
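The small colorbar helpers defined above can be exercised on their own; a quick sanity check with arbitrary values, assuming they are imported from odl.util.graphics:

import numpy as np
from odl.util.graphics import _safe_minmax, _colorbar_ticks, _colorbar_format

values = np.array([0.1, 2.5, np.nan, np.inf, -3.0])
minval, maxval = _safe_minmax(values)    # nan/inf ignored -> (-3.0, 2.5)
print(_colorbar_ticks(minval, maxval))   # [-3.0, -0.25, 2.5]
print(_colorbar_format(minval, maxval))  # '%.2f'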
emoronayuso/beeton
asterisk-bee/asteriskbee/api_status/scripts_graficas/recoge_marcas_graficas.py
1
2307
#!/usr/bin/python

import matplotlib.pyplot as plt
import numpy as np
#import calendar
from datetime import datetime

from django.conf import settings
settings.configure()

import os

# for the connection to the beeton (asteriskbee) database
import sqlite3 as dbapi

## Application directory ###
# STATIC_ROOT = '/var/www/asterisk-bee/asteriskbee/'
# directorio = settings.STATIC_ROOT+"api_status/"
directorio = "/var/www/asterisk-bee/asteriskbee/api_status/"

## Maximum number of rows per graph
num_cpu_dia = 20


def recoge_marcas():
    # Connection to the statistics database
    bbdd = dbapi.connect(directorio+"bbdd/estadisticas.db")
    cursor = bbdd.cursor()

    os.system("ps -e -o pcpu,cpu,nice,state,cputime,args --sort pcpu | sed '/^ 0.0 /d' > "+directorio+"scripts_graficas/temp/temp_cpu_dia; cat "+directorio+"scripts_graficas/temp/temp_cpu_dia | sed 's/^[ \t]*//;s/[ \t]*$//' | grep -v 'recoge_marcas_graficas.py' | cut -d ' ' -f 1 > "+directorio+"scripts_graficas/temp/temp_cpu_dia2")

    total = 0.0
    f = open(directorio+'scripts_graficas/temp/temp_cpu_dia2', 'r')
    ## Read the first line to drop the header
    linea = f.readline()
    while True:
        linea = f.readline()
        if not linea:
            break
        # The CPU usage of the script that collects the marks is already filtered out
        else:
            total = total + float(linea)
    f.close()

    res = total
    # print str(res)

    # Build the query ordered by date
    con_ordenada = """select * from api_status_marcas_graficas where tipo='cpu_dia' order by fecha_hora;"""
    cursor.execute(con_ordenada)
    p = cursor.fetchall()

    if len(p) < num_cpu_dia:
        # insert into the database
        insert = "insert into api_status_marcas_graficas (tipo,valor) values ('cpu_dia',?);"
        cursor.execute(insert, (res,))
        bbdd.commit()
    else:
        # Order by date, replace the oldest entry with the new value
        # strftime('%d-%m-%Y %H:%M',calldate)
        hora_actual = datetime.now()
        con_update = " update api_status_marcas_graficas set fecha_hora=datetime(?),valor=? where id=?; "
        # print "Antes del update, hora_actual->"+str(hora_actual)+"valor->"+str(res)+ " id->"+str(p[0][0])
        cursor.execute(con_update, (hora_actual, res, p[0][0]))
        bbdd.commit()

    ## Close the connection to the database
    cursor.close()
    bbdd.close()


if __name__ == "__main__":
    recoge_marcas()
gpl-3.0
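The CPU total that the script above assembles through temporary files and a sed/grep/cut pipeline can also be collected directly with subprocess; a sketch, assuming a ps that accepts the same options used above:

import subprocess

def total_cpu_percent():
    # "pcpu=" suppresses the header, so every output token is a single %CPU value
    out = subprocess.check_output(["ps", "-e", "-o", "pcpu="],
                                  universal_newlines=True)
    return sum(float(v) for v in out.split() if float(v) > 0.0)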
crichardson17/starburst_atlas
Low_resolution_sims/Dusty_LowRes/Padova_inst/padova_inst_0/fullgrid/UV1.py
31
9315
import csv import matplotlib.pyplot as plt from numpy import * import scipy.interpolate import math from pylab import * from matplotlib.ticker import MultipleLocator, FormatStrFormatter import matplotlib.patches as patches from matplotlib.path import Path import os # ------------------------------------------------------------------------------------------------------ #inputs for file in os.listdir('.'): if file.endswith("1.grd"): gridfile1 = file for file in os.listdir('.'): if file.endswith("2.grd"): gridfile2 = file for file in os.listdir('.'): if file.endswith("3.grd"): gridfile3 = file # ------------------------ for file in os.listdir('.'): if file.endswith("1.txt"): Elines1 = file for file in os.listdir('.'): if file.endswith("2.txt"): Elines2 = file for file in os.listdir('.'): if file.endswith("3.txt"): Elines3 = file # ------------------------------------------------------------------------------------------------------ #Patches data #for the Kewley and Levesque data verts = [ (1., 7.97712125471966000000), # left, bottom (1., 9.57712125471966000000), # left, top (2., 10.57712125471970000000), # right, top (2., 8.97712125471966000000), # right, bottom (0., 0.), # ignored ] codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO, Path.CLOSEPOLY, ] path = Path(verts, codes) # ------------------------ #for the Kewley 01 data verts2 = [ (2.4, 9.243038049), # left, bottom (2.4, 11.0211893), # left, top (2.6, 11.0211893), # right, top (2.6, 9.243038049), # right, bottom (0, 0.), # ignored ] path = Path(verts, codes) path2 = Path(verts2, codes) # ------------------------- #for the Moy et al data verts3 = [ (1., 6.86712125471966000000), # left, bottom (1., 10.18712125471970000000), # left, top (3., 12.18712125471970000000), # right, top (3., 8.86712125471966000000), # right, bottom (0., 0.), # ignored ] path = Path(verts, codes) path3 = Path(verts3, codes) # ------------------------------------------------------------------------------------------------------ #the routine to add patches for others peoples' data onto our plots. 
def add_patches(ax): patch3 = patches.PathPatch(path3, facecolor='yellow', lw=0) patch2 = patches.PathPatch(path2, facecolor='green', lw=0) patch = patches.PathPatch(path, facecolor='red', lw=0) ax1.add_patch(patch3) ax1.add_patch(patch2) ax1.add_patch(patch) # ------------------------------------------------------------------------------------------------------ #the subplot routine def add_sub_plot(sub_num): numplots = 16 plt.subplot(numplots/4.,4,sub_num) rbf = scipy.interpolate.Rbf(x, y, z[:,sub_num-1], function='linear') zi = rbf(xi, yi) contour = plt.contour(xi,yi,zi, levels, colors='c', linestyles = 'dashed') contour2 = plt.contour(xi,yi,zi, levels2, colors='k', linewidths=1.5) plt.scatter(max_values[line[sub_num-1],2], max_values[line[sub_num-1],3], c ='k',marker = '*') plt.annotate(headers[line[sub_num-1]], xy=(8,11), xytext=(6,8.5), fontsize = 10) plt.annotate(max_values[line[sub_num-1],0], xy= (max_values[line[sub_num-1],2], max_values[line[sub_num-1],3]), xytext = (0, -10), textcoords = 'offset points', ha = 'right', va = 'bottom', fontsize=10) if sub_num == numplots / 2.: print "half the plots are complete" #axis limits yt_min = 8 yt_max = 23 xt_min = 0 xt_max = 12 plt.ylim(yt_min,yt_max) plt.xlim(xt_min,xt_max) plt.yticks(arange(yt_min+1,yt_max,1),fontsize=10) plt.xticks(arange(xt_min+1,xt_max,1), fontsize = 10) if sub_num in [2,3,4,6,7,8,10,11,12,14,15,16]: plt.tick_params(labelleft = 'off') else: plt.tick_params(labelleft = 'on') plt.ylabel('Log ($ \phi _{\mathrm{H}} $)') if sub_num in [1,2,3,4,5,6,7,8,9,10,11,12]: plt.tick_params(labelbottom = 'off') else: plt.tick_params(labelbottom = 'on') plt.xlabel('Log($n _{\mathrm{H}} $)') if sub_num == 1: plt.yticks(arange(yt_min+1,yt_max+1,1),fontsize=10) if sub_num == 13: plt.yticks(arange(yt_min,yt_max,1),fontsize=10) plt.xticks(arange(xt_min,xt_max,1), fontsize = 10) if sub_num == 16 : plt.xticks(arange(xt_min+1,xt_max+1,1), fontsize = 10) # --------------------------------------------------- #this is where the grid information (phi and hdens) is read in and saved to grid. grid1 = []; grid2 = []; grid3 = []; with open(gridfile1, 'rb') as f: csvReader = csv.reader(f,delimiter='\t') for row in csvReader: grid1.append(row); grid1 = asarray(grid1) with open(gridfile2, 'rb') as f: csvReader = csv.reader(f,delimiter='\t') for row in csvReader: grid2.append(row); grid2 = asarray(grid2) with open(gridfile3, 'rb') as f: csvReader = csv.reader(f,delimiter='\t') for row in csvReader: grid3.append(row); grid3 = asarray(grid3) #here is where the data for each line is read in and saved to dataEmissionlines dataEmissionlines1 = []; dataEmissionlines2 = []; dataEmissionlines3 = []; with open(Elines1, 'rb') as f: csvReader = csv.reader(f,delimiter='\t') headers = csvReader.next() for row in csvReader: dataEmissionlines1.append(row); dataEmissionlines1 = asarray(dataEmissionlines1) with open(Elines2, 'rb') as f: csvReader = csv.reader(f,delimiter='\t') headers2 = csvReader.next() for row in csvReader: dataEmissionlines2.append(row); dataEmissionlines2 = asarray(dataEmissionlines2) with open(Elines3, 'rb') as f: csvReader = csv.reader(f,delimiter='\t') headers3 = csvReader.next() for row in csvReader: dataEmissionlines3.append(row); dataEmissionlines3 = asarray(dataEmissionlines3) print "import files complete" # --------------------------------------------------- #for concatenating grid #pull the phi and hdens values from each of the runs. 
exclude header lines grid1new = zeros((len(grid1[:,0])-1,2)) grid1new[:,0] = grid1[1:,6] grid1new[:,1] = grid1[1:,7] grid2new = zeros((len(grid2[:,0])-1,2)) x = array(17.00000) grid2new[:,0] = repeat(x,len(grid2[:,0])-1) grid2new[:,1] = grid2[1:,6] grid3new = zeros((len(grid3[:,0])-1,2)) grid3new[:,0] = grid3[1:,6] grid3new[:,1] = grid3[1:,7] grid = concatenate((grid1new,grid2new,grid3new)) hdens_values = grid[:,1] phi_values = grid[:,0] # --------------------------------------------------- #for concatenating Emission lines data Emissionlines = concatenate((dataEmissionlines1[:,1:],dataEmissionlines2[:,1:],dataEmissionlines3[:,1:])) #for lines headers = headers[1:] concatenated_data = zeros((len(Emissionlines),len(Emissionlines[0]))) max_values = zeros((len(concatenated_data[0]),4)) # --------------------------------------------------- #constructing grid by scaling #select the scaling factor #for 1215 #incident = Emissionlines[1:,4] #for 4860 incident = concatenated_data[:,57] #take the ratio of incident and all the lines and put it all in an array concatenated_data for i in range(len(Emissionlines)): for j in range(len(Emissionlines[0])): if math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) > 0: concatenated_data[i,j] = math.log(4860.*(float(Emissionlines[i,j])/float(Emissionlines[i,57])), 10) else: concatenated_data[i,j] == 0 # for 1215 #for i in range(len(Emissionlines)): # for j in range(len(Emissionlines[0])): # if math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) > 0: # concatenated_data[i,j] = math.log(1215.*(float(Emissionlines[i,j])/float(Emissionlines[i,4])), 10) # else: # concatenated_data[i,j] == 0 # --------------------------------------------------- #find the maxima to plot onto the contour plots for j in range(len(concatenated_data[0])): max_values[j,0] = max(concatenated_data[:,j]) max_values[j,1] = argmax(concatenated_data[:,j], axis = 0) max_values[j,2] = hdens_values[max_values[j,1]] max_values[j,3] = phi_values[max_values[j,1]] #to round off the maxima max_values[:,0] = [ '%.1f' % elem for elem in max_values[:,0] ] print "data arranged" # --------------------------------------------------- #Creating the grid to interpolate with for contours. gridarray = zeros((len(concatenated_data),2)) gridarray[:,0] = hdens_values gridarray[:,1] = phi_values x = gridarray[:,0] y = gridarray[:,1] # --------------------------------------------------- #change desired lines here! line = [0, #977 1, #991 2, #1026 5, #1216 91, #1218 6, #1239 7, #1240 8, #1243 9, #1263 10, #1304 11,#1308 12, #1397 13, #1402 14, #1406 16, #1486 17] #1531 #create z array for this plot z = concatenated_data[:,line[:]] # --------------------------------------------------- # Interpolate print "starting interpolation" xi, yi = linspace(x.min(), x.max(), 10), linspace(y.min(), y.max(), 10) xi, yi = meshgrid(xi, yi) # --------------------------------------------------- print "interpolatation complete; now plotting" #plot plt.subplots_adjust(wspace=0, hspace=0) #remove space between plots levels = arange(10**-1,10, .2) levels2 = arange(10**-2,10**2, 1) plt.suptitle("Dusty UV Lines", fontsize=14) # --------------------------------------------------- for i in range(16): add_sub_plot(i) ax1 = plt.subplot(4,4,1) add_patches(ax1) print "complete" plt.savefig('Dusty_UV_Lines.pdf') plt.clf() print "figure saved"
gpl-2.0
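Each subplot in the script above is a contour of scattered (hdens, phi) samples interpolated with a radial basis function. The same step in isolation, on made-up data:

import numpy as np
import scipy.interpolate

# made-up scattered samples standing in for (log n_H, log phi_H, line ratio)
x = np.random.uniform(0, 12, 50)
y = np.random.uniform(8, 23, 50)
z = np.sin(x) + 0.1 * y

rbf = scipy.interpolate.Rbf(x, y, z, function='linear')
xi, yi = np.meshgrid(np.linspace(0, 12, 10), np.linspace(8, 23, 10))
zi = rbf(xi, yi)   # values on the regular grid that plt.contour receives above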
ywcui1990/nupic
examples/opf/clients/hotgym/prediction/one_gym/nupic_output.py
17
6193
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ Provides two classes with the same signature for writing data out of NuPIC models. (This is a component of the One Hot Gym Prediction Tutorial.) """ import csv from collections import deque from abc import ABCMeta, abstractmethod # Try to import matplotlib, but we don't have to. try: import matplotlib matplotlib.use('TKAgg') import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec from matplotlib.dates import date2num except ImportError: pass WINDOW = 100 class NuPICOutput(object): __metaclass__ = ABCMeta def __init__(self, names, showAnomalyScore=False): self.names = names self.showAnomalyScore = showAnomalyScore @abstractmethod def write(self, timestamps, actualValues, predictedValues, predictionStep=1): pass @abstractmethod def close(self): pass class NuPICFileOutput(NuPICOutput): def __init__(self, *args, **kwargs): super(NuPICFileOutput, self).__init__(*args, **kwargs) self.outputFiles = [] self.outputWriters = [] self.lineCounts = [] headerRow = ['timestamp', 'kw_energy_consumption', 'prediction'] for name in self.names: self.lineCounts.append(0) outputFileName = "%s_out.csv" % name print "Preparing to output %s data to %s" % (name, outputFileName) outputFile = open(outputFileName, "w") self.outputFiles.append(outputFile) outputWriter = csv.writer(outputFile) self.outputWriters.append(outputWriter) outputWriter.writerow(headerRow) def write(self, timestamps, actualValues, predictedValues, predictionStep=1): assert len(timestamps) == len(actualValues) == len(predictedValues) for index in range(len(self.names)): timestamp = timestamps[index] actual = actualValues[index] prediction = predictedValues[index] writer = self.outputWriters[index] if timestamp is not None: outputRow = [timestamp, actual, prediction] writer.writerow(outputRow) self.lineCounts[index] += 1 def close(self): for index, name in enumerate(self.names): self.outputFiles[index].close() print "Done. Wrote %i data lines to %s." % (self.lineCounts[index], name) class NuPICPlotOutput(NuPICOutput): def __init__(self, *args, **kwargs): super(NuPICPlotOutput, self).__init__(*args, **kwargs) # Turn matplotlib interactive mode on. 
plt.ion() self.dates = [] self.convertedDates = [] self.actualValues = [] self.predictedValues = [] self.actualLines = [] self.predictedLines = [] self.linesInitialized = False self.graphs = [] plotCount = len(self.names) plotHeight = max(plotCount * 3, 6) fig = plt.figure(figsize=(14, plotHeight)) gs = gridspec.GridSpec(plotCount, 1) for index in range(len(self.names)): self.graphs.append(fig.add_subplot(gs[index, 0])) plt.title(self.names[index]) plt.ylabel('KW Energy Consumption') plt.xlabel('Date') plt.tight_layout() def initializeLines(self, timestamps): for index in range(len(self.names)): print "initializing %s" % self.names[index] # graph = self.graphs[index] self.dates.append(deque([timestamps[index]] * WINDOW, maxlen=WINDOW)) self.convertedDates.append(deque( [date2num(date) for date in self.dates[index]], maxlen=WINDOW )) self.actualValues.append(deque([0.0] * WINDOW, maxlen=WINDOW)) self.predictedValues.append(deque([0.0] * WINDOW, maxlen=WINDOW)) actualPlot, = self.graphs[index].plot( self.dates[index], self.actualValues[index] ) self.actualLines.append(actualPlot) predictedPlot, = self.graphs[index].plot( self.dates[index], self.predictedValues[index] ) self.predictedLines.append(predictedPlot) self.linesInitialized = True def write(self, timestamps, actualValues, predictedValues, predictionStep=1): assert len(timestamps) == len(actualValues) == len(predictedValues) # We need the first timestamp to initialize the lines at the right X value, # so do that check first. if not self.linesInitialized: self.initializeLines(timestamps) for index in range(len(self.names)): self.dates[index].append(timestamps[index]) self.convertedDates[index].append(date2num(timestamps[index])) self.actualValues[index].append(actualValues[index]) self.predictedValues[index].append(predictedValues[index]) # Update data self.actualLines[index].set_xdata(self.convertedDates[index]) self.actualLines[index].set_ydata(self.actualValues[index]) self.predictedLines[index].set_xdata(self.convertedDates[index]) self.predictedLines[index].set_ydata(self.predictedValues[index]) self.graphs[index].relim() self.graphs[index].autoscale_view(True, True, True) plt.draw() plt.legend(('actual','predicted'), loc=3) def refreshGUI(self): """Give plot a pause, so data is drawn and GUI's event loop can run. """ plt.pause(0.0001) def close(self): plt.ioff() plt.show() NuPICOutput.register(NuPICFileOutput) NuPICOutput.register(NuPICPlotOutput)
agpl-3.0
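A minimal, hypothetical use of NuPICFileOutput from the module above (the model name, timestamp, and values are invented, and the module is assumed to be importable as nupic_output):

import datetime
from nupic_output import NuPICFileOutput

output = NuPICFileOutput(["rec-center-hourly"])
output.write([datetime.datetime(2014, 1, 1, 0, 0)], [5.3], [5.1], predictionStep=1)
output.close()   # leaves rec-center-hourly_out.csv with a header row and one data row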
jonyroda97/redbot-amigosprovaveis
lib/matplotlib/backends/backend_nbagg.py
2
9384
"""Interactive figures in the IPython notebook""" # Note: There is a notebook in # lib/matplotlib/backends/web_backend/nbagg_uat.ipynb to help verify # that changes made maintain expected behaviour. import datetime from base64 import b64encode import json import io import os import six from uuid import uuid4 as uuid import tornado.ioloop from IPython.display import display, Javascript, HTML try: # Jupyter/IPython 4.x or later from ipykernel.comm import Comm except ImportError: # Jupyter/IPython 3.x or earlier from IPython.kernel.comm import Comm from matplotlib import rcParams, is_interactive from matplotlib._pylab_helpers import Gcf from matplotlib.backends.backend_webagg_core import ( FigureCanvasWebAggCore, FigureManagerWebAgg, NavigationToolbar2WebAgg, TimerTornado) from matplotlib.backend_bases import ( _Backend, FigureCanvasBase, NavigationToolbar2) from matplotlib.figure import Figure from matplotlib import is_interactive from matplotlib.backends.backend_webagg_core import (FigureManagerWebAgg, FigureCanvasWebAggCore, NavigationToolbar2WebAgg, TimerTornado) from matplotlib.backend_bases import (ShowBase, NavigationToolbar2, FigureCanvasBase) def connection_info(): """ Return a string showing the figure and connection status for the backend. This is intended as a diagnostic tool, and not for general use. """ result = [] for manager in Gcf.get_all_fig_managers(): fig = manager.canvas.figure result.append('{0} - {0}'.format((fig.get_label() or "Figure {0}".format(manager.num)), manager.web_sockets)) if not is_interactive(): result.append('Figures pending show: {0}'.format(len(Gcf._activeQue))) return '\n'.join(result) # Note: Version 3.2 and 4.x icons # http://fontawesome.io/3.2.1/icons/ # http://fontawesome.io/ # the `fa fa-xxx` part targets font-awesome 4, (IPython 3.x) # the icon-xxx targets font awesome 3.21 (IPython 2.x) _FONT_AWESOME_CLASSES = { 'home': 'fa fa-home icon-home', 'back': 'fa fa-arrow-left icon-arrow-left', 'forward': 'fa fa-arrow-right icon-arrow-right', 'zoom_to_rect': 'fa fa-square-o icon-check-empty', 'move': 'fa fa-arrows icon-move', 'download': 'fa fa-floppy-o icon-save', None: None } class NavigationIPy(NavigationToolbar2WebAgg): # Use the standard toolbar items + download button toolitems = [(text, tooltip_text, _FONT_AWESOME_CLASSES[image_file], name_of_method) for text, tooltip_text, image_file, name_of_method in (NavigationToolbar2.toolitems + (('Download', 'Download plot', 'download', 'download'),)) if image_file in _FONT_AWESOME_CLASSES] class FigureManagerNbAgg(FigureManagerWebAgg): ToolbarCls = NavigationIPy def __init__(self, canvas, num): self._shown = False FigureManagerWebAgg.__init__(self, canvas, num) def display_js(self): # XXX How to do this just once? It has to deal with multiple # browser instances using the same kernel (require.js - but the # file isn't static?). display(Javascript(FigureManagerNbAgg.get_javascript())) def show(self): if not self._shown: self.display_js() self._create_comm() else: self.canvas.draw_idle() self._shown = True def reshow(self): """ A special method to re-show the figure in the notebook. 
""" self._shown = False self.show() @property def connected(self): return bool(self.web_sockets) @classmethod def get_javascript(cls, stream=None): if stream is None: output = io.StringIO() else: output = stream super(FigureManagerNbAgg, cls).get_javascript(stream=output) with io.open(os.path.join( os.path.dirname(__file__), "web_backend", "nbagg_mpl.js"), encoding='utf8') as fd: output.write(fd.read()) if stream is None: return output.getvalue() def _create_comm(self): comm = CommSocket(self) self.add_web_socket(comm) return comm def destroy(self): self._send_event('close') # need to copy comms as callbacks will modify this list for comm in list(self.web_sockets): comm.on_close() self.clearup_closed() def clearup_closed(self): """Clear up any closed Comms.""" self.web_sockets = set([socket for socket in self.web_sockets if socket.is_open()]) if len(self.web_sockets) == 0: self.canvas.close_event() def remove_comm(self, comm_id): self.web_sockets = set([socket for socket in self.web_sockets if not socket.comm.comm_id == comm_id]) class FigureCanvasNbAgg(FigureCanvasWebAggCore): def new_timer(self, *args, **kwargs): return TimerTornado(*args, **kwargs) class CommSocket(object): """ Manages the Comm connection between IPython and the browser (client). Comms are 2 way, with the CommSocket being able to publish a message via the send_json method, and handle a message with on_message. On the JS side figure.send_message and figure.ws.onmessage do the sending and receiving respectively. """ def __init__(self, manager): self.supports_binary = None self.manager = manager self.uuid = str(uuid()) # Publish an output area with a unique ID. The javascript can then # hook into this area. display(HTML("<div id=%r></div>" % self.uuid)) try: self.comm = Comm('matplotlib', data={'id': self.uuid}) except AttributeError: raise RuntimeError('Unable to create an IPython notebook Comm ' 'instance. Are you in the IPython notebook?') self.comm.on_msg(self.on_message) manager = self.manager self._ext_close = False def _on_close(close_message): self._ext_close = True manager.remove_comm(close_message['content']['comm_id']) manager.clearup_closed() self.comm.on_close(_on_close) def is_open(self): return not (self._ext_close or self.comm._closed) def on_close(self): # When the socket is closed, deregister the websocket with # the FigureManager. if self.is_open(): try: self.comm.close() except KeyError: # apparently already cleaned it up? pass def send_json(self, content): self.comm.send({'data': json.dumps(content)}) def send_binary(self, blob): # The comm is ascii, so we always send the image in base64 # encoded data URL form. data = b64encode(blob) if six.PY3: data = data.decode('ascii') data_uri = "data:image/png;base64,{0}".format(data) self.comm.send({'data': data_uri}) def on_message(self, message): # The 'supports_binary' message is relevant to the # websocket itself. The other messages get passed along # to matplotlib as-is. # Every message has a "type" and a "figure_id". 
message = json.loads(message['content']['data']) if message['type'] == 'closing': self.on_close() self.manager.clearup_closed() elif message['type'] == 'supports_binary': self.supports_binary = message['value'] else: self.manager.handle_json(message) @_Backend.export class _BackendNbAgg(_Backend): FigureCanvas = FigureCanvasNbAgg FigureManager = FigureManagerNbAgg @staticmethod def new_figure_manager_given_figure(num, figure): canvas = FigureCanvasNbAgg(figure) if rcParams['nbagg.transparent']: figure.patch.set_alpha(0) manager = FigureManagerNbAgg(canvas, num) if is_interactive(): manager.show() figure.canvas.draw_idle() canvas.mpl_connect('close_event', lambda event: Gcf.destroy(num)) return manager @staticmethod def trigger_manager_draw(manager): manager.show() @staticmethod def show(): from matplotlib._pylab_helpers import Gcf managers = Gcf.get_all_fig_managers() if not managers: return interactive = is_interactive() for manager in managers: manager.show() # plt.figure adds an event which puts the figure in focus # in the activeQue. Disable this behaviour, as it results in # figures being put as the active figure after they have been # shown, even in non-interactive mode. if hasattr(manager, '_cidgcf'): manager.canvas.mpl_disconnect(manager._cidgcf) if not interactive and manager in Gcf._activeQue: Gcf._activeQue.remove(manager)
gpl-3.0
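The nbagg backend above is intended for use inside a Jupyter/IPython notebook. A sketch of activating it and checking its connection state with the connection_info helper defined in the module:

import matplotlib
matplotlib.use('nbagg')            # or `%matplotlib nbagg` in a notebook cell
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [0, 1, 4])
plt.show()

from matplotlib.backends.backend_nbagg import connection_info
print(connection_info())           # figure / web socket status per manager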
matplotlib/cmocean
cmocean/rgb/dense.py
2
13693
from matplotlib.colors import ListedColormap from numpy import nan, inf # Used to reconstruct the colormap in pycam02ucs.cm.viscm parameters = {'xp': [16.121891585344997, 33.901145962549492, 5.5873058066040926, -14.703203914141397, -17.875928056390336, -5.3288735306278738], 'yp': [-2.5423728813559308, -13.425925925925895, -42.422027290448327, -35.333333333333314, -8.83264462809916, -2.1686159844054487], 'min_Jp': 15.0, 'max_Jp': 95.0} cm_data = [[ 0.21298394, 0.05589169, 0.14220951], [ 0.21780744, 0.0570005 , 0.14665582], [ 0.22261214, 0.05808842, 0.15115908], [ 0.22739756, 0.05915624, 0.15572185], [ 0.23216536, 0.06020099, 0.16034977], [ 0.23691745, 0.06121879, 0.1650498 ], [ 0.24164654, 0.06222163, 0.169816 ], [ 0.24635153, 0.06321115, 0.17465056], [ 0.25103114, 0.06418929, 0.1795555 ], [ 0.25568737, 0.06515168, 0.1845388 ], [ 0.26031556, 0.06610638, 0.18959733], [ 0.26491272, 0.06705861, 0.19473015], [ 0.26947709, 0.0680114 , 0.19993831], [ 0.27400681, 0.06896804, 0.20522255], [ 0.27849993, 0.06993211, 0.21058327], [ 0.28295501, 0.07090603, 0.21602205], [ 0.28737014, 0.0718934 , 0.22153921], [ 0.29174204, 0.07290112, 0.22713094], [ 0.29606871, 0.07393344, 0.23279613], [ 0.30034822, 0.07499465, 0.23853326], [ 0.30457867, 0.07608911, 0.24434046], [ 0.30875826, 0.07722111, 0.25021549], [ 0.31288529, 0.0783949 , 0.25615575], [ 0.3169582 , 0.07961456, 0.26215837], [ 0.32097556, 0.08088399, 0.26822019], [ 0.32493609, 0.08220684, 0.27433782], [ 0.3288387 , 0.08358647, 0.28050768], [ 0.33268245, 0.08502593, 0.28672608], [ 0.33646657, 0.08652789, 0.2929892 ], [ 0.34019047, 0.08809468, 0.29929318], [ 0.34385372, 0.08972821, 0.30563417], [ 0.34745604, 0.09143006, 0.31200825], [ 0.35099729, 0.0932014 , 0.31841152], [ 0.35447749, 0.09504303, 0.32484029], [ 0.35789677, 0.09695535, 0.33129096], [ 0.36125536, 0.09893846, 0.33776007], [ 0.36455362, 0.10099212, 0.34424427], [ 0.36779195, 0.10311585, 0.35074041], [ 0.37097085, 0.10530889, 0.35724546], [ 0.37409088, 0.10757029, 0.36375657], [ 0.37715263, 0.10989888, 0.37027108], [ 0.38015674, 0.11229336, 0.37678646], [ 0.38310387, 0.11475229, 0.38330035], [ 0.38599472, 0.11727411, 0.38981058], [ 0.38882999, 0.1198572 , 0.3963151 ], [ 0.39161037, 0.12249987, 0.402812 ], [ 0.3943366 , 0.12520039, 0.40929955], [ 0.39700936, 0.12795703, 0.41577611], [ 0.39962936, 0.13076802, 0.42224018], [ 0.40219729, 0.13363161, 0.42869038], [ 0.40471394, 0.13654614, 0.43512488], [ 0.40717995, 0.13950986, 0.44154258], [ 0.4095959 , 0.14252107, 0.44794287], [ 0.41196239, 0.14557814, 0.45432475], [ 0.41428002, 0.1486795 , 0.4606873 ], [ 0.41654936, 0.15182361, 0.46702967], [ 0.41877098, 0.15500903, 0.47335108], [ 0.4209454 , 0.15823432, 0.4796508 ], [ 0.42307313, 0.16149814, 0.48592814], [ 0.42515465, 0.16479918, 0.49218247], [ 0.42719043, 0.1681362 , 0.49841321], [ 0.42918111, 0.17150798, 0.50461925], [ 0.431127 , 0.17491341, 0.5108004 ], [ 0.43302838, 0.17835141, 0.5169565 ], [ 0.43488561, 0.18182099, 0.52308708], [ 0.43669905, 0.18532117, 0.5291917 ], [ 0.43846903, 0.18885105, 0.53526994], [ 0.44019583, 0.19240976, 0.54132138], [ 0.44187976, 0.19599648, 0.54734563], [ 0.44352106, 0.19961045, 0.5533423 ], [ 0.44512012, 0.2032509 , 0.55931077], [ 0.44667705, 0.20691717, 0.56525088], [ 0.44819199, 0.21060865, 0.57116243], [ 0.44966511, 0.21432473, 0.57704502], [ 0.45109659, 0.21806485, 0.58289828], [ 0.45248658, 0.22182847, 0.58872183], [ 0.45383521, 0.2256151 , 0.59451528], [ 0.45514261, 0.22942427, 0.60027826], [ 0.45640887, 0.23325554, 0.60601037], [ 0.45763398, 0.23710854, 
0.61171135], [ 0.45881803, 0.24098289, 0.61738074], [ 0.4599611 , 0.24487823, 0.62301809], [ 0.46106323, 0.24879421, 0.62862296], [ 0.46212445, 0.25273054, 0.63419487], [ 0.46314479, 0.25668693, 0.63973335], [ 0.46412426, 0.2606631 , 0.6452379 ], [ 0.46506286, 0.2646588 , 0.650708 ], [ 0.46596031, 0.26867393, 0.65614343], [ 0.46681665, 0.27270825, 0.66154354], [ 0.467632 , 0.27676148, 0.66690758], [ 0.46840632, 0.28083345, 0.67223496], [ 0.46913959, 0.28492398, 0.67752502], [ 0.46983176, 0.28903289, 0.68277713], [ 0.47048281, 0.29316004, 0.68799058], [ 0.4710927 , 0.29730529, 0.69316468], [ 0.47166137, 0.30146848, 0.69829868], [ 0.47218867, 0.30564956, 0.70339194], [ 0.47267406, 0.30984863, 0.70844403], [ 0.47311806, 0.3140653 , 0.71345366], [ 0.47352067, 0.31829946, 0.71841996], [ 0.47388188, 0.322551 , 0.72334205], [ 0.47420168, 0.32681981, 0.728219 ], [ 0.47448009, 0.33110575, 0.73304987], [ 0.47471715, 0.33540873, 0.73783366], [ 0.4749129 , 0.33972863, 0.74256938], [ 0.47506742, 0.34406531, 0.74725597], [ 0.4751808 , 0.34841867, 0.75189235], [ 0.47525316, 0.35278857, 0.75647742], [ 0.47528466, 0.35717487, 0.76101004], [ 0.47527514, 0.36157758, 0.76548918], [ 0.47522479, 0.36599656, 0.76991363], [ 0.47513427, 0.37043147, 0.77428199], [ 0.47500393, 0.37488213, 0.77859297], [ 0.47483412, 0.37934834, 0.7828453 ], [ 0.4746253 , 0.38382989, 0.78703766], [ 0.47437795, 0.38832654, 0.7911687 ], [ 0.47409263, 0.39283807, 0.79523708], [ 0.47376999, 0.39736419, 0.79924139], [ 0.47341074, 0.40190463, 0.80318024], [ 0.47301567, 0.40645908, 0.80705223], [ 0.47258566, 0.41102721, 0.81085591], [ 0.47212171, 0.41560865, 0.81458986], [ 0.4716249 , 0.42020304, 0.81825263], [ 0.47109642, 0.42480997, 0.82184277], [ 0.47053758, 0.42942898, 0.82535887], [ 0.4699498 , 0.43405962, 0.82879947], [ 0.46933466, 0.43870139, 0.83216318], [ 0.46869383, 0.44335376, 0.83544858], [ 0.46802917, 0.44801616, 0.83865432], [ 0.46734263, 0.45268799, 0.84177905], [ 0.46663636, 0.45736864, 0.84482148], [ 0.46591265, 0.46205743, 0.84778034], [ 0.46517394, 0.46675366, 0.85065444], [ 0.46442285, 0.47145661, 0.85344263], [ 0.46366216, 0.4761655 , 0.85614385], [ 0.46289481, 0.48087955, 0.85875708], [ 0.46212297, 0.48559831, 0.8612812 ], [ 0.4613509 , 0.49032052, 0.86371555], [ 0.46058208, 0.49504528, 0.86605942], [ 0.45982017, 0.49977167, 0.86831217], [ 0.45906898, 0.50449872, 0.87047333], [ 0.4583325 , 0.50922545, 0.87254251], [ 0.45761487, 0.51395086, 0.87451947], [ 0.45692037, 0.51867392, 0.87640412], [ 0.45625342, 0.52339359, 0.87819649], [ 0.45561856, 0.52810881, 0.87989676], [ 0.45502044, 0.53281852, 0.88150529], [ 0.45446291, 0.53752203, 0.8830221 ], [ 0.45395166, 0.5422179 , 0.88444824], [ 0.45349173, 0.54690499, 0.88578463], [ 0.45308803, 0.55158223, 0.88703226], [ 0.45274551, 0.55624857, 0.8881923 ], [ 0.45246908, 0.56090297, 0.88926607], [ 0.45226366, 0.5655444 , 0.89025507], [ 0.45213406, 0.57017185, 0.89116092], [ 0.45208461, 0.57478456, 0.89198505], [ 0.45212047, 0.57938135, 0.89272981], [ 0.45224622, 0.5839613 , 0.89339735], [ 0.45246621, 0.58852353, 0.89398987], [ 0.45278458, 0.59306722, 0.89450974], [ 0.45320531, 0.59759159, 0.89495941], [ 0.45373211, 0.60209592, 0.89534144], [ 0.45436847, 0.60657953, 0.8956585 ], [ 0.45511768, 0.61104174, 0.89591342], [ 0.45598269, 0.61548199, 0.89610905], [ 0.45696613, 0.61989976, 0.89624827], [ 0.45807033, 0.62429458, 0.89633399], [ 0.45929732, 0.62866605, 0.89636919], [ 0.46064879, 0.63301382, 0.89635684], [ 0.46212629, 0.6373375 , 0.89630027], [ 0.46373081, 0.6416369 , 
0.89620239], [ 0.46546305, 0.64591186, 0.89606608], [ 0.46732345, 0.65016224, 0.89589433], [ 0.46931216, 0.65438798, 0.89569008], [ 0.47142903, 0.65858902, 0.89545627], [ 0.47367364, 0.66276538, 0.89519579], [ 0.47604536, 0.66691708, 0.89491161], [ 0.47854335, 0.67104413, 0.89460702], [ 0.48116628, 0.67514678, 0.89428415], [ 0.48391278, 0.67922522, 0.89394566], [ 0.48678129, 0.68327963, 0.89359417], [ 0.48977007, 0.68731025, 0.89323218], [ 0.4928772 , 0.69131735, 0.89286215], [ 0.49610063, 0.69530122, 0.89248647], [ 0.49943822, 0.69926217, 0.89210744], [ 0.50288765, 0.70320047, 0.89172772], [ 0.50644655, 0.70711649, 0.89134936], [ 0.51011248, 0.71101066, 0.8909741 ], [ 0.51388294, 0.71488334, 0.89060393], [ 0.51775541, 0.71873493, 0.89024078], [ 0.52172732, 0.72256583, 0.8898865 ], [ 0.5257961 , 0.72637645, 0.88954287], [ 0.52995915, 0.7301672 , 0.8892116 ], [ 0.53421391, 0.7339385 , 0.88889434], [ 0.5385578 , 0.73769077, 0.88859267], [ 0.5429883 , 0.74142444, 0.88830811], [ 0.54750281, 0.74513991, 0.88804246], [ 0.5520989 , 0.74883762, 0.88779685], [ 0.55677422, 0.75251799, 0.88757251], [ 0.56152638, 0.75618144, 0.88737072], [ 0.56635309, 0.75982839, 0.88719273], [ 0.57125208, 0.76345922, 0.88703974], [ 0.57622118, 0.76707435, 0.8869129 ], [ 0.58125826, 0.77067417, 0.88681333], [ 0.58636126, 0.77425906, 0.88674212], [ 0.59152819, 0.7778294 , 0.88670031], [ 0.59675713, 0.78138555, 0.88668891], [ 0.60204624, 0.78492789, 0.88670892], [ 0.60739371, 0.78845676, 0.88676131], [ 0.61279785, 0.79197249, 0.886847 ], [ 0.61825699, 0.79547544, 0.88696697], [ 0.62376953, 0.79896592, 0.88712212], [ 0.62933401, 0.80244424, 0.88731328], [ 0.63494897, 0.80591071, 0.88754133], [ 0.64061303, 0.80936562, 0.88780715], [ 0.64632485, 0.81280925, 0.88811162], [ 0.65208315, 0.81624189, 0.88845562], [ 0.65788673, 0.81966379, 0.88884001], [ 0.6637344 , 0.82307522, 0.88926568], [ 0.66962506, 0.82647642, 0.88973352], [ 0.67555762, 0.82986764, 0.89024441], [ 0.68153106, 0.83324911, 0.89079928], [ 0.68754438, 0.83662105, 0.89139904], [ 0.69359663, 0.83998369, 0.89204464], [ 0.69968688, 0.84333724, 0.89273702], [ 0.70581423, 0.84668191, 0.89347718], [ 0.71197782, 0.85001791, 0.8942661 ], [ 0.7181769 , 0.85334541, 0.89510469], [ 0.72441053, 0.85666464, 0.89599414], [ 0.73067788, 0.8599758 , 0.89693553], [ 0.73697811, 0.8632791 , 0.89793 ], [ 0.74331039, 0.86657473, 0.89897869], [ 0.74967389, 0.86986292, 0.90008279], [ 0.75606778, 0.87314387, 0.90124351], [ 0.76249117, 0.87641781, 0.90246212], [ 0.7689432 , 0.87968498, 0.90373988], [ 0.77542295, 0.88294564, 0.9050781 ], [ 0.78192947, 0.88620003, 0.90647814], [ 0.78846179, 0.88944845, 0.90794134], [ 0.79501887, 0.89269119, 0.9094691 ], [ 0.80159965, 0.89592859, 0.91106281], [ 0.80820295, 0.899161 , 0.91272391], [ 0.81482754, 0.90238881, 0.91445386], [ 0.82147215, 0.90561245, 0.91625407], [ 0.82813543, 0.90883237, 0.91812595], [ 0.83481598, 0.91204906, 0.92007088], [ 0.84151229, 0.91526306, 0.92209023], [ 0.84822279, 0.91847494, 0.92418529], [ 0.85494584, 0.92168533, 0.92635732], [ 0.8616797 , 0.9248949 , 0.92860749], [ 0.86842255, 0.92810438, 0.9309369 ], [ 0.87517248, 0.93131455, 0.93334654], [ 0.88192751, 0.93452625, 0.93583728], [ 0.88868558, 0.93774038, 0.93840987], [ 0.89544454, 0.94095789, 0.94106488], [ 0.90220216, 0.9441798 , 0.94380273]] test_cm = ListedColormap(cm_data, name=__file__) if __name__ == "__main__": import matplotlib.pyplot as plt import numpy as np try: from viscm import viscm viscm(test_cm) except ImportError: print("viscm not found, falling back on 
simple display") plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto', cmap=test_cm) plt.show()
mit
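The record above is a viscm-generated colormap module that exposes the ListedColormap object test_cm built from cm_data. As a quick reference, here is a minimal usage sketch; the module name my_colormap used in the import is a placeholder assumption, since the record's path field is not visible in this excerpt.

import numpy as np
import matplotlib.pyplot as plt

# Placeholder import: assumes the module above was saved locally as my_colormap.py.
from my_colormap import test_cm

# Apply the colormap to some sample data; passing the Colormap instance
# directly avoids having to register it by name first.
values = np.random.rand(64, 64)
plt.imshow(values, cmap=test_cm)
plt.colorbar()
plt.show()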
kmkolasinski/Quantulaba
plots/plot_lattice.py
2
1492
#!/usr/bin/python
# Plot the lattice stored in lattice.dat: connections are drawn as a
# LineCollection, heavily weighted sites as coloured scatter points.
import numpy as np
import matplotlib.pyplot as plt
import csv
from matplotlib.collections import LineCollection

file = "lattice.dat"
#ax = plt.gca(projection='3d')
pscale = 1.0   # scales weights used to colour scatter points
lscale = 10.0  # scales weights used as line widths

fig, ax = plt.subplots()
ax.set_aspect('equal')

# csv rows 1 and 2 hold the min/max corners of the bounding box.
desired = [1, 2]
with open(file, 'r') as fin:
    reader = csv.reader(fin)
    result = [[(s) for s in row] for i, row in enumerate(reader) if i in desired]

minCorner = list(map(float, result[0][0].split()))
maxCorner = list(map(float, result[1][0].split()))
xWidth = abs(minCorner[0] - maxCorner[0])
yWidth = abs(minCorner[1] - maxCorner[1])
zWidth = abs(minCorner[2] - maxCorner[2])

# Zero-size scatter points make autoscaling include the bounding-box corners.
ax.scatter(minCorner[0], minCorner[1], s=0)
ax.scatter(maxCorner[0], maxCorner[1], s=0)
ax.margins(0.1)

data = np.loadtxt(file, skiprows=4)
no_lines = np.size(data[:, 0])

wlist = []
lines = []
for i in range(no_lines):
    lines.append([(data[i, 0], data[i, 1]), (data[i, 3], data[i, 4])])
    wlist.extend([data[i, 6] * lscale])

lc = LineCollection(lines, linewidths=wlist, colors='black', lw=1.0)
ax.add_collection(lc)

wlist = []
points = []
for i in range(no_lines):
    if data[i, 6] > 1.0:
        points.append([data[i, 0], data[i, 1]])
        wlist.extend([data[i, 6] * pscale])

points = np.array(points)
wlist = np.array(wlist)
if np.size(points) > 0:
    ax.scatter(points[:, 0], points[:, 1], cmap='PuBu', c=wlist,
               s=50, edgecolors='k', zorder=2)

plt.savefig("lattice.pdf")
mit
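The plot_lattice.py record above reads its geometry from lattice.dat. The sketch below writes a tiny synthetic file in the layout the reader code appears to expect: four header lines, where the second and third hold the min/max bounding-box corners as space-separated floats, followed by one connection per row with columns x1 y1 z1 x2 y2 z2 w. This layout is an assumption inferred from desired=[1,2] and skiprows=4 in the script, not a documented Quantulaba format.

# Hedged sketch: generate a minimal lattice.dat that plot_lattice.py can read.
header = [
    "# lattice.dat (synthetic example)",
    "0.0 0.0 0.0",              # csv row index 1: assumed min corner
    "2.0 1.0 0.0",              # csv row index 2: assumed max corner
    "# x1 y1 z1  x2 y2 z2  w",  # fourth header line, skipped by loadtxt
]
connections = [
    (0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.5),  # w > 1.0, so also drawn as a scatter point
    (1.0, 0.0, 0.0, 2.0, 1.0, 0.0, 0.5),
]
with open("lattice.dat", "w") as out:
    out.write("\n".join(header) + "\n")
    for row in connections:
        out.write(" ".join("%g" % v for v in row) + "\n")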
lovexiaov/SandwichApp
venv/lib/python2.7/site-packages/py2app/build_app.py
9
77527
""" Mac OS X .app build command for distutils Originally (loosely) based on code from py2exe's build_exe.py by Thomas Heller. """ from __future__ import print_function import imp import sys import os import zipfile import plistlib import shlex import shutil import textwrap import pkg_resources import collections from modulegraph import modulegraph from py2app.apptemplate.setup import main as script_executable from py2app.util import mergecopy, make_exec try: from cStringIO import StringIO except ImportError: from io import StringIO from itertools import chain from setuptools import Command from distutils.util import convert_path from distutils import log from distutils.errors import * from modulegraph.find_modules import find_modules, parse_mf_results, find_needed_modules from modulegraph.modulegraph import SourceModule, Package, Script from modulegraph import zipio import macholib.dyld import macholib.MachOStandalone import macholib.MachO from macholib.util import flipwritable from py2app.create_appbundle import create_appbundle from py2app.create_pluginbundle import create_pluginbundle from py2app.util import \ fancy_split, byte_compile, make_loader, imp_find_module, \ copy_tree, fsencoding, strip_files, in_system_path, makedirs, \ iter_platform_files, find_version, skipscm, momc, copy_file, \ copy_resource from py2app.filters import \ not_stdlib_filter, not_system_filter, has_filename_filter from py2app import recipes from distutils.sysconfig import get_config_var, get_config_h_filename PYTHONFRAMEWORK=get_config_var('PYTHONFRAMEWORK') PLUGIN_SUFFIXES = { '.qlgenerator': 'QuickLook', '.mdimporter': 'Spotlight', '.xpc': 'XPCServices', '.service': 'Services', '.prefPane': 'PreferencePanes', '.iaplugin': 'InternetAccounts', '.action': 'Automator', } try: basestring except NameError: basestring = str def rewrite_tkinter_load_commands(tkinter_path): print("rewrite_tk", tkinter_path) m = macholib.MachO.MachO(tkinter_path) tcl_path = None tk_path = None rewrite_map = {} for header in m.headers: for idx, name, other in header.walkRelocatables(): if other.endswith('/Tk'): if tk_path is not None and other != tk_path: raise DistutilsPlatformError('_tkinter is linked to different Tk paths') tk_path = other elif other.endswith('/Tcl'): if tcl_path is not None and other != tcl_path: raise DistutilsPlatformError('_tkinter is linked to different Tcl paths') tcl_path = other if tcl_path is None or 'Tcl.framework' not in tcl_path: raise DistutilsPlatformError('_tkinter is not linked a Tcl.framework') if tk_path is None or 'Tk.framework' not in tk_path: raise DistutilsPlatformError('_tkinter is not linked a Tk.framework') system_tcl_versions = [nm for nm in os.listdir('/System/Library/Frameworks/Tcl.framework/Versions') if nm != 'Current'] system_tk_versions = [nm for nm in os.listdir('/System/Library/Frameworks/Tk.framework/Versions') if nm != 'Current'] if not tcl_path.startswith('/System/Library/Frameworks'): # ../Versions/8.5/Tcl ver = os.path.basename(os.path.dirname(tcl_path)) if ver not in system_tcl_versions: raise DistutilsPlatformError('_tkinter is linked to a version of Tcl not in /System') rewrite_map[tcl_path] = '/System/Library/Frameworks/Tcl.framework/Versions/%s/Tcl'%(ver,) if not tk_path.startswith('/System/Library/Frameworks'): # ../Versions/8.5/Tk ver = os.path.basename(os.path.dirname(tk_path)) if ver not in system_tk_versions: raise DistutilsPlatformError('_tkinter is linked to a version of Tk not in /System') rewrite_map[tk_path] = 
'/System/Library/Frameworks/Tk.framework/Versions/%s/Tk'%(ver,) if rewrite_map: print("Relinking _tkinter.so to system Tcl/Tk") rewroteAny = False for header in m.headers: for idx, name, other in header.walkRelocatables(): data = rewrite_map.get(other) if data: if header.rewriteDataForCommand(idx, data.encode(sys.getfilesystemencoding())): rewroteAny = True if rewroteAny: old_mode = flipwritable(m.filename) try: with open(m.filename, 'rb+') as f: for header in m.headers: f.seek(0) header.write(f) f.seek(0, 2) f.flush() finally: flipwritable(m.filename, old_mode) else: print("_tkinter already linked against system Tcl/Tk") def get_zipfile(dist, semi_standalone=False): if sys.version_info[0] == 3: if semi_standalone: return "python%d.%d/site-packages.zip"%(sys.version_info[:2]) else: return "python%d%d.zip"%(sys.version_info[:2]) return getattr(dist, "zipfile", None) or "site-packages.zip" def framework_copy_condition(src): # Skip Headers, .svn, and CVS dirs return skipscm(src) and os.path.basename(src) != 'Headers' class PythonStandalone(macholib.MachOStandalone.MachOStandalone): def __init__(self, appbuilder, *args, **kwargs): super(PythonStandalone, self).__init__(*args, **kwargs) self.appbuilder = appbuilder def copy_dylib(self, src): dest = os.path.join(self.dest, os.path.basename(src)) if os.path.islink(src): dest = os.path.join(self.dest, os.path.basename(os.path.realpath(src))) # Ensure that the orginal name also exists, avoids problems when # the filename is used from Python (see issue #65) # # NOTE: The if statement checks that the target link won't # point to itself, needed for systems like homebrew that # store symlinks in "public" locations that point to # files of the same name in a per-package install location. link_dest = os.path.join(self.dest, os.path.basename(src)) if os.path.basename(link_dest) != os.path.basename(dest): os.symlink(os.path.basename(dest), link_dest) else: dest = os.path.join(self.dest, os.path.basename(src)) return self.appbuilder.copy_dylib(src, dest) def copy_framework(self, info): destfn = self.appbuilder.copy_framework(info, self.dest) dest = os.path.join(self.dest, info['shortname'] + '.framework') self.pending.append((destfn, iter_platform_files(dest))) return destfn def iterRecipes(module=recipes): for name in dir(module): if name.startswith('_'): continue check = getattr(getattr(module, name), 'check', None) if check is not None: yield (name, check) # A very loosely defined "target". We assume either a "script" or "modules" # attribute. Some attributes will be target specific. class Target(object): def __init__(self, **kw): self.__dict__.update(kw) # If modules is a simple string, assume they meant list m = self.__dict__.get("modules") if m and isinstance(m, basestring): self.modules = [m] def get_dest_base(self): dest_base = getattr(self, "dest_base", None) if dest_base: return dest_base script = getattr(self, "script", None) if script: return os.path.basename(os.path.splitext(script)[0]) modules = getattr(self, "modules", None) assert modules, "no script, modules or dest_base specified" return modules[0].split(".")[-1] def validate(self): resources = getattr(self, "resources", []) for r_filename in resources: if not os.path.isfile(r_filename): raise DistutilsOptionError( "Resource filename '%s' does not exist" % (r_filename,)) def validate_target(dist, attr, value): res = FixupTargets(value, "script") other = {"app": "plugin", "plugin": "app"} if res and getattr(dist, other[attr]): # XXX - support apps and plugins? 
raise DistutilsOptionError( "You must specify either app or plugin, not both") def FixupTargets(targets, default_attribute): if not targets: return targets try: targets = eval(targets) except: pass ret = [] for target_def in targets: if isinstance(target_def, basestring): # Create a default target object, with the string as the attribute target = Target(**{default_attribute: target_def}) else: d = getattr(target_def, "__dict__", target_def) if default_attribute not in d: raise DistutilsOptionError( "This target class requires an attribute '%s'" % (default_attribute,)) target = Target(**d) target.validate() ret.append(target) return ret def normalize_data_file(fn): if isinstance(fn, basestring): fn = convert_path(fn) return ('', [fn]) return fn def is_system(): prefix = sys.prefix if os.path.exists(os.path.join(prefix, ".Python")): fn = os.path.join(prefix, "lib", "python%d.%d"%(sys.version_info[:2]), "orig-prefix.txt") if os.path.exists(fn): with open(fn, 'rU') as fp: prefix = fp.read().strip() return in_system_path(prefix) def installation_info(version=None): if version is None: version = sys.version if is_system(): return version[:3] + " (FORCED: Using vendor Python)" else: return version[:3] class py2app(Command): description = "create a Mac OS X application or plugin from Python scripts" # List of option tuples: long name, short name (None if no short # name), and help string. user_options = [ ("app=", None, "application bundle to be built"), ("plugin=", None, "plugin bundle to be built"), ('optimize=', 'O', "optimization level: -O1 for \"python -O\", " "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"), ("includes=", 'i', "comma-separated list of modules to include"), ("packages=", 'p', "comma-separated list of packages to include"), ("iconfile=", None, "Icon file to use"), ("excludes=", 'e', "comma-separated list of modules to exclude"), ("dylib-excludes=", 'E', "comma-separated list of frameworks or dylibs to exclude"), ("datamodels=", None, "xcdatamodels to be compiled and copied into Resources"), ("mappingmodels=", None, "xcmappingmodels to be compiled and copied into Resources"), ("resources=", 'r', "comma-separated list of additional data files and folders to include (not for code!)"), ("frameworks=", 'f', "comma-separated list of additional frameworks and dylibs to include"), ("plist=", 'P', "Info.plist template file, dict, or plistlib.Plist"), ("extension=", None, "Bundle extension [default:.app for app, .plugin for plugin]"), ("graph", 'g', "output module dependency graph"), ("xref", 'x', "output module cross-reference as html"), ("no-strip", None, "do not strip debug and local symbols from output"), #("compressed", 'c', # "create a compressed zipfile"), ("no-chdir", 'C', "do not change to the data directory (Contents/Resources) [forced for plugins]"), #("no-zip", 'Z', # "do not use a zip file (XXX)"), ("semi-standalone", 's', "depend on an existing installation of Python " + installation_info()), ("alias", 'A', "Use an alias to current source file (for development only!)"), ("argv-emulation", 'a', "Use argv emulation [disabled for plugins]."), ("argv-inject=", None, "Inject some commands into the argv"), ("emulate-shell-environment", None, "Emulate the shell environment you get in a Terminal window"), ("use-pythonpath", None, "Allow PYTHONPATH to effect the interpreter's environment"), ("use-faulthandler", None, "Enable the faulthandler in the generated bundle (Python 3.3 or later)"), ("verbose-interpreter", None, "Start python in verbose mode"), ('bdist-base=', 
'b', 'base directory for build library (default is build)'), ('dist-dir=', 'd', "directory to put final built distributions in (default is dist)"), ('site-packages', None, "include the system and user site-packages into sys.path"), ("strip", 'S', "strip debug and local symbols from output (on by default, for compatibility)"), ("prefer-ppc", None, "Force application to run translated on i386 (LSPrefersPPC=True)"), ('debug-modulegraph', None, 'Drop to pdb console after the module finding phase is complete'), ("debug-skip-macholib", None, "skip macholib phase (app will not be standalone!)"), ("arch=", None, "set of architectures to use (fat, fat3, universal, intel, i386, ppc, x86_64; default is the set for the current python binary)"), ("qt-plugins=", None, "set of Qt plugins to include in the application bundle (default None)"), ("matplotlib-backends=", None, "set of matplotlib backends to include (default: include entire package)"), ("extra-scripts=", None, "set of scripts to include in the application bundle, next to the main application script"), ("include-plugins=", None, "List of plugins to include"), ("force-system-tk", None, "Ensure that Tkinter is linked against Apple's build of Tcl/Tk"), ("report-missing-from-imports", None, "Report the list of missing names for 'from module import name'"), ("no-report-missing-conditional-import", None, "Don't report missing modules when they appear to be conditional imports"), ] boolean_options = [ #"compressed", "xref", "strip", "no-strip", "site-packages", "semi-standalone", "alias", "argv-emulation", #"no-zip", "use-pythonpath", "use-faulthandler", "verbose-interpreter", "no-chdir", "debug-modulegraph", "debug-skip-macholib", "graph", "prefer-ppc", "emulate-shell-environment", "force-system-tk", "report-missing-from-imports", "no-report-missing-conditional-import", ] def initialize_options (self): self.app = None self.plugin = None self.bdist_base = None self.xref = False self.graph = False self.no_zip = 0 self.optimize = 0 if hasattr(sys, 'flags'): self.optimize = sys.flags.optimize self.arch = None self.strip = True self.no_strip = False self.iconfile = None self.extension = None self.alias = 0 self.argv_emulation = 0 self.emulate_shell_environment = 0 self.argv_inject = None self.no_chdir = 0 self.site_packages = False self.use_pythonpath = False self.use_faulthandler = False self.verbose_interpreter = False self.includes = None self.packages = None self.excludes = None self.dylib_excludes = None self.frameworks = None self.resources = None self.datamodels = None self.mappingmodels = None self.plist = None self.compressed = True self.semi_standalone = is_system() self.dist_dir = None self.debug_skip_macholib = False self.debug_modulegraph = False self.prefer_ppc = False self.filters = [] self.eggs = [] self.qt_plugins = None self.matplotlib_backends = None self.extra_scripts = None self.include_plugins = None self.force_system_tk = False self.report_missing_from_imports = False self.no_report_missing_conditional_import = False def finalize_options (self): if not self.strip: self.no_strip = True elif self.no_strip: self.strip = False self.optimize = int(self.optimize) if self.argv_inject and isinstance(self.argv_inject, basestring): self.argv_inject = shlex.split(self.argv_inject) self.includes = set(fancy_split(self.includes)) self.includes.add('encodings.*') if self.use_faulthandler: self.includes.add('faulthandler') #if sys.version_info[:2] >= (3, 2): # self.includes.add('pkgutil') # self.includes.add('imp') self.packages = 
set(fancy_split(self.packages)) self.excludes = set(fancy_split(self.excludes)) self.excludes.add('readline') # included by apptemplate self.excludes.add('site') if getattr(self.distribution, 'install_requires', None): self.includes.add('pkg_resources') self.eggs = pkg_resources.require(self.distribution.install_requires) # Setuptools/distribute style namespace packages uses # __import__('pkg_resources'), and that import isn't detected at the # moment. Forcefully include pkg_resources. self.includes.add('pkg_resources') dylib_excludes = fancy_split(self.dylib_excludes) self.dylib_excludes = [] for fn in dylib_excludes: try: res = macholib.dyld.framework_find(fn) except ValueError: try: res = macholib.dyld.dyld_find(fn) except ValueError: res = fn self.dylib_excludes.append(res) self.resources = fancy_split(self.resources) frameworks = fancy_split(self.frameworks) self.frameworks = [] for fn in frameworks: try: res = macholib.dyld.framework_find(fn) except ValueError: res = macholib.dyld.dyld_find(fn) while res in self.dylib_excludes: self.dylib_excludes.remove(res) self.frameworks.append(res) if not self.plist: self.plist = {} if isinstance(self.plist, basestring): self.plist = plistlib.Plist.fromFile(self.plist) if isinstance(self.plist, plistlib.Dict): self.plist = dict(self.plist.__dict__) else: self.plist = dict(self.plist) self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'), ('bdist_base', 'bdist_base')) if self.semi_standalone: self.filters.append(not_stdlib_filter) if self.iconfile is None and 'CFBundleIconFile' not in self.plist: # Default is the generic applet icon in the framework iconfile = os.path.join(sys.prefix, 'Resources', 'Python.app', 'Contents', 'Resources', 'PythonApplet.icns') if os.path.exists(iconfile): self.iconfile = iconfile self.runtime_preferences = list(self.get_runtime_preferences()) self.qt_plugins = fancy_split(self.qt_plugins) self.matplotlib_backends = fancy_split(self.matplotlib_backends) self.extra_scripts = fancy_split(self.extra_scripts) self.include_plugins = fancy_split(self.include_plugins) if self.datamodels: print("WARNING: the datamodels option is deprecated, add model files to the list of resources") if self.mappingmodels: print("WARNING: the mappingmodels option is deprecated, add model files to the list of resources") def get_default_plist(self): # XXX - this is all single target stuff plist = {} target = self.targets[0] version = self.distribution.get_version() if version == '0.0.0': try: version = find_version(target.script) except ValueError: pass if not isinstance(version, basestring): raise DistutilsOptionError("Version must be a string") if sys.version_info[0] > 2 and isinstance(version, type('a'.encode('ascii'))): raise DistutilsOptionError("Version must be a string") plist['CFBundleVersion'] = version name = self.distribution.get_name() if name == 'UNKNOWN': base = target.get_dest_base() name = os.path.basename(base) plist['CFBundleName'] = name return plist def get_runtime(self, prefix=None, version=None): # XXX - this is a bit of a hack! 
# ideally we'd use dylib functions to figure this out if prefix is None: prefix = sys.prefix if version is None: version = sys.version version = version[:3] info = None if os.path.exists(os.path.join(prefix, ".Python")): # We're in a virtualenv environment, locate the real prefix fn = os.path.join(prefix, "lib", "python%d.%d"%(sys.version_info[:2]), "orig-prefix.txt") if os.path.exists(fn): with open(fn, 'rU') as fp: prefix = fp.read().strip() try: fmwk = macholib.dyld.framework_find(prefix) except ValueError: info = None else: info = macholib.dyld.framework_info(fmwk) if info is not None: dylib = info['name'] runtime = os.path.join(info['location'], info['name']) else: dylib = 'libpython%s.dylib' % (sys.version[:3],) runtime = os.path.join(prefix, 'lib', dylib) return dylib, runtime def symlink(self, src, dst): try: os.remove(dst) except OSError: pass os.symlink(src, dst) def get_runtime_preferences(self, prefix=None, version=None): dylib, runtime = self.get_runtime(prefix=prefix, version=version) yield os.path.join('@executable_path', '..', 'Frameworks', dylib) if self.semi_standalone or self.alias: yield runtime def run(self): if get_config_var('PYTHONFRAMEWORK') is None: if not get_config_var('Py_ENABLE_SHARED'): raise DistutilsPlatformError("This python does not have a shared library or framework") else: # Issue .. in py2app's tracker, and issue .. in python's tracker: a unix-style shared # library build did not read the application environment correctly. The collection of # if statements below gives a clean error message when py2app is started, instead of # building a bundle that will give a confusing error message when started. msg = "py2app is not supported for a shared library build with this version of python" if sys.version_info[:2] < (2,7): raise DistutilsPlatformError(msg) elif sys.version_info[:2] == (2,7) and sys.version[3] < 4: raise DistutilsPlatformError(msg) elif sys.version_info[0] == 3 and sys.version_info[1] < 2: raise DistutilsPlatformError(msg) elif sys.version_info[0] == 3 and sys.version_info[1] == 2 and sys.version_info[3] < 3: raise DistutilsPlatformError(msg) elif sys.version_info[0] == 3 and sys.version_info[1] == 3 and sys.version_info[3] < 1: raise DistutilsPlatformError(msg) if hasattr(self.distribution, "install_requires") \ and self.distribution.install_requires: self.distribution.fetch_build_eggs(self.distribution.install_requires) build = self.reinitialize_command('build') build.build_base = self.bdist_base build.run() self.create_directories() self.fixup_distribution() self.initialize_plist() sys_old_path = sys.path[:] extra_paths = [ os.path.dirname(target.script) for target in self.targets ] extra_paths.extend([build.build_platlib, build.build_lib]) self.additional_paths = [ os.path.abspath(p) for p in extra_paths if p is not None ] sys.path[:0] = self.additional_paths # this needs additional_paths self.initialize_prescripts() try: self._run() finally: sys.path = sys_old_path def iter_datamodels(self, resdir): for (path, files) in (normalize_data_file(fn) for fn in (self.datamodels or ())): path = fsencoding(path) for fn in files: fn = fsencoding(fn) basefn, ext = os.path.splitext(fn) if ext != '.xcdatamodel': basefn = fn fn += '.xcdatamodel' destfn = os.path.basename(basefn) + '.mom' yield fn, os.path.join(resdir, path, destfn) def compile_datamodels(self, resdir): for src, dest in self.iter_datamodels(resdir): print("compile datamodel", src, "->", dest) self.mkpath(os.path.dirname(dest)) momc(src, dest) def iter_mappingmodels(self, resdir): for 
(path, files) in (normalize_data_file(fn) for fn in (self.mappingmodels or ())): path = fsencoding(path) for fn in files: fn = fsencoding(fn) basefn, ext = os.path.splitext(fn) if ext != '.xcmappingmodel': basefn = fn fn += '.xcmappingmodel' destfn = os.path.basename(basefn) + '.cdm' yield fn, os.path.join(resdir, path, destfn) def compile_mappingmodels(self, resdir): for src, dest in self.iter_mappingmodels(resdir): self.mkpath(os.path.dirname(dest)) mapc(src, dest) def iter_extra_plugins(self): for item in self.include_plugins: if isinstance(item, (list, tuple)): subdir, path = item else: ext = os.path.splitext(item)[1] try: subdir = PLUGIN_SUFFIXES[ext] path = item except KeyError: raise DistutilsOptionError("Cannot determine subdirectory for plugin %s"%(item,)) yield path, os.path.join(subdir, os.path.basename(path)) def iter_data_files(self): dist = self.distribution allres = chain(getattr(dist, 'data_files', ()) or (), self.resources) for (path, files) in (normalize_data_file(fn) for fn in allres): path = fsencoding(path) for fn in files: fn = fsencoding(fn) yield fn, os.path.join(path, os.path.basename(fn)) def collect_scripts(self): # these contains file names scripts = set() for target in self.targets: scripts.add(target.script) scripts.update([ k for k in target.prescripts if isinstance(k, basestring) ]) if hasattr(target, 'extra_scripts'): scripts.update(target.extra_scripts) scripts.update(self.extra_scripts) return scripts def get_plist_options(self): result = dict( PyOptions=dict( use_pythonpath=bool(self.use_pythonpath), site_packages=bool(self.site_packages), alias=bool(self.alias), argv_emulation=bool(self.argv_emulation), emulate_shell_environment=bool(self.emulate_shell_environment), no_chdir=bool(self.no_chdir), prefer_ppc=self.prefer_ppc, verbose=self.verbose_interpreter, use_faulthandler=self.use_faulthandler, ), ) if self.optimize: result['PyOptions']['optimize'] = self.optimize return result def initialize_plist(self): plist = self.get_default_plist() for target in self.targets: plist.update(getattr(target, 'plist', {})) plist.update(self.plist) plist.update(self.get_plist_options()) if self.iconfile: iconfile = self.iconfile if not os.path.exists(iconfile): iconfile = iconfile + '.icns' if not os.path.exists(iconfile): raise DistutilsOptionError("icon file must exist: %r" % (self.iconfile,)) self.resources.append(iconfile) plist['CFBundleIconFile'] = os.path.basename(iconfile) if self.prefer_ppc: plist['LSPrefersPPC'] = True self.plist = plist return plist def run_alias(self): self.app_files = [] for target in self.targets: extra_scripts = list(self.extra_scripts) if hasattr(target, 'extra_scripts'): extra_scripts.update(extra_scripts) dst = self.build_alias_executable(target, target.script, extra_scripts) self.app_files.append(dst) for fn in extra_scripts: if fn.endswith('.py'): fn = fn[:-3] elif fn.endswith('.pyw'): fn = fn[:-4] src_fn = script_executable(arch=self.arch, secondary=True) tgt_fn = os.path.join(target.appdir, 'Contents', 'MacOS', os.path.basename(fn)) mergecopy(src_fn, tgt_fn) make_exec(tgt_fn) def collect_recipedict(self): return dict(iterRecipes()) def get_modulefinder(self): if self.debug_modulegraph: debug = 4 else: debug = 0 return find_modules( scripts=self.collect_scripts(), includes=self.includes, packages=self.packages, excludes=self.excludes, debug=debug, ) def collect_filters(self): return [has_filename_filter] + list(self.filters) def process_recipes(self, mf, filters, flatpackages, loader_files): rdict = self.collect_recipedict() while 
True: for name, check in rdict.items(): rval = check(self, mf) if rval is None: continue # we can pull this off so long as we stop the iter del rdict[name] print('*** using recipe: %s ***' % (name,)) if rval.get('packages'): self.packages.update(rval['packages']) find_needed_modules(mf, packages=rval['packages']) for pkg in rval.get('flatpackages', ()): if isinstance(pkg, basestring): pkg = (os.path.basename(pkg), pkg) flatpackages[pkg[0]] = pkg[1] filters.extend(rval.get('filters', ())) loader_files.extend(rval.get('loader_files', ())) newbootstraps = list(map(self.get_bootstrap, rval.get('prescripts', ()))) if rval.get('includes'): find_needed_modules(mf, includes=rval['includes']) if rval.get('resources'): self.resources.extend(rval['resources']) for fn in newbootstraps: if isinstance(fn, basestring): mf.run_script(fn) for target in self.targets: target.prescripts.extend(newbootstraps) break else: break def _run(self): try: if self.alias: self.run_alias() else: self.run_normal() except: raise # XXX - remove when not debugging # distutils sucks import pdb, sys, traceback traceback.print_exc() pdb.post_mortem(sys.exc_info()[2]) print("Done!") def filter_dependencies(self, mf, filters): print("*** filtering dependencies ***") nodes_seen, nodes_removed, nodes_orphaned = mf.filterStack(filters) print('%d total' % (nodes_seen,)) print('%d filtered' % (nodes_removed,)) print('%d orphaned' % (nodes_orphaned,)) print('%d remaining' % (nodes_seen - nodes_removed,)) def get_appname(self): return self.plist['CFBundleName'] def build_xref(self, mf, flatpackages): for target in self.targets: base = target.get_dest_base() appdir = os.path.join(self.dist_dir, os.path.dirname(base)) appname = self.get_appname() dgraph = os.path.join(appdir, appname + '.html') print("*** creating dependency html: %s ***" % (os.path.basename(dgraph),)) with open(dgraph, 'w') as fp: mf.create_xref(fp) def build_graph(self, mf, flatpackages): for target in self.targets: base = target.get_dest_base() appdir = os.path.join(self.dist_dir, os.path.dirname(base)) appname = self.get_appname() dgraph = os.path.join(appdir, appname + '.dot') print("*** creating dependency graph: %s ***" % (os.path.basename(dgraph),)) with open(dgraph, 'w') as fp: mf.graphreport(fp, flatpackages=flatpackages) def finalize_modulefinder(self, mf): for item in mf.flatten(): if isinstance(item, Package) and item.filename == '-': if sys.version_info[:2] <= (3,3): fn = os.path.join(self.temp_dir, 'empty_package', '__init__.py') if not os.path.exists(fn): dn = os.path.dirname(fn) if not os.path.exists(dn): os.makedirs(dn) with open(fn, 'w') as fp: pass item.filename = fn py_files, extensions = parse_mf_results(mf) # Remove all top-level scripts from the list of python files, # those get treated differently. 
py_files = [ item for item in py_files if not isinstance(item, Script) ] extensions = list(extensions) return py_files, extensions def collect_packagedirs(self): return list(filter(os.path.exists, [ os.path.join(os.path.realpath(self.get_bootstrap(pkg)), '') for pkg in self.packages ])) def run_normal(self): mf = self.get_modulefinder() filters = self.collect_filters() flatpackages = {} loader_files = [] self.process_recipes(mf, filters, flatpackages, loader_files) if self.debug_modulegraph: import pdb pdb.Pdb().set_trace() self.filter_dependencies(mf, filters) if self.graph: self.build_graph(mf, flatpackages) if self.xref: self.build_xref(mf, flatpackages) py_files, extensions = self.finalize_modulefinder(mf) pkgdirs = self.collect_packagedirs() self.create_binaries(py_files, pkgdirs, extensions, loader_files) missing = [] syntax_error = [] invalid_bytecode = [] for module in mf.nodes(): if isinstance(module, modulegraph.MissingModule): if module.identifier != '__main__': missing.append(module) elif isinstance(module, modulegraph.InvalidSourceModule): syntax_error.append(module) elif hasattr(modulegraph, 'InvalidCompiledModule') and isinstance(module, modulegraph.InvalidCompiledModule): invalid_bytecode.append(module) if missing: missing_unconditional = collections.defaultdict(set) missing_fromimport = collections.defaultdict(set) missing_fromimport_conditional = collections.defaultdict(set) missing_conditional = collections.defaultdict(set) for module in sorted(missing): for m in mf.getReferers(module): if m is None: continue # XXX try: ed = mf.edgeData(m, module) except KeyError: ed = None if hasattr(modulegraph, 'DependencyInfo') and isinstance(ed, modulegraph.DependencyInfo): c = missing_unconditional if ed.conditional or ed.function: if ed.fromlist: c = missing_fromimport_conditional else: c = missing_conditional elif ed.fromlist: c = missing_fromimport c[module.identifier].add(m.identifier) else: missing_unconditional[module.identifier].add(m.identifier) if missing_unconditional: log.warn("Modules not found (unconditional imports):") for m in sorted(missing_unconditional): log.warn(" * %s (%s)" % (m, ", ".join(sorted(missing_unconditional[m])))) log.warn("") if missing_conditional and not self.no_report_missing_conditional_import: log.warn("Modules not found (conditional imports):") for m in sorted(missing_conditional): log.warn(" * %s (%s)" % (m, ", ".join(sorted(missing_conditional[m])))) log.warn("") if self.report_missing_from_imports and ( missing_fromimport or ( not self.no_report_missing_conditional_import and missing_fromimport_conditional)): log.warn("Modules not found ('from ... 
import y'):") for m in sorted(missing_fromimport): log.warn(" * %s (%s)" % (m, ", ".join(sorted(missing_fromimport[m])))) if not self.no_report_missing_conditional_import and missing_fromimport_conditional: log.warn("") log.warn("Conditional:") for m in sorted(missing_fromimport_conditional): log.warn(" * %s (%s)" % (m, ", ".join(sorted(missing_fromimport_conditional[m])))) log.warn("") if syntax_error: log.warn("Modules with syntax errors:") for module in sorted(syntax_error): log.warn(" * %s"%(module.identifier)) log.warn("") if invalid_bytecode: log.warn("Modules with invalid bytecode:") for module in sorted(invalid_bytecode): log.warn(" * %s"%(module.identifier)) log.warn("") def create_directories(self): bdist_base = self.bdist_base if self.semi_standalone: self.bdist_dir = os.path.join(bdist_base, 'python%s-semi_standalone' % (sys.version[:3],), 'app') else: self.bdist_dir = os.path.join(bdist_base, 'python%s-standalone' % (sys.version[:3],), 'app') if os.path.exists(self.bdist_dir): shutil.rmtree(self.bdist_dir) self.collect_dir = os.path.abspath( os.path.join(self.bdist_dir, "collect")) self.mkpath(self.collect_dir) self.temp_dir = os.path.abspath(os.path.join(self.bdist_dir, "temp")) self.mkpath(self.temp_dir) self.dist_dir = os.path.abspath(self.dist_dir) self.mkpath(self.dist_dir) self.lib_dir = os.path.join(self.bdist_dir, os.path.dirname(get_zipfile(self.distribution, self.semi_standalone))) self.mkpath(self.lib_dir) self.ext_dir = os.path.join(self.lib_dir, 'lib-dynload') self.mkpath(self.ext_dir) self.framework_dir = os.path.join(self.bdist_dir, 'Frameworks') self.mkpath(self.framework_dir) def create_binaries(self, py_files, pkgdirs, extensions, loader_files): print("*** create binaries ***") dist = self.distribution pkgexts = [] copyexts = [] extmap = {} def packagefilter(mod, pkgdirs=pkgdirs): fn = os.path.realpath(getattr(mod, 'filename', None)) if fn is None: return None for pkgdir in pkgdirs: if fn.startswith(pkgdir): return None return fn if pkgdirs: py_files = list(filter(packagefilter, py_files)) for ext in extensions: fn = packagefilter(ext) if fn is None: fn = os.path.realpath(getattr(ext, 'filename', None)) pkgexts.append(ext) else: if '.' 
in ext.identifier: py_files.append(self.create_loader(ext)) copyexts.append(ext) extmap[fn] = ext # byte compile the python modules into the target directory print("*** byte compile python files ***") byte_compile(py_files, target_dir=self.collect_dir, optimize=self.optimize, force=self.force, verbose=self.verbose, dry_run=self.dry_run) for item in py_files: if not isinstance(item, Package): continue self.copy_package_data(item, self.collect_dir) self.lib_files = [] self.app_files = [] # create the shared zipfile containing all Python modules archive_name = os.path.join(self.lib_dir, get_zipfile(dist, self.semi_standalone)) for path, files in loader_files: dest = os.path.join(self.collect_dir, path) self.mkpath(dest) for fn in files: destfn = os.path.join(dest, os.path.basename(fn)) if os.path.isdir(fn): self.copy_tree(fn, destfn, preserve_symlinks=False) else: self.copy_file(fn, destfn) arcname = self.make_lib_archive(archive_name, base_dir=self.collect_dir, verbose=self.verbose, dry_run=self.dry_run) # XXX: this doesn't work with python3 #self.lib_files.append(arcname) # build the executables for target in self.targets: extra_scripts = list(self.extra_scripts) if hasattr(target, 'extra_scripts'): extra_scripts.extend(target.extra_scripts) dst = self.build_executable( target, arcname, pkgexts, copyexts, target.script, extra_scripts) exp = os.path.join(dst, 'Contents', 'MacOS') execdst = os.path.join(exp, 'python') if self.semi_standalone: self.symlink(sys.executable, execdst) else: if os.path.exists(os.path.join(sys.prefix, ".Python")): fn = os.path.join(sys.prefix, "lib", "python%d.%d"%(sys.version_info[:2]), "orig-prefix.txt") if os.path.exists(fn): with open(fn, 'rU') as fp: prefix = fp.read().strip() rest_path = os.path.normpath(sys.executable)[len(os.path.normpath(sys.prefix))+1:] if rest_path.startswith('.'): rest_path = rest_path[1:] if PYTHONFRAMEWORK: # When we're using a python framework bin/python refers to a stub executable # that we don't want use, we need the executable in Resources/Python.app dpath = os.path.join(prefix, 'Resources', 'Python.app', 'Contents', 'MacOS') self.copy_file(os.path.join(dpath, PYTHONFRAMEWORK), execdst) else: self.copy_file(os.path.join(prefix, rest_path), execdst) else: if PYTHONFRAMEWORK: # When we're using a python framework bin/python refers to a stub executable # that we don't want use, we need the executable in Resources/Python.app dpath = os.path.join(sys.prefix, 'Resources', 'Python.app', 'Contents', 'MacOS') self.copy_file(os.path.join(dpath, PYTHONFRAMEWORK), execdst) else: self.copy_file(sys.executable, execdst) if not self.debug_skip_macholib: if self.force_system_tk: print("force system tk") resdir = os.path.join(dst, 'Contents', 'Resources') pydir = os.path.join(resdir, 'lib', 'python%s.%s'%(sys.version_info[:2])) ext_dir = os.path.join(pydir, os.path.basename(self.ext_dir)) tkinter_path = os.path.join(ext_dir, '_tkinter.so') if os.path.exists(tkinter_path): rewrite_tkinter_load_commands(tkinter_path) else: print("tkinter not found at", tkinter_path) mm = PythonStandalone(self, dst, executable_path=exp) dylib, runtime = self.get_runtime() if self.semi_standalone: mm.excludes.append(runtime) else: mm.mm.run_file(runtime) for exclude in self.dylib_excludes: info = macholib.dyld.framework_info(exclude) if info is not None: exclude = os.path.join( info['location'], info['shortname'] + '.framework') mm.excludes.append(exclude) for fmwk in self.frameworks: mm.mm.run_file(fmwk) platfiles = mm.run() if self.strip: platfiles = 
self.strip_dsym(platfiles) self.strip_files(platfiles) self.app_files.append(dst) def copy_package_data(self, package, target_dir): """ Copy any package data in a python package into the target_dir. This is a bit of a hack, it would be better to identify python eggs and copy those in whole. """ exts = [ i[0] for i in imp.get_suffixes() ] exts.append('.py') exts.append('.pyc') exts.append('.pyo') def datafilter(item): for e in exts: if item.endswith(e): return False return True target_dir = os.path.join(target_dir, *(package.identifier.split('.'))) for dname in package.packagepath: filenames = list(filter(datafilter, zipio.listdir(dname))) for fname in filenames: if fname in ('.svn', 'CVS', '.hg', '.git'): # Scrub revision manager junk continue if fname in ('__pycache__',): # Ignore PEP 3147 bytecode cache continue if fname.startswith('.') and fname.endswith('.swp'): # Ignore vim(1) temporary files continue if fname.endswith('~') or fname.endswith('.orig'): # Ignore backup files for common tools (hg, emacs, ...) continue pth = os.path.join(dname, fname) # Check if we have found a package, exclude those if zipio.isdir(pth): # XXX: the 'and not' part is wrong, need to fix zipio.isdir for p in zipio.listdir(pth): if p.startswith('__init__.') and p[8:] in exts: break else: if os.path.isfile(pth): # Avoid extracting a resource file that happens # to be zipfile. # XXX: Need API in zipio for nicer code. copy_file(pth, os.path.join(target_dir, fname)) else: copy_tree(pth, os.path.join(target_dir, fname)) continue elif zipio.isdir(pth) and ( zipio.isfile(os.path.join(pth, '__init__.py')) or zipio.isfile(os.path.join(pth, '__init__.pyc')) or zipio.isfile(os.path.join(pth, '__init__.pyo'))): # Subdirectory is a python package, these will get included later on # when the subpackage itself is included, ignore for now. pass else: copy_file(pth, os.path.join(target_dir, fname)) def strip_dsym(self, platfiles): """ Remove .dSYM directories in the bundled application """ # # .dSYM directories are contain detached debugging information and # should be completely removed when the "strip" option is specified. # if self.dry_run: return platfiles for dirpath, dnames, fnames in os.walk(self.appdir): for nm in list(dnames): if nm.endswith('.dSYM'): print("removing debug info: %s/%s"%(dirpath, nm)) shutil.rmtree(os.path.join(dirpath, nm)) dnames.remove(nm) return [file for file in platfiles if '.dSYM' not in file] def strip_files(self, files): unstripped = 0 stripfiles = [] for fn in files: unstripped += os.stat(fn).st_size stripfiles.append(fn) log.info('stripping %s', os.path.basename(fn)) strip_files(stripfiles, dry_run=self.dry_run, verbose=self.verbose) stripped = 0 for fn in stripfiles: stripped += os.stat(fn).st_size log.info('stripping saved %d bytes (%d / %d)', unstripped - stripped, stripped, unstripped) def copy_dylib(self, src, dst): # will be copied from the framework? if src != sys.executable: force, self.force = self.force, True self.copy_file(src, dst) self.force = force return dst def copy_versioned_framework(self, info, dst): # XXX - Boy is this ugly, but it makes sense because the developer # could have both Python 2.3 and 2.4, or Tk 8.4 and 8.5, etc. # Saves a good deal of space, and I'm pretty sure this ugly # hack is correct in the general case. 
version = info['version'] if version is None: return self.raw_copy_framework(info, dst) short = info['shortname'] + '.framework' infile = os.path.join(info['location'], short) outfile = os.path.join(dst, short) vsplit = os.path.join(infile, 'Versions').split(os.sep) def condition(src, vsplit=vsplit, version=version): srcsplit = src.split(os.sep) if ( len(srcsplit) > len(vsplit) and srcsplit[:len(vsplit)] == vsplit and srcsplit[len(vsplit)] != version and not os.path.islink(src) ): return False # Skip Headers, .svn, and CVS dirs return framework_copy_condition(src) return self.copy_tree(infile, outfile, preserve_symlinks=True, condition=condition) def copy_framework(self, info, dst): force, self.force = self.force, True if info['shortname'] == PYTHONFRAMEWORK: self.copy_python_framework(info, dst) else: self.copy_versioned_framework(info, dst) self.force = force return os.path.join(dst, info['name']) def raw_copy_framework(self, info, dst): short = info['shortname'] + '.framework' infile = os.path.join(info['location'], short) outfile = os.path.join(dst, short) return self.copy_tree(infile, outfile, preserve_symlinks=True, condition=framework_copy_condition) def copy_python_framework(self, info, dst): # XXX - In this particular case we know exactly what we can # get away with.. should this be extended to the general # case? Per-framework recipes? includedir = get_config_var('CONFINCLUDEPY') configdir = get_config_var('LIBPL') if includedir is None: includedir = 'python%d.%d'%(sys.version_info[:2]) else: includedir = os.path.basename(includedir) if configdir is None: configdir = 'config' else: configdir = os.path.basename(configdir) indir = os.path.dirname(os.path.join(info['location'], info['name'])) outdir = os.path.dirname(os.path.join(dst, info['name'])) self.mkpath(os.path.join(outdir, 'Resources')) pydir = 'python%s.%s'%(sys.version_info[:2]) # Create a symlink "for Python.frameworks/Versions/Current". This # is required for the Mac App-store. os.symlink( os.path.basename(outdir), os.path.join(os.path.dirname(outdir), "Current")) # Likewise for two links in the root of the framework: os.symlink( 'Versions/Current/Resources', os.path.join(os.path.dirname(os.path.dirname(outdir)), 'Resources')) os.symlink( os.path.join('Versions/Current', PYTHONFRAMEWORK), os.path.join(os.path.dirname(os.path.dirname(outdir)), PYTHONFRAMEWORK)) # Experiment for issue 57 if not os.path.exists(os.path.join(indir, 'include')): alt = os.path.join(indir, 'Versions/Current') if os.path.exists(os.path.join(alt, 'include')): indir = alt # distutils looks for some files relative to sys.executable, which # means they have to be in the framework... self.mkpath(os.path.join(outdir, 'include')) self.mkpath(os.path.join(outdir, 'include', includedir)) self.mkpath(os.path.join(outdir, 'lib')) self.mkpath(os.path.join(outdir, 'lib', pydir)) self.mkpath(os.path.join(outdir, 'lib', pydir, configdir)) fmwkfiles = [ os.path.basename(info['name']), 'Resources/Info.plist', 'include/%s/pyconfig.h'%(includedir), ] if '_sysconfigdata' not in sys.modules: fmwkfiles.append( 'lib/%s/%s/Makefile'%(pydir, configdir) ) for fn in fmwkfiles: self.copy_file( os.path.join(indir, fn), os.path.join(outdir, fn)) def fixup_distribution(self): dist = self.distribution # Trying to obtain app and plugin from dist for backward compatibility # reasons. app = dist.app plugin = dist.plugin # If we can get suitable values from self.app and self.plugin, we prefer # them. 
if self.app is not None or self.plugin is not None: app = self.app plugin = self.plugin # Convert our args into target objects. dist.app = FixupTargets(app, "script") dist.plugin = FixupTargets(plugin, "script") if dist.app and dist.plugin: # XXX - support apps and plugins? raise DistutilsOptionError( "You must specify either app or plugin, not both") elif dist.app: self.style = 'app' self.targets = dist.app elif dist.plugin: self.style = 'plugin' self.targets = dist.plugin else: raise DistutilsOptionError( "You must specify either app or plugin") if len(self.targets) != 1: # XXX - support multiple targets? raise DistutilsOptionError( "Multiple targets not currently supported") if not self.extension: self.extension = '.' + self.style # make sure all targets use the same directory, this is # also the directory where the pythonXX.dylib must reside paths = set() for target in self.targets: paths.add(os.path.dirname(target.get_dest_base())) if len(paths) > 1: raise DistutilsOptionError( "all targets must use the same directory: %s" % ([p for p in paths],)) if paths: app_dir = paths.pop() # the only element if os.path.isabs(app_dir): raise DistutilsOptionError( "app directory must be relative: %s" % (app_dir,)) self.app_dir = os.path.join(self.dist_dir, app_dir) self.mkpath(self.app_dir) else: # Do we allow to specify no targets? # We can at least build a zipfile... self.app_dir = self.lib_dir def initialize_prescripts(self): prescripts = [] prescripts.append('reset_sys_path') if self.semi_standalone: prescripts.append('semi_standalone_path') if 0 and sys.version_info[:2] >= (3, 2) and not self.alias: # Python 3.2 or later requires a more complicated # bootstrap prescripts.append('import_encodings') if os.path.exists(os.path.join(sys.prefix, ".Python")): # We're in a virtualenv, which means sys.path # will be broken in alias builds unless we fix # it. if self.alias or self.semi_standalone: prescripts.append("virtualenv") prescripts.append(StringIO('_fixup_virtualenv(%r)' % (sys.real_prefix,))) if self.site_packages or self.alias: import site global_site_packages = not os.path.exists( os.path.join(os.path.dirname(site.__file__), 'no-global-site-packages.txt')) prescripts.append('virtualenv_site_packages') prescripts.append(StringIO('_site_packages(%r, %r, %d)' % ( sys.prefix, sys.real_prefix, global_site_packages))) elif self.site_packages or self.alias: prescripts.append('site_packages') if is_system(): prescripts.append('system_path_extras') #if self.style == 'app': # prescripts.append('setup_pkgresource') included_subpkg = [pkg for pkg in self.packages if '.' 
in pkg] if included_subpkg: prescripts.append('setup_included_subpackages') prescripts.append(StringIO('_path_hooks = %r'%( included_subpkg))) if self.emulate_shell_environment: prescripts.append('emulate_shell_environment') if self.argv_emulation and self.style == 'app': prescripts.append('argv_emulation') if 'CFBundleDocumentTypes' not in self.plist: self.plist['CFBundleDocumentTypes'] = [ { 'CFBundleTypeOSTypes' : [ '****', 'fold', 'disk', ], 'CFBundleTypeRole': 'Viewer' }, ] if self.argv_inject is not None: prescripts.append('argv_inject') prescripts.append( StringIO('_argv_inject(%r)\n' % (self.argv_inject,))) if self.style == 'app' and not self.no_chdir: prescripts.append('chdir_resource') if not self.alias: prescripts.append('disable_linecache') prescripts.append('boot_' + self.style) else: # Add ctypes prescript because it is needed to # find libraries in the bundle, but we don't run # recipes and hence the ctypes recipe is not used # for alias builds. prescripts.append('ctypes_setup') if self.additional_paths: prescripts.append('path_inject') prescripts.append( StringIO('_path_inject(%r)\n' % (self.additional_paths,))) prescripts.append('boot_alias' + self.style) newprescripts = [] for s in prescripts: if isinstance(s, basestring): newprescripts.append( self.get_bootstrap('py2app.bootstrap.' + s)) else: newprescripts.append(s) for target in self.targets: prescripts = getattr(target, 'prescripts', []) target.prescripts = newprescripts + prescripts def get_bootstrap(self, bootstrap): if isinstance(bootstrap, basestring): if not os.path.exists(bootstrap): bootstrap = imp_find_module(bootstrap)[1] return bootstrap def get_bootstrap_data(self, bootstrap): bootstrap = self.get_bootstrap(bootstrap) if not isinstance(bootstrap, basestring): return bootstrap.getvalue() else: with open(bootstrap, 'rU') as fp: return fp.read() def create_pluginbundle(self, target, script, use_runtime_preference=True): base = target.get_dest_base() appdir = os.path.join(self.dist_dir, os.path.dirname(base)) appname = self.get_appname() print("*** creating plugin bundle: %s ***" % (appname,)) if self.runtime_preferences and use_runtime_preference: self.plist.setdefault( 'PyRuntimeLocations', self.runtime_preferences) appdir, plist = create_pluginbundle( appdir, appname, plist=self.plist, extension=self.extension, arch=self.arch, ) appdir = fsencoding(appdir) resdir = os.path.join(appdir, 'Contents', 'Resources') return appdir, resdir, plist def create_appbundle(self, target, script, use_runtime_preference=True): base = target.get_dest_base() appdir = os.path.join(self.dist_dir, os.path.dirname(base)) appname = self.get_appname() print("*** creating application bundle: %s ***" % (appname,)) if self.runtime_preferences and use_runtime_preference: self.plist.setdefault( 'PyRuntimeLocations', self.runtime_preferences) pythonInfo = self.plist.setdefault('PythonInfoDict', {}) py2appInfo = pythonInfo.setdefault('py2app', {}).update(dict( alias=bool(self.alias), )) appdir, plist = create_appbundle( appdir, appname, plist=self.plist, extension=self.extension, arch=self.arch, ) appdir = fsencoding(appdir) resdir = os.path.join(appdir, 'Contents', 'Resources') return appdir, resdir, plist def create_bundle(self, target, script, use_runtime_preference=True): fn = getattr(self, 'create_%sbundle' % (self.style,)) return fn( target, script, use_runtime_preference=use_runtime_preference ) def iter_frameworks(self): for fn in self.frameworks: fmwk = macholib.dyld.framework_info(fn) if fmwk is None: yield fn else: basename = 
fmwk['shortname'] + '.framework' yield os.path.join(fmwk['location'], basename) def build_alias_executable(self, target, script, extra_scripts): # Build an alias executable for the target appdir, resdir, plist = self.create_bundle(target, script) # symlink python executable execdst = os.path.join(appdir, 'Contents', 'MacOS', 'python') prefixPathExecutable = os.path.join(sys.prefix, 'bin', 'python') if os.path.exists(prefixPathExecutable): pyExecutable = prefixPathExecutable else: pyExecutable = sys.executable self.symlink(pyExecutable, execdst) # make PYTHONHOME pyhome = os.path.join(resdir, 'lib', 'python' + sys.version[:3]) realhome = os.path.join(sys.prefix, 'lib', 'python' + sys.version[:3]) makedirs(pyhome) if self.optimize: self.symlink('../../site.pyo', os.path.join(pyhome, 'site.pyo')) else: self.symlink('../../site.pyc', os.path.join(pyhome, 'site.pyc')) self.symlink( os.path.join(realhome, 'config'), os.path.join(pyhome, 'config')) # symlink data files # XXX: fixme: need to integrate automatic data conversion for src, dest in self.iter_data_files(): dest = os.path.join(resdir, dest) if src == dest: continue makedirs(os.path.dirname(dest)) try: copy_resource(src, dest, dry_run=self.dry_run, symlink=1) except: import traceback traceback.print_exc() raise plugindir = os.path.join(appdir, 'Contents', 'Library') for src, dest in self.iter_extra_plugins(): dest = os.path.join(plugindir, dest) if src == dest: continue makedirs(os.path.dirname(dest)) try: copy_resource(src, dest, dry_run=self.dry_run) except: import traceback traceback.print_exc() raise # symlink frameworks for src in self.iter_frameworks(): dest = os.path.join( appdir, 'Contents', 'Frameworks', os.path.basename(src)) if src == dest: continue makedirs(os.path.dirname(dest)) self.symlink(os.path.abspath(src), dest) self.compile_datamodels(resdir) self.compile_mappingmodels(resdir) bootfn = '__boot__' bootfile = open(os.path.join(resdir, bootfn + '.py'), 'w') for fn in target.prescripts: bootfile.write(self.get_bootstrap_data(fn)) bootfile.write('\n\n') bootfile.write("DEFAULT_SCRIPT=%r\n"%(os.path.realpath(script),)) script_map = {} for fn in extra_scripts: tgt = os.path.realpath(fn) fn = os.path.basename(fn) if fn.endswith('.py'): script_map[fn[:-3]] = tgt elif fn.endswith('.py'): script_map[fn[:-4]] = tgt else: script_map[fn] = tgt bootfile.write("SCRIPT_MAP=%r\n"%(script_map,)) bootfile.write('try:\n') bootfile.write(' _run()\n') bootfile.write('except KeyboardInterrupt:\n') bootfile.write(' pass\n') bootfile.close() target.appdir = appdir return appdir def build_executable(self, target, arcname, pkgexts, copyexts, script, extra_scripts): # Build an executable for the target appdir, resdir, plist = self.create_bundle(target, script) self.appdir = appdir self.resdir = resdir self.plist = plist for fn in extra_scripts: if fn.endswith('.py'): fn = fn[:-3] elif fn.endswith('.pyw'): fn = fn[:-4] src_fn = script_executable(arch=self.arch, secondary=True) tgt_fn = os.path.join(self.appdir, 'Contents', 'MacOS', os.path.basename(fn)) mergecopy(src_fn, tgt_fn) make_exec(tgt_fn) site_path = os.path.join(resdir, 'site.py') byte_compile([ SourceModule('site', site_path), ], target_dir=resdir, optimize=self.optimize, force=self.force, verbose=self.verbose, dry_run=self.dry_run) if not self.dry_run: os.unlink(site_path) includedir = get_config_var('CONFINCLUDEPY') configdir = get_config_var('LIBPL') if includedir is None: includedir = 'python%d.%d'%(sys.version_info[:2]) else: includedir = os.path.basename(includedir) if configdir is 
None: configdir = 'config' else: configdir = os.path.basename(configdir) self.compile_datamodels(resdir) self.compile_mappingmodels(resdir) bootfn = '__boot__' bootfile = open(os.path.join(resdir, bootfn + '.py'), 'w') for fn in target.prescripts: bootfile.write(self.get_bootstrap_data(fn)) bootfile.write('\n\n') bootfile.write("DEFAULT_SCRIPT=%r\n"%(os.path.basename(script),)) script_map = {} for fn in extra_scripts: fn = os.path.basename(fn) if fn.endswith('.py'): script_map[fn[:-3]] = fn elif fn.endswith('.py'): script_map[fn[:-4]] = fn else: script_map[fn] = fn bootfile.write("SCRIPT_MAP=%r\n"%(script_map,)) bootfile.write('_run()\n') bootfile.close() self.copy_file(script, resdir) for fn in extra_scripts: self.copy_file(fn, resdir) pydir = os.path.join(resdir, 'lib', 'python%s.%s'%(sys.version_info[:2])) if sys.version_info[0] == 2 or self.semi_standalone: arcdir = os.path.join(resdir, 'lib', 'python' + sys.version[:3]) else: arcdir = os.path.join(resdir, 'lib') realhome = os.path.join(sys.prefix, 'lib', 'python' + sys.version[:3]) self.mkpath(pydir) # The site.py file needs to be a two locations # 1) in lib/pythonX.Y, to be found during normal startup and # by the 'python' executable # 2) in the resources directory next to the script for # semistandalone builds (the lib/pythonX.Y directory is too # late on sys.path to be found in that case). # if self.optimize: self.symlink('../../site.pyo', os.path.join(pydir, 'site.pyo')) else: self.symlink('../../site.pyc', os.path.join(pydir, 'site.pyc')) cfgdir = os.path.join(pydir, configdir) realcfg = os.path.join(realhome, configdir) real_include = os.path.join(sys.prefix, 'include') if self.semi_standalone: self.symlink(realcfg, cfgdir) self.symlink(real_include, os.path.join(resdir, 'include')) else: self.mkpath(cfgdir) if '_sysconfigdata' not in sys.modules: # Recent enough versions of Python 2.7 and 3.x have # an _sysconfigdata module and don't need the Makefile # to provide the sysconfig data interface. Don't copy # them. for fn in 'Makefile', 'Setup', 'Setup.local', 'Setup.config': rfn = os.path.join(realcfg, fn) if os.path.exists(rfn): self.copy_file(rfn, os.path.join(cfgdir, fn)) inc_dir = os.path.join(resdir, 'include', includedir) self.mkpath(inc_dir) self.copy_file(get_config_h_filename(), os.path.join(inc_dir, 'pyconfig.h')) self.copy_file(arcname, arcdir) if sys.version_info[0] != 2: import zlib self.copy_file(zlib.__file__, os.path.dirname(arcdir)) ext_dir = os.path.join(pydir, os.path.basename(self.ext_dir)) self.copy_tree(self.ext_dir, ext_dir, preserve_symlinks=True) self.copy_tree(self.framework_dir, os.path.join(appdir, 'Contents', 'Frameworks'), preserve_symlinks=True) for pkg_name in self.packages: pkg = self.get_bootstrap(pkg_name) print('XXXX', pkg_name, pkg) if self.semi_standalone: # For semi-standalone builds don't copy packages # from the stdlib into the app bundle, even when # they are mentioned in self.packages. 
p = Package(pkg_name, pkg) if not not_stdlib_filter(p): continue dst = os.path.join(pydir, pkg_name) self.mkpath(dst) self.copy_tree(pkg, dst) # FIXME: The python files should be bytecompiled # here (see issue 101) for copyext in copyexts: fn = os.path.join(ext_dir, (copyext.identifier.replace('.', os.sep) + os.path.splitext(copyext.filename)[1]) ) self.mkpath(os.path.dirname(fn)) copy_file(copyext.filename, fn, dry_run=self.dry_run) for src, dest in self.iter_data_files(): dest = os.path.join(resdir, dest) if src == dest: continue makedirs(os.path.dirname(dest)) copy_resource(src, dest, dry_run=self.dry_run) plugindir = os.path.join(appdir, 'Contents', 'Library') for src, dest in self.iter_extra_plugins(): dest = os.path.join(plugindir, dest) if src == dest: continue makedirs(os.path.dirname(dest)) copy_resource(src, dest, dry_run=self.dry_run) target.appdir = appdir return appdir def create_loader(self, item): # Hm, how to avoid needless recreation of this file? slashname = item.identifier.replace('.', os.sep) pathname = os.path.join(self.temp_dir, "%s.py" % slashname) if os.path.exists(pathname): if self.verbose: print("skipping python loader for extension %r" % (item.identifier,)) else: self.mkpath(os.path.dirname(pathname)) # and what about dry_run? if self.verbose: print("creating python loader for extension %r" % (item.identifier,)) fname = slashname + os.path.splitext(item.filename)[1] source = make_loader(fname) if not self.dry_run: with open(pathname, "w") as fp: fp.write(source) else: return return SourceModule(item.identifier, pathname) def make_lib_archive(self, zip_filename, base_dir, verbose=0, dry_run=0): # Like distutils "make_archive", except we can specify the # compression to use - default is ZIP_STORED to keep the # runtime performance up. # Also, we don't append '.zip' to the filename. from distutils.dir_util import mkpath mkpath(os.path.dirname(zip_filename), dry_run=dry_run) if self.compressed: compression = zipfile.ZIP_DEFLATED else: compression = zipfile.ZIP_STORED if not dry_run: z = zipfile.ZipFile(zip_filename, "w", compression=compression) save_cwd = os.getcwd() os.chdir(base_dir) for dirpath, dirnames, filenames in os.walk('.'): if filenames: # Ensure that there are directory entries for # all directories in the zipfile. This is a # workaround for <http://bugs.python.org/issue14905>: # zipimport won't consider 'pkg/foo.py' to be in # namespace package 'pkg' unless there is an # entry for the directory (or there is a # pkg/__init__.py file as well) z.write(dirpath, dirpath) for fn in filenames: path = os.path.normpath(os.path.join(dirpath, fn)) if os.path.isfile(path): z.write(path, path) os.chdir(save_cwd) z.close() return zip_filename def copy_tree(self, infile, outfile, preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1, condition=None): """Copy an entire directory tree respecting verbose, dry-run, and force flags. This version doesn't bork on existing symlinks """ return copy_tree( infile, outfile, preserve_mode,preserve_times,preserve_symlinks, not self.force, dry_run=self.dry_run, condition=condition)
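For illustration only (this helper is not part of py2app): the make_lib_archive step above writes an explicit zip entry for every directory so that zipimport treats 'pkg/foo.py' as belonging to namespace package 'pkg' even without a pkg/__init__.py (the workaround for http://bugs.python.org/issue14905). A minimal standalone sketch of the same idea, with hypothetical names zip_path and src_dir:

import os
import zipfile

def make_site_archive(zip_path, src_dir, compressed=False):
    """Zip src_dir into zip_path, adding directory entries for zipimport."""
    compression = zipfile.ZIP_DEFLATED if compressed else zipfile.ZIP_STORED
    with zipfile.ZipFile(zip_path, "w", compression=compression) as zf:
        for dirpath, dirnames, filenames in os.walk(src_dir):
            rel_dir = os.path.relpath(dirpath, src_dir)
            if filenames and rel_dir != os.curdir:
                # Explicit directory entry (no file data) so zipimport sees the package dir.
                zf.write(dirpath, rel_dir)
            for fn in filenames:
                zf.write(os.path.join(dirpath, fn), os.path.join(rel_dir, fn))
    return zip_path

As in the code above, storing uncompressed (ZIP_STORED) is the default to keep import-time cost low; deflate is used only when compression is explicitly requested.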
apache-2.0
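As a usage sketch (not taken from the file above): a minimal setup.py driving this build_app/py2app machinery typically looks like the following; the script name main.py and the option values are placeholders.

from setuptools import setup

setup(
    app=['main.py'],                  # one 'app' target; use plugin=[...] for a plugin bundle
    options={
        'py2app': {
            'argv_emulation': True,   # enables the argv_emulation prescript
            'semi_standalone': False, # True links against the installed Python instead of bundling it
            'site_packages': False,   # controls whether the site_packages prescript is injected
        }
    },
    setup_requires=['py2app'],
)

Running "python setup.py py2app" then builds the bundle under the dist directory (self.dist_dir in the command above), by default dist/.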
jjhelmus/scipy
scipy/signal/filter_design.py
14
135076
"""Filter design. """ from __future__ import division, print_function, absolute_import import warnings import math import numpy import numpy as np from numpy import (atleast_1d, poly, polyval, roots, real, asarray, resize, pi, absolute, logspace, r_, sqrt, tan, log10, arctan, arcsinh, sin, exp, cosh, arccosh, ceil, conjugate, zeros, sinh, append, concatenate, prod, ones, array, mintypecode) from numpy.polynomial.polynomial import polyval as npp_polyval from scipy import special, optimize from scipy.special import comb, factorial from scipy._lib._numpy_compat import polyvalfromroots __all__ = ['findfreqs', 'freqs', 'freqz', 'tf2zpk', 'zpk2tf', 'normalize', 'lp2lp', 'lp2hp', 'lp2bp', 'lp2bs', 'bilinear', 'iirdesign', 'iirfilter', 'butter', 'cheby1', 'cheby2', 'ellip', 'bessel', 'band_stop_obj', 'buttord', 'cheb1ord', 'cheb2ord', 'ellipord', 'buttap', 'cheb1ap', 'cheb2ap', 'ellipap', 'besselap', 'BadCoefficients', 'freqs_zpk', 'freqz_zpk', 'tf2sos', 'sos2tf', 'zpk2sos', 'sos2zpk', 'group_delay', 'sosfreqz', 'iirnotch', 'iirpeak'] class BadCoefficients(UserWarning): """Warning about badly conditioned filter coefficients""" pass abs = absolute def findfreqs(num, den, N, kind='ba'): """ Find array of frequencies for computing the response of an analog filter. Parameters ---------- num, den : array_like, 1-D The polynomial coefficients of the numerator and denominator of the transfer function of the filter or LTI system, where the coefficients are ordered from highest to lowest degree. Or, the roots of the transfer function numerator and denominator (i.e. zeroes and poles). N : int The length of the array to be computed. kind : str {'ba', 'zp'}, optional Specifies whether the numerator and denominator are specified by their polynomial coefficients ('ba'), or their roots ('zp'). Returns ------- w : (N,) ndarray A 1-D array of frequencies, logarithmically spaced. Examples -------- Find a set of nine frequencies that span the "interesting part" of the frequency response for the filter with the transfer function H(s) = s / (s^2 + 8s + 25) >>> from scipy import signal >>> signal.findfreqs([1, 0], [1, 8, 25], N=9) array([ 1.00000000e-02, 3.16227766e-02, 1.00000000e-01, 3.16227766e-01, 1.00000000e+00, 3.16227766e+00, 1.00000000e+01, 3.16227766e+01, 1.00000000e+02]) """ if kind == 'ba': ep = atleast_1d(roots(den)) + 0j tz = atleast_1d(roots(num)) + 0j elif kind == 'zp': ep = atleast_1d(den) + 0j tz = atleast_1d(num) + 0j else: raise ValueError("input must be one of {'ba', 'zp'}") if len(ep) == 0: ep = atleast_1d(-1000) + 0j ez = r_['-1', numpy.compress(ep.imag >= 0, ep, axis=-1), numpy.compress((abs(tz) < 1e5) & (tz.imag >= 0), tz, axis=-1)] integ = abs(ez) < 1e-10 hfreq = numpy.around(numpy.log10(numpy.max(3 * abs(ez.real + integ) + 1.5 * ez.imag)) + 0.5) lfreq = numpy.around(numpy.log10(0.1 * numpy.min(abs(real(ez + integ)) + 2 * ez.imag)) - 0.5) w = logspace(lfreq, hfreq, N) return w def freqs(b, a, worN=None, plot=None): """ Compute frequency response of analog filter. Given the M-order numerator `b` and N-order denominator `a` of an analog filter, compute its frequency response:: b[0]*(jw)**M + b[1]*(jw)**(M-1) + ... + b[M] H(w) = ---------------------------------------------- a[0]*(jw)**N + a[1]*(jw)**(N-1) + ... + a[N] Parameters ---------- b : array_like Numerator of a linear filter. a : array_like Denominator of a linear filter. worN : {None, int, array_like}, optional If None, then compute at 200 frequencies around the interesting parts of the response curve (determined by pole-zero locations). 
If a single integer, then compute at that many frequencies. Otherwise, compute the response at the angular frequencies (e.g. rad/s) given in `worN`. plot : callable, optional A callable that takes two arguments. If given, the return parameters `w` and `h` are passed to plot. Useful for plotting the frequency response inside `freqs`. Returns ------- w : ndarray The angular frequencies at which `h` was computed. h : ndarray The frequency response. See Also -------- freqz : Compute the frequency response of a digital filter. Notes ----- Using Matplotlib's "plot" function as the callable for `plot` produces unexpected results, this plots the real part of the complex transfer function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``. Examples -------- >>> from scipy.signal import freqs, iirfilter >>> b, a = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1') >>> w, h = freqs(b, a, worN=np.logspace(-1, 2, 1000)) >>> import matplotlib.pyplot as plt >>> plt.semilogx(w, 20 * np.log10(abs(h))) >>> plt.xlabel('Frequency') >>> plt.ylabel('Amplitude response [dB]') >>> plt.grid() >>> plt.show() """ if worN is None: w = findfreqs(b, a, 200) elif isinstance(worN, int): N = worN w = findfreqs(b, a, N) else: w = worN w = atleast_1d(w) s = 1j * w h = polyval(b, s) / polyval(a, s) if plot is not None: plot(w, h) return w, h def freqs_zpk(z, p, k, worN=None): """ Compute frequency response of analog filter. Given the zeros `z`, poles `p`, and gain `k` of a filter, compute its frequency response:: (jw-z[0]) * (jw-z[1]) * ... * (jw-z[-1]) H(w) = k * ---------------------------------------- (jw-p[0]) * (jw-p[1]) * ... * (jw-p[-1]) Parameters ---------- z : array_like Zeroes of a linear filter p : array_like Poles of a linear filter k : scalar Gain of a linear filter worN : {None, int, array_like}, optional If None, then compute at 200 frequencies around the interesting parts of the response curve (determined by pole-zero locations). If a single integer, then compute at that many frequencies. Otherwise, compute the response at the angular frequencies (e.g. rad/s) given in `worN`. Returns ------- w : ndarray The angular frequencies at which `h` was computed. h : ndarray The frequency response. See Also -------- freqs : Compute the frequency response of an analog filter in TF form freqz : Compute the frequency response of a digital filter in TF form freqz_zpk : Compute the frequency response of a digital filter in ZPK form Notes ----- .. versionadded: 0.19.0 Examples -------- >>> from scipy.signal import freqs_zpk, iirfilter >>> z, p, k = iirfilter(4, [1, 10], 1, 60, analog=True, ftype='cheby1', ... output='zpk') >>> w, h = freqs_zpk(z, p, k, worN=np.logspace(-1, 2, 1000)) >>> import matplotlib.pyplot as plt >>> plt.semilogx(w, 20 * np.log10(abs(h))) >>> plt.xlabel('Frequency') >>> plt.ylabel('Amplitude response [dB]') >>> plt.grid() >>> plt.show() """ k = np.asarray(k) if k.size > 1: raise ValueError('k must be a single scalar gain') if worN is None: w = findfreqs(z, p, 200, kind='zp') elif isinstance(worN, int): N = worN w = findfreqs(z, p, N, kind='zp') else: w = worN w = atleast_1d(w) s = 1j * w num = polyvalfromroots(s, z) den = polyvalfromroots(s, p) h = k * num/den return w, h def freqz(b, a=1, worN=None, whole=False, plot=None): """ Compute the frequency response of a digital filter. Given the M-order numerator `b` and N-order denominator `a` of a digital filter, compute its frequency response:: jw -jw -jwM jw B(e ) b[0] + b[1]e + .... 
+ b[M]e H(e ) = ---- = ----------------------------------- jw -jw -jwN A(e ) a[0] + a[1]e + .... + a[N]e Parameters ---------- b : array_like numerator of a linear filter a : array_like denominator of a linear filter worN : {None, int, array_like}, optional If None (default), then compute at 512 frequencies equally spaced around the unit circle. If a single integer, then compute at that many frequencies. If an array_like, compute the response at the frequencies given (in radians/sample). whole : bool, optional Normally, frequencies are computed from 0 to the Nyquist frequency, pi radians/sample (upper-half of unit-circle). If `whole` is True, compute frequencies from 0 to 2*pi radians/sample. plot : callable A callable that takes two arguments. If given, the return parameters `w` and `h` are passed to plot. Useful for plotting the frequency response inside `freqz`. Returns ------- w : ndarray The normalized frequencies at which `h` was computed, in radians/sample. h : ndarray The frequency response, as complex numbers. See Also -------- sosfreqz Notes ----- Using Matplotlib's "plot" function as the callable for `plot` produces unexpected results, this plots the real part of the complex transfer function, not the magnitude. Try ``lambda w, h: plot(w, abs(h))``. Examples -------- >>> from scipy import signal >>> b = signal.firwin(80, 0.5, window=('kaiser', 8)) >>> w, h = signal.freqz(b) >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.title('Digital filter frequency response') >>> ax1 = fig.add_subplot(111) >>> plt.plot(w, 20 * np.log10(abs(h)), 'b') >>> plt.ylabel('Amplitude [dB]', color='b') >>> plt.xlabel('Frequency [rad/sample]') >>> ax2 = ax1.twinx() >>> angles = np.unwrap(np.angle(h)) >>> plt.plot(w, angles, 'g') >>> plt.ylabel('Angle (radians)', color='g') >>> plt.grid() >>> plt.axis('tight') >>> plt.show() """ b, a = map(atleast_1d, (b, a)) if whole: lastpoint = 2 * pi else: lastpoint = pi if worN is None: N = 512 w = numpy.linspace(0, lastpoint, N, endpoint=False) elif isinstance(worN, int): N = worN w = numpy.linspace(0, lastpoint, N, endpoint=False) else: w = worN w = atleast_1d(w) zm1 = exp(-1j * w) h = polyval(b[::-1], zm1) / polyval(a[::-1], zm1) if plot is not None: plot(w, h) return w, h def freqz_zpk(z, p, k, worN=None, whole=False): """ Compute the frequency response of a digital filter in ZPK form. Given the Zeros, Poles and Gain of a digital filter, compute its frequency response:: :math:`H(z)=k \prod_i (z - Z[i]) / \prod_j (z - P[j])` where :math:`k` is the `gain`, :math:`Z` are the `zeros` and :math:`P` are the `poles`. Parameters ---------- z : array_like Zeroes of a linear filter p : array_like Poles of a linear filter k : scalar Gain of a linear filter worN : {None, int, array_like}, optional If None (default), then compute at 512 frequencies equally spaced around the unit circle. If a single integer, then compute at that many frequencies. If an array_like, compute the response at the frequencies given (in radians/sample). whole : bool, optional Normally, frequencies are computed from 0 to the Nyquist frequency, pi radians/sample (upper-half of unit-circle). If `whole` is True, compute frequencies from 0 to 2*pi radians/sample. Returns ------- w : ndarray The normalized frequencies at which `h` was computed, in radians/sample. h : ndarray The frequency response. 
See Also -------- freqs : Compute the frequency response of an analog filter in TF form freqs_zpk : Compute the frequency response of an analog filter in ZPK form freqz : Compute the frequency response of a digital filter in TF form Notes ----- .. versionadded: 0.19.0 Examples -------- >>> from scipy import signal >>> z, p, k = signal.butter(4, 0.2, output='zpk') >>> w, h = signal.freqz_zpk(z, p, k) >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.title('Digital filter frequency response') >>> ax1 = fig.add_subplot(111) >>> plt.plot(w, 20 * np.log10(abs(h)), 'b') >>> plt.ylabel('Amplitude [dB]', color='b') >>> plt.xlabel('Frequency [rad/sample]') >>> ax2 = ax1.twinx() >>> angles = np.unwrap(np.angle(h)) >>> plt.plot(w, angles, 'g') >>> plt.ylabel('Angle (radians)', color='g') >>> plt.grid() >>> plt.axis('tight') >>> plt.show() """ z, p = map(atleast_1d, (z, p)) if whole: lastpoint = 2 * pi else: lastpoint = pi if worN is None: N = 512 w = numpy.linspace(0, lastpoint, N, endpoint=False) elif isinstance(worN, int): N = worN w = numpy.linspace(0, lastpoint, N, endpoint=False) else: w = worN w = atleast_1d(w) zm1 = exp(1j * w) h = k * polyvalfromroots(zm1, z) / polyvalfromroots(zm1, p) return w, h def group_delay(system, w=None, whole=False): r"""Compute the group delay of a digital filter. The group delay measures by how many samples amplitude envelopes of various spectral components of a signal are delayed by a filter. It is formally defined as the derivative of continuous (unwrapped) phase:: d jw D(w) = - -- arg H(e) dw Parameters ---------- system : tuple of array_like (b, a) Numerator and denominator coefficients of a filter transfer function. w : {None, int, array-like}, optional If None (default), then compute at 512 frequencies equally spaced around the unit circle. If a single integer, then compute at that many frequencies. If array, compute the delay at the frequencies given (in radians/sample). whole : bool, optional Normally, frequencies are computed from 0 to the Nyquist frequency, pi radians/sample (upper-half of unit-circle). If `whole` is True, compute frequencies from 0 to ``2*pi`` radians/sample. Returns ------- w : ndarray The normalized frequencies at which the group delay was computed, in radians/sample. gd : ndarray The group delay. Notes ----- The similar function in MATLAB is called `grpdelay`. If the transfer function :math:`H(z)` has zeros or poles on the unit circle, the group delay at corresponding frequencies is undefined. When such a case arises the warning is raised and the group delay is set to 0 at those frequencies. For the details of numerical computation of the group delay refer to [1]_. .. versionadded: 0.16.0 See Also -------- freqz : Frequency response of a digital filter References ---------- .. [1] Richard G. Lyons, "Understanding Digital Signal Processing, 3rd edition", p. 830. 
Examples -------- >>> from scipy import signal >>> b, a = signal.iirdesign(0.1, 0.3, 5, 50, ftype='cheby1') >>> w, gd = signal.group_delay((b, a)) >>> import matplotlib.pyplot as plt >>> plt.title('Digital filter group delay') >>> plt.plot(w, gd) >>> plt.ylabel('Group delay [samples]') >>> plt.xlabel('Frequency [rad/sample]') >>> plt.show() """ if w is None: w = 512 if isinstance(w, int): if whole: w = np.linspace(0, 2 * pi, w, endpoint=False) else: w = np.linspace(0, pi, w, endpoint=False) w = np.atleast_1d(w) b, a = map(np.atleast_1d, system) c = np.convolve(b, a[::-1]) cr = c * np.arange(c.size) z = np.exp(-1j * w) num = np.polyval(cr[::-1], z) den = np.polyval(c[::-1], z) singular = np.absolute(den) < 10 * EPSILON if np.any(singular): warnings.warn( "The group delay is singular at frequencies [{0}], setting to 0". format(", ".join("{0:.3f}".format(ws) for ws in w[singular])) ) gd = np.zeros_like(w) gd[~singular] = np.real(num[~singular] / den[~singular]) - a.size + 1 return w, gd def _validate_sos(sos): """Helper to validate a SOS input""" sos = np.atleast_2d(sos) if sos.ndim != 2: raise ValueError('sos array must be 2D') n_sections, m = sos.shape if m != 6: raise ValueError('sos array must be shape (n_sections, 6)') if not (sos[:, 3] == 1).all(): raise ValueError('sos[:, 3] should be all ones') return sos, n_sections def sosfreqz(sos, worN=None, whole=False): """ Compute the frequency response of a digital filter in SOS format. Given `sos`, an array with shape (n, 6) of second order sections of a digital filter, compute the frequency response of the system function:: B0(z) B1(z) B{n-1}(z) H(z) = ----- * ----- * ... * --------- A0(z) A1(z) A{n-1}(z) for z = exp(omega*1j), where B{k}(z) and A{k}(z) are numerator and denominator of the transfer function of the k-th second order section. Parameters ---------- sos : array_like Array of second-order filter coefficients, must have shape ``(n_sections, 6)``. Each row corresponds to a second-order section, with the first three columns providing the numerator coefficients and the last three providing the denominator coefficients. worN : {None, int, array_like}, optional If None (default), then compute at 512 frequencies equally spaced around the unit circle. If a single integer, then compute at that many frequencies. If an array_like, compute the response at the frequencies given (in radians/sample). whole : bool, optional Normally, frequencies are computed from 0 to the Nyquist frequency, pi radians/sample (upper-half of unit-circle). If `whole` is True, compute frequencies from 0 to 2*pi radians/sample. Returns ------- w : ndarray The normalized frequencies at which `h` was computed, in radians/sample. h : ndarray The frequency response, as complex numbers. See Also -------- freqz, sosfilt Notes ----- .. versionadded:: 0.19.0 Examples -------- Design a 15th-order bandpass filter in SOS format. >>> from scipy import signal >>> sos = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass', ... output='sos') Compute the frequency response at 1500 points from DC to Nyquist. >>> w, h = signal.sosfreqz(sos, worN=1500) Plot the response. >>> import matplotlib.pyplot as plt >>> plt.subplot(2, 1, 1) >>> db = 20*np.log10(np.abs(h)) >>> plt.plot(w/np.pi, db) >>> plt.ylim(-75, 5) >>> plt.grid(True) >>> plt.yticks([0, -20, -40, -60]) >>> plt.ylabel('Gain [dB]') >>> plt.title('Frequency Response') >>> plt.subplot(2, 1, 2) >>> plt.plot(w/np.pi, np.angle(h)) >>> plt.grid(True) >>> plt.yticks([-np.pi, -0.5*np.pi, 0, 0.5*np.pi, np.pi], ... 
[r'$-\\pi$', r'$-\\pi/2$', '0', r'$\\pi/2$', r'$\\pi$']) >>> plt.ylabel('Phase [rad]') >>> plt.xlabel('Normalized frequency (1.0 = Nyquist)') >>> plt.show() If the same filter is implemented as a single transfer function, numerical error corrupts the frequency response: >>> b, a = signal.ellip(15, 0.5, 60, (0.2, 0.4), btype='bandpass', ... output='ba') >>> w, h = signal.freqz(b, a, worN=1500) >>> plt.subplot(2, 1, 1) >>> db = 20*np.log10(np.abs(h)) >>> plt.plot(w/np.pi, db) >>> plt.subplot(2, 1, 2) >>> plt.plot(w/np.pi, np.angle(h)) >>> plt.show() """ sos, n_sections = _validate_sos(sos) if n_sections == 0: raise ValueError('Cannot compute frequencies with no sections') h = 1. for row in sos: w, rowh = freqz(row[:3], row[3:], worN=worN, whole=whole) h *= rowh return w, h def _cplxreal(z, tol=None): """ Split into complex and real parts, combining conjugate pairs. The 1D input vector `z` is split up into its complex (`zc`) and real (`zr`) elements. Every complex element must be part of a complex-conjugate pair, which are combined into a single number (with positive imaginary part) in the output. Two complex numbers are considered a conjugate pair if their real and imaginary parts differ in magnitude by less than ``tol * abs(z)``. Parameters ---------- z : array_like Vector of complex numbers to be sorted and split tol : float, optional Relative tolerance for testing realness and conjugate equality. Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for float64) Returns ------- zc : ndarray Complex elements of `z`, with each pair represented by a single value having positive imaginary part, sorted first by real part, and then by magnitude of imaginary part. The pairs are averaged when combined to reduce error. zr : ndarray Real elements of `z` (those having imaginary part less than `tol` times their magnitude), sorted by value. Raises ------ ValueError If there are any complex numbers in `z` for which a conjugate cannot be found. See Also -------- _cplxpair Examples -------- >>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j] >>> zc, zr = _cplxreal(a) >>> print zc [ 1.+1.j 2.+1.j 2.+1.j 2.+2.j] >>> print zr [ 1. 3. 4.] """ z = atleast_1d(z) if z.size == 0: return z, z elif z.ndim != 1: raise ValueError('_cplxreal only accepts 1D input') if tol is None: # Get tolerance from dtype of input tol = 100 * np.finfo((1.0 * z).dtype).eps # Sort by real part, magnitude of imaginary part (speed up further sorting) z = z[np.lexsort((abs(z.imag), z.real))] # Split reals from conjugate pairs real_indices = abs(z.imag) <= tol * abs(z) zr = z[real_indices].real if len(zr) == len(z): # Input is entirely real return array([]), zr # Split positive and negative halves of conjugates z = z[~real_indices] zp = z[z.imag > 0] zn = z[z.imag < 0] if len(zp) != len(zn): raise ValueError('Array contains complex value with no matching ' 'conjugate.') # Find runs of (approximately) the same real part same_real = np.diff(zp.real) <= tol * abs(zp[:-1]) diffs = numpy.diff(concatenate(([0], same_real, [0]))) run_starts = numpy.where(diffs > 0)[0] run_stops = numpy.where(diffs < 0)[0] # Sort each run by their imaginary parts for i in range(len(run_starts)): start = run_starts[i] stop = run_stops[i] + 1 for chunk in (zp[start:stop], zn[start:stop]): chunk[...] 
= chunk[np.lexsort([abs(chunk.imag)])] # Check that negatives match positives if any(abs(zp - zn.conj()) > tol * abs(zn)): raise ValueError('Array contains complex value with no matching ' 'conjugate.') # Average out numerical inaccuracy in real vs imag parts of pairs zc = (zp + zn.conj()) / 2 return zc, zr def _cplxpair(z, tol=None): """ Sort into pairs of complex conjugates. Complex conjugates in `z` are sorted by increasing real part. In each pair, the number with negative imaginary part appears first. If pairs have identical real parts, they are sorted by increasing imaginary magnitude. Two complex numbers are considered a conjugate pair if their real and imaginary parts differ in magnitude by less than ``tol * abs(z)``. The pairs are forced to be exact complex conjugates by averaging the positive and negative values. Purely real numbers are also sorted, but placed after the complex conjugate pairs. A number is considered real if its imaginary part is smaller than `tol` times the magnitude of the number. Parameters ---------- z : array_like 1-dimensional input array to be sorted. tol : float, optional Relative tolerance for testing realness and conjugate equality. Default is ``100 * spacing(1)`` of `z`'s data type (i.e. 2e-14 for float64) Returns ------- y : ndarray Complex conjugate pairs followed by real numbers. Raises ------ ValueError If there are any complex numbers in `z` for which a conjugate cannot be found. See Also -------- _cplxreal Examples -------- >>> a = [4, 3, 1, 2-2j, 2+2j, 2-1j, 2+1j, 2-1j, 2+1j, 1+1j, 1-1j] >>> z = _cplxpair(a) >>> print(z) [ 1.-1.j 1.+1.j 2.-1.j 2.+1.j 2.-1.j 2.+1.j 2.-2.j 2.+2.j 1.+0.j 3.+0.j 4.+0.j] """ z = atleast_1d(z) if z.size == 0 or np.isrealobj(z): return np.sort(z) if z.ndim != 1: raise ValueError('z must be 1-dimensional') zc, zr = _cplxreal(z, tol) # Interleave complex values and their conjugates, with negative imaginary # parts first in each pair zc = np.dstack((zc.conj(), zc)).flatten() z = np.append(zc, zr) return z def tf2zpk(b, a): r"""Return zero, pole, gain (z, p, k) representation from a numerator, denominator representation of a linear filter. Parameters ---------- b : array_like Numerator polynomial coefficients. a : array_like Denominator polynomial coefficients. Returns ------- z : ndarray Zeros of the transfer function. p : ndarray Poles of the transfer function. k : float System gain. Notes ----- If some values of `b` are too close to 0, they are removed. In that case, a BadCoefficients warning is emitted. The `b` and `a` arrays are interpreted as coefficients for positive, descending powers of the transfer function variable. So the inputs :math:`b = [b_0, b_1, ..., b_M]` and :math:`a =[a_0, a_1, ..., a_N]` can represent an analog filter of the form: .. math:: H(s) = \frac {b_0 s^M + b_1 s^{(M-1)} + \cdots + b_M} {a_0 s^N + a_1 s^{(N-1)} + \cdots + a_N} or a discrete-time filter of the form: .. math:: H(z) = \frac {b_0 z^M + b_1 z^{(M-1)} + \cdots + b_M} {a_0 z^N + a_1 z^{(N-1)} + \cdots + a_N} This "positive powers" form is found more commonly in controls engineering. If `M` and `N` are equal (which is true for all filters generated by the bilinear transform), then this happens to be equivalent to the "negative powers" discrete-time form preferred in DSP: .. math:: H(z) = \frac {b_0 + b_1 z^{-1} + \cdots + b_M z^{-M}} {a_0 + a_1 z^{-1} + \cdots + a_N z^{-N}} Although this is true for common filters, remember that this is not true in the general case. 
If `M` and `N` are not equal, the discrete-time transfer function coefficients must first be converted to the "positive powers" form before finding the poles and zeros. """ b, a = normalize(b, a) b = (b + 0.0) / a[0] a = (a + 0.0) / a[0] k = b[0] b /= b[0] z = roots(b) p = roots(a) return z, p, k def zpk2tf(z, p, k): """ Return polynomial transfer function representation from zeros and poles Parameters ---------- z : array_like Zeros of the transfer function. p : array_like Poles of the transfer function. k : float System gain. Returns ------- b : ndarray Numerator polynomial coefficients. a : ndarray Denominator polynomial coefficients. """ z = atleast_1d(z) k = atleast_1d(k) if len(z.shape) > 1: temp = poly(z[0]) b = zeros((z.shape[0], z.shape[1] + 1), temp.dtype.char) if len(k) == 1: k = [k[0]] * z.shape[0] for i in range(z.shape[0]): b[i] = k[i] * poly(z[i]) else: b = k * poly(z) a = atleast_1d(poly(p)) # Use real output if possible. Copied from numpy.poly, since # we can't depend on a specific version of numpy. if issubclass(b.dtype.type, numpy.complexfloating): # if complex roots are all complex conjugates, the roots are real. roots = numpy.asarray(z, complex) pos_roots = numpy.compress(roots.imag > 0, roots) neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots)) if len(pos_roots) == len(neg_roots): if numpy.all(numpy.sort_complex(neg_roots) == numpy.sort_complex(pos_roots)): b = b.real.copy() if issubclass(a.dtype.type, numpy.complexfloating): # if complex roots are all complex conjugates, the roots are real. roots = numpy.asarray(p, complex) pos_roots = numpy.compress(roots.imag > 0, roots) neg_roots = numpy.conjugate(numpy.compress(roots.imag < 0, roots)) if len(pos_roots) == len(neg_roots): if numpy.all(numpy.sort_complex(neg_roots) == numpy.sort_complex(pos_roots)): a = a.real.copy() return b, a def tf2sos(b, a, pairing='nearest'): """ Return second-order sections from transfer function representation Parameters ---------- b : array_like Numerator polynomial coefficients. a : array_like Denominator polynomial coefficients. pairing : {'nearest', 'keep_odd'}, optional The method to use to combine pairs of poles and zeros into sections. See `zpk2sos`. Returns ------- sos : ndarray Array of second-order filter coefficients, with shape ``(n_sections, 6)``. See `sosfilt` for the SOS filter format specification. See Also -------- zpk2sos, sosfilt Notes ----- It is generally discouraged to convert from TF to SOS format, since doing so usually will not improve numerical precision errors. Instead, consider designing filters in ZPK format and converting directly to SOS. TF is converted to SOS by first converting to ZPK format, then converting ZPK to SOS. .. versionadded:: 0.16.0 """ return zpk2sos(*tf2zpk(b, a), pairing=pairing) def sos2tf(sos): """ Return a single transfer function from a series of second-order sections Parameters ---------- sos : array_like Array of second-order filter coefficients, must have shape ``(n_sections, 6)``. See `sosfilt` for the SOS filter format specification. Returns ------- b : ndarray Numerator polynomial coefficients. a : ndarray Denominator polynomial coefficients. Notes ----- .. versionadded:: 0.16.0 """ sos = np.asarray(sos) b = [1.] a = [1.] 
n_sections = sos.shape[0] for section in range(n_sections): b = np.polymul(b, sos[section, :3]) a = np.polymul(a, sos[section, 3:]) return b, a def sos2zpk(sos): """ Return zeros, poles, and gain of a series of second-order sections Parameters ---------- sos : array_like Array of second-order filter coefficients, must have shape ``(n_sections, 6)``. See `sosfilt` for the SOS filter format specification. Returns ------- z : ndarray Zeros of the transfer function. p : ndarray Poles of the transfer function. k : float System gain. Notes ----- .. versionadded:: 0.16.0 """ sos = np.asarray(sos) n_sections = sos.shape[0] z = np.empty(n_sections*2, np.complex128) p = np.empty(n_sections*2, np.complex128) k = 1. for section in range(n_sections): zpk = tf2zpk(sos[section, :3], sos[section, 3:]) z[2*section:2*(section+1)] = zpk[0] p[2*section:2*(section+1)] = zpk[1] k *= zpk[2] return z, p, k def _nearest_real_complex_idx(fro, to, which): """Get the next closest real or complex element based on distance""" assert which in ('real', 'complex') order = np.argsort(np.abs(fro - to)) mask = np.isreal(fro[order]) if which == 'complex': mask = ~mask return order[np.where(mask)[0][0]] def zpk2sos(z, p, k, pairing='nearest'): """ Return second-order sections from zeros, poles, and gain of a system Parameters ---------- z : array_like Zeros of the transfer function. p : array_like Poles of the transfer function. k : float System gain. pairing : {'nearest', 'keep_odd'}, optional The method to use to combine pairs of poles and zeros into sections. See Notes below. Returns ------- sos : ndarray Array of second-order filter coefficients, with shape ``(n_sections, 6)``. See `sosfilt` for the SOS filter format specification. See Also -------- sosfilt Notes ----- The algorithm used to convert ZPK to SOS format is designed to minimize errors due to numerical precision issues. The pairing algorithm attempts to minimize the peak gain of each biquadratic section. This is done by pairing poles with the nearest zeros, starting with the poles closest to the unit circle. *Algorithms* The current algorithms are designed specifically for use with digital filters. (The output coefficents are not correct for analog filters.) The steps in the ``pairing='nearest'`` and ``pairing='keep_odd'`` algorithms are mostly shared. The ``nearest`` algorithm attempts to minimize the peak gain, while ``'keep_odd'`` minimizes peak gain under the constraint that odd-order systems should retain one section as first order. The algorithm steps and are as follows: As a pre-processing step, add poles or zeros to the origin as necessary to obtain the same number of poles and zeros for pairing. If ``pairing == 'nearest'`` and there are an odd number of poles, add an additional pole and a zero at the origin. The following steps are then iterated over until no more poles or zeros remain: 1. Take the (next remaining) pole (complex or real) closest to the unit circle to begin a new filter section. 2. If the pole is real and there are no other remaining real poles [#]_, add the closest real zero to the section and leave it as a first order section. Note that after this step we are guaranteed to be left with an even number of real poles, complex poles, real zeros, and complex zeros for subsequent pairing iterations. 3. Else: 1. If the pole is complex and the zero is the only remaining real zero*, then pair the pole with the *next* closest zero (guaranteed to be complex). 
This is necessary to ensure that there will be a real zero remaining to eventually create a first-order section (thus keeping the odd order). 2. Else pair the pole with the closest remaining zero (complex or real). 3. Proceed to complete the second-order section by adding another pole and zero to the current pole and zero in the section: 1. If the current pole and zero are both complex, add their conjugates. 2. Else if the pole is complex and the zero is real, add the conjugate pole and the next closest real zero. 3. Else if the pole is real and the zero is complex, add the conjugate zero and the real pole closest to those zeros. 4. Else (we must have a real pole and real zero) add the next real pole closest to the unit circle, and then add the real zero closest to that pole. .. [#] This conditional can only be met for specific odd-order inputs with the ``pairing == 'keep_odd'`` method. .. versionadded:: 0.16.0 Examples -------- Design a 6th order low-pass elliptic digital filter for a system with a sampling rate of 8000 Hz that has a pass-band corner frequency of 1000 Hz. The ripple in the pass-band should not exceed 0.087 dB, and the attenuation in the stop-band should be at least 90 dB. In the following call to `signal.ellip`, we could use ``output='sos'``, but for this example, we'll use ``output='zpk'``, and then convert to SOS format with `zpk2sos`: >>> from scipy import signal >>> z, p, k = signal.ellip(6, 0.087, 90, 1000/(0.5*8000), output='zpk') Now convert to SOS format. >>> sos = signal.zpk2sos(z, p, k) The coefficients of the numerators of the sections: >>> sos[:, :3] array([[ 0.0014154 , 0.00248707, 0.0014154 ], [ 1. , 0.72965193, 1. ], [ 1. , 0.17594966, 1. ]]) The symmetry in the coefficients occurs because all the zeros are on the unit circle. The coefficients of the denominators of the sections: >>> sos[:, 3:] array([[ 1. , -1.32543251, 0.46989499], [ 1. , -1.26117915, 0.6262586 ], [ 1. , -1.25707217, 0.86199667]]) The next example shows the effect of the `pairing` option. We have a system with three poles and three zeros, so the SOS array will have shape (2, 6). The means there is, in effect, an extra pole and an extra zero at the origin in the SOS representation. >>> z1 = np.array([-1, -0.5-0.5j, -0.5+0.5j]) >>> p1 = np.array([0.75, 0.8+0.1j, 0.8-0.1j]) With ``pairing='nearest'`` (the default), we obtain >>> signal.zpk2sos(z1, p1, 1) array([[ 1. , 1. , 0.5 , 1. , -0.75, 0. ], [ 1. , 1. , 0. , 1. , -1.6 , 0.65]]) The first section has the zeros {-0.5-0.05j, -0.5+0.5j} and the poles {0, 0.75}, and the second section has the zeros {-1, 0} and poles {0.8+0.1j, 0.8-0.1j}. Note that the extra pole and zero at the origin have been assigned to different sections. With ``pairing='keep_odd'``, we obtain: >>> signal.zpk2sos(z1, p1, 1, pairing='keep_odd') array([[ 1. , 1. , 0. , 1. , -0.75, 0. ], [ 1. , 1. , 0.5 , 1. , -1.6 , 0.65]]) The extra pole and zero at the origin are in the same section. The first section is, in effect, a first-order section. """ # TODO in the near future: # 1. Add SOS capability to `filtfilt`, `freqz`, etc. somehow (#3259). # 2. Make `decimate` use `sosfilt` instead of `lfilter`. # 3. Make sosfilt automatically simplify sections to first order # when possible. Note this might make `sosfiltfilt` a bit harder (ICs). # 4. Further optimizations of the section ordering / pole-zero pairing. # See the wiki for other potential issues. 
valid_pairings = ['nearest', 'keep_odd'] if pairing not in valid_pairings: raise ValueError('pairing must be one of %s, not %s' % (valid_pairings, pairing)) if len(z) == len(p) == 0: return array([[k, 0., 0., 1., 0., 0.]]) # ensure we have the same number of poles and zeros, and make copies p = np.concatenate((p, np.zeros(max(len(z) - len(p), 0)))) z = np.concatenate((z, np.zeros(max(len(p) - len(z), 0)))) n_sections = (max(len(p), len(z)) + 1) // 2 sos = zeros((n_sections, 6)) if len(p) % 2 == 1 and pairing == 'nearest': p = np.concatenate((p, [0.])) z = np.concatenate((z, [0.])) assert len(p) == len(z) # Ensure we have complex conjugate pairs # (note that _cplxreal only gives us one element of each complex pair): z = np.concatenate(_cplxreal(z)) p = np.concatenate(_cplxreal(p)) p_sos = np.zeros((n_sections, 2), np.complex128) z_sos = np.zeros_like(p_sos) for si in range(n_sections): # Select the next "worst" pole p1_idx = np.argmin(np.abs(1 - np.abs(p))) p1 = p[p1_idx] p = np.delete(p, p1_idx) # Pair that pole with a zero if np.isreal(p1) and np.isreal(p).sum() == 0: # Special case to set a first-order section z1_idx = _nearest_real_complex_idx(z, p1, 'real') z1 = z[z1_idx] z = np.delete(z, z1_idx) p2 = z2 = 0 else: if not np.isreal(p1) and np.isreal(z).sum() == 1: # Special case to ensure we choose a complex zero to pair # with so later (setting up a first-order section) z1_idx = _nearest_real_complex_idx(z, p1, 'complex') assert not np.isreal(z[z1_idx]) else: # Pair the pole with the closest zero (real or complex) z1_idx = np.argmin(np.abs(p1 - z)) z1 = z[z1_idx] z = np.delete(z, z1_idx) # Now that we have p1 and z1, figure out what p2 and z2 need to be if not np.isreal(p1): if not np.isreal(z1): # complex pole, complex zero p2 = p1.conj() z2 = z1.conj() else: # complex pole, real zero p2 = p1.conj() z2_idx = _nearest_real_complex_idx(z, p1, 'real') z2 = z[z2_idx] assert np.isreal(z2) z = np.delete(z, z2_idx) else: if not np.isreal(z1): # real pole, complex zero z2 = z1.conj() p2_idx = _nearest_real_complex_idx(p, z1, 'real') p2 = p[p2_idx] assert np.isreal(p2) else: # real pole, real zero # pick the next "worst" pole to use idx = np.where(np.isreal(p))[0] assert len(idx) > 0 p2_idx = idx[np.argmin(np.abs(np.abs(p[idx]) - 1))] p2 = p[p2_idx] # find a real zero to match the added pole assert np.isreal(p2) z2_idx = _nearest_real_complex_idx(z, p2, 'real') z2 = z[z2_idx] assert np.isreal(z2) z = np.delete(z, z2_idx) p = np.delete(p, p2_idx) p_sos[si] = [p1, p2] z_sos[si] = [z1, z2] assert len(p) == len(z) == 0 # we've consumed all poles and zeros del p, z # Construct the system, reversing order so the "worst" are last p_sos = np.reshape(p_sos[::-1], (n_sections, 2)) z_sos = np.reshape(z_sos[::-1], (n_sections, 2)) gains = np.ones(n_sections) gains[0] = k for si in range(n_sections): x = zpk2tf(z_sos[si], p_sos[si], gains[si]) sos[si] = np.concatenate(x) return sos def _align_nums(nums): """Aligns the shapes of multiple numerators. Given an array of numerator coefficient arrays [[a_1, a_2,..., a_n],..., [b_1, b_2,..., b_m]], this function pads shorter numerator arrays with zero's so that all numerators have the same length. Such alignment is necessary for functions like 'tf2ss', which needs the alignment when dealing with SIMO transfer functions. Parameters ---------- nums: array_like Numerator or list of numerators. Not necessarily with same length. Returns ------- nums: array The numerator. 
If `nums` input was a list of numerators then a 2d array with padded zeros for shorter numerators is returned. Otherwise returns ``np.asarray(nums)``. """ try: # The statement can throw a ValueError if one # of the numerators is a single digit and another # is array-like e.g. if nums = [5, [1, 2, 3]] nums = asarray(nums) if not np.issubdtype(nums.dtype, np.number): raise ValueError("dtype of numerator is non-numeric") return nums except ValueError: nums = [np.atleast_1d(num) for num in nums] max_width = max(num.size for num in nums) # pre-allocate aligned_nums = np.zeros((len(nums), max_width)) # Create numerators with padded zeros for index, num in enumerate(nums): aligned_nums[index, -num.size:] = num return aligned_nums def normalize(b, a): """Normalize numerator/denominator of a continuous-time transfer function. If values of `b` are too close to 0, they are removed. In that case, a BadCoefficients warning is emitted. Parameters ---------- b: array_like Numerator of the transfer function. Can be a 2d array to normalize multiple transfer functions. a: array_like Denominator of the transfer function. At most 1d. Returns ------- num: array The numerator of the normalized transfer function. At least a 1d array. A 2d-array if the input `num` is a 2d array. den: 1d-array The denominator of the normalized transfer function. Notes ----- Coefficients for both the numerator and denominator should be specified in descending exponent order (e.g., ``s^2 + 3s + 5`` would be represented as ``[1, 3, 5]``). """ num, den = b, a den = np.atleast_1d(den) num = np.atleast_2d(_align_nums(num)) if den.ndim != 1: raise ValueError("Denominator polynomial must be rank-1 array.") if num.ndim > 2: raise ValueError("Numerator polynomial must be rank-1 or" " rank-2 array.") if np.all(den == 0): raise ValueError("Denominator must have at least on nonzero element.") # Trim leading zeros in denominator, leave at least one. den = np.trim_zeros(den, 'f') # Normalize transfer function num, den = num / den[0], den / den[0] # Count numerator columns that are all zero leading_zeros = 0 for col in num.T: if np.allclose(col, 0, atol=1e-14): leading_zeros += 1 else: break # Trim leading zeros of numerator if leading_zeros > 0: warnings.warn("Badly conditioned filter coefficients (numerator): the " "results may be meaningless", BadCoefficients) # Make sure at least one column remains if leading_zeros == num.shape[1]: leading_zeros -= 1 num = num[:, leading_zeros:] # Squeeze first dimension if singular if num.shape[0] == 1: num = num[0, :] return num, den def lp2lp(b, a, wo=1.0): """ Transform a lowpass filter prototype to a different frequency. Return an analog low-pass filter with cutoff frequency `wo` from an analog low-pass filter prototype with unity cutoff frequency, in transfer function ('ba') representation. """ a, b = map(atleast_1d, (a, b)) try: wo = float(wo) except TypeError: wo = float(wo[0]) d = len(a) n = len(b) M = max((d, n)) pwo = pow(wo, numpy.arange(M - 1, -1, -1)) start1 = max((n - d, 0)) start2 = max((d - n, 0)) b = b * pwo[start1] / pwo[start2:] a = a * pwo[start1] / pwo[start1:] return normalize(b, a) def lp2hp(b, a, wo=1.0): """ Transform a lowpass filter prototype to a highpass filter. Return an analog high-pass filter with cutoff frequency `wo` from an analog low-pass filter prototype with unity cutoff frequency, in transfer function ('ba') representation. 
""" a, b = map(atleast_1d, (a, b)) try: wo = float(wo) except TypeError: wo = float(wo[0]) d = len(a) n = len(b) if wo != 1: pwo = pow(wo, numpy.arange(max((d, n)))) else: pwo = numpy.ones(max((d, n)), b.dtype.char) if d >= n: outa = a[::-1] * pwo outb = resize(b, (d,)) outb[n:] = 0.0 outb[:n] = b[::-1] * pwo[:n] else: outb = b[::-1] * pwo outa = resize(a, (n,)) outa[d:] = 0.0 outa[:d] = a[::-1] * pwo[:d] return normalize(outb, outa) def lp2bp(b, a, wo=1.0, bw=1.0): """ Transform a lowpass filter prototype to a bandpass filter. Return an analog band-pass filter with center frequency `wo` and bandwidth `bw` from an analog low-pass filter prototype with unity cutoff frequency, in transfer function ('ba') representation. """ a, b = map(atleast_1d, (a, b)) D = len(a) - 1 N = len(b) - 1 artype = mintypecode((a, b)) ma = max([N, D]) Np = N + ma Dp = D + ma bprime = numpy.zeros(Np + 1, artype) aprime = numpy.zeros(Dp + 1, artype) wosq = wo * wo for j in range(Np + 1): val = 0.0 for i in range(0, N + 1): for k in range(0, i + 1): if ma - i + 2 * k == j: val += comb(i, k) * b[N - i] * (wosq) ** (i - k) / bw ** i bprime[Np - j] = val for j in range(Dp + 1): val = 0.0 for i in range(0, D + 1): for k in range(0, i + 1): if ma - i + 2 * k == j: val += comb(i, k) * a[D - i] * (wosq) ** (i - k) / bw ** i aprime[Dp - j] = val return normalize(bprime, aprime) def lp2bs(b, a, wo=1.0, bw=1.0): """ Transform a lowpass filter prototype to a bandstop filter. Return an analog band-stop filter with center frequency `wo` and bandwidth `bw` from an analog low-pass filter prototype with unity cutoff frequency, in transfer function ('ba') representation. """ a, b = map(atleast_1d, (a, b)) D = len(a) - 1 N = len(b) - 1 artype = mintypecode((a, b)) M = max([N, D]) Np = M + M Dp = M + M bprime = numpy.zeros(Np + 1, artype) aprime = numpy.zeros(Dp + 1, artype) wosq = wo * wo for j in range(Np + 1): val = 0.0 for i in range(0, N + 1): for k in range(0, M - i + 1): if i + 2 * k == j: val += (comb(M - i, k) * b[N - i] * (wosq) ** (M - i - k) * bw ** i) bprime[Np - j] = val for j in range(Dp + 1): val = 0.0 for i in range(0, D + 1): for k in range(0, M - i + 1): if i + 2 * k == j: val += (comb(M - i, k) * a[D - i] * (wosq) ** (M - i - k) * bw ** i) aprime[Dp - j] = val return normalize(bprime, aprime) def bilinear(b, a, fs=1.0): """Return a digital filter from an analog one using a bilinear transform. The bilinear transform substitutes ``(z-1) / (z+1)`` for ``s``. """ fs = float(fs) a, b = map(atleast_1d, (a, b)) D = len(a) - 1 N = len(b) - 1 artype = float M = max([N, D]) Np = M Dp = M bprime = numpy.zeros(Np + 1, artype) aprime = numpy.zeros(Dp + 1, artype) for j in range(Np + 1): val = 0.0 for i in range(N + 1): for k in range(i + 1): for l in range(M - i + 1): if k + l == j: val += (comb(i, k) * comb(M - i, l) * b[N - i] * pow(2 * fs, i) * (-1) ** k) bprime[j] = real(val) for j in range(Dp + 1): val = 0.0 for i in range(D + 1): for k in range(i + 1): for l in range(M - i + 1): if k + l == j: val += (comb(i, k) * comb(M - i, l) * a[D - i] * pow(2 * fs, i) * (-1) ** k) aprime[j] = real(val) return normalize(bprime, aprime) def iirdesign(wp, ws, gpass, gstop, analog=False, ftype='ellip', output='ba'): """Complete IIR digital and analog filter design. Given passband and stopband frequencies and gains, construct an analog or digital IIR filter of minimum order for a given basic type. Return the output in numerator, denominator ('ba'), pole-zero ('zpk') or second order sections ('sos') form. 
Parameters ---------- wp, ws : float Passband and stopband edge frequencies. For digital filters, these are normalized from 0 to 1, where 1 is the Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in half-cycles / sample.) For example: - Lowpass: wp = 0.2, ws = 0.3 - Highpass: wp = 0.3, ws = 0.2 - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s). gpass : float The maximum loss in the passband (dB). gstop : float The minimum attenuation in the stopband (dB). analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. ftype : str, optional The type of IIR filter to design: - Butterworth : 'butter' - Chebyshev I : 'cheby1' - Chebyshev II : 'cheby2' - Cauer/elliptic: 'ellip' - Bessel/Thomson: 'bessel' output : {'ba', 'zpk', 'sos'}, optional Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or second-order sections ('sos'). Default is 'ba'. Returns ------- b, a : ndarray, ndarray Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. Only returned if ``output='ba'``. z, p, k : ndarray, ndarray, float Zeros, poles, and system gain of the IIR filter transfer function. Only returned if ``output='zpk'``. sos : ndarray Second-order sections representation of the IIR filter. Only returned if ``output=='sos'``. See Also -------- butter : Filter design using order and critical points cheby1, cheby2, ellip, bessel buttord : Find order and critical points from passband and stopband spec cheb1ord, cheb2ord, ellipord iirfilter : General filter design using order and critical frequencies Notes ----- The ``'sos'`` output parameter was added in 0.16.0. """ try: ordfunc = filter_dict[ftype][1] except KeyError: raise ValueError("Invalid IIR filter type: %s" % ftype) except IndexError: raise ValueError(("%s does not have order selection. Use " "iirfilter function.") % ftype) wp = atleast_1d(wp) ws = atleast_1d(ws) band_type = 2 * (len(wp) - 1) band_type += 1 if wp[0] >= ws[0]: band_type += 1 btype = {1: 'lowpass', 2: 'highpass', 3: 'bandstop', 4: 'bandpass'}[band_type] N, Wn = ordfunc(wp, ws, gpass, gstop, analog=analog) return iirfilter(N, Wn, rp=gpass, rs=gstop, analog=analog, btype=btype, ftype=ftype, output=output) def iirfilter(N, Wn, rp=None, rs=None, btype='band', analog=False, ftype='butter', output='ba'): """ IIR digital and analog filter design given order and critical points. Design an Nth-order digital or analog filter and return the filter coefficients. Parameters ---------- N : int The order of the filter. Wn : array_like A scalar or length-2 sequence giving the critical frequencies. For digital filters, `Wn` is normalized from 0 to 1, where 1 is the Nyquist frequency, pi radians/sample. (`Wn` is thus in half-cycles / sample.) For analog filters, `Wn` is an angular frequency (e.g. rad/s). rp : float, optional For Chebyshev and elliptic filters, provides the maximum ripple in the passband. (dB) rs : float, optional For Chebyshev and elliptic filters, provides the minimum attenuation in the stop band. (dB) btype : {'bandpass', 'lowpass', 'highpass', 'bandstop'}, optional The type of filter. Default is 'bandpass'. analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. 
ftype : str, optional The type of IIR filter to design: - Butterworth : 'butter' - Chebyshev I : 'cheby1' - Chebyshev II : 'cheby2' - Cauer/elliptic: 'ellip' - Bessel/Thomson: 'bessel' output : {'ba', 'zpk', 'sos'}, optional Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or second-order sections ('sos'). Default is 'ba'. Returns ------- b, a : ndarray, ndarray Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. Only returned if ``output='ba'``. z, p, k : ndarray, ndarray, float Zeros, poles, and system gain of the IIR filter transfer function. Only returned if ``output='zpk'``. sos : ndarray Second-order sections representation of the IIR filter. Only returned if ``output=='sos'``. See Also -------- butter : Filter design using order and critical points cheby1, cheby2, ellip, bessel buttord : Find order and critical points from passband and stopband spec cheb1ord, cheb2ord, ellipord iirdesign : General filter design using passband and stopband spec Notes ----- The ``'sos'`` output parameter was added in 0.16.0. Examples -------- Generate a 17th-order Chebyshev II bandpass filter and plot the frequency response: >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> b, a = signal.iirfilter(17, [50, 200], rs=60, btype='band', ... analog=True, ftype='cheby2') >>> w, h = signal.freqs(b, a, 1000) >>> fig = plt.figure() >>> ax = fig.add_subplot(111) >>> ax.semilogx(w, 20 * np.log10(abs(h))) >>> ax.set_title('Chebyshev Type II bandpass frequency response') >>> ax.set_xlabel('Frequency [radians / second]') >>> ax.set_ylabel('Amplitude [dB]') >>> ax.axis((10, 1000, -100, 10)) >>> ax.grid(which='both', axis='both') >>> plt.show() """ ftype, btype, output = [x.lower() for x in (ftype, btype, output)] Wn = asarray(Wn) try: btype = band_dict[btype] except KeyError: raise ValueError("'%s' is an invalid bandtype for filter." % btype) try: typefunc = filter_dict[ftype][0] except KeyError: raise ValueError("'%s' is not a valid basic IIR filter." % ftype) if output not in ['ba', 'zpk', 'sos']: raise ValueError("'%s' is not a valid output form." % output) if rp is not None and rp < 0: raise ValueError("passband ripple (rp) must be positive") if rs is not None and rs < 0: raise ValueError("stopband attenuation (rs) must be positive") # Get analog lowpass prototype if typefunc == buttap: z, p, k = typefunc(N) elif typefunc == besselap: z, p, k = typefunc(N, norm=bessel_norms[ftype]) elif typefunc == cheb1ap: if rp is None: raise ValueError("passband ripple (rp) must be provided to " "design a Chebyshev I filter.") z, p, k = typefunc(N, rp) elif typefunc == cheb2ap: if rs is None: raise ValueError("stopband attenuation (rs) must be provided to " "design an Chebyshev II filter.") z, p, k = typefunc(N, rs) elif typefunc == ellipap: if rs is None or rp is None: raise ValueError("Both rp and rs must be provided to design an " "elliptic filter.") z, p, k = typefunc(N, rp, rs) else: raise NotImplementedError("'%s' not implemented in iirfilter." 
% ftype) # Pre-warp frequencies for digital filter design if not analog: if numpy.any(Wn < 0) or numpy.any(Wn > 1): raise ValueError("Digital filter critical frequencies " "must be 0 <= Wn <= 1") fs = 2.0 warped = 2 * fs * tan(pi * Wn / fs) else: warped = Wn # transform to lowpass, bandpass, highpass, or bandstop if btype in ('lowpass', 'highpass'): if numpy.size(Wn) != 1: raise ValueError('Must specify a single critical frequency Wn') if btype == 'lowpass': z, p, k = _zpklp2lp(z, p, k, wo=warped) elif btype == 'highpass': z, p, k = _zpklp2hp(z, p, k, wo=warped) elif btype in ('bandpass', 'bandstop'): try: bw = warped[1] - warped[0] wo = sqrt(warped[0] * warped[1]) except IndexError: raise ValueError('Wn must specify start and stop frequencies') if btype == 'bandpass': z, p, k = _zpklp2bp(z, p, k, wo=wo, bw=bw) elif btype == 'bandstop': z, p, k = _zpklp2bs(z, p, k, wo=wo, bw=bw) else: raise NotImplementedError("'%s' not implemented in iirfilter." % btype) # Find discrete equivalent if necessary if not analog: z, p, k = _zpkbilinear(z, p, k, fs=fs) # Transform to proper out type (pole-zero, state-space, numer-denom) if output == 'zpk': return z, p, k elif output == 'ba': return zpk2tf(z, p, k) elif output == 'sos': return zpk2sos(z, p, k) def _relative_degree(z, p): """ Return relative degree of transfer function from zeros and poles """ degree = len(p) - len(z) if degree < 0: raise ValueError("Improper transfer function. " "Must have at least as many poles as zeros.") else: return degree # TODO: merge these into existing functions or make public versions def _zpkbilinear(z, p, k, fs): """ Return a digital filter from an analog one using a bilinear transform. Transform a set of poles and zeros from the analog s-plane to the digital z-plane using Tustin's method, which substitutes ``(z-1) / (z+1)`` for ``s``, maintaining the shape of the frequency response. Parameters ---------- z : array_like Zeros of the analog IIR filter transfer function. p : array_like Poles of the analog IIR filter transfer function. k : float System gain of the analog IIR filter transfer function. fs : float Sample rate, as ordinary frequency (e.g. hertz). No prewarping is done in this function. Returns ------- z : ndarray Zeros of the transformed digital filter transfer function. p : ndarray Poles of the transformed digital filter transfer function. k : float System gain of the transformed digital filter. """ z = atleast_1d(z) p = atleast_1d(p) degree = _relative_degree(z, p) fs2 = 2*fs # Bilinear transform the poles and zeros z_z = (fs2 + z) / (fs2 - z) p_z = (fs2 + p) / (fs2 - p) # Any zeros that were at infinity get moved to the Nyquist frequency z_z = append(z_z, -ones(degree)) # Compensate for gain change k_z = k * real(prod(fs2 - z) / prod(fs2 - p)) return z_z, p_z, k_z def _zpklp2lp(z, p, k, wo=1.0): r""" Transform a lowpass filter prototype to a different frequency. Return an analog low-pass filter with cutoff frequency `wo` from an analog low-pass filter prototype with unity cutoff frequency, using zeros, poles, and gain ('zpk') representation. Parameters ---------- z : array_like Zeros of the analog IIR filter transfer function. p : array_like Poles of the analog IIR filter transfer function. k : float System gain of the analog IIR filter transfer function. wo : float Desired cutoff, as angular frequency (e.g. rad/s). Defaults to no change. Returns ------- z : ndarray Zeros of the transformed low-pass filter transfer function. p : ndarray Poles of the transformed low-pass filter transfer function. 
k : float System gain of the transformed low-pass filter. Notes ----- This is derived from the s-plane substitution .. math:: s \rightarrow \frac{s}{\omega_0} """ z = atleast_1d(z) p = atleast_1d(p) wo = float(wo) # Avoid int wraparound degree = _relative_degree(z, p) # Scale all points radially from origin to shift cutoff frequency z_lp = wo * z p_lp = wo * p # Each shifted pole decreases gain by wo, each shifted zero increases it. # Cancel out the net change to keep overall gain the same k_lp = k * wo**degree return z_lp, p_lp, k_lp def _zpklp2hp(z, p, k, wo=1.0): r""" Transform a lowpass filter prototype to a highpass filter. Return an analog high-pass filter with cutoff frequency `wo` from an analog low-pass filter prototype with unity cutoff frequency, using zeros, poles, and gain ('zpk') representation. Parameters ---------- z : array_like Zeros of the analog IIR filter transfer function. p : array_like Poles of the analog IIR filter transfer function. k : float System gain of the analog IIR filter transfer function. wo : float Desired cutoff, as angular frequency (e.g. rad/s). Defaults to no change. Returns ------- z : ndarray Zeros of the transformed high-pass filter transfer function. p : ndarray Poles of the transformed high-pass filter transfer function. k : float System gain of the transformed high-pass filter. Notes ----- This is derived from the s-plane substitution .. math:: s \rightarrow \frac{\omega_0}{s} This maintains symmetry of the lowpass and highpass responses on a logarithmic scale. """ z = atleast_1d(z) p = atleast_1d(p) wo = float(wo) degree = _relative_degree(z, p) # Invert positions radially about unit circle to convert LPF to HPF # Scale all points radially from origin to shift cutoff frequency z_hp = wo / z p_hp = wo / p # If lowpass had zeros at infinity, inverting moves them to origin. z_hp = append(z_hp, zeros(degree)) # Cancel out gain change caused by inversion k_hp = k * real(prod(-z) / prod(-p)) return z_hp, p_hp, k_hp def _zpklp2bp(z, p, k, wo=1.0, bw=1.0): r""" Transform a lowpass filter prototype to a bandpass filter. Return an analog band-pass filter with center frequency `wo` and bandwidth `bw` from an analog low-pass filter prototype with unity cutoff frequency, using zeros, poles, and gain ('zpk') representation. Parameters ---------- z : array_like Zeros of the analog IIR filter transfer function. p : array_like Poles of the analog IIR filter transfer function. k : float System gain of the analog IIR filter transfer function. wo : float Desired passband center, as angular frequency (e.g. rad/s). Defaults to no change. bw : float Desired passband width, as angular frequency (e.g. rad/s). Defaults to 1. Returns ------- z : ndarray Zeros of the transformed band-pass filter transfer function. p : ndarray Poles of the transformed band-pass filter transfer function. k : float System gain of the transformed band-pass filter. Notes ----- This is derived from the s-plane substitution .. math:: s \rightarrow \frac{s^2 + {\omega_0}^2}{s \cdot \mathrm{BW}} This is the "wideband" transformation, producing a passband with geometric (log frequency) symmetry about `wo`. 
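A minimal numerical sketch of the band-pass substitution and its geometric symmetry, using the public transfer-function helper `lp2bp` on a Butterworth prototype (the order, `wo` and `bw` values are arbitrary illustrations):

import numpy as np
from scipy import signal

z, p, k = signal.buttap(4)                       # unity-cutoff low-pass prototype
b, a = signal.zpk2tf(z, p, k)
b_bp, a_bp = signal.lp2bp(b, a, wo=100.0, bw=20.0)

# ~0 dB at the center, and equal gain at wo*r and wo/r (geometric symmetry).
r = 1.3
_, h = signal.freqs(b_bp, a_bp, worN=[100.0, 100.0 * r, 100.0 / r])
print(np.round(20 * np.log10(np.abs(h)), 2))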
""" z = atleast_1d(z) p = atleast_1d(p) wo = float(wo) bw = float(bw) degree = _relative_degree(z, p) # Scale poles and zeros to desired bandwidth z_lp = z * bw/2 p_lp = p * bw/2 # Square root needs to produce complex result, not NaN z_lp = z_lp.astype(complex) p_lp = p_lp.astype(complex) # Duplicate poles and zeros and shift from baseband to +wo and -wo z_bp = concatenate((z_lp + sqrt(z_lp**2 - wo**2), z_lp - sqrt(z_lp**2 - wo**2))) p_bp = concatenate((p_lp + sqrt(p_lp**2 - wo**2), p_lp - sqrt(p_lp**2 - wo**2))) # Move degree zeros to origin, leaving degree zeros at infinity for BPF z_bp = append(z_bp, zeros(degree)) # Cancel out gain change from frequency scaling k_bp = k * bw**degree return z_bp, p_bp, k_bp def _zpklp2bs(z, p, k, wo=1.0, bw=1.0): r""" Transform a lowpass filter prototype to a bandstop filter. Return an analog band-stop filter with center frequency `wo` and stopband width `bw` from an analog low-pass filter prototype with unity cutoff frequency, using zeros, poles, and gain ('zpk') representation. Parameters ---------- z : array_like Zeros of the analog IIR filter transfer function. p : array_like Poles of the analog IIR filter transfer function. k : float System gain of the analog IIR filter transfer function. wo : float Desired stopband center, as angular frequency (e.g. rad/s). Defaults to no change. bw : float Desired stopband width, as angular frequency (e.g. rad/s). Defaults to 1. Returns ------- z : ndarray Zeros of the transformed band-stop filter transfer function. p : ndarray Poles of the transformed band-stop filter transfer function. k : float System gain of the transformed band-stop filter. Notes ----- This is derived from the s-plane substitution .. math:: s \rightarrow \frac{s \cdot \mathrm{BW}}{s^2 + {\omega_0}^2} This is the "wideband" transformation, producing a stopband with geometric (log frequency) symmetry about `wo`. """ z = atleast_1d(z) p = atleast_1d(p) wo = float(wo) bw = float(bw) degree = _relative_degree(z, p) # Invert to a highpass filter with desired bandwidth z_hp = (bw/2) / z p_hp = (bw/2) / p # Square root needs to produce complex result, not NaN z_hp = z_hp.astype(complex) p_hp = p_hp.astype(complex) # Duplicate poles and zeros and shift from baseband to +wo and -wo z_bs = concatenate((z_hp + sqrt(z_hp**2 - wo**2), z_hp - sqrt(z_hp**2 - wo**2))) p_bs = concatenate((p_hp + sqrt(p_hp**2 - wo**2), p_hp - sqrt(p_hp**2 - wo**2))) # Move any zeros that were at infinity to the center of the stopband z_bs = append(z_bs, +1j*wo * ones(degree)) z_bs = append(z_bs, -1j*wo * ones(degree)) # Cancel out gain change caused by inversion k_bs = k * real(prod(-z) / prod(-p)) return z_bs, p_bs, k_bs def butter(N, Wn, btype='low', analog=False, output='ba'): """ Butterworth digital and analog filter design. Design an Nth-order digital or analog Butterworth filter and return the filter coefficients. Parameters ---------- N : int The order of the filter. Wn : array_like A scalar or length-2 sequence giving the critical frequencies. For a Butterworth filter, this is the point at which the gain drops to 1/sqrt(2) that of the passband (the "-3 dB point"). For digital filters, `Wn` is normalized from 0 to 1, where 1 is the Nyquist frequency, pi radians/sample. (`Wn` is thus in half-cycles / sample.) For analog filters, `Wn` is an angular frequency (e.g. rad/s). btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional The type of filter. Default is 'lowpass'. 
analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. output : {'ba', 'zpk', 'sos'}, optional Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or second-order sections ('sos'). Default is 'ba'. Returns ------- b, a : ndarray, ndarray Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. Only returned if ``output='ba'``. z, p, k : ndarray, ndarray, float Zeros, poles, and system gain of the IIR filter transfer function. Only returned if ``output='zpk'``. sos : ndarray Second-order sections representation of the IIR filter. Only returned if ``output=='sos'``. See Also -------- buttord, buttap Notes ----- The Butterworth filter has maximally flat frequency response in the passband. The ``'sos'`` output parameter was added in 0.16.0. Examples -------- Plot the filter's frequency response, showing the critical points: >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> b, a = signal.butter(4, 100, 'low', analog=True) >>> w, h = signal.freqs(b, a) >>> plt.semilogx(w, 20 * np.log10(abs(h))) >>> plt.title('Butterworth filter frequency response') >>> plt.xlabel('Frequency [radians / second]') >>> plt.ylabel('Amplitude [dB]') >>> plt.margins(0, 0.1) >>> plt.grid(which='both', axis='both') >>> plt.axvline(100, color='green') # cutoff frequency >>> plt.show() """ return iirfilter(N, Wn, btype=btype, analog=analog, output=output, ftype='butter') def cheby1(N, rp, Wn, btype='low', analog=False, output='ba'): """ Chebyshev type I digital and analog filter design. Design an Nth-order digital or analog Chebyshev type I filter and return the filter coefficients. Parameters ---------- N : int The order of the filter. rp : float The maximum ripple allowed below unity gain in the passband. Specified in decibels, as a positive number. Wn : array_like A scalar or length-2 sequence giving the critical frequencies. For Type I filters, this is the point in the transition band at which the gain first drops below -`rp`. For digital filters, `Wn` is normalized from 0 to 1, where 1 is the Nyquist frequency, pi radians/sample. (`Wn` is thus in half-cycles / sample.) For analog filters, `Wn` is an angular frequency (e.g. rad/s). btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional The type of filter. Default is 'lowpass'. analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. output : {'ba', 'zpk', 'sos'}, optional Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or second-order sections ('sos'). Default is 'ba'. Returns ------- b, a : ndarray, ndarray Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. Only returned if ``output='ba'``. z, p, k : ndarray, ndarray, float Zeros, poles, and system gain of the IIR filter transfer function. Only returned if ``output='zpk'``. sos : ndarray Second-order sections representation of the IIR filter. Only returned if ``output=='sos'``. See Also -------- cheb1ord, cheb1ap Notes ----- The Chebyshev type I filter maximizes the rate of cutoff between the frequency response's passband and stopband, at the expense of ripple in the passband and increased ringing in the step response. Type I filters roll off faster than Type II (`cheby2`), but Type II filters do not have any ripple in the passband. The equiripple passband has N maxima or minima (for example, a 5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is unity for odd-order filters, or -rp dB for even-order filters. 
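The even/odd DC-gain behaviour noted above is easy to verify numerically; a small sketch with arbitrary order and ripple values:

import numpy as np
from scipy import signal

rp = 3.0                                         # passband ripple in dB (illustrative)
for N in (4, 5):
    b, a = signal.cheby1(N, rp, 1.0, 'low', analog=True)
    _, h = signal.freqs(b, a, worN=[1e-9])       # essentially DC
    # Even order: about -rp dB at DC; odd order: about 0 dB.
    print(N, round(20 * np.log10(abs(h[0])), 2))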
The ``'sos'`` output parameter was added in 0.16.0. Examples -------- Plot the filter's frequency response, showing the critical points: >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> b, a = signal.cheby1(4, 5, 100, 'low', analog=True) >>> w, h = signal.freqs(b, a) >>> plt.semilogx(w, 20 * np.log10(abs(h))) >>> plt.title('Chebyshev Type I frequency response (rp=5)') >>> plt.xlabel('Frequency [radians / second]') >>> plt.ylabel('Amplitude [dB]') >>> plt.margins(0, 0.1) >>> plt.grid(which='both', axis='both') >>> plt.axvline(100, color='green') # cutoff frequency >>> plt.axhline(-5, color='green') # rp >>> plt.show() """ return iirfilter(N, Wn, rp=rp, btype=btype, analog=analog, output=output, ftype='cheby1') def cheby2(N, rs, Wn, btype='low', analog=False, output='ba'): """ Chebyshev type II digital and analog filter design. Design an Nth-order digital or analog Chebyshev type II filter and return the filter coefficients. Parameters ---------- N : int The order of the filter. rs : float The minimum attenuation required in the stop band. Specified in decibels, as a positive number. Wn : array_like A scalar or length-2 sequence giving the critical frequencies. For Type II filters, this is the point in the transition band at which the gain first reaches -`rs`. For digital filters, `Wn` is normalized from 0 to 1, where 1 is the Nyquist frequency, pi radians/sample. (`Wn` is thus in half-cycles / sample.) For analog filters, `Wn` is an angular frequency (e.g. rad/s). btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional The type of filter. Default is 'lowpass'. analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. output : {'ba', 'zpk', 'sos'}, optional Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or second-order sections ('sos'). Default is 'ba'. Returns ------- b, a : ndarray, ndarray Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. Only returned if ``output='ba'``. z, p, k : ndarray, ndarray, float Zeros, poles, and system gain of the IIR filter transfer function. Only returned if ``output='zpk'``. sos : ndarray Second-order sections representation of the IIR filter. Only returned if ``output=='sos'``. See Also -------- cheb2ord, cheb2ap Notes ----- The Chebyshev type II filter maximizes the rate of cutoff between the frequency response's passband and stopband, at the expense of ripple in the stopband and increased ringing in the step response. Type II filters do not roll off as fast as Type I (`cheby1`). The ``'sos'`` output parameter was added in 0.16.0. Examples -------- Plot the filter's frequency response, showing the critical points: >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> b, a = signal.cheby2(4, 40, 100, 'low', analog=True) >>> w, h = signal.freqs(b, a) >>> plt.semilogx(w, 20 * np.log10(abs(h))) >>> plt.title('Chebyshev Type II frequency response (rs=40)') >>> plt.xlabel('Frequency [radians / second]') >>> plt.ylabel('Amplitude [dB]') >>> plt.margins(0, 0.1) >>> plt.grid(which='both', axis='both') >>> plt.axvline(100, color='green') # cutoff frequency >>> plt.axhline(-40, color='green') # rs >>> plt.show() """ return iirfilter(N, Wn, rs=rs, btype=btype, analog=analog, output=output, ftype='cheby2') def ellip(N, rp, rs, Wn, btype='low', analog=False, output='ba'): """ Elliptic (Cauer) digital and analog filter design. Design an Nth-order digital or analog elliptic filter and return the filter coefficients. 
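Since all of these design functions share the `output` switch, a quick sketch (with arbitrary parameters) showing that the 'ba' and 'zpk' forms of one elliptic design describe the same filter:

import numpy as np
from scipy import signal

b, a = signal.ellip(4, 1, 60, 0.3, 'low', output='ba')
z, p, k = signal.ellip(4, 1, 60, 0.3, 'low', output='zpk')

# Converting the zpk form back to polynomials recovers the 'ba' coefficients.
b2, a2 = signal.zpk2tf(z, p, k)
print(np.allclose(b, b2), np.allclose(a, a2))    # True True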
Parameters ---------- N : int The order of the filter. rp : float The maximum ripple allowed below unity gain in the passband. Specified in decibels, as a positive number. rs : float The minimum attenuation required in the stop band. Specified in decibels, as a positive number. Wn : array_like A scalar or length-2 sequence giving the critical frequencies. For elliptic filters, this is the point in the transition band at which the gain first drops below -`rp`. For digital filters, `Wn` is normalized from 0 to 1, where 1 is the Nyquist frequency, pi radians/sample. (`Wn` is thus in half-cycles / sample.) For analog filters, `Wn` is an angular frequency (e.g. rad/s). btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional The type of filter. Default is 'lowpass'. analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. output : {'ba', 'zpk', 'sos'}, optional Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or second-order sections ('sos'). Default is 'ba'. Returns ------- b, a : ndarray, ndarray Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. Only returned if ``output='ba'``. z, p, k : ndarray, ndarray, float Zeros, poles, and system gain of the IIR filter transfer function. Only returned if ``output='zpk'``. sos : ndarray Second-order sections representation of the IIR filter. Only returned if ``output=='sos'``. See Also -------- ellipord, ellipap Notes ----- Also known as Cauer or Zolotarev filters, the elliptical filter maximizes the rate of transition between the frequency response's passband and stopband, at the expense of ripple in both, and increased ringing in the step response. As `rp` approaches 0, the elliptical filter becomes a Chebyshev type II filter (`cheby2`). As `rs` approaches 0, it becomes a Chebyshev type I filter (`cheby1`). As both approach 0, it becomes a Butterworth filter (`butter`). The equiripple passband has N maxima or minima (for example, a 5th-order filter has 3 maxima and 2 minima). Consequently, the DC gain is unity for odd-order filters, or -rp dB for even-order filters. The ``'sos'`` output parameter was added in 0.16.0. Examples -------- Plot the filter's frequency response, showing the critical points: >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> b, a = signal.ellip(4, 5, 40, 100, 'low', analog=True) >>> w, h = signal.freqs(b, a) >>> plt.semilogx(w, 20 * np.log10(abs(h))) >>> plt.title('Elliptic filter frequency response (rp=5, rs=40)') >>> plt.xlabel('Frequency [radians / second]') >>> plt.ylabel('Amplitude [dB]') >>> plt.margins(0, 0.1) >>> plt.grid(which='both', axis='both') >>> plt.axvline(100, color='green') # cutoff frequency >>> plt.axhline(-40, color='green') # rs >>> plt.axhline(-5, color='green') # rp >>> plt.show() """ return iirfilter(N, Wn, rs=rs, rp=rp, btype=btype, analog=analog, output=output, ftype='elliptic') def bessel(N, Wn, btype='low', analog=False, output='ba', norm='phase'): """ Bessel/Thomson digital and analog filter design. Design an Nth-order digital or analog Bessel filter and return the filter coefficients. Parameters ---------- N : int The order of the filter. Wn : array_like A scalar or length-2 sequence giving the critical frequencies (defined by the `norm` parameter). For analog filters, `Wn` is an angular frequency (e.g. rad/s). For digital filters, `Wn` is normalized from 0 to 1, where 1 is the Nyquist frequency, pi radians/sample. (`Wn` is thus in half-cycles / sample.) 
btype : {'lowpass', 'highpass', 'bandpass', 'bandstop'}, optional The type of filter. Default is 'lowpass'. analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. (See Notes.) output : {'ba', 'zpk', 'sos'}, optional Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or second-order sections ('sos'). Default is 'ba'. norm : {'phase', 'delay', 'mag'}, optional Critical frequency normalization: ``phase`` The filter is normalized such that the phase response reaches its midpoint at angular (e.g. rad/s) frequency `Wn`. This happens for both low-pass and high-pass filters, so this is the "phase-matched" case. The magnitude response asymptotes are the same as a Butterworth filter of the same order with a cutoff of `Wn`. This is the default, and matches MATLAB's implementation. ``delay`` The filter is normalized such that the group delay in the passband is 1/`Wn` (e.g. seconds). This is the "natural" type obtained by solving Bessel polynomials. ``mag`` The filter is normalized such that the gain magnitude is -3 dB at angular frequency `Wn`. .. versionadded:: 0.18.0 Returns ------- b, a : ndarray, ndarray Numerator (`b`) and denominator (`a`) polynomials of the IIR filter. Only returned if ``output='ba'``. z, p, k : ndarray, ndarray, float Zeros, poles, and system gain of the IIR filter transfer function. Only returned if ``output='zpk'``. sos : ndarray Second-order sections representation of the IIR filter. Only returned if ``output=='sos'``. Notes ----- Also known as a Thomson filter, the analog Bessel filter has maximally flat group delay and maximally linear phase response, with very little ringing in the step response. [1]_ The Bessel is inherently an analog filter. This function generates digital Bessel filters using the bilinear transform, which does not preserve the phase response of the analog filter. As such, it is only approximately correct at frequencies below about fs/4. To get maximally-flat group delay at higher frequencies, the analog Bessel filter must be transformed using phase-preserving techniques. See `besselap` for implementation details and references. The ``'sos'`` output parameter was added in 0.16.0. 
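A short numerical check of the `norm` options described above (the order and `Wn` are arbitrary illustration values):

import numpy as np
from scipy import signal

# 'mag': gain magnitude should be -3 dB (1/sqrt(2)) at Wn.
b, a = signal.bessel(4, 100, 'low', analog=True, norm='mag')
_, h = signal.freqs(b, a, worN=[100.0])
print(20 * np.log10(abs(h[0])))                  # ~ -3.01 dB

# 'delay': passband group delay should be ~1/Wn seconds.
b, a = signal.bessel(4, 100, 'low', analog=True, norm='delay')
w = np.array([1.0, 2.0])                         # well inside the passband
_, h = signal.freqs(b, a, worN=w)
gd = -np.diff(np.unwrap(np.angle(h))) / np.diff(w)
print(gd[0])                                     # ~ 0.01 = 1/Wn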
Examples -------- Plot the phase-normalized frequency response, showing the relationship to the Butterworth's cutoff frequency (green): >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> b, a = signal.butter(4, 100, 'low', analog=True) >>> w, h = signal.freqs(b, a) >>> plt.semilogx(w, 20 * np.log10(np.abs(h)), color='silver', ls='dashed') >>> b, a = signal.bessel(4, 100, 'low', analog=True, norm='phase') >>> w, h = signal.freqs(b, a) >>> plt.semilogx(w, 20 * np.log10(np.abs(h))) >>> plt.title('Bessel filter magnitude response (with Butterworth)') >>> plt.xlabel('Frequency [radians / second]') >>> plt.ylabel('Amplitude [dB]') >>> plt.margins(0, 0.1) >>> plt.grid(which='both', axis='both') >>> plt.axvline(100, color='green') # cutoff frequency >>> plt.show() and the phase midpoint: >>> plt.figure() >>> plt.semilogx(w, np.unwrap(np.angle(h))) >>> plt.axvline(100, color='green') # cutoff frequency >>> plt.axhline(-np.pi, color='red') # phase midpoint >>> plt.title('Bessel filter phase response') >>> plt.xlabel('Frequency [radians / second]') >>> plt.ylabel('Phase [radians]') >>> plt.margins(0, 0.1) >>> plt.grid(which='both', axis='both') >>> plt.show() Plot the magnitude-normalized frequency response, showing the -3 dB cutoff: >>> b, a = signal.bessel(3, 10, 'low', analog=True, norm='mag') >>> w, h = signal.freqs(b, a) >>> plt.semilogx(w, 20 * np.log10(np.abs(h))) >>> plt.axhline(-3, color='red') # -3 dB magnitude >>> plt.axvline(10, color='green') # cutoff frequency >>> plt.title('Magnitude-normalized Bessel filter frequency response') >>> plt.xlabel('Frequency [radians / second]') >>> plt.ylabel('Amplitude [dB]') >>> plt.margins(0, 0.1) >>> plt.grid(which='both', axis='both') >>> plt.show() Plot the delay-normalized filter, showing the maximally-flat group delay at 0.1 seconds: >>> b, a = signal.bessel(5, 1/0.1, 'low', analog=True, norm='delay') >>> w, h = signal.freqs(b, a) >>> plt.figure() >>> plt.semilogx(w[1:], -np.diff(np.unwrap(np.angle(h)))/np.diff(w)) >>> plt.axhline(0.1, color='red') # 0.1 seconds group delay >>> plt.title('Bessel filter group delay') >>> plt.xlabel('Frequency [radians / second]') >>> plt.ylabel('Group delay [seconds]') >>> plt.margins(0, 0.1) >>> plt.grid(which='both', axis='both') >>> plt.show() References ---------- .. [1] Thomson, W.E., "Delay Networks having Maximally Flat Frequency Characteristics", Proceedings of the Institution of Electrical Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490. """ return iirfilter(N, Wn, btype=btype, analog=analog, output=output, ftype='bessel_'+norm) def maxflat(): pass def yulewalk(): pass def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type): """ Band Stop Objective Function for order minimization. Returns the non-integer order for an analog band stop filter. Parameters ---------- wp : scalar Edge of passband `passb`. ind : int, {0, 1} Index specifying which `passb` edge to vary (0 or 1). passb : ndarray Two element sequence of fixed passband edges. stopb : ndarray Two element sequence of fixed stopband edges. gstop : float Amount of attenuation in stopband in dB. gpass : float Amount of ripple in the passband in dB. type : {'butter', 'cheby', 'ellip'} Type of filter. Returns ------- n : scalar Filter order (possibly non-integer). 
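A sketch of how this objective is used from the caller's side: the `*ord` helpers below move the free passband edge internally, so verifying a band-stop design only requires checking the spec edges (frequencies and tolerances here are arbitrary illustration values):

import numpy as np
from scipy import signal

wp, ws = [0.1, 0.6], [0.2, 0.5]                  # digital band-stop specification
N, Wn = signal.buttord(wp, ws, gpass=3, gstop=40)
b, a = signal.butter(N, Wn, 'bandstop')

# Gain at the four spec edges; the spec asks for no more than 3 dB loss at
# 0.1 and 0.6 (passband) and at least 40 dB attenuation at 0.2 and 0.5.
_, h = signal.freqz(b, a, worN=np.array([0.1, 0.2, 0.5, 0.6]) * np.pi)
print(N, np.round(20 * np.log10(np.abs(h)), 1))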
""" passbC = passb.copy() passbC[ind] = wp nat = (stopb * (passbC[0] - passbC[1]) / (stopb ** 2 - passbC[0] * passbC[1])) nat = min(abs(nat)) if type == 'butter': GSTOP = 10 ** (0.1 * abs(gstop)) GPASS = 10 ** (0.1 * abs(gpass)) n = (log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat))) elif type == 'cheby': GSTOP = 10 ** (0.1 * abs(gstop)) GPASS = 10 ** (0.1 * abs(gpass)) n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat) elif type == 'ellip': GSTOP = 10 ** (0.1 * gstop) GPASS = 10 ** (0.1 * gpass) arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0)) arg0 = 1.0 / nat d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2]) d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2]) n = (d0[0] * d1[1] / (d0[1] * d1[0])) else: raise ValueError("Incorrect type: %s" % type) return n def buttord(wp, ws, gpass, gstop, analog=False): """Butterworth filter order selection. Return the order of the lowest order digital or analog Butterworth filter that loses no more than `gpass` dB in the passband and has at least `gstop` dB attenuation in the stopband. Parameters ---------- wp, ws : float Passband and stopband edge frequencies. For digital filters, these are normalized from 0 to 1, where 1 is the Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in half-cycles / sample.) For example: - Lowpass: wp = 0.2, ws = 0.3 - Highpass: wp = 0.3, ws = 0.2 - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s). gpass : float The maximum loss in the passband (dB). gstop : float The minimum attenuation in the stopband (dB). analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. Returns ------- ord : int The lowest order for a Butterworth filter which meets specs. wn : ndarray or float The Butterworth natural frequency (i.e. the "3dB frequency"). Should be used with `butter` to give filter results. See Also -------- butter : Filter design using order and critical points cheb1ord : Find order and critical points from passband and stopband spec cheb2ord, ellipord iirfilter : General filter design using order and critical frequencies iirdesign : General filter design using passband and stopband spec Examples -------- Design an analog bandpass filter with passband within 3 dB from 20 to 50 rad/s, while rejecting at least -40 dB below 14 and above 60 rad/s. Plot its frequency response, showing the passband and stopband constraints in gray. 
>>> from scipy import signal >>> import matplotlib.pyplot as plt >>> N, Wn = signal.buttord([20, 50], [14, 60], 3, 40, True) >>> b, a = signal.butter(N, Wn, 'band', True) >>> w, h = signal.freqs(b, a, np.logspace(1, 2, 500)) >>> plt.semilogx(w, 20 * np.log10(abs(h))) >>> plt.title('Butterworth bandpass filter fit to constraints') >>> plt.xlabel('Frequency [radians / second]') >>> plt.ylabel('Amplitude [dB]') >>> plt.grid(which='both', axis='both') >>> plt.fill([1, 14, 14, 1], [-40, -40, 99, 99], '0.9', lw=0) # stop >>> plt.fill([20, 20, 50, 50], [-99, -3, -3, -99], '0.9', lw=0) # pass >>> plt.fill([60, 60, 1e9, 1e9], [99, -40, -40, 99], '0.9', lw=0) # stop >>> plt.axis([10, 100, -60, 3]) >>> plt.show() """ wp = atleast_1d(wp) ws = atleast_1d(ws) filter_type = 2 * (len(wp) - 1) filter_type += 1 if wp[0] >= ws[0]: filter_type += 1 # Pre-warp frequencies for digital filter design if not analog: passb = tan(pi * wp / 2.0) stopb = tan(pi * ws / 2.0) else: passb = wp * 1.0 stopb = ws * 1.0 if filter_type == 1: # low nat = stopb / passb elif filter_type == 2: # high nat = passb / stopb elif filter_type == 3: # stop wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12, args=(0, passb, stopb, gpass, gstop, 'butter'), disp=0) passb[0] = wp0 wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1], args=(1, passb, stopb, gpass, gstop, 'butter'), disp=0) passb[1] = wp1 nat = ((stopb * (passb[0] - passb[1])) / (stopb ** 2 - passb[0] * passb[1])) elif filter_type == 4: # pass nat = ((stopb ** 2 - passb[0] * passb[1]) / (stopb * (passb[0] - passb[1]))) nat = min(abs(nat)) GSTOP = 10 ** (0.1 * abs(gstop)) GPASS = 10 ** (0.1 * abs(gpass)) ord = int(ceil(log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat)))) # Find the Butterworth natural frequency WN (or the "3dB" frequency") # to give exactly gpass at passb. try: W0 = (GPASS - 1.0) ** (-1.0 / (2.0 * ord)) except ZeroDivisionError: W0 = 1.0 print("Warning, order is zero...check input parameters.") # now convert this frequency back from lowpass prototype # to the original analog filter if filter_type == 1: # low WN = W0 * passb elif filter_type == 2: # high WN = passb / W0 elif filter_type == 3: # stop WN = numpy.zeros(2, float) discr = sqrt((passb[1] - passb[0]) ** 2 + 4 * W0 ** 2 * passb[0] * passb[1]) WN[0] = ((passb[1] - passb[0]) + discr) / (2 * W0) WN[1] = ((passb[1] - passb[0]) - discr) / (2 * W0) WN = numpy.sort(abs(WN)) elif filter_type == 4: # pass W0 = numpy.array([-W0, W0], float) WN = (-W0 * (passb[1] - passb[0]) / 2.0 + sqrt(W0 ** 2 / 4.0 * (passb[1] - passb[0]) ** 2 + passb[0] * passb[1])) WN = numpy.sort(abs(WN)) else: raise ValueError("Bad type: %s" % filter_type) if not analog: wn = (2.0 / pi) * arctan(WN) else: wn = WN if len(wn) == 1: wn = wn[0] return ord, wn def cheb1ord(wp, ws, gpass, gstop, analog=False): """Chebyshev type I filter order selection. Return the order of the lowest order digital or analog Chebyshev Type I filter that loses no more than `gpass` dB in the passband and has at least `gstop` dB attenuation in the stopband. Parameters ---------- wp, ws : float Passband and stopband edge frequencies. For digital filters, these are normalized from 0 to 1, where 1 is the Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in half-cycles / sample.) For example: - Lowpass: wp = 0.2, ws = 0.3 - Highpass: wp = 0.3, ws = 0.2 - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s). 
gpass : float The maximum loss in the passband (dB). gstop : float The minimum attenuation in the stopband (dB). analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. Returns ------- ord : int The lowest order for a Chebyshev type I filter that meets specs. wn : ndarray or float The Chebyshev natural frequency (the "3dB frequency") for use with `cheby1` to give filter results. See Also -------- cheby1 : Filter design using order and critical points buttord : Find order and critical points from passband and stopband spec cheb2ord, ellipord iirfilter : General filter design using order and critical frequencies iirdesign : General filter design using passband and stopband spec Examples -------- Design a digital lowpass filter such that the passband is within 3 dB up to 0.2*(fs/2), while rejecting at least -40 dB above 0.3*(fs/2). Plot its frequency response, showing the passband and stopband constraints in gray. >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> N, Wn = signal.cheb1ord(0.2, 0.3, 3, 40) >>> b, a = signal.cheby1(N, 3, Wn, 'low') >>> w, h = signal.freqz(b, a) >>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h))) >>> plt.title('Chebyshev I lowpass filter fit to constraints') >>> plt.xlabel('Normalized frequency') >>> plt.ylabel('Amplitude [dB]') >>> plt.grid(which='both', axis='both') >>> plt.fill([.01, 0.2, 0.2, .01], [-3, -3, -99, -99], '0.9', lw=0) # stop >>> plt.fill([0.3, 0.3, 2, 2], [ 9, -40, -40, 9], '0.9', lw=0) # pass >>> plt.axis([0.08, 1, -60, 3]) >>> plt.show() """ wp = atleast_1d(wp) ws = atleast_1d(ws) filter_type = 2 * (len(wp) - 1) if wp[0] < ws[0]: filter_type += 1 else: filter_type += 2 # Pre-warp frequencies for digital filter design if not analog: passb = tan(pi * wp / 2.0) stopb = tan(pi * ws / 2.0) else: passb = wp * 1.0 stopb = ws * 1.0 if filter_type == 1: # low nat = stopb / passb elif filter_type == 2: # high nat = passb / stopb elif filter_type == 3: # stop wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12, args=(0, passb, stopb, gpass, gstop, 'cheby'), disp=0) passb[0] = wp0 wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1], args=(1, passb, stopb, gpass, gstop, 'cheby'), disp=0) passb[1] = wp1 nat = ((stopb * (passb[0] - passb[1])) / (stopb ** 2 - passb[0] * passb[1])) elif filter_type == 4: # pass nat = ((stopb ** 2 - passb[0] * passb[1]) / (stopb * (passb[0] - passb[1]))) nat = min(abs(nat)) GSTOP = 10 ** (0.1 * abs(gstop)) GPASS = 10 ** (0.1 * abs(gpass)) ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat))) # Natural frequencies are just the passband edges if not analog: wn = (2.0 / pi) * arctan(passb) else: wn = passb if len(wn) == 1: wn = wn[0] return ord, wn def cheb2ord(wp, ws, gpass, gstop, analog=False): """Chebyshev type II filter order selection. Return the order of the lowest order digital or analog Chebyshev Type II filter that loses no more than `gpass` dB in the passband and has at least `gstop` dB attenuation in the stopband. Parameters ---------- wp, ws : float Passband and stopband edge frequencies. For digital filters, these are normalized from 0 to 1, where 1 is the Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in half-cycles / sample.) For example: - Lowpass: wp = 0.2, ws = 0.3 - Highpass: wp = 0.3, ws = 0.2 - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s). 
gpass : float The maximum loss in the passband (dB). gstop : float The minimum attenuation in the stopband (dB). analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. Returns ------- ord : int The lowest order for a Chebyshev type II filter that meets specs. wn : ndarray or float The Chebyshev natural frequency (the "3dB frequency") for use with `cheby2` to give filter results. See Also -------- cheby2 : Filter design using order and critical points buttord : Find order and critical points from passband and stopband spec cheb1ord, ellipord iirfilter : General filter design using order and critical frequencies iirdesign : General filter design using passband and stopband spec Examples -------- Design a digital bandstop filter which rejects -60 dB from 0.2*(fs/2) to 0.5*(fs/2), while staying within 3 dB below 0.1*(fs/2) or above 0.6*(fs/2). Plot its frequency response, showing the passband and stopband constraints in gray. >>> from scipy import signal >>> import matplotlib.pyplot as plt >>> N, Wn = signal.cheb2ord([0.1, 0.6], [0.2, 0.5], 3, 60) >>> b, a = signal.cheby2(N, 60, Wn, 'stop') >>> w, h = signal.freqz(b, a) >>> plt.semilogx(w / np.pi, 20 * np.log10(abs(h))) >>> plt.title('Chebyshev II bandstop filter fit to constraints') >>> plt.xlabel('Normalized frequency') >>> plt.ylabel('Amplitude [dB]') >>> plt.grid(which='both', axis='both') >>> plt.fill([.01, .1, .1, .01], [-3, -3, -99, -99], '0.9', lw=0) # stop >>> plt.fill([.2, .2, .5, .5], [ 9, -60, -60, 9], '0.9', lw=0) # pass >>> plt.fill([.6, .6, 2, 2], [-99, -3, -3, -99], '0.9', lw=0) # stop >>> plt.axis([0.06, 1, -80, 3]) >>> plt.show() """ wp = atleast_1d(wp) ws = atleast_1d(ws) filter_type = 2 * (len(wp) - 1) if wp[0] < ws[0]: filter_type += 1 else: filter_type += 2 # Pre-warp frequencies for digital filter design if not analog: passb = tan(pi * wp / 2.0) stopb = tan(pi * ws / 2.0) else: passb = wp * 1.0 stopb = ws * 1.0 if filter_type == 1: # low nat = stopb / passb elif filter_type == 2: # high nat = passb / stopb elif filter_type == 3: # stop wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12, args=(0, passb, stopb, gpass, gstop, 'cheby'), disp=0) passb[0] = wp0 wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1], args=(1, passb, stopb, gpass, gstop, 'cheby'), disp=0) passb[1] = wp1 nat = ((stopb * (passb[0] - passb[1])) / (stopb ** 2 - passb[0] * passb[1])) elif filter_type == 4: # pass nat = ((stopb ** 2 - passb[0] * passb[1]) / (stopb * (passb[0] - passb[1]))) nat = min(abs(nat)) GSTOP = 10 ** (0.1 * abs(gstop)) GPASS = 10 ** (0.1 * abs(gpass)) ord = int(ceil(arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat))) # Find frequency where analog response is -gpass dB. # Then convert back from low-pass prototype to the original filter. 
new_freq = cosh(1.0 / ord * arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0)))) new_freq = 1.0 / new_freq if filter_type == 1: nat = passb / new_freq elif filter_type == 2: nat = passb * new_freq elif filter_type == 3: nat = numpy.zeros(2, float) nat[0] = (new_freq / 2.0 * (passb[0] - passb[1]) + sqrt(new_freq ** 2 * (passb[1] - passb[0]) ** 2 / 4.0 + passb[1] * passb[0])) nat[1] = passb[1] * passb[0] / nat[0] elif filter_type == 4: nat = numpy.zeros(2, float) nat[0] = (1.0 / (2.0 * new_freq) * (passb[0] - passb[1]) + sqrt((passb[1] - passb[0]) ** 2 / (4.0 * new_freq ** 2) + passb[1] * passb[0])) nat[1] = passb[0] * passb[1] / nat[0] if not analog: wn = (2.0 / pi) * arctan(nat) else: wn = nat if len(wn) == 1: wn = wn[0] return ord, wn def ellipord(wp, ws, gpass, gstop, analog=False): """Elliptic (Cauer) filter order selection. Return the order of the lowest order digital or analog elliptic filter that loses no more than `gpass` dB in the passband and has at least `gstop` dB attenuation in the stopband. Parameters ---------- wp, ws : float Passband and stopband edge frequencies. For digital filters, these are normalized from 0 to 1, where 1 is the Nyquist frequency, pi radians/sample. (`wp` and `ws` are thus in half-cycles / sample.) For example: - Lowpass: wp = 0.2, ws = 0.3 - Highpass: wp = 0.3, ws = 0.2 - Bandpass: wp = [0.2, 0.5], ws = [0.1, 0.6] - Bandstop: wp = [0.1, 0.6], ws = [0.2, 0.5] For analog filters, `wp` and `ws` are angular frequencies (e.g. rad/s). gpass : float The maximum loss in the passband (dB). gstop : float The minimum attenuation in the stopband (dB). analog : bool, optional When True, return an analog filter, otherwise a digital filter is returned. Returns ------- ord : int The lowest order for an Elliptic (Cauer) filter that meets specs. wn : ndarray or float The Chebyshev natural frequency (the "3dB frequency") for use with `ellip` to give filter results. See Also -------- ellip : Filter design using order and critical points buttord : Find order and critical points from passband and stopband spec cheb1ord, cheb2ord iirfilter : General filter design using order and critical frequencies iirdesign : General filter design using passband and stopband spec Examples -------- Design an analog highpass filter such that the passband is within 3 dB above 30 rad/s, while rejecting -60 dB at 10 rad/s. Plot its frequency response, showing the passband and stopband constraints in gray. 
>>> from scipy import signal >>> import matplotlib.pyplot as plt >>> N, Wn = signal.ellipord(30, 10, 3, 60, True) >>> b, a = signal.ellip(N, 3, 60, Wn, 'high', True) >>> w, h = signal.freqs(b, a, np.logspace(0, 3, 500)) >>> plt.semilogx(w, 20 * np.log10(abs(h))) >>> plt.title('Elliptical highpass filter fit to constraints') >>> plt.xlabel('Frequency [radians / second]') >>> plt.ylabel('Amplitude [dB]') >>> plt.grid(which='both', axis='both') >>> plt.fill([.1, 10, 10, .1], [1e4, 1e4, -60, -60], '0.9', lw=0) # stop >>> plt.fill([30, 30, 1e9, 1e9], [-99, -3, -3, -99], '0.9', lw=0) # pass >>> plt.axis([1, 300, -80, 3]) >>> plt.show() """ wp = atleast_1d(wp) ws = atleast_1d(ws) filter_type = 2 * (len(wp) - 1) filter_type += 1 if wp[0] >= ws[0]: filter_type += 1 # Pre-warp frequencies for digital filter design if not analog: passb = tan(pi * wp / 2.0) stopb = tan(pi * ws / 2.0) else: passb = wp * 1.0 stopb = ws * 1.0 if filter_type == 1: # low nat = stopb / passb elif filter_type == 2: # high nat = passb / stopb elif filter_type == 3: # stop wp0 = optimize.fminbound(band_stop_obj, passb[0], stopb[0] - 1e-12, args=(0, passb, stopb, gpass, gstop, 'ellip'), disp=0) passb[0] = wp0 wp1 = optimize.fminbound(band_stop_obj, stopb[1] + 1e-12, passb[1], args=(1, passb, stopb, gpass, gstop, 'ellip'), disp=0) passb[1] = wp1 nat = ((stopb * (passb[0] - passb[1])) / (stopb ** 2 - passb[0] * passb[1])) elif filter_type == 4: # pass nat = ((stopb ** 2 - passb[0] * passb[1]) / (stopb * (passb[0] - passb[1]))) nat = min(abs(nat)) GSTOP = 10 ** (0.1 * gstop) GPASS = 10 ** (0.1 * gpass) arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0)) arg0 = 1.0 / nat d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2]) d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2]) ord = int(ceil(d0[0] * d1[1] / (d0[1] * d1[0]))) if not analog: wn = arctan(passb) * 2.0 / pi else: wn = passb if len(wn) == 1: wn = wn[0] return ord, wn def buttap(N): """Return (z,p,k) for analog prototype of Nth-order Butterworth filter. The filter will have an angular (e.g. rad/s) cutoff frequency of 1. See Also -------- butter : Filter design function using this prototype """ if abs(int(N)) != N: raise ValueError("Filter order must be a nonnegative integer") z = numpy.array([]) m = numpy.arange(-N+1, N, 2) # Middle value is 0 to ensure an exactly real pole p = -numpy.exp(1j * pi * m / (2 * N)) k = 1 return z, p, k def cheb1ap(N, rp): """ Return (z,p,k) for Nth-order Chebyshev type I analog lowpass filter. The returned filter prototype has `rp` decibels of ripple in the passband. The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1, defined as the point at which the gain first drops below ``-rp``. See Also -------- cheby1 : Filter design function using this prototype """ if abs(int(N)) != N: raise ValueError("Filter order must be a nonnegative integer") elif N == 0: # Avoid divide-by-zero error # Even order filters have DC gain of -rp dB return numpy.array([]), numpy.array([]), 10**(-rp/20) z = numpy.array([]) # Ripple factor (epsilon) eps = numpy.sqrt(10 ** (0.1 * rp) - 1.0) mu = 1.0 / N * arcsinh(1 / eps) # Arrange poles in an ellipse on the left half of the S-plane m = numpy.arange(-N+1, N, 2) theta = pi * m / (2*N) p = -sinh(mu + 1j*theta) k = numpy.prod(-p, axis=0).real if N % 2 == 0: k = k / sqrt((1 + eps * eps)) return z, p, k def cheb2ap(N, rs): """ Return (z,p,k) for Nth-order Chebyshev type I analog lowpass filter. The returned filter prototype has `rs` decibels of ripple in the stopband. The filter's angular (e.g. 
rad/s) cutoff frequency is normalized to 1, defined as the point at which the gain first reaches ``-rs``. See Also -------- cheby2 : Filter design function using this prototype """ if abs(int(N)) != N: raise ValueError("Filter order must be a nonnegative integer") elif N == 0: # Avoid divide-by-zero warning return numpy.array([]), numpy.array([]), 1 # Ripple factor (epsilon) de = 1.0 / sqrt(10 ** (0.1 * rs) - 1) mu = arcsinh(1.0 / de) / N if N % 2: m = numpy.concatenate((numpy.arange(-N+1, 0, 2), numpy.arange(2, N, 2))) else: m = numpy.arange(-N+1, N, 2) z = -conjugate(1j / sin(m * pi / (2.0 * N))) # Poles around the unit circle like Butterworth p = -exp(1j * pi * numpy.arange(-N+1, N, 2) / (2 * N)) # Warp into Chebyshev II p = sinh(mu) * p.real + 1j * cosh(mu) * p.imag p = 1.0 / p k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real return z, p, k EPSILON = 2e-16 def _vratio(u, ineps, mp): [s, c, d, phi] = special.ellipj(u, mp) ret = abs(ineps - s / c) return ret def _kratio(m, k_ratio): m = float(m) if m < 0: m = 0.0 if m > 1: m = 1.0 if abs(m) > EPSILON and (abs(m) + EPSILON) < 1: k = special.ellipk([m, 1 - m]) r = k[0] / k[1] - k_ratio elif abs(m) > EPSILON: r = -k_ratio else: r = 1e20 return abs(r) def ellipap(N, rp, rs): """Return (z,p,k) of Nth-order elliptic analog lowpass filter. The filter is a normalized prototype that has `rp` decibels of ripple in the passband and a stopband `rs` decibels down. The filter's angular (e.g. rad/s) cutoff frequency is normalized to 1, defined as the point at which the gain first drops below ``-rp``. See Also -------- ellip : Filter design function using this prototype References ---------- .. [1] Lutova, Tosic, and Evans, "Filter Design for Signal Processing", Chapters 5 and 12. """ if abs(int(N)) != N: raise ValueError("Filter order must be a nonnegative integer") elif N == 0: # Avoid divide-by-zero warning # Even order filters have DC gain of -rp dB return numpy.array([]), numpy.array([]), 10**(-rp/20) elif N == 1: p = -sqrt(1.0 / (10 ** (0.1 * rp) - 1.0)) k = -p z = [] return asarray(z), asarray(p), k eps = numpy.sqrt(10 ** (0.1 * rp) - 1) ck1 = eps / numpy.sqrt(10 ** (0.1 * rs) - 1) ck1p = numpy.sqrt(1 - ck1 * ck1) if ck1p == 1: raise ValueError("Cannot design a filter with given rp and rs" " specifications.") val = special.ellipk([ck1 * ck1, ck1p * ck1p]) if abs(1 - ck1p * ck1p) < EPSILON: krat = 0 else: krat = N * val[0] / val[1] m = optimize.fmin(_kratio, [0.5], args=(krat,), maxfun=250, maxiter=250, disp=0) if m < 0 or m > 1: m = optimize.fminbound(_kratio, 0, 1, args=(krat,), maxfun=250, maxiter=250, disp=0) capk = special.ellipk(m) j = numpy.arange(1 - N % 2, N, 2) jj = len(j) [s, c, d, phi] = special.ellipj(j * capk / N, m * numpy.ones(jj)) snew = numpy.compress(abs(s) > EPSILON, s, axis=-1) z = 1.0 / (sqrt(m) * snew) z = 1j * z z = numpy.concatenate((z, conjugate(z))) r = optimize.fmin(_vratio, special.ellipk(m), args=(1. 
/ eps, ck1p * ck1p), maxfun=250, maxiter=250, disp=0) v0 = capk * r / (N * val[0]) [sv, cv, dv, phi] = special.ellipj(v0, 1 - m) p = -(c * d * sv * cv + 1j * s * dv) / (1 - (d * sv) ** 2.0) if N % 2: newp = numpy.compress(abs(p.imag) > EPSILON * numpy.sqrt(numpy.sum(p * numpy.conjugate(p), axis=0).real), p, axis=-1) p = numpy.concatenate((p, conjugate(newp))) else: p = numpy.concatenate((p, conjugate(p))) k = (numpy.prod(-p, axis=0) / numpy.prod(-z, axis=0)).real if N % 2 == 0: k = k / numpy.sqrt((1 + eps * eps)) return z, p, k # TODO: Make this a real public function scipy.misc.ff def _falling_factorial(x, n): r""" Return the factorial of `x` to the `n` falling. This is defined as: .. math:: x^\underline n = (x)_n = x (x-1) \cdots (x-n+1) This can more efficiently calculate ratios of factorials, since: n!/m! == falling_factorial(n, n-m) where n >= m skipping the factors that cancel out the usual factorial n! == ff(n, n) """ val = 1 for k in range(x - n + 1, x + 1): val *= k return val def _bessel_poly(n, reverse=False): """ Return the coefficients of Bessel polynomial of degree `n` If `reverse` is true, a reverse Bessel polynomial is output. Output is a list of coefficients: [1] = 1 [1, 1] = 1*s + 1 [1, 3, 3] = 1*s^2 + 3*s + 3 [1, 6, 15, 15] = 1*s^3 + 6*s^2 + 15*s + 15 [1, 10, 45, 105, 105] = 1*s^4 + 10*s^3 + 45*s^2 + 105*s + 105 etc. Output is a Python list of arbitrary precision long ints, so n is only limited by your hardware's memory. Sequence is http://oeis.org/A001498 , and output can be confirmed to match http://oeis.org/A001498/b001498.txt : >>> i = 0 >>> for n in range(51): ... for x in _bessel_poly(n, reverse=True): ... print(i, x) ... i += 1 """ if abs(int(n)) != n: raise ValueError("Polynomial order must be a nonnegative integer") else: n = int(n) # np.int32 doesn't work, for instance out = [] for k in range(n + 1): num = _falling_factorial(2*n - k, n) den = 2**(n - k) * factorial(k, exact=True) out.append(num // den) if reverse: return out[::-1] else: return out def _campos_zeros(n): """ Return approximate zero locations of Bessel polynomials y_n(x) for order `n` using polynomial fit (Campos-Calderon 2011) """ if n == 1: return asarray([-1+0j]) s = npp_polyval(n, [0, 0, 2, 0, -3, 1]) b3 = npp_polyval(n, [16, -8]) / s b2 = npp_polyval(n, [-24, -12, 12]) / s b1 = npp_polyval(n, [8, 24, -12, -2]) / s b0 = npp_polyval(n, [0, -6, 0, 5, -1]) / s r = npp_polyval(n, [0, 0, 2, 1]) a1 = npp_polyval(n, [-6, -6]) / r a2 = 6 / r k = np.arange(1, n+1) x = npp_polyval(k, [0, a1, a2]) y = npp_polyval(k, [b0, b1, b2, b3]) return x + 1j*y def _aberth(f, fp, x0, tol=1e-15, maxiter=50): """ Given a function `f`, its first derivative `fp`, and a set of initial guesses `x0`, simultaneously find the roots of the polynomial using the Aberth-Ehrlich method. ``len(x0)`` should equal the number of roots of `f`. (This is not a complete implementation of Bini's algorithm.) """ N = len(x0) x = array(x0, complex) beta = np.empty_like(x0) for iteration in range(maxiter): alpha = -f(x) / fp(x) # Newton's method # Model "repulsion" between zeros for k in range(N): beta[k] = np.sum(1/(x[k] - x[k+1:])) beta[k] += np.sum(1/(x[k] - x[:k])) x += alpha / (1 + alpha * beta) if not all(np.isfinite(x)): raise RuntimeError('Root-finding calculation failed') # Mekwi: The iterative process can be stopped when |hn| has become # less than the largest error one is willing to permit in the root. 
if all(abs(alpha) <= tol): break else: raise Exception('Zeros failed to converge') return x def _bessel_zeros(N): """ Find zeros of ordinary Bessel polynomial of order `N`, by root-finding of modified Bessel function of the second kind """ if N == 0: return asarray([]) # Generate starting points x0 = _campos_zeros(N) # Zeros are the same for exp(1/x)*K_{N+0.5}(1/x) and Nth-order ordinary # Bessel polynomial y_N(x) def f(x): return special.kve(N+0.5, 1/x) # First derivative of above def fp(x): return (special.kve(N-0.5, 1/x)/(2*x**2) - special.kve(N+0.5, 1/x)/(x**2) + special.kve(N+1.5, 1/x)/(2*x**2)) # Starting points converge to true zeros x = _aberth(f, fp, x0) # Improve precision using Newton's method on each for i in range(len(x)): x[i] = optimize.newton(f, x[i], fp, tol=1e-15) # Average complex conjugates to make them exactly symmetrical x = np.mean((x, x[::-1].conj()), 0) # Zeros should sum to -1 if abs(np.sum(x) + 1) > 1e-15: raise RuntimeError('Generated zeros are inaccurate') return x def _norm_factor(p, k): """ Numerically find frequency shift to apply to delay-normalized filter such that -3 dB point is at 1 rad/sec. `p` is an array_like of polynomial poles `k` is a float gain First 10 values are listed in "Bessel Scale Factors" table, "Bessel Filters Polynomials, Poles and Circuit Elements 2003, C. Bond." """ p = asarray(p, dtype=complex) def G(w): """ Gain of filter """ return abs(k / prod(1j*w - p)) def cutoff(w): """ When gain = -3 dB, return 0 """ return G(w) - 1/np.sqrt(2) return optimize.newton(cutoff, 1.5) def besselap(N, norm='phase'): """ Return (z,p,k) for analog prototype of an Nth-order Bessel filter. Parameters ---------- N : int The order of the filter. norm : {'phase', 'delay', 'mag'}, optional Frequency normalization: ``phase`` The filter is normalized such that the phase response reaches its midpoint at an angular (e.g. rad/s) cutoff frequency of 1. This happens for both low-pass and high-pass filters, so this is the "phase-matched" case. [6]_ The magnitude response asymptotes are the same as a Butterworth filter of the same order with a cutoff of `Wn`. This is the default, and matches MATLAB's implementation. ``delay`` The filter is normalized such that the group delay in the passband is 1 (e.g. 1 second). This is the "natural" type obtained by solving Bessel polynomials ``mag`` The filter is normalized such that the gain magnitude is -3 dB at angular frequency 1. This is called "frequency normalization" by Bond. [1]_ .. versionadded:: 0.18.0 Returns ------- z : ndarray Zeros of the transfer function. Is always an empty array. p : ndarray Poles of the transfer function. k : scalar Gain of the transfer function. For phase-normalized, this is always 1. See Also -------- bessel : Filter design function using this prototype Notes ----- To find the pole locations, approximate starting points are generated [2]_ for the zeros of the ordinary Bessel polynomial [3]_, then the Aberth-Ehrlich method [4]_ [5]_ is used on the Kv(x) Bessel function to calculate more accurate zeros, and these locations are then inverted about the unit circle. References ---------- .. [1] C.R. Bond, "Bessel Filter Constants", http://www.crbond.com/papers/bsf.pdf .. [2] Campos and Calderon, "Approximate closed-form formulas for the zeros of the Bessel Polynomials", :arXiv:`1105.0957`. .. [3] Thomson, W.E., "Delay Networks having Maximally Flat Frequency Characteristics", Proceedings of the Institution of Electrical Engineers, Part III, November 1949, Vol. 96, No. 44, pp. 487-490. .. 
[4] Aberth, "Iteration Methods for Finding all Zeros of a Polynomial Simultaneously", Mathematics of Computation, Vol. 27, No. 122, April 1973 .. [5] Ehrlich, "A modified Newton method for polynomials", Communications of the ACM, Vol. 10, Issue 2, pp. 107-108, Feb. 1967, :DOI:`10.1145/363067.363115` .. [6] Miller and Bohn, "A Bessel Filter Crossover, and Its Relation to Others", RaneNote 147, 1998, http://www.rane.com/note147.html """ if abs(int(N)) != N: raise ValueError("Filter order must be a nonnegative integer") if N == 0: p = [] k = 1 else: # Find roots of reverse Bessel polynomial p = 1/_bessel_zeros(N) a_last = _falling_factorial(2*N, N) // 2**N # Shift them to a different normalization if required if norm in ('delay', 'mag'): # Normalized for group delay of 1 k = a_last if norm == 'mag': # -3 dB magnitude point is at 1 rad/sec norm_factor = _norm_factor(p, k) p /= norm_factor k = norm_factor**-N * a_last elif norm == 'phase': # Phase-matched (1/2 max phase shift at 1 rad/sec) # Asymptotes are same as Butterworth filter p *= 10**(-math.log10(a_last)/N) k = 1 else: raise ValueError('normalization not understood') return asarray([]), asarray(p, dtype=complex), float(k) def iirnotch(w0, Q): """ Design second-order IIR notch digital filter. A notch filter is a band-stop filter with a narrow bandwidth (high quality factor). It rejects a narrow frequency band and leaves the rest of the spectrum little changed. Parameters ---------- w0 : float Normalized frequency to remove from a signal. It is a scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding to half of the sampling frequency. Q : float Quality factor. Dimensionless parameter that characterizes notch filter -3 dB bandwidth ``bw`` relative to its center frequency, ``Q = w0/bw``. Returns ------- b, a : ndarray, ndarray Numerator (``b``) and denominator (``a``) polynomials of the IIR filter. See Also -------- iirpeak Notes ----- .. versionadded: 0.19.0 References ---------- .. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing", Prentice-Hall, 1996 Examples -------- Design and plot filter to remove the 60Hz component from a signal sampled at 200Hz, using a quality factor Q = 30 >>> from scipy import signal >>> import numpy as np >>> import matplotlib.pyplot as plt >>> fs = 200.0 # Sample frequency (Hz) >>> f0 = 60.0 # Frequency to be removed from signal (Hz) >>> Q = 30.0 # Quality factor >>> w0 = f0/(fs/2) # Normalized Frequency >>> # Design notch filter >>> b, a = signal.iirnotch(w0, Q) >>> # Frequency response >>> w, h = signal.freqz(b, a) >>> # Generate frequency axis >>> freq = w*fs/(2*np.pi) >>> # Plot >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6)) >>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue') >>> ax[0].set_title("Frequency Response") >>> ax[0].set_ylabel("Amplitude (dB)", color='blue') >>> ax[0].set_xlim([0, 100]) >>> ax[0].set_ylim([-25, 10]) >>> ax[0].grid() >>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green') >>> ax[1].set_ylabel("Angle (degrees)", color='green') >>> ax[1].set_xlabel("Frequency (Hz)") >>> ax[1].set_xlim([0, 100]) >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90]) >>> ax[1].set_ylim([-90, 90]) >>> ax[1].grid() >>> plt.show() """ return _design_notch_peak_filter(w0, Q, "notch") def iirpeak(w0, Q): """ Design second-order IIR peak (resonant) digital filter. A peak filter is a band-pass filter with a narrow bandwidth (high quality factor). It rejects components outside a narrow frequency band. 
Parameters ---------- w0 : float Normalized frequency to be retained in a signal. It is a scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding to half of the sampling frequency. Q : float Quality factor. Dimensionless parameter that characterizes peak filter -3 dB bandwidth ``bw`` relative to its center frequency, ``Q = w0/bw``. Returns ------- b, a : ndarray, ndarray Numerator (``b``) and denominator (``a``) polynomials of the IIR filter. See Also -------- iirnotch Notes ----- .. versionadded: 0.19.0 References ---------- .. [1] Sophocles J. Orfanidis, "Introduction To Signal Processing", Prentice-Hall, 1996 Examples -------- Design and plot filter to remove the frequencies other than the 300Hz component from a signal sampled at 1000Hz, using a quality factor Q = 30 >>> from scipy import signal >>> import numpy as np >>> import matplotlib.pyplot as plt >>> fs = 1000.0 # Sample frequency (Hz) >>> f0 = 300.0 # Frequency to be retained (Hz) >>> Q = 30.0 # Quality factor >>> w0 = f0/(fs/2) # Normalized Frequency >>> # Design peak filter >>> b, a = signal.iirpeak(w0, Q) >>> # Frequency response >>> w, h = signal.freqz(b, a) >>> # Generate frequency axis >>> freq = w*fs/(2*np.pi) >>> # Plot >>> fig, ax = plt.subplots(2, 1, figsize=(8, 6)) >>> ax[0].plot(freq, 20*np.log10(abs(h)), color='blue') >>> ax[0].set_title("Frequency Response") >>> ax[0].set_ylabel("Amplitude (dB)", color='blue') >>> ax[0].set_xlim([0, 500]) >>> ax[0].set_ylim([-50, 10]) >>> ax[0].grid() >>> ax[1].plot(freq, np.unwrap(np.angle(h))*180/np.pi, color='green') >>> ax[1].set_ylabel("Angle (degrees)", color='green') >>> ax[1].set_xlabel("Frequency (Hz)") >>> ax[1].set_xlim([0, 500]) >>> ax[1].set_yticks([-90, -60, -30, 0, 30, 60, 90]) >>> ax[1].set_ylim([-90, 90]) >>> ax[1].grid() >>> plt.show() """ return _design_notch_peak_filter(w0, Q, "peak") def _design_notch_peak_filter(w0, Q, ftype): """ Design notch or peak digital filter. Parameters ---------- w0 : float Normalized frequency to remove from a signal. It is a scalar that must satisfy ``0 < w0 < 1``, with ``w0 = 1`` corresponding to half of the sampling frequency. Q : float Quality factor. Dimensionless parameter that characterizes notch filter -3 dB bandwidth ``bw`` relative to its center frequency, ``Q = w0/bw``. ftype : str The type of IIR filter to design: - notch filter : ``notch`` - peak filter : ``peak`` Returns ------- b, a : ndarray, ndarray Numerator (``b``) and denominator (``a``) polynomials of the IIR filter. 
""" # Guarantee that the inputs are floats w0 = float(w0) Q = float(Q) # Checks if w0 is within the range if w0 > 1.0 or w0 < 0.0: raise ValueError("w0 should be such that 0 < w0 < 1") # Get bandwidth bw = w0/Q # Normalize inputs bw = bw*np.pi w0 = w0*np.pi # Compute -3dB atenuation gb = 1/np.sqrt(2) if ftype == "notch": # Compute beta: formula 11.3.4 (p.575) from reference [1] beta = (np.sqrt(1.0-gb**2.0)/gb)*np.tan(bw/2.0) elif ftype == "peak": # Compute beta: formula 11.3.19 (p.579) from reference [1] beta = (gb/np.sqrt(1.0-gb**2.0))*np.tan(bw/2.0) else: raise ValueError("Unknown ftype.") # Compute gain: formula 11.3.6 (p.575) from reference [1] gain = 1.0/(1.0+beta) # Compute numerator b and denominator a # formulas 11.3.7 (p.575) and 11.3.21 (p.579) # from reference [1] if ftype == "notch": b = gain*np.array([1.0, -2.0*np.cos(w0), 1.0]) else: b = (1.0-gain)*np.array([1.0, 0.0, -1.0]) a = np.array([1.0, -2.0*gain*np.cos(w0), (2.0*gain-1.0)]) return b, a filter_dict = {'butter': [buttap, buttord], 'butterworth': [buttap, buttord], 'cauer': [ellipap, ellipord], 'elliptic': [ellipap, ellipord], 'ellip': [ellipap, ellipord], 'bessel': [besselap], 'bessel_phase': [besselap], 'bessel_delay': [besselap], 'bessel_mag': [besselap], 'cheby1': [cheb1ap, cheb1ord], 'chebyshev1': [cheb1ap, cheb1ord], 'chebyshevi': [cheb1ap, cheb1ord], 'cheby2': [cheb2ap, cheb2ord], 'chebyshev2': [cheb2ap, cheb2ord], 'chebyshevii': [cheb2ap, cheb2ord], } band_dict = {'band': 'bandpass', 'bandpass': 'bandpass', 'pass': 'bandpass', 'bp': 'bandpass', 'bs': 'bandstop', 'bandstop': 'bandstop', 'bands': 'bandstop', 'stop': 'bandstop', 'l': 'lowpass', 'low': 'lowpass', 'lowpass': 'lowpass', 'lp': 'lowpass', 'high': 'highpass', 'highpass': 'highpass', 'h': 'highpass', 'hp': 'highpass', } bessel_norms = {'bessel': 'phase', 'bessel_phase': 'phase', 'bessel_delay': 'delay', 'bessel_mag': 'mag'}
bsd-3-clause
shenzebang/scikit-learn
examples/exercises/plot_cv_digits.py
232
1206
""" ============================================= Cross-validation on Digits Dataset Exercise ============================================= A tutorial exercise using Cross-validation with an SVM on the Digits dataset. This exercise is used in the :ref:`cv_generators_tut` part of the :ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`. """ print(__doc__) import numpy as np from sklearn import cross_validation, datasets, svm digits = datasets.load_digits() X = digits.data y = digits.target svc = svm.SVC(kernel='linear') C_s = np.logspace(-10, 0, 10) scores = list() scores_std = list() for C in C_s: svc.C = C this_scores = cross_validation.cross_val_score(svc, X, y, n_jobs=1) scores.append(np.mean(this_scores)) scores_std.append(np.std(this_scores)) # Do the plotting import matplotlib.pyplot as plt plt.figure(1, figsize=(4, 3)) plt.clf() plt.semilogx(C_s, scores) plt.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--') plt.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--') locs, labels = plt.yticks() plt.yticks(locs, list(map(lambda x: "%g" % x, locs))) plt.ylabel('CV score') plt.xlabel('Parameter C') plt.ylim(0, 1.1) plt.show()
bsd-3-clause
andreasvc/disco-dop
web/browse.py
1
12449
"""Web interface to browse a corpus with various visualizations.""" # stdlib import os import re import sys import glob import math import logging from collections import OrderedDict from functools import wraps import matplotlib matplotlib.use('AGG') import matplotlib.cm as cm import pandas # Flask & co from flask import Flask, Response from flask import request, render_template # disco-dop from discodop import treebank, treebanktransforms from discodop.tree import DrawTree DEBUG = False # when True: enable debugging interface, disable multiprocessing PASSWD = None # optionally, dict with user=>pass strings HEADRULES = '../alpino.headrules' logging.basicConfig( format='%(asctime)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=logging.DEBUG) APP = Flask(__name__) log = APP.logger STANDALONE = __name__ == '__main__' CORPUS_DIR = "corpus/" COLORS = dict(enumerate(''' Black Red Green Orange Blue Turquoise SlateGray Peru Teal Aqua Aquamarine BlanchedAlmond Brown Burlywood CadetBlue Chartreuse Chocolate Coral Crimson Cyan Firebrick ForestGreen Fuchsia Gainsboro Gold Goldenrod Gray GreenYellow HotPink IndianRed Indigo Khaki Lime YellowGreen Magenta Maroon Yellow MidnightBlue Moccasin NavyBlue Olive OliveDrab Orchid PapayaWhip Pink Plum PowderBlue Purple RebeccaPurple RoyalBlue SaddleBrown Salmon SandyBrown SeaGreen Sienna Silver SkyBlue SlateBlue Tan Thistle Tomato Violet Wheat'''.split())) WORDLIST = pandas.read_table('sonar-word.freqsort.lower.gz', encoding='utf8', index_col=0, header=None, names=['word', 'count'], nrows=20000).index def getdeplen(item): """Compute dependency length.""" tree = item.tree.copy(True) deps = treebank.dependencies(tree) a, b = treebank.deplen(deps) return ([abs(x - y) > 7 for x, _, y in deps], a / b if b else 0) # cannot highlight due to removing punct # return (None, a / b if b else 0) def getmodifiers(item): """Count and highlight REL/PP-modifiers.""" nodes = list(item.tree.subtrees(lambda n: n.label in ('REL', 'PP') and treebanktransforms.function(n) == 'mod')) return toboolvec(len(item.sent), {a for x in nodes for a in x.leaves()}), len(nodes) def toboolvec(length, indices): """Convert a list of indices into a list of booleans.""" return [n in indices for n in range(length)] # Functions that accept item object with item.tree and item.sent members; # return tuple (wordhighlights, sentweight). FILTERS = { 'average dependency length': getdeplen, 'd-level': lambda i: (None, treebanktransforms.dlevel(i.tree)), 'rare words': lambda i: (list(~pandas.Index( t.lower() for t in i.sent ).isin(WORDLIST) & pandas.Series([ # filter names 'eigen' not in n.source[treebank.MORPH] for n in sorted(i.tree.subtrees(lambda n: isinstance(n[0], int)), key=lambda n: n[0])]) ), None), 'PP/REL modifiers': getmodifiers, 'punctuation': lambda i: (None, max('.,\'"?!(:;'.find(t) + 1 for t in i.sent)), 'direct speech': lambda i: (None, re.match(r"^- .*$|(?:^|.* )['\"](?: .*|$)", ' '.join(i.sent)) is not None), } def torgb(val, mappable): """Return hexadecimal HTML color string.""" return '#%02x%02x%02x' % mappable.to_rgba(val, bytes=True)[:3] def charvalues(sent, values): """Project token values to character values. >>> sorted(charvalues(['The', 'cat', 'is', 'on', 'the', 'mat'], ... 
[0, 0, 1, 1, 0, 1])) [0, 1, 2, 3, 8, 9, 10, 14, 15, 16, 17] """ assert len(sent) == len(values) result = [] for a, b in zip(sent, values): result.extend([b] * (len(a) + 1)) return result # http://flask.pocoo.org/snippets/8/ def check_auth(username, password): """This function is called to check if a username / password combination is valid.""" return PASSWD is None or (username in PASSWD and password == PASSWD[username]) def authenticate(): """Sends a 401 response that enables basic auth.""" return Response( 'Could not verify your access level for that URL.\n' 'You have to login with proper credentials', 401, {'WWW-Authenticate': 'Basic realm="Login Required"'}) def requires_auth(f): """Decorator to require basic authentication for route.""" @wraps(f) def decorated(*args, **kwargs): """This docstring intentionally left blank.""" auth = request.authorization if not auth or not check_auth(auth.username, auth.password): return authenticate() return f(*args, **kwargs) return decorated # end snipppet def applyhighlight(sent, high1, high2, colorvec=None): """Return a version of sent where given char. indices are highlighted.""" cur = None start = 0 out = [] for n, _ in enumerate(sent): if colorvec is not None: if cur != COLORS.get(colorvec[n], 'gray'): out.append(sent[start:n]) if cur is not None: out.append('</font>') out.append('<font color=%s>' % COLORS.get(colorvec[n], 'gray')) start = n cur = COLORS.get(colorvec[n], 'gray') elif n in high1: if cur != 'red': out.append(sent[start:n]) if cur is not None: out.append('</span>') out.append('<span class=r>') start = n cur = 'red' elif n in high2: if cur != 'blue': out.append(sent[start:n]) if cur is not None: out.append('</span>') out.append('<span class=b>') start = n cur = 'blue' else: if cur is not None: out.append(sent[start:n]) out.append('</span>') start = n cur = None out.append(sent[start:]) if cur is not None: out.append('</font>') return ''.join(out) def addsentweight(x): wordhighlights, sentweight = x if sentweight is None: return wordhighlights, sum(wordhighlights) return x @APP.route('/browse') @requires_auth def browsetrees(): """Browse through trees in a file.""" chunk = 20 # number of trees to fetch for one request if 'text' in request.args and 'sent' in request.args: textno = int(request.args['text']) sentno = int(request.args['sent']) start = max(1, sentno - sentno % chunk) stop = start + chunk nofunc = 'nofunc' in request.args nomorph = 'nomorph' in request.args filename = os.path.join(CORPUS_DIR, TEXTS[textno] + '.export') trees = CORPORA[filename].itertrees(start, stop) results = ['<pre id="t%s"%s>%s\n%s</pre>' % (n, ' style="display: none; "' if 'ajax' in request.args else '', ', '.join('%s: %.3g' % (f, addsentweight(FILTERS[f](item))[1]) for f in sorted(FILTERS)), DrawTree(item.tree, item.sent).text( unicodelines=True, html=True)) for n, (_key, item) in enumerate(trees, start)] if 'ajax' in request.args: return '\n'.join(results) prevlink = '<a id=prev>prev</a>' if sentno > chunk: prevlink = '<a href="browse?text=%d;sent=%d" id=prev>prev</a>' % ( textno, sentno - chunk + 1) nextlink = '<a id=next>next</a>' nextlink = '<a href="browse?text=%d;sent=%d" id=next>next</a>' % ( textno, sentno + chunk + 1) return render_template('browse.html', textno=textno, sentno=sentno, text=TEXTS[textno], totalsents=1000, trees=results, prevlink=prevlink, nextlink=nextlink, chunk=chunk, nofunc=nofunc, nomorph=nomorph, mintree=start, maxtree=stop) return '<h1>Browse through trees</h1>\n<ol>\n%s</ol>\n' % '\n'.join( '<li><a 
href="browse?text=%d;sent=1;nomorph">%s</a> ' % (n, text) for n, text in enumerate(TEXTS)) @APP.route('/') @APP.route('/browsesents') @requires_auth def browsesents(): """Browse through sentences in a file; highlight selectable features.""" chunk = 20 # number of sentences per page if 'text' in request.args and 'sent' in request.args: textno = int(request.args['text']) sentno = int(request.args['sent']) sentno = max(chunk // 2 + 1, sentno) start = max(1, sentno - chunk // 2) stop = start + chunk filename = os.path.join(CORPUS_DIR, TEXTS[textno] + '.export') feat = request.args.get('feat', next(iter(FILTERS))) trees = list(CORPORA[filename].itertrees(start, stop)) results = [] values = [addsentweight(FILTERS[feat](item)) for n, (_key, item) in enumerate(trees, start)] norm = matplotlib.colors.Normalize( vmin=0, vmax=max(a for _, a in values) * 2) mappable = cm.ScalarMappable(norm, 'YlOrBr') for n, ((_key, item), (wordhighlights, sentweight)) in enumerate( zip(trees, values), start): if sentweight is None: sentweight = sum(wordhighlights) if wordhighlights is not None: xsent = applyhighlight( ' '.join(item.sent), None, None, colorvec=charvalues(item.sent, wordhighlights)) else: xsent = ' '.join(item.sent) results.append( '<a href="browse?text=%d;sent=%d" ' 'style="text-decoration: none; color: black;">' '<span style="background: %s; " title="%s: %.3g">' ' %s </span></a>' % (textno, n, torgb(sentweight, mappable), feat, sentweight, xsent)) legend = 'Feature: [ %s ]<br>' % ', '.join(f if f == feat else ('<a href="browsesents?text=%d;sent=%d;feat=%s">' '%s</a>' % (textno, sentno, f, f)) for f in sorted(FILTERS)) legend += 'Legend: ' + ''.join( '<span style="background-color: %s; width: 30px; ' 'display: inline-block; text-align: center; ">' '%d</span>' % (torgb(n, mappable), n) for n in range(0, int(math.ceil(max(a for _, a in values))) + 1)) prevlink = '<a id=prev>prev</a>' if sentno > chunk: prevlink = ( '<a href="browsesents?text=%d;sent=%d;feat=%s" id=prev>' 'prev</a>' % (textno, sentno - chunk, feat)) nextlink = '<a id=next>next</a>' nextlink = ('<a href="browsesents?text=%d;sent=%d;feat=%s" id=next>' 'next</a>' % (textno, sentno + chunk, feat)) return render_template('browsesents.html', textno=textno, sentno=sentno, text=TEXTS[textno], totalsents='??', # FIXME sents=results, prevlink=prevlink, nextlink=nextlink, chunk=chunk, mintree=start, legend=legend, query=request.args.get('query', ''), engine='') return render_template('browsemain.html', texts=TEXTS) def querydict(queries): """Return an OrderedDict of names and queries. 
name is abbreviated query if not given.""" result = OrderedDict() for line in (x for x in queries.splitlines() if x.strip()): if ':' in line and line[:line.index(':')].isalnum(): name, query = line.split(':', 1) else: name = line[:100] + ('' if len(line) < 100 else '...') query = line if '\t' in query: normquery, query = query.split('\t') else: normquery = None result[name] = normquery, query return result def getcorpus(): """Get list of files and number of lines in them.""" files = sorted(glob.glob(os.path.join(CORPUS_DIR, '*.export'))) assert files, ('no corpus files with extension .export ' 'found.') texts = [os.path.splitext(os.path.basename(a))[0] for a in files] corpora = {filename: treebank.NegraCorpusReader(filename, headrules=HEADRULES, punct='move') for filename in files} if os.path.exists('metadata.csv'): metadata = pandas.read_csv('metadata.csv', index_col=0) assert set(metadata.index) == set(texts), ( 'metadata.csv does not match list of files.\n' 'only in metadata: %s\nonly in files: %s' % ( set(metadata.index) - set(texts), set(texts) - set(metadata.index))) metadata = metadata.loc[texts] else: metadata = None return texts, corpora, metadata class QueryStringRedirectMiddleware(object): """Support ; as query delimiter. http://flask.pocoo.org/snippets/43/""" def __init__(self, application): self.application = application def __call__(self, environ, start_response): qs = environ.get('QUERY_STRING', '') environ['QUERY_STRING'] = qs.replace(';', '&') return self.application(environ, start_response) APP.wsgi_app = QueryStringRedirectMiddleware(APP.wsgi_app) log.info('loading corpus.') if STANDALONE: from getopt import gnu_getopt, GetoptError try: opts, _args = gnu_getopt(sys.argv[1:], '', ['port=', 'ip=', 'numproc=', 'debug']) opts = dict(opts) except GetoptError as err: print('error: %r' % err, file=sys.stderr) sys.exit(2) DEBUG = '--debug' in opts # NB: load corpus regardless of whether running standalone: (TEXTS, CORPORA, METADATA) = getcorpus() log.info('corpus loaded.') try: with open('treesearchpasswd.txt', 'rt') as fileobj: PASSWD = {a.strip(): b.strip() for a, b in (line.split(':', 1) for line in fileobj)} log.info('password protection enabled.') except IOError: log.info('no password protection.') if STANDALONE: APP.run(use_reloader=False, host=opts.get('--ip', '0.0.0.0'), port=int(opts.get('--port', 5003)), debug=DEBUG)
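# Minimal standalone sketch of the colour coding used in browsesents() above:
# a per-sentence weight is mapped through a matplotlib colormap to a hex colour,
# as torgb() does. The weights and colormap name here are made up for illustration.
import matplotlib
import matplotlib.cm as cm

weights = [0.1, 0.4, 2.5, 1.0]
norm = matplotlib.colors.Normalize(vmin=0, vmax=max(weights) * 2)
mappable = cm.ScalarMappable(norm, 'YlOrBr')
for w in weights:
    r, g, b, _ = mappable.to_rgba(w, bytes=True)
    print('weight %.2f -> #%02x%02x%02x' % (w, r, g, b))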
gpl-2.0
ShuboshaKuro/SimpleGameEngine
Test.py
1
1251
import numpy as np
import os
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

# has to change whenever noise_width and noise_height change in the PerlinNoise.hpp file
DIMENSION1 = 200
DIMENSION2 = 200

# works if the working directory is set
path = os.path.dirname(os.path.realpath(__file__))
FILENAME = path + "\input0.txt"

if __name__ == '__main__':
    string = open(FILENAME, '+r')
    noise = np.fromstring(string.read(), sep=" ", dtype=float).reshape(DIMENSION2, DIMENSION1)

    # Build a grid by the 2 dimensions
    Xr = np.arange(DIMENSION1)
    Yr = np.arange(DIMENSION2)
    X, Y = np.meshgrid(Xr, Yr)

    # Build a figure with 2 subplots, the first is 3D
    fig = plt.figure()
    fig.suptitle("3D and 2D heightmap")
    colormap = 'coolwarm'

    ax = fig.add_subplot(2, 1, 1, projection='3d')
    surf = ax.plot_surface(X, Y, noise, rstride=1, cstride=1, cmap=colormap,
                           linewidth=0, antialiased=False)

    ax2 = fig.add_subplot(2, 1, 2)
    im = ax2.imshow(noise, cmap=colormap, interpolation='nearest')

    # swap the Y axis so it aligns with the 3D plot
    ax2.invert_yaxis()

    # add an explanatory colour bar
    plt.colorbar(im, orientation='horizontal')

    # Show the image
    plt.show()
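# Companion sketch for trying the plot above without the C++ engine: write a
# placeholder "input0.txt" of the expected shape. The whitespace-separated
# float format is inferred from the np.fromstring() call above, so treat this
# as an assumption about what PerlinNoise.hpp actually emits.
import numpy as np

DIMENSION1, DIMENSION2 = 200, 200
xg = np.linspace(0, 4 * np.pi, DIMENSION1)
yg = np.linspace(0, 4 * np.pi, DIMENSION2)
fake_noise = np.outer(np.sin(yg), np.cos(xg))   # shape (DIMENSION2, DIMENSION1)
np.savetxt("input0.txt", fake_noise.reshape(1, -1), fmt="%.6f")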
mit
ronnyandersson/zignal
examples/ex_chunks.py
1
2576
'''
Created on 12 Apr 2020

@author: Ronny Andersson (ronny@andersson.tk)
@copyright: (c) 2020 Ronny Andersson
@license: MIT

Demo of how to iterate over an instance of the Audio class, for chunk-based
processing. Typically the chunks have a size that is a power of two, for
example 256, 1024 or 4096. In this example the chunk size is set to 1000 for
simplicity in the plots. The sample rate in this example is also set to a
value that enhances the effect of the example, since here a chunk equals one
second of data.
'''

# Standard library
import logging

# Third party
import matplotlib.pyplot as plt
import numpy as np

# Internal
import zignal

if __name__ == '__main__':
    logging.basicConfig(
        format='%(levelname)-7s: %(module)s.%(funcName)-15s %(message)s',
        level='DEBUG',
        )
    logging.getLogger("matplotlib").setLevel(logging.INFO)
    logging.getLogger("zignal").setLevel(logging.DEBUG)

    fs = 1000

    # Create various ramp signals, to visualise the chunks better. Not real
    # audio, but shows in a plot what the chunks look like
    a1 = zignal.Audio(fs=fs, initialdata=np.linspace(0, 1, num=(1000//2)))
    a2 = zignal.Audio(fs=fs, initialdata=np.linspace(0, -1, num=(1000*1)+500))
    a3 = zignal.Audio(fs=fs, initialdata=np.linspace(0, 1, num=(1000*2)+200))

    a = zignal.Audio(fs=fs)
    a.append(a1, a2, a3)
    print(a)

    # We now have 2.2 seconds of audio in three channels. This does not add up
    # to even chunk sizes, so padding will have to be done in order to iterate.
    #
    # Three (3) chunks are expected.
    for val in a.iter_chunks(chunksize=1000):
        print("------------------------------------------------")
        print("shape of data in chunk: %s" % str(val.shape))
        print(val)

        plt.figure(1)
        plt.plot(val[:, 0], ls="-", label="a1")
        plt.plot(val[:, 1], ls="--", label="a2")
        plt.plot(val[:, 2], ls="-.", label="a3")
        plt.grid()
        plt.ylim(-1.1, 1.1)
        plt.xlabel("samples in chunk")
        plt.ylabel("magnitude [lin]")
        plt.legend(loc="upper right")
        plt.show()

    # We can pad beforehand if we know how many samples are missing, then no
    # padding will occur inside the iterator
    b = a.copy()
    b.gain(-20)  # just to get a debug logging entry
    b.pad(nofsamples=800)
    print(b)
    for val in b.iter_chunks(chunksize=1000):
        print("------------------------------------------------")
        print("shape of data in chunk: %s" % str(val.shape))
        print(val)

    print('-- Done --')
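# Plain-numpy sketch of the chunking-with-padding idea demonstrated above.
# It mimics what Audio.iter_chunks appears to do (zero-pad the tail so the
# final chunk is full-length); the zignal internals may differ.
import numpy as np

def iter_chunks(samples, chunksize):
    """Yield consecutive chunks of `samples`, zero-padding the final one."""
    n = len(samples)
    for start in range(0, n, chunksize):
        chunk = samples[start:start + chunksize]
        if len(chunk) < chunksize:
            chunk = np.pad(chunk, (0, chunksize - len(chunk)), mode='constant')
        yield chunk

data = np.linspace(0, 1, 2200)                       # 2.2 s at fs=1000, as in the demo
print([c.shape for c in iter_chunks(data, 1000)])    # three chunks of 1000 samples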
mit
antiface/mne-python
examples/time_frequency/plot_compute_raw_data_spectrum.py
16
2573
""" ================================================== Compute the power spectral density of raw data ================================================== This script shows how to compute the power spectral density (PSD) of measurements on a raw dataset. It also show the effect of applying SSP to the data to reduce ECG and EOG artifacts. """ # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr> # Martin Luessi <mluessi@nmr.mgh.harvard.edu> # Eric Larson <larson.eric.d@gmail.com> # License: BSD (3-clause) import numpy as np import matplotlib.pyplot as plt import mne from mne import io, read_proj, read_selection from mne.datasets import sample print(__doc__) ############################################################################### # Set parameters data_path = sample.data_path() raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif' proj_fname = data_path + '/MEG/sample/sample_audvis_eog_proj.fif' # Setup for reading the raw data raw = io.Raw(raw_fname, preload=True) raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more # Add SSP projection vectors to reduce EOG and ECG artifacts projs = read_proj(proj_fname) raw.add_proj(projs, remove_existing=True) tmin, tmax = 0, 60 # use the first 60s of data fmin, fmax = 2, 300 # look at frequencies between 2 and 300Hz n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2 plt.ion() # Let's first check out all channel types raw.plot_psd(area_mode='range', tmax=10.0) # Now let's focus on a smaller subset: # Pick MEG magnetometers in the Left-temporal region selection = read_selection('Left-temporal') picks = mne.pick_types(raw.info, meg='mag', eeg=False, eog=False, stim=False, exclude='bads', selection=selection) # Let's just look at the first few channels for demonstration purposes picks = picks[:4] plt.figure() ax = plt.axes() raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft, n_jobs=1, proj=False, ax=ax, color=(0, 0, 1), picks=picks) # And now do the same with SSP applied raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft, n_jobs=1, proj=True, ax=ax, color=(0, 1, 0), picks=picks) # And now do the same with SSP + notch filtering raw.notch_filter(np.arange(60, 241, 60), picks=picks, n_jobs=1) raw.plot_psd(tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax, n_fft=n_fft, n_jobs=1, proj=True, ax=ax, color=(1, 0, 0), picks=picks) ax.set_title('Four left-temporal magnetometers') plt.legend(['Without SSP', 'With SSP', 'SSP + Notch'])
bsd-3-clause
sumspr/scikit-learn
examples/linear_model/plot_sgd_penalties.py
249
1563
""" ============== SGD: Penalties ============== Plot the contours of the three penalties. All of the above are supported by :class:`sklearn.linear_model.stochastic_gradient`. """ from __future__ import division print(__doc__) import numpy as np import matplotlib.pyplot as plt def l1(xs): return np.array([np.sqrt((1 - np.sqrt(x ** 2.0)) ** 2.0) for x in xs]) def l2(xs): return np.array([np.sqrt(1.0 - x ** 2.0) for x in xs]) def el(xs, z): return np.array([(2 - 2 * x - 2 * z + 4 * x * z - (4 * z ** 2 - 8 * x * z ** 2 + 8 * x ** 2 * z ** 2 - 16 * x ** 2 * z ** 3 + 8 * x * z ** 3 + 4 * x ** 2 * z ** 4) ** (1. / 2) - 2 * x * z ** 2) / (2 - 4 * z) for x in xs]) def cross(ext): plt.plot([-ext, ext], [0, 0], "k-") plt.plot([0, 0], [-ext, ext], "k-") xs = np.linspace(0, 1, 100) alpha = 0.501 # 0.5 division throuh zero cross(1.2) plt.plot(xs, l1(xs), "r-", label="L1") plt.plot(xs, -1.0 * l1(xs), "r-") plt.plot(-1 * xs, l1(xs), "r-") plt.plot(-1 * xs, -1.0 * l1(xs), "r-") plt.plot(xs, l2(xs), "b-", label="L2") plt.plot(xs, -1.0 * l2(xs), "b-") plt.plot(-1 * xs, l2(xs), "b-") plt.plot(-1 * xs, -1.0 * l2(xs), "b-") plt.plot(xs, el(xs, alpha), "y-", label="Elastic Net") plt.plot(xs, -1.0 * el(xs, alpha), "y-") plt.plot(-1 * xs, el(xs, alpha), "y-") plt.plot(-1 * xs, -1.0 * el(xs, alpha), "y-") plt.xlabel(r"$w_0$") plt.ylabel(r"$w_1$") plt.legend() plt.axis("equal") plt.show()
bsd-3-clause
DavidNorman/tensorflow
tensorflow/examples/tutorials/word2vec/word2vec_basic.py
2
14485
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Basic word2vec example.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import collections import hashlib import math import os import random import sys from tempfile import gettempdir import zipfile import numpy as np from six.moves import urllib from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf from tensorflow.contrib.tensorboard.plugins import projector data_index = 0 def _hash_file(fpath): hasher = hashlib.sha256() with open(fpath, 'rb') as fpath_file: for chunk in iter(lambda: fpath_file.read(65535), b''): hasher.update(chunk) return hasher.hexdigest() def word2vec_basic(log_dir): """Example of building, training and visualizing a word2vec model.""" # Create the directory for TensorBoard variables if there is not. if not os.path.exists(log_dir): os.makedirs(log_dir) # Step 1: Download the data. # Note: Source website does not support HTTPS right now. url = 'http://mattmahoney.net/dc/' # pylint: disable=redefined-outer-name def maybe_download(filename, expected_bytes, sha256=None): """Download a file if not present, and make sure it's the right size.""" local_filename = os.path.join(gettempdir(), filename) if not os.path.exists(local_filename): local_filename, _ = urllib.request.urlretrieve(url + filename, local_filename) statinfo = os.stat(local_filename) if sha256 and _hash_file(local_filename) != sha256: raise Exception('Failed to verify ' + local_filename + ' due to hash ' 'mismatch. Can you get to it with a browser?') if statinfo.st_size == expected_bytes: print('Found and verified', filename) else: print(statinfo.st_size) raise Exception('Failed to verify ' + local_filename + '. Can you get to it with a browser?') return local_filename filename = maybe_download( 'text8.zip', 31344016, sha256='a6640522afe85d1963ad56c05b0ede0a0c000dddc9671758a6cc09b7a38e5232') # Read the data into a list of strings. def read_data(filename): """Extract the first file enclosed in a zip file as a list of words.""" with zipfile.ZipFile(filename) as f: data = tf.compat.as_str(f.read(f.namelist()[0])).split() return data vocabulary = read_data(filename) print('Data size', len(vocabulary)) # Step 2: Build the dictionary and replace rare words with UNK token. 
vocabulary_size = 50000 def build_dataset(words, n_words): """Process raw inputs into a dataset.""" count = [['UNK', -1]] count.extend(collections.Counter(words).most_common(n_words - 1)) dictionary = {word: index for index, (word, _) in enumerate(count)} data = [] unk_count = 0 for word in words: index = dictionary.get(word, 0) if index == 0: # dictionary['UNK'] unk_count += 1 data.append(index) count[0][1] = unk_count reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys())) return data, count, dictionary, reversed_dictionary # Filling 4 global variables: # data - list of codes (integers from 0 to vocabulary_size-1). # This is the original text but words are replaced by their codes # count - map of words(strings) to count of occurrences # dictionary - map of words(strings) to their codes(integers) # reverse_dictionary - map of codes(integers) to words(strings) data, count, unused_dictionary, reverse_dictionary = build_dataset( vocabulary, vocabulary_size) del vocabulary # Hint to reduce memory. print('Most common words (+UNK)', count[:5]) print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]]) # Step 3: Function to generate a training batch for the skip-gram model. def generate_batch(batch_size, num_skips, skip_window): global data_index assert batch_size % num_skips == 0 assert num_skips <= 2 * skip_window batch = np.ndarray(shape=(batch_size), dtype=np.int32) labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32) span = 2 * skip_window + 1 # [ skip_window target skip_window ] buffer = collections.deque(maxlen=span) # pylint: disable=redefined-builtin if data_index + span > len(data): data_index = 0 buffer.extend(data[data_index:data_index + span]) data_index += span for i in range(batch_size // num_skips): context_words = [w for w in range(span) if w != skip_window] words_to_use = random.sample(context_words, num_skips) for j, context_word in enumerate(words_to_use): batch[i * num_skips + j] = buffer[skip_window] labels[i * num_skips + j, 0] = buffer[context_word] if data_index == len(data): buffer.extend(data[0:span]) data_index = span else: buffer.append(data[data_index]) data_index += 1 # Backtrack a little bit to avoid skipping words in the end of a batch data_index = (data_index + len(data) - span) % len(data) return batch, labels batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1) for i in range(8): print(batch[i], reverse_dictionary[batch[i]], '->', labels[i, 0], reverse_dictionary[labels[i, 0]]) # Step 4: Build and train a skip-gram model. batch_size = 128 embedding_size = 128 # Dimension of the embedding vector. skip_window = 1 # How many words to consider left and right. num_skips = 2 # How many times to reuse an input to generate a label. num_sampled = 64 # Number of negative examples to sample. # We pick a random validation set to sample nearest neighbors. Here we limit # the validation samples to the words that have a low numeric ID, which by # construction are also the most frequent. These 3 variables are used only for # displaying model accuracy, they don't affect calculation. valid_size = 16 # Random set of words to evaluate similarity on. valid_window = 100 # Only pick dev samples in the head of the distribution. valid_examples = np.random.choice(valid_window, valid_size, replace=False) graph = tf.Graph() with graph.as_default(): # Input data. 
with tf.name_scope('inputs'): train_inputs = tf.placeholder(tf.int32, shape=[batch_size]) train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1]) valid_dataset = tf.constant(valid_examples, dtype=tf.int32) # Ops and variables pinned to the CPU because of missing GPU implementation with tf.device('/cpu:0'): # Look up embeddings for inputs. with tf.name_scope('embeddings'): embeddings = tf.Variable( tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0)) embed = tf.nn.embedding_lookup(embeddings, train_inputs) # Construct the variables for the NCE loss with tf.name_scope('weights'): nce_weights = tf.Variable( tf.truncated_normal([vocabulary_size, embedding_size], stddev=1.0 / math.sqrt(embedding_size))) with tf.name_scope('biases'): nce_biases = tf.Variable(tf.zeros([vocabulary_size])) # Compute the average NCE loss for the batch. # tf.nce_loss automatically draws a new sample of the negative labels each # time we evaluate the loss. # Explanation of the meaning of NCE loss and why choosing NCE over tf.nn.sampled_softmax_loss: # http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/ # http://papers.nips.cc/paper/5165-learning-word-embeddings-efficiently-with-noise-contrastive-estimation.pdf with tf.name_scope('loss'): loss = tf.reduce_mean( tf.nn.nce_loss( weights=nce_weights, biases=nce_biases, labels=train_labels, inputs=embed, num_sampled=num_sampled, num_classes=vocabulary_size)) # Add the loss value as a scalar to summary. tf.summary.scalar('loss', loss) # Construct the SGD optimizer using a learning rate of 1.0. with tf.name_scope('optimizer'): optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss) # Compute the cosine similarity between minibatch examples and all # embeddings. norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True)) normalized_embeddings = embeddings / norm valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings, valid_dataset) similarity = tf.matmul( valid_embeddings, normalized_embeddings, transpose_b=True) # Merge all summaries. merged = tf.summary.merge_all() # Add variable initializer. init = tf.global_variables_initializer() # Create a saver. saver = tf.train.Saver() # Step 5: Begin training. num_steps = 100001 with tf.compat.v1.Session(graph=graph) as session: # Open a writer to write summaries. writer = tf.summary.FileWriter(log_dir, session.graph) # We must initialize all variables before we use them. init.run() print('Initialized') average_loss = 0 for step in xrange(num_steps): batch_inputs, batch_labels = generate_batch(batch_size, num_skips, skip_window) feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels} # Define metadata variable. run_metadata = tf.RunMetadata() # We perform one update step by evaluating the optimizer op (including it # in the list of returned values for session.run() # Also, evaluate the merged op to get all summaries from the returned # "summary" variable. Feed metadata variable to session for visualizing # the graph in TensorBoard. _, summary, loss_val = session.run([optimizer, merged, loss], feed_dict=feed_dict, run_metadata=run_metadata) average_loss += loss_val # Add returned summaries to writer in each step. writer.add_summary(summary, step) # Add metadata to visualize the graph for the last run. if step == (num_steps - 1): writer.add_run_metadata(run_metadata, 'step%d' % step) if step % 2000 == 0: if step > 0: average_loss /= 2000 # The average loss is an estimate of the loss over the last 2000 # batches. 
print('Average loss at step ', step, ': ', average_loss) average_loss = 0 # Note that this is expensive (~20% slowdown if computed every 500 steps) if step % 10000 == 0: sim = similarity.eval() for i in xrange(valid_size): valid_word = reverse_dictionary[valid_examples[i]] top_k = 8 # number of nearest neighbors nearest = (-sim[i, :]).argsort()[1:top_k + 1] log_str = 'Nearest to %s:' % valid_word print( log_str, ', '.join([reverse_dictionary[nearest[k]] for k in range(top_k)])) final_embeddings = normalized_embeddings.eval() # Write corresponding labels for the embeddings. with open(log_dir + '/metadata.tsv', 'w') as f: for i in xrange(vocabulary_size): f.write(reverse_dictionary[i] + '\n') # Save the model for checkpoints. saver.save(session, os.path.join(log_dir, 'model.ckpt')) # Create a configuration for visualizing embeddings with the labels in # TensorBoard. config = projector.ProjectorConfig() embedding_conf = config.embeddings.add() embedding_conf.tensor_name = embeddings.name embedding_conf.metadata_path = os.path.join(log_dir, 'metadata.tsv') projector.visualize_embeddings(writer, config) writer.close() # Step 6: Visualize the embeddings. # pylint: disable=missing-docstring # Function to draw visualization of distance between embeddings. def plot_with_labels(low_dim_embs, labels, filename): assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings' plt.figure(figsize=(18, 18)) # in inches for i, label in enumerate(labels): x, y = low_dim_embs[i, :] plt.scatter(x, y) plt.annotate( label, xy=(x, y), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom') plt.savefig(filename) try: # pylint: disable=g-import-not-at-top from sklearn.manifold import TSNE import matplotlib.pyplot as plt tsne = TSNE( perplexity=30, n_components=2, init='pca', n_iter=5000, method='exact') plot_only = 500 low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :]) labels = [reverse_dictionary[i] for i in xrange(plot_only)] plot_with_labels(low_dim_embs, labels, os.path.join(gettempdir(), 'tsne.png')) except ImportError as ex: print('Please install sklearn, matplotlib, and scipy to show embeddings.') print(ex) # All functionality is run after tf.compat.v1.app.run() (b/122547914). This # could be split up but the methods are laid sequentially with their usage for # clarity. def main(unused_argv): # Give a folder path as an argument with '--log_dir' to save # TensorBoard summaries. Default is a log folder in current directory. current_path = os.path.dirname(os.path.realpath(sys.argv[0])) parser = argparse.ArgumentParser() parser.add_argument( '--log_dir', type=str, default=os.path.join(current_path, 'log'), help='The log directory for TensorBoard summaries.') flags, unused_flags = parser.parse_known_args() word2vec_basic(flags.log_dir) if __name__ == '__main__': tf.app.run()
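# Framework-free sketch of the skip-gram pairing idea from Step 3 above
# (generate_batch): each target word is paired with words sampled from its
# context window. Toy tokens only; the real function also handles batch
# boundaries with a circular buffer over the whole corpus.
import random

def skipgram_pairs(tokens, skip_window=1, num_skips=2):
    pairs = []
    for i in range(skip_window, len(tokens) - skip_window):
        context = [j for j in range(i - skip_window, i + skip_window + 1) if j != i]
        for j in random.sample(context, num_skips):
            pairs.append((tokens[i], tokens[j]))   # (target, context) pair
    return pairs

print(skipgram_pairs(['the', 'quick', 'brown', 'fox', 'jumps'], 1, 2))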
apache-2.0
ankurankan/scikit-learn
examples/gaussian_process/plot_gp_regression.py
253
4054
#!/usr/bin/python # -*- coding: utf-8 -*- r""" ========================================================= Gaussian Processes regression: basic introductory example ========================================================= A simple one-dimensional regression exercise computed in two different ways: 1. A noise-free case with a cubic correlation model 2. A noisy case with a squared Euclidean correlation model In both cases, the model parameters are estimated using the maximum likelihood principle. The figures illustrate the interpolating property of the Gaussian Process model as well as its probabilistic nature in the form of a pointwise 95% confidence interval. Note that the parameter ``nugget`` is applied as a Tikhonov regularization of the assumed covariance between the training points. In the special case of the squared euclidean correlation model, nugget is mathematically equivalent to a normalized variance: That is .. math:: \mathrm{nugget}_i = \left[\frac{\sigma_i}{y_i}\right]^2 """ print(__doc__) # Author: Vincent Dubourg <vincent.dubourg@gmail.com> # Jake Vanderplas <vanderplas@astro.washington.edu> # Licence: BSD 3 clause import numpy as np from sklearn.gaussian_process import GaussianProcess from matplotlib import pyplot as pl np.random.seed(1) def f(x): """The function to predict.""" return x * np.sin(x) #---------------------------------------------------------------------- # First the noiseless case X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T # Observations y = f(X).ravel() # Mesh the input space for evaluations of the real function, the prediction and # its MSE x = np.atleast_2d(np.linspace(0, 10, 1000)).T # Instanciate a Gaussian Process model gp = GaussianProcess(corr='cubic', theta0=1e-2, thetaL=1e-4, thetaU=1e-1, random_start=100) # Fit to data using Maximum Likelihood Estimation of the parameters gp.fit(X, y) # Make the prediction on the meshed x-axis (ask for MSE as well) y_pred, MSE = gp.predict(x, eval_MSE=True) sigma = np.sqrt(MSE) # Plot the function, the prediction and the 95% confidence interval based on # the MSE fig = pl.figure() pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$') pl.plot(X, y, 'r.', markersize=10, label=u'Observations') pl.plot(x, y_pred, 'b-', label=u'Prediction') pl.fill(np.concatenate([x, x[::-1]]), np.concatenate([y_pred - 1.9600 * sigma, (y_pred + 1.9600 * sigma)[::-1]]), alpha=.5, fc='b', ec='None', label='95% confidence interval') pl.xlabel('$x$') pl.ylabel('$f(x)$') pl.ylim(-10, 20) pl.legend(loc='upper left') #---------------------------------------------------------------------- # now the noisy case X = np.linspace(0.1, 9.9, 20) X = np.atleast_2d(X).T # Observations and noise y = f(X).ravel() dy = 0.5 + 1.0 * np.random.random(y.shape) noise = np.random.normal(0, dy) y += noise # Mesh the input space for evaluations of the real function, the prediction and # its MSE x = np.atleast_2d(np.linspace(0, 10, 1000)).T # Instanciate a Gaussian Process model gp = GaussianProcess(corr='squared_exponential', theta0=1e-1, thetaL=1e-3, thetaU=1, nugget=(dy / y) ** 2, random_start=100) # Fit to data using Maximum Likelihood Estimation of the parameters gp.fit(X, y) # Make the prediction on the meshed x-axis (ask for MSE as well) y_pred, MSE = gp.predict(x, eval_MSE=True) sigma = np.sqrt(MSE) # Plot the function, the prediction and the 95% confidence interval based on # the MSE fig = pl.figure() pl.plot(x, f(x), 'r:', label=u'$f(x) = x\,\sin(x)$') pl.errorbar(X.ravel(), y, dy, fmt='r.', markersize=10, label=u'Observations') pl.plot(x, y_pred, 'b-', 
label=u'Prediction') pl.fill(np.concatenate([x, x[::-1]]), np.concatenate([y_pred - 1.9600 * sigma, (y_pred + 1.9600 * sigma)[::-1]]), alpha=.5, fc='b', ec='None', label='95% confidence interval') pl.xlabel('$x$') pl.ylabel('$f(x)$') pl.ylim(-10, 20) pl.legend(loc='upper left') pl.show()
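# Illustrative sketch, not part of the original example: the GaussianProcess
# class used above was dropped from later scikit-learn releases in favour of
# GaussianProcessRegressor. A roughly equivalent noiseless fit looks like this;
# the kernel choice here is an assumption, not a drop-in replacement for the
# cubic correlation model.
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, ConstantKernel

X = np.atleast_2d([1., 3., 5., 6., 7., 8.]).T
y = (X * np.sin(X)).ravel()

kernel = ConstantKernel(1.0) * RBF(length_scale=1.0)
gp = GaussianProcessRegressor(kernel=kernel, n_restarts_optimizer=10)
gp.fit(X, y)

x = np.atleast_2d(np.linspace(0, 10, 1000)).T
y_pred, sigma = gp.predict(x, return_std=True)   # mean and pointwise std dev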
bsd-3-clause
elisamussumeci/InfoDenguePredict
infodenguepredict/models/VAR2.py
2
1286
""" Vector Autogregression using statsmodels http://statsmodels.sourceforge.net/devel/vector_ar.html """ import numpy as np import pandas as pd from statsmodels.tsa.api import * from statsmodels.tsa.vector_ar.var_model import VAR from datetime import datetime import matplotlib.pyplot as plt from infodenguepredict.data.infodengue import get_alerta_table, build_multicity_dataset def build_model(data): data.index = pd.DatetimeIndex(data.index) model = VAR(data) return model if __name__ == "__main__": prediction_window = 5 # weeks scenario = 'global' if scenario == 'local': data = get_alerta_table(3303500) # Nova Iguaçu: 3303500 data = data[['casos', 'nivel']] else: data = build_multicity_dataset('RJ') data = data[[col for col in data.columns if col.startswith('casos') and not col.startswith('casos_est')]][:5] print(data.info()) # data.casos_est.plot(title="Series") model = build_model(data) fit = model.fit(maxlags=11, ic='aic') # 4 lags print(fit.summary()) fit.plot() fit.plot_acorr() plt.figure() lag_order = fit.k_ar forecast = fit.forecast(data.values[-lag_order:], prediction_window) print(forecast) fit.plot_forecast(prediction_window) plt.show()
gpl-3.0
JT5D/scikit-learn
examples/plot_multilabel.py
9
4299
# Authors: Vlad Niculae, Mathieu Blondel # License: BSD 3 clause """ ========================= Multilabel classification ========================= This example simulates a multi-label document classification problem. The dataset is generated randomly based on the following process: - pick the number of labels: n ~ Poisson(n_labels) - n times, choose a class c: c ~ Multinomial(theta) - pick the document length: k ~ Poisson(length) - k times, choose a word: w ~ Multinomial(theta_c) In the above process, rejection sampling is used to make sure that n is more than 2, and that the document length is never zero. Likewise, we reject classes which have already been chosen. The documents that are assigned to both classes are plotted surrounded by two colored circles. The classification is performed by projecting to the first two principal components found by PCA and CCA for visualisation purposes, followed by using the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two SVCs with linear kernels to learn a discriminative model for each class. Note that PCA is used to perform an unsupervised dimensionality reduction, while CCA is used to perform a supervised one. Note: in the plot, "unlabeled samples" does not mean that we don't know the labels (as in semi-supervised learning) but that the samples simply do *not* have a label. """ print(__doc__) import numpy as np import matplotlib.pylab as pl from sklearn.datasets import make_multilabel_classification from sklearn.multiclass import OneVsRestClassifier from sklearn.svm import SVC from sklearn.preprocessing import LabelBinarizer from sklearn.decomposition import PCA from sklearn.cross_decomposition import CCA def plot_hyperplane(clf, min_x, max_x, linestyle, label): # get the separating hyperplane w = clf.coef_[0] a = -w[0] / w[1] xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough yy = a * xx - (clf.intercept_[0]) / w[1] pl.plot(xx, yy, linestyle, label=label) def plot_subfigure(X, Y, subplot, title, transform): if transform == "pca": X = PCA(n_components=2).fit_transform(X) elif transform == "cca": # Convert list of tuples to a class indicator matrix first Y_indicator = LabelBinarizer().fit(Y).transform(Y) X = CCA(n_components=2).fit(X, Y_indicator).transform(X) else: raise ValueError min_x = np.min(X[:, 0]) max_x = np.max(X[:, 0]) min_y = np.min(X[:, 1]) max_y = np.max(X[:, 1]) classif = OneVsRestClassifier(SVC(kernel='linear')) classif.fit(X, Y) pl.subplot(2, 2, subplot) pl.title(title) zero_class = np.where([0 in y for y in Y]) one_class = np.where([1 in y for y in Y]) pl.scatter(X[:, 0], X[:, 1], s=40, c='gray') pl.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b', facecolors='none', linewidths=2, label='Class 1') pl.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange', facecolors='none', linewidths=2, label='Class 2') plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--', 'Boundary\nfor class 1') plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.', 'Boundary\nfor class 2') pl.xticks(()) pl.yticks(()) pl.xlim(min_x - .5 * max_x, max_x + .5 * max_x) pl.ylim(min_y - .5 * max_y, max_y + .5 * max_y) if subplot == 2: pl.xlabel('First principal component') pl.ylabel('Second principal component') pl.legend(loc="upper left") pl.figure(figsize=(8, 6)) X, Y = make_multilabel_classification(n_classes=2, n_labels=1, allow_unlabeled=True, random_state=1) plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca") plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", 
"pca") X, Y = make_multilabel_classification(n_classes=2, n_labels=1, allow_unlabeled=False, random_state=1) plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca") plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca") pl.subplots_adjust(.04, .02, .97, .94, .09, .2) pl.show()
bsd-3-clause
elkingtonmcb/bcbio-nextgen
bcbio/variation/validateplot.py
1
15359
"""Plot validation results from variant calling comparisons. Handles data normalization and plotting, emphasizing comparisons on methodology differences. """ import collections import os import numpy as np import pandas as pd try: import matplotlib as mpl mpl.use('Agg', force=True) import matplotlib.pyplot as plt from matplotlib.ticker import FuncFormatter except ImportError: mpl, plt = None, None try: import seaborn as sns except ImportError: sns = None from bcbio.log import logger from bcbio import utils from bcbio.variation import bamprep def classifyplot_from_plotfiles(plot_files, out_csv, outtype="png", title=None, size=None): """Create a plot from individual summary csv files with classification metrics. """ df = pd.concat([pd.read_csv(x) for x in plot_files]) df.to_csv(out_csv, index=False) return classifyplot_from_valfile(out_csv, outtype, title, size) def classifyplot_from_valfile(val_file, outtype="png", title=None, size=None): """Create a plot from a summarized validation file. Does new-style plotting of summarized metrics of false negative rate and false discovery rate. https://en.wikipedia.org/wiki/Sensitivity_and_specificity """ df = pd.read_csv(val_file) grouped = df.groupby(["sample", "caller", "vtype"]) df = grouped.apply(_calculate_fnr_fdr) df = df.reset_index() out_file = "%s.%s" % (os.path.splitext(val_file)[0], outtype) _do_classifyplot(df, out_file, title, size) return [out_file] def _calculate_fnr_fdr(group): """Calculate the false negative rate (1 - sensitivity) and false discovery rate (1 - precision). """ data = {k: d["value"] for k, d in group.set_index("metric").T.to_dict().items()} return pd.DataFrame([{"fnr": data["fn"] / float(data["tp"] + data["fn"]) * 100.0 if data["tp"] > 0 else 0.0, "fdr": data["fp"] / float(data["tp"] + data["fp"]) * 100.0 if data["tp"] > 0 else 0.0, "tpr": "TP: %s FN: %s" % (data["tp"], data["fn"]), "spc": "FP: %s" % (data["fp"])}]) def _do_classifyplot(df, out_file, title=None, size=None): """Plot using classification-based plot using seaborn. 
""" metric_labels = {"fdr": "False discovery rate", "fnr": "False negative rate"} metrics = [("fnr", "tpr"), ("fdr", "spc")] colors = ["light grey", "greyish"] data_dict = df.set_index(["sample", "caller", "vtype"]).T.to_dict() plt.ioff() sns.set(style='white') vtypes = sorted(df["vtype"].unique(), reverse=True) callers = sorted(df["caller"].unique()) samples = sorted(df["sample"].unique()) fig, axs = plt.subplots(len(vtypes) * len(callers), len(metrics)) fig.text(.5, .95, title if title else "", horizontalalignment='center', size=14) for vi, vtype in enumerate(vtypes): sns.set_palette(sns.xkcd_palette([colors[vi]])) for ci, caller in enumerate(callers): for j, (metric, label) in enumerate(metrics): cur_plot = axs[vi * len(vtypes) + ci][j] vals, labels = [], [] for sample in samples: cur_data = data_dict[(sample, caller, vtype)] vals.append(cur_data[metric]) labels.append(cur_data[label]) cur_plot.barh(np.arange(len(samples)), vals) all_vals = [] for k, d in data_dict.items(): if k[-1] == vtype: for m in metrics: all_vals.append(d[m[0]]) metric_max = max(all_vals) cur_plot.set_xlim(0, metric_max) pad = 0.1 * metric_max for ai, (val, label) in enumerate(zip(vals, labels)): cur_plot.annotate(label, (pad + (0 if max(vals) > metric_max / 2.0 else max(vals)), ai + 0.35), va='center', size=7) if j == 0: cur_plot.tick_params(axis='y', which='major', labelsize=8) cur_plot.locator_params(nbins=len(samples) + 2, axis="y", tight=True) cur_plot.set_yticklabels(samples, size=8, va="bottom") cur_plot.set_title("%s: %s" % (vtype, caller), fontsize=12, loc="left") else: cur_plot.get_yaxis().set_ticks([]) if ci == len(callers) - 1: cur_plot.tick_params(axis='x', which='major', labelsize=8) cur_plot.get_xaxis().set_major_formatter( FuncFormatter(lambda v, p: "%s%%" % (int(v) if round(v) == v else v))) if vi == len(vtypes) - 1: cur_plot.get_xaxis().set_label_text(metric_labels[metric], size=12) else: cur_plot.get_xaxis().set_ticks([]) cur_plot.spines['bottom'].set_visible(False) cur_plot.spines['left'].set_visible(False) cur_plot.spines['top'].set_visible(False) cur_plot.spines['right'].set_visible(False) x, y = (6, len(vtypes) * len(callers) + 1 * 0.5 * len(samples)) if size is None else size fig.set_size_inches(x, y) fig.tight_layout(rect=(0, 0, 1, 0.95)) plt.subplots_adjust(hspace=0.6) fig.savefig(out_file) def create_from_csv(in_csv, config=None, outtype="png", title=None, size=None): df = pd.read_csv(in_csv) create(df, None, 0, config or {}, os.path.splitext(in_csv)[0], outtype, title, size) def create(plot_data, header, ploti, sample_config, out_file_base, outtype="png", title=None, size=None): """Create plots of validation results for a sample, labeling prep strategies. """ if mpl is None or plt is None or sns is None: not_found = ", ".join([x for x in ['mpl', 'plt', 'sns'] if eval(x) is None]) logger.info("No validation plot. 
Missing imports: %s" % not_found) return None if header: df = pd.DataFrame(plot_data, columns=header) else: df = plot_data df["aligner"] = [get_aligner(x, sample_config) for x in df["sample"]] df["bamprep"] = [get_bamprep(x, sample_config) for x in df["sample"]] floors = get_group_floors(df, cat_labels) df["value.floor"] = [get_floor_value(x, cat, vartype, floors) for (x, cat, vartype) in zip(df["value"], df["category"], df["variant.type"])] out = [] for i, prep in enumerate(df["bamprep"].unique()): out.append(plot_prep_methods(df, prep, i + ploti, out_file_base, outtype, title, size)) return out cat_labels = {"concordant": "Concordant", "discordant-missing-total": "Discordant (missing)", "discordant-extra-total": "Discordant (extra)", "discordant-shared-total": "Discordant (shared)"} vtype_labels = {"snp": "SNPs", "indel": "Indels"} prep_labels = {} caller_labels = {"ensemble": "Ensemble", "freebayes": "FreeBayes", "gatk": "GATK Unified\nGenotyper", "gatk-haplotype": "GATK Haplotype\nCaller"} def plot_prep_methods(df, prep, prepi, out_file_base, outtype, title=None, size=None): """Plot comparison between BAM preparation methods. """ samples = df[(df["bamprep"] == prep)]["sample"].unique() assert len(samples) >= 1, samples out_file = "%s-%s.%s" % (out_file_base, samples[0], outtype) df = df[df["category"].isin(cat_labels)] _seaborn(df, prep, prepi, out_file, title, size) return out_file def _seaborn(df, prep, prepi, out_file, title=None, size=None): """Plot using seaborn wrapper around matplotlib. """ plt.ioff() sns.set(style='dark') vtypes = df["variant.type"].unique() callers = sorted(df["caller"].unique()) cats = _check_cats(["concordant", "discordant-missing-total", "discordant-extra-total", "discordant-shared-total"], vtypes, df, prep, callers) fig, axs = plt.subplots(len(vtypes), len(cats)) width = 0.8 for i, vtype in enumerate(vtypes): ax_row = axs[i] if len(vtypes) > 1 else axs for j, cat in enumerate(cats): vals, labels, maxval = _get_chart_info(df, vtype, cat, prep, callers) if len(cats) == 1: assert j == 0 ax = ax_row else: ax = ax_row[j] if i == 0: ax.set_title(cat_labels[cat], size=14) ax.get_yaxis().set_ticks([]) if j == 0: ax.set_ylabel(vtype_labels[vtype], size=14) ax.bar(np.arange(len(callers)), vals, width=width) ax.set_ylim(0, maxval) if i == len(vtypes) - 1: ax.set_xticks(np.arange(len(callers)) + width / 2.0) ax.set_xticklabels([caller_labels.get(x, x).replace("__", "\n") if x else "" for x in callers], size=8, rotation=45) else: ax.get_xaxis().set_ticks([]) _annotate(ax, labels, vals, np.arange(len(callers)), width) fig.text(.5, .95, prep_labels.get(prep, "") if title is None else title, horizontalalignment='center', size=16) fig.subplots_adjust(left=0.05, right=0.95, top=0.87, bottom=0.15, wspace=0.1, hspace=0.1) x, y = (10, 5) if size is None else size fig.set_size_inches(x, y) fig.savefig(out_file) def _check_cats(cats, vtypes, df, prep, callers): """Only include categories in the final output if they have values. """ out = [] for cat in cats: all_vals = [] for vtype in vtypes: vals, labels, maxval = _get_chart_info(df, vtype, cat, prep, callers) all_vals.extend(vals) if sum(all_vals) / float(len(all_vals)) > 2: out.append(cat) if len(out) == 0: return cats else: return out def _get_chart_info(df, vtype, cat, prep, callers): """Retrieve values for a specific variant type, category and prep method. 
""" maxval_raw = max(list(df["value.floor"])) curdf = df[(df["variant.type"] == vtype) & (df["category"] == cat) & (df["bamprep"] == prep)] vals = [] labels = [] for c in callers: row = curdf[df["caller"] == c] if len(row) > 0: vals.append(list(row["value.floor"])[0]) labels.append(list(row["value"])[0]) else: vals.append(1) labels.append("") return vals, labels, maxval_raw def _annotate(ax, annotate, height, left, width): """Annotate axis with labels. """ annotate_yrange_factor = 0.010 xticks = np.array(left) + width / 2.0 ymin, ymax = ax.get_ylim() yrange = ymax - ymin # Reset ymax and ymin so there's enough room to see the annotation of # the top-most if ymax > 0: ymax += yrange * 0.15 if ymin < 0: ymin -= yrange * 0.15 ax.set_ylim(ymin, ymax) yrange = ymax - ymin offset_ = yrange * annotate_yrange_factor if isinstance(annotate, collections.Iterable): annotations = map(str, annotate) else: annotations = ['%.3f' % h if type(h) is np.float_ else str(h) for h in height] for x, h, annotation in zip(xticks, height, annotations): # Adjust the offset to account for negative bars offset = offset_ if h >= 0 else -1 * offset_ verticalalignment = 'bottom' if h >= 0 else 'top' if len(str(annotation)) > 6: size = 7 elif len(str(annotation)) > 5: size = 8 else: size = 10 # Finally, add the text to the axes ax.annotate(annotation, (x, h + offset), verticalalignment=verticalalignment, horizontalalignment='center', size=size) def _ggplot(df, out_file): """Plot faceted items with ggplot wrapper on top of matplotlib. XXX Not yet functional """ import ggplot as gg df["variant.type"] = [vtype_labels[x] for x in df["variant.type"]] df["category"] = [cat_labels[x] for x in df["category"]] df["caller"] = [caller_labels.get(x, None) for x in df["caller"]] p = (gg.ggplot(df, gg.aes(x="caller", y="value.floor")) + gg.geom_bar() + gg.facet_wrap("variant.type", "category") + gg.theme_seaborn()) gg.ggsave(p, out_file) def get_floor_value(x, cat, vartype, floors): """Modify values so all have the same relative scale for differences. Using the chosen base heights, adjusts an individual sub-plot to be consistent relative to that height. """ all_base = floors[vartype] cur_max = floors[(cat, vartype)] if cur_max > all_base: diff = cur_max - all_base x = max(1, x - diff) return x def get_group_floors(df, cat_labels): """Retrieve the floor for a given row of comparisons, creating a normalized set of differences. We need to set non-zero floors so large numbers (like concordance) don't drown out small numbers (like discordance). This defines the height for a row of comparisons as either the minimum height of any sub-plot, or the maximum difference between higher and lower (plus 10%). 
""" group_maxes = collections.defaultdict(list) group_diffs = collections.defaultdict(list) diff_pad = 0.1 # 10% padding onto difference to avoid large numbers looking like zero for name, group in df.groupby(["category", "variant.type"]): label, stype = name if label in cat_labels: diff = max(group["value"]) - min(group["value"]) group_diffs[stype].append(diff + int(diff_pad * diff)) group_maxes[stype].append(max(group["value"])) group_maxes[name].append(max(group["value"])) out = {} for k, vs in group_maxes.iteritems(): if k in group_diffs: out[k] = max(max(group_diffs[stype]), min(vs)) else: out[k] = min(vs) return out def get_aligner(x, config): return utils.get_in(config, ("algorithm", "aligner"), "") def get_bamprep(x, config): params = bamprep._get_prep_params({"config": {"algorithm": config.get("algorithm", {})}}) if params["realign"] == "gatk" and params["recal"] == "gatk": return "gatk" elif not params["realign"] and not params["recal"]: return "none" elif not params.get("recal") or not params.get("realign"): return "mixed" else: return "" # ## Frequency plots def facet_freq_plot(freq_csv, caller): """Prepare a facet plot of frequencies stratified by variant type and status (TP, FP, FN). Makes a nice plot with the output from validate.freq_summary """ out_file = "%s.png" % os.path.splitext(freq_csv)[0] plt.ioff() sns.set(style='dark') df = pd.read_csv(freq_csv) g = sns.FacetGrid(df, row="vtype", col="valclass", margin_titles=True, col_order=["TP", "FN", "FP"], row_order=["snp", "indel"], sharey=False) g.map(plt.hist, "freq", bins=20, align="left") g.set(xlim=(0.0, 1.0)) g.fig.set_size_inches(8, 6) g.fig.text(.05, .97, caller, horizontalalignment='center', size=14) g.fig.savefig(out_file)
mit
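The floor adjustment implemented by get_group_floors and get_floor_value in the record above is easier to see on toy numbers. The following standalone sketch uses hypothetical counts, not bcbio output, and omits the 10% padding and the max-spread alternative that the real get_group_floors considers; it only shows the core idea of shifting each category down toward a shared baseline so that small discordance counts stay visible next to very large concordance counts.

```python
# Simplified illustration of the value-floor adjustment described above.
# Toy numbers only; the real code derives the baseline from the model grid of
# categories and variant types and pads the spread by 10%.
values = {  # category -> per-caller raw counts (hypothetical)
    "concordant": [50000, 50120, 49950],
    "discordant-extra-total": [150, 310, 95],
}

# Row baseline: the smallest per-category maximum (here the discordant max).
row_base = min(max(v) for v in values.values())

for cat, vals in values.items():
    shift = max(0, max(vals) - row_base)          # only pull large categories down
    plotted = [max(1, v - shift) for v in vals]   # keep every bar at least height 1
    print(cat, plotted)
    # concordant              -> [190, 310, 140]  (caller-to-caller differences preserved)
    # discordant-extra-total  -> [150, 310, 95]   (unchanged)
```

After the shift, all panels in a row share a comparable scale, while the differences between callers within each panel are preserved.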
srepho/BDA_py_demos
demos_ch10/demo10_1.py
19
4102
"""Bayesian data analysis Chapter 10, demo 1 Rejection sampling example """ from __future__ import division import numpy as np from scipy import stats import matplotlib as mpl import matplotlib.pyplot as plt # edit default plot settings (colours from colorbrewer2.org) plt.rc('font', size=14) plt.rc('lines', color='#377eb8', linewidth=2, markeredgewidth=0) plt.rc('axes', color_cycle=('#377eb8','#e41a1c','#4daf4a', '#984ea3','#ff7f00','#ffff33')) plt.rc('patch', facecolor='#bfe2ff') # fake interesting distribution x = np.linspace(-3, 3, 200) r = np.array([ 1.1 , 1.3 , -0.1 , -0.7 , 0.2 , -0.4 , 0.06, -1.7 , 1.7 , 0.3 , 0.7 , 1.6 , -2.06, -0.74, 0.2 , 0.5 ]) # Estimate the density (named q, to emphesize that it does not need to be # normalized). Parameter bw_method=0.48 is used to mimic the outcome of the # kernelp function in Matlab. q = stats.gaussian_kde(r, bw_method=0.48).evaluate(x) # rejection sampling example g_mean = 0 g_std = 1.1 g = stats.norm.pdf(x, loc=g_mean, scale=g_std) # M is computed by discrete approximation M = np.max(q/g) # prescale g *= M # plot the densities plt.figure() plt.plot(x, q) plt.plot(x, g, linestyle='--') plt.fill_between(x, q) plt.legend((r'$q(\theta|y)$', r'$Mg(\theta)$')) plt.yticks(()) plt.title('Rejection sampling') plt.ylim([0, 1.1*g.max()]) # illustrate one sample r1 = -0.8 zi = np.argmin(np.abs(x-r1)) # find the closest grid point plt.plot((x[zi], x[zi]), (0, q[zi]), color='gray') plt.plot((x[zi], x[zi]), (q[zi], g[zi]), color='gray', linestyle='--') r21 = 0.3 * g[zi] r22 = 0.8 * g[zi] plt.plot(r1, r21, marker='o', color='#4daf4a', markersize=12) plt.plot(r1, r22, marker='o', color='#e41a1c', markersize=12) # add annotations plt.text(x[zi], q[zi], r'$\leftarrow \, q(\theta=r|y)$', fontsize=18) plt.text(x[zi], g[zi], r'$\leftarrow \, g(\theta=r)$', fontsize=18) plt.text(r1-0.1, r21, 'accepted', horizontalalignment='right') plt.text(r1-0.1, r22, 'rejected', horizontalalignment='right') # get nsamp samples nsamp = 200 r1 = stats.norm.rvs(size=nsamp, loc=g_mean, scale=g_std) zi = np.argmin(np.abs(x[:,None] - r1), axis=0) r2 = np.random.rand(nsamp) * g[zi] acc = r2 < q[zi] # plot the densities againg plotgrid = mpl.gridspec.GridSpec(2, 1, height_ratios=[5,1]) fig = plt.figure() ax0 = plt.subplot(plotgrid[0]) plt.plot(x, q) plt.plot(x, g, linestyle='--') plt.fill_between(x, q) plt.xticks(()) plt.yticks(()) plt.title('Rejection sampling') plt.ylim([0, 1.1*g.max()]) plt.xlim((x[0],x[-1])) # the samples plt.scatter(r1[~acc], r2[~acc], 40, color='#ff999a') plt.scatter(r1[acc], r2[acc], 40, color='#4daf4a') plt.legend((r'$q(\theta|y)$', r'$Mg(\theta)$', 'rejected', 'accepted')) # only accepted samples ax1 = plt.subplot(plotgrid[1]) plt.scatter(r1[acc], np.ones(np.count_nonzero(acc)), 40, color='#4daf4a', alpha=0.3) plt.yticks(()) plt.xlim((x[0],x[-1])) # add inter-axis lines transf = fig.transFigure.inverted() for i in range(nsamp): if acc[i] and x[0] < r1[i] and r1[i] < x[-1]: coord1 = transf.transform(ax0.transData.transform([r1[i], r2[i]])) coord2 = transf.transform(ax1.transData.transform([r1[i], 1])) fig.lines.append(mpl.lines.Line2D( (coord1[0], coord2[0]), (coord1[1], coord2[1]), transform=fig.transFigure, alpha=0.2 )) # alternative proposal distribution g = np.empty(x.shape) g[x <= -1.5] = np.linspace(q[0], np.max(q[x<=-1.5]), len(x[x<=-1.5])) g[(x > -1.5) & (x <= 0.2)] = np.linspace( np.max(q[x<=-1.5]), np.max(q[(x>-1.5) & (x<=0.2)]), len(x[(x>-1.5) & (x<=0.2)]) ) g[(x > 0.2) & (x <= 2.3)] = np.linspace( np.max(q[(x>-1.5) & (x<=0.2)]), np.max(q[x>2.3]), 
len(x[(x>0.2) & (x<=2.3)]) ) g[x > 2.3] = np.linspace(np.max(q[x>2.3]), q[-1], len(x[x>2.3])) M = np.max(q/g) g *= M # plot plt.figure() plt.plot(x, q) plt.plot(x, g, linestyle='--') plt.fill_between(x, q) plt.legend((r'$q(\theta|y)$', r'$Mg(\theta)$')) plt.yticks(()) plt.title('Rejection sampling - alternative proposal distribution') plt.ylim([0, 1.1*g.max()]) plt.show()
gpl-3.0
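As a compact reference alongside the plotting demo above, this is the bare accept/reject loop for rejection sampling with a scaled Gaussian proposal. It is a generic sketch, not part of the BDA demo: the unnormalized target q and the proposal parameters are arbitrary choices for illustration, and the envelope constant M is again estimated on a discrete grid.

```python
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)

def q(theta):
    """Unnormalized target density (arbitrary two-component mixture)."""
    return (np.exp(-0.5 * (theta - 1.0) ** 2)
            + 0.6 * np.exp(-0.5 * ((theta + 1.5) / 0.7) ** 2))

# Gaussian proposal g and an envelope constant M with M * g >= q on a grid.
g = stats.norm(loc=0.0, scale=1.5)
grid = np.linspace(-6, 6, 2001)
M = np.max(q(grid) / g.pdf(grid))   # discrete approximation, as in the demo

# Rejection sampling: draw theta ~ g, accept with probability q(theta) / (M g(theta)).
n = 10000
theta = rng.normal(loc=0.0, scale=1.5, size=n)
u = rng.uniform(size=n)
accepted = theta[u * M * g.pdf(theta) < q(theta)]

print("acceptance rate: %.2f" % (len(accepted) / n))
```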
robinbach/adv-loop-perf
04modelPython/Regression.py
1
4435
from sklearn import svm from sklearn import linear_model from sklearn.kernel_ridge import KernelRidge import numpy as np import sys import random import matplotlib.pyplot as plt numTrain = 11 def readFile(fPath): data = np.genfromtxt(fPath, delimiter=',') random.shuffle(data) performance = data.T[-2] distortion = data.T[-1] numX = len(data.T) - 2 A = data.T[0:numX] for i in range(len(A)): A[i] = A[i] / max(max(A[i]), 1.0) A = A.T ATrain = A[0:numTrain] ATest = A[numTrain + 1:] performanceTrain = performance[0:numTrain] performanceTest = performance[numTrain + 1:] distortionTrain = distortion[0:numTrain] distortionTest = distortion[numTrain + 1:] return ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest def linearRegression(ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest): lr = linear_model.LinearRegression() lr.fit(ATrain, performanceTrain) performancePred = lr.predict(ATest) performanceErr = sum(abs(performancePred - performanceTest)) / len(performanceTest) print 'linear regression performance error: ', performanceErr lr.fit(ATrain, distortionTrain) distortionPred = lr.predict(ATest) distortionErr = sum(abs(distortionPred - distortionTest)) / len(distortionTest) print 'linear regression distortion error: ', distortionErr histoPlot(performancePred, performanceTest) histoPlot(distortionPred, distortionTest) def SVR(ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest): clf = svm.SVR(C=100, epsilon=0.001) clf.fit(ATrain, performanceTrain) performancePred = clf.predict(ATest) performanceErr = sum(abs(performancePred - performanceTest)) / len(performanceTest) print 'SVR performance error: ', performanceErr clf.fit(ATrain, distortionTrain) distortionPred = clf.predict(ATest) distortionErr = sum(abs(distortionPred - distortionTest)) / len(distortionTest) print 'SVR distortion error: ', distortionErr histoPlot(performancePred, performanceTest) histoPlot(distortionPred, distortionTest) def ridgeRegression(ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest): model = KernelRidge(alpha=0.01, kernel='sigmoid') model.fit(ATrain, performanceTrain) performancePred = model.predict(ATest) performanceErr = sum(abs(performancePred - performanceTest)) / len(performanceTest) print 'Kernel ridge performance error: ', performanceErr model.fit(ATrain, distortionTrain) distortionPred = model.predict(ATest) distortionErr = sum(abs(distortionPred - distortionTest)) / len(distortionTest) print 'Kernel ridge distortion error: ', distortionErr histoPlot(performancePred, performanceTest) histoPlot(distortionPred, distortionTest) def robustRegression(ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest): model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression()) model_ransac.fit(ATrain, performanceTrain) model_ransac.predict(ATest) temp = model_ransac.predict(ATest) performancePred = [] for data in temp: performancePred.append(data[0]) model_ransac.fit(ATrain, distortionTrain) model_ransac.predict(ATest) temp = model_ransac.predict(ATest) distortionPred = [] for data in temp: distortionPred.append(data[0]) histoPlot(performancePred, performanceTest) histoPlot(distortionPred, distortionTest) def histoPlot(pred, actual): x = np.arange(len(actual)) plt.hold(True) rects1 = plt.bar(x, pred, 0.2, color='r') x = x + 0.2 rects2 = plt.bar(x, actual, 0.2) plt.legend((rects1[0], rects2[0]), ('Prediction', 'Actual'), fontsize=20) plt.xlabel('Data Point', 
fontsize=30) plt.ylabel('Value', fontsize=30) performanceErr = sum(abs(pred - actual)) / len(actual) print 'Error: ', performanceErr plt.title('Mean error: ' + ('%.3f' % performanceErr), fontsize=30) plt.hold(False) plt.show() def main(): dataPath = sys.argv[1] ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest = readFile(dataPath) linearRegression(ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest) SVR(ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest) ridgeRegression(ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest) robustRegression(ATrain, performanceTrain, distortionTrain, ATest, performanceTest, distortionTest) if __name__ == '__main__': main()
mit
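The regression script above normalizes the features, holds out part of the data, and reports the mean absolute prediction error for each model. A sketch of the same comparison with the current scikit-learn API, using synthetic data since the loop-performance CSV is not included here, could look like the following; the hyperparameters mirror the script, everything else is illustrative.

```python
import numpy as np
from sklearn.datasets import make_regression
from sklearn.linear_model import LinearRegression, RANSACRegressor
from sklearn.svm import SVR
from sklearn.kernel_ridge import KernelRidge
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error

# Synthetic stand-in for the performance/distortion targets in the original CSV.
X, y = make_regression(n_samples=60, n_features=5, noise=5.0, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

models = {
    "linear": LinearRegression(),
    "svr": SVR(C=100, epsilon=0.001),
    "kernel_ridge": KernelRidge(alpha=0.01, kernel="sigmoid"),
    "ransac": RANSACRegressor(LinearRegression()),
}

for name, model in models.items():
    model.fit(X_train, y_train)
    err = mean_absolute_error(y_test, model.predict(X_test))
    print("%-13s mean abs error: %.3f" % (name, err))
```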
Batch21/pywr
docs/source/conf.py
2
9485
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Pywr documentation build configuration file, created by # sphinx-quickstart on Mon Jun 8 20:10:37 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import shlex import alabaster # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'matplotlib.sphinxext.plot_directive', 'alabaster', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'Pywr' copyright = '2015, Joshua Arnott' author = 'Joshua Arnott' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.1' # The full version, including alpha/beta/rc tags. release = '0.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. 
todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { 'github_user': 'pywr', 'github_repo': 'pywr', } # Add any paths that contain custom themes here, relative to this directory. html_theme_path = [alabaster.get_path()] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. html_sidebars = { '**': [ 'about.html', 'navigation.html', 'relations.html', 'searchbox.html', 'donate.html', ] } # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr' #html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # Now only 'ja' uses this config value #html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. 
If empty, the default will be used. #html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'Pywrdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', # Latex figure (float) alignment #'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'Pywr.tex', 'Pywr Documentation', 'Joshua Arnott', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'pywr', 'Pywr Documentation', [author], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'Pywr', 'Pywr Documentation', author, 'Pywr', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
gpl-3.0
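Most of the conf.py above is sphinx-quickstart boilerplate left at its defaults. For orientation, the settings that actually shape this build fit in a much shorter file; the block below is a trimmed-down restatement of those values (same theme, extensions, and sidebar layout), not the file as shipped.

```python
# Minimal equivalent of the active settings in the Pywr conf.py above (assumed, not verbatim).
import alabaster

project = 'Pywr'
author = 'Joshua Arnott'
version = release = '0.1'
master_doc = 'index'
source_suffix = '.rst'

extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.mathjax',
    'matplotlib.sphinxext.plot_directive',
    'alabaster',
]

html_theme = 'alabaster'
html_theme_path = [alabaster.get_path()]
html_theme_options = {'github_user': 'pywr', 'github_repo': 'pywr'}
html_sidebars = {
    '**': ['about.html', 'navigation.html', 'relations.html',
           'searchbox.html', 'donate.html'],
}
```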
eyurtsev/FlowCytometryTools
FlowCytometryTools/core/docstring.py
1
2522
from __future__ import print_function
import string
import inspect  # stdlib module providing cleandoc; the original pulled it in via "from matplotlib import inspect"


class FormatDict(dict):
    """Adapted from http://stackoverflow.com/questions/11283961/partial-string-formatting"""

    def __missing__(self, key):
        return "{" + key + "}"


class DocReplacer(object):
    """Decorator object for replacing patterns in docstrings using string.format."""

    def __init__(self, auto_dedent=True, allow_partial_formatting=False, **doc_dict):
        '''
        Parameters
        -------------
        auto_dedent : bool
            Flag for automatically dedenting the docstring before formatting.
        allow_partial_formatting : bool
            Enables partial formatting (i.e., not all keys need to be available in the dictionary)
        doc_dict : kwargs
            Pattern in docstring that a key in this dict will be replaced by the corresponding values.

        Example
        -------------
        TODO: Update this documentation
        @DocReplacer({'p1': 'p1 : int\n\tFirst parameter'})
        def foo(p1):
            """
            Some functions.

            Params:
            {p1}
            """

        will result in foo's docstring being:
            """
            Some functions.

            Params:
            p1 : int
                First parameter
            """
        '''
        self.doc_dict = doc_dict
        self.auto_dedent = auto_dedent
        self.allow_partial_formatting = allow_partial_formatting

    def __call__(self, func):
        if func.__doc__:
            doc = func.__doc__
            if self.auto_dedent:
                doc = inspect.cleandoc(doc)
            func.__doc__ = self._format(doc)
        return func

    def replace(self):
        """Reformat values inside self.doc_dict using the other entries of self.doc_dict.

        TODO: Make support for partial_formatting
        """
        doc_dict = self.doc_dict.copy()
        for k, v in doc_dict.items():
            # Original condition was "if '{' and '}' in v", which only tests for '}'.
            if '{' in v and '}' in v:
                self.doc_dict[k] = v.format(**doc_dict)

    def update(self, *args, **kwargs):
        "Assume self.params is a dict and update it with supplied args"
        self.doc_dict.update(*args, **kwargs)

    def _format(self, doc):
        """Formats the docstring using self.doc_dict"""
        if self.allow_partial_formatting:
            mapping = FormatDict(self.doc_dict)
        else:
            mapping = self.doc_dict
        formatter = string.Formatter()
        return formatter.vformat(doc, (), mapping)
mit
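The pairing of a dict subclass with __missing__ and string.Formatter.vformat is what makes the partial formatting in DocReplacer work: known placeholders are substituted, unknown ones are written back unchanged instead of raising KeyError. Here is a minimal standalone demonstration; the docstring text and the 'gate'/'output' placeholders are made up for illustration and are not FlowCytometryTools API.

```python
import string

class FormatDict(dict):
    """Leave unknown {placeholders} untouched instead of raising KeyError."""
    def __missing__(self, key):
        return "{" + key + "}"

doc = "Parameters: {gate} ... Returns: {output}"
mapping = FormatDict({"gate": "gate : Gate\n    The gate to apply."})

print(string.Formatter().vformat(doc, (), mapping))
# Parameters: gate : Gate
#     The gate to apply. ... Returns: {output}
```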
imanolarrieta/RL
rlpy/Domains/HelicopterHover.py
4
16981
"""Helicopter hovering task.""" from .Domain import Domain import numpy as np import rlpy.Tools.transformations as trans from rlpy.Tools.GeneralTools import cartesian import matplotlib.pyplot as plt from matplotlib.patches import FancyArrowPatch, Circle, Ellipse from mpl_toolkits.mplot3d import proj3d __copyright__ = "Copyright 2013, RLPy http://acl.mit.edu/RLPy" __credits__ = ["Alborz Geramifard", "Robert H. Klein", "Christoph Dann", "William Dabney", "Jonathan P. How"] __license__ = "BSD 3-Clause" __author__ = "Christoph Dann <cdann@cdann.de>" class Arrow3D(FancyArrowPatch): """ Helper class for plotting arrows in 3d """ def __init__(self, xs, ys, zs, *args, **kwargs): FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs) self._verts3d = xs, ys, zs def draw(self, renderer): xs3d, ys3d, zs3d = self._verts3d xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M) self.set_positions((xs[0], ys[0]), (xs[1], ys[1])) FancyArrowPatch.draw(self, renderer) class HelicopterHoverExtended(Domain): """ Implementation of a simulator that models one of the Stanford autonomous helicopters (an XCell Tempest helicopter) in the flight regime close to hover. Adapted from the `RL-Community Java Implementation <http://library.rl-community.org/wiki/Helicopter_(Java)>`_ **STATE:** The state of the helicopter is described by a 20-dimensional vector with the following entries: * 0: xerr [helicopter x-coord position - desired x-coord position] -- helicopter's x-axis points forward * 1: yerr [helicopter y-coord position - desired y-coord position] -- helicopter's y-axis points to the right * 2: zerr [helicopter z-coord position - desired z-coord position] -- helicopter's z-axis points down * 3: u [forward velocity] * 4: v [sideways velocity (to the right)] * 5: w [downward velocity] * 6: p [angular rate around helicopter's x axis] * 7: q [angular rate around helicopter's y axis] * 8: r [angular rate around helicopter's z axis] * 9-12: orientation of heli in world as quaterion * 13-18: current noise due to gusts (usually not observable!) * 19: t number of timesteps in current episode **REFERENCE:** .. seealso:: Abbeel, P., Ganapathi, V. & Ng, A. Learning vehicular dynamics, with application to modeling helicopters. Advances in Neural Information Systems (2006). """ MAX_POS = 20. #: [m] maximum deviation in position in each dimension MAX_VEL = 10. #: [m/s] maximum velocity in each dimension MAX_ANG_RATE = 4 * np.pi # : maximum angular velocity MAX_ANG = 1. WIND_MAX = 5. # : maximum gust indensity MIN_QW_BEFORE_HITTING_TERMINAL_STATE = np.cos(30. / 2. * np.pi / 180.) 
wind = np.array([.0, .0, 0.]) #: wind in neutral orientation discount_factor = 0.95 #: discount factor gust_memory = 0.8 domain_fig = None episodeCap = 6000 # model specific parameters from the learned model noise_std = np.array([0.1941, 0.2975, 0.6058, 0.1508, 0.2492, 0.0734]) drag_vel_body = np.array([.18, .43, .49]) drag_ang_rate = np.array([12.78, 10.12, 8.16]) u_coeffs = np.array([33.04, -33.32, 70.54, -42.15]) tail_rotor_side_thrust = -0.54 dt = 0.01 #: length of one timestep continuous_dims = np.arange(20) statespace_limits_full = np.array([[-MAX_POS, MAX_POS]] * 3 + [[-MAX_VEL, MAX_VEL]] * 3 + [[-MAX_ANG_RATE, MAX_ANG_RATE]] * 3 + [[-MAX_ANG, MAX_ANG]] * 4 + [[-2., 2.]] * 6 + [[0, episodeCap]]) statespace_limits = statespace_limits_full # create all combinations of possible actions _action_bounds = np.array([[-2., 2.]] * 4) # maximum action: 2 _actions_dim = np.array( [[-.2, -0.05, 0.05, 0.2]] * 3 + [[0., 0.15, 0.3, 0.5]]) actions = cartesian(list(_actions_dim)) #: all possible actions actions_num = np.prod(actions.shape[0]) def __init__(self, noise_level=1., discount_factor=0.95): self.noise_level = noise_level self.discount_factor = discount_factor super(HelicopterHoverExtended, self).__init__() def s0(self): self.state = np.zeros((20)) self.state[9] = 1. return self.state.copy(), self.isTerminal(), self.possibleActions() def isTerminal(self): s = self.state if np.any(self.statespace_limits_full[:9, 0] > s[:9]) or np.any(self.statespace_limits_full[:9, 1] < s[:9]): return True if len(s) <= 12: w = np.sqrt(1. - np.sum(s[9:12] ** 2)) else: w = s[9] return np.abs(w) < self.MIN_QW_BEFORE_HITTING_TERMINAL_STATE def _get_reward(self): s = self.state if self.isTerminal(): r = -np.sum(self.statespace_limits[:9, 1] ** 2) #r -= np.sum(self.statespace_limits[10:12, 1] ** 2) r -= (1. - self.MIN_QW_BEFORE_HITTING_TERMINAL_STATE ** 2) return r * (self.episodeCap - s[-1]) else: return -np.sum(s[:9] ** 2) - np.sum(s[10:12] ** 2) def possibleActions(self, s=None): return np.arange(self.actions_num) def step(self, a): a = self.actions[a] # make sure the actions are not beyond their limits a = np.maximum(self._action_bounds[:, 0], np.minimum(a, self._action_bounds[:, 1])) pos, vel, ang_rate, ori_bases, q = self._state_in_world(self.state) t = self.state[-1] gust_noise = self.state[13:19] gust_noise = (self.gust_memory * gust_noise + (1. - self.gust_memory) * self.random_state.randn(6) * self.noise_level * self.noise_std) # update noise which simulates gusts for i in range(10): # Euler integration # position pos += self.dt * vel # compute acceleration on the helicopter vel_body = self._in_world_coord(vel, q) wind_body = self._in_world_coord(self.wind, q) wind_body[-1] = 0. 
# the java implementation # has it this way acc_body = -self.drag_vel_body * (vel_body + wind_body) acc_body[-1] += self.u_coeffs[-1] * a[-1] acc_body[1] += self.tail_rotor_side_thrust acc_body += gust_noise[:3] acc = self._in_body_coord(acc_body, q) acc[-1] += 9.81 # gravity # velocity vel += self.dt * acc # orientation tmp = self.dt * ang_rate qdt = trans.quaternion_about_axis(np.linalg.norm(tmp), tmp) q = trans.quaternion_multiply(q, qdt) #assert np.allclose(1., np.sum(q**2)) # angular accelerations ang_acc = -ang_rate * self.drag_ang_rate + \ self.u_coeffs[:3] * a[:3] ang_acc += gust_noise[3:] ang_rate += self.dt * ang_acc st = np.zeros_like(self.state) st[:3] = -self._in_body_coord(pos, q) st[3:6] = self._in_body_coord(vel, q) st[6:9] = ang_rate st[9:13] = q st[13:19] = gust_noise st[-1] = t + 1 self.state = st.copy() return ( self._get_reward(), st, self.isTerminal(), self.possibleActions() ) def _state_in_world(self, s): """ transforms state from body coordinates in world coordinates .. warning:: angular rate still in body frame! """ pos_body = s[:3] vel_body = s[3:6] ang_rate = s[6:9].copy() q = s[9:13].copy() pos = self._in_world_coord(-pos_body, q) vel = self._in_world_coord(vel_body, q) rot = trans.quaternion_matrix(trans.quaternion_conjugate(q))[:3, :3] return pos, vel, ang_rate, rot, q def _in_body_coord(self, p, q): """ q is the inverse quaternion of the rotation of the helicopter in world coordinates """ q_pos = np.zeros((4)) q_pos[1:] = p q_p = trans.quaternion_multiply(trans.quaternion_multiply(q, q_pos), trans.quaternion_conjugate(q)) return q_p[1:] def _in_world_coord(self, p, q): """ q is the inverse quaternion of the rotation of the helicopter in world coordinates """ return self._in_body_coord(p, trans.quaternion_conjugate(q)) def showDomain(self, a=None): s = self.state if a is not None: a = self.actions[a].copy() * 3 # amplify for visualization pos, vel, ang_rate, ori_bases, _ = self._state_in_world(s) coords = np.zeros((3, 3, 2)) + pos[None, :, None] coords[:, :, 1] += ori_bases * 4 u, v = np.mgrid[0:2 * np.pi:10j, 0:2:1.] # rotor coordinates coord = np.zeros([3] + list(u.shape)) coord[0] = .1 * np.sin(u) * v coord[1] = 0. coord[2] = .1 * np.cos(u) * v coord[0] -= 0.8 coord_side = np.einsum("ij,jkl->ikl", np.linalg.pinv(ori_bases), coord) coord_side += pos[:, None, None] coord = np.zeros([3] + list(u.shape)) coord[0] = .6 * np.cos(u) * v coord[1] = .6 * np.sin(u) * v coord[2] = -.4 coord_main = np.einsum("ij,jkl->ikl", np.linalg.pinv(ori_bases), coord) coord_main += pos[:, None, None] style = dict(fc="r", ec="r", lw=2., head_width=0.05, head_length=0.1) if self.domain_fig is None: self.domain_fig = plt.figure(figsize=(12, 8)) # action axes ax1 = plt.subplot2grid((1, 3), (0, 0), frameon=False) ax1.get_xaxis().set_visible(False) ax1.get_yaxis().set_visible(False) lim = 2 # self.MAX_POS ax1.set_xlim(-lim, lim) ax1.set_ylim(-lim, lim) if a is None: a = np.zeros((4)) # main rotor ax1.add_artist(Circle(np.zeros((2)), radius=0.6)) ax1.add_artist(Ellipse(np.array([0, 1.5]), height=0.3, width=0.02)) # TODO make sure the actions are plotted right # main rotor direction? arr1 = ax1.arrow(0, 0, a[0], 0, **style) arr2 = ax1.arrow(0, 0, 0, a[1], **style) # side rotor throttle? 
arr3 = ax1.arrow(0, 1.5, a[2], 0, **style) # main rotor throttle arr4 = ax1.arrow(1.5, 0, 0, a[3], **style) ax1.set_aspect("equal") self.action_arrows = (arr1, arr2, arr3, arr4) self.action_ax = ax1 #ax = self.domain_fig.gca(projection='3d') ax = plt.subplot2grid((1, 3), (0, 1), colspan=2, projection='3d') ax.view_init(elev=np.pi) # print origin x = Arrow3D([0, 2], [0, 0], [0, 0], mutation_scale=30, lw=1, arrowstyle="-|>", color="r") y = Arrow3D([0, 0], [0, 2], [0, 0], mutation_scale=30, lw=1, arrowstyle="-|>", color="b") z = Arrow3D([0, 0], [0, 0], [0, 2], mutation_scale=30, lw=1, arrowstyle="-|>", color="g") ax.add_artist(x) ax.add_artist(y) ax.add_artist(z) # print helicopter coordinate axes x = Arrow3D(*coords[0], mutation_scale=30, lw=2, arrowstyle="-|>", color="r") y = Arrow3D(*coords[1], mutation_scale=30, lw=2, arrowstyle="-|>", color="b") z = Arrow3D(*coords[2], mutation_scale=30, lw=2, arrowstyle="-|>", color="g") ax.add_artist(x) ax.add_artist(y) ax.add_artist(z) self.heli_arrows = (x, y, z) self._wframe_main = ax.plot_wireframe(coord_main[0], coord_main[1], coord_main[2], color="k") self._wframe_side = ax.plot_wireframe(coord_side[0], coord_side[1], coord_side[2], color="k") self._ax = ax ax.set_aspect("equal") lim = 5 # self.MAX_POS ax.set_xlim(-lim, lim) ax.set_ylim(-lim, lim) ax.set_zlim(-lim, lim) ax.view_init(elev=-135) plt.show() else: self.heli_arrows[0]._verts3d = tuple(coords[0]) self.heli_arrows[1]._verts3d = tuple(coords[1]) self.heli_arrows[2]._verts3d = tuple(coords[2]) ax = self._ax ax.collections.remove(self._wframe_main) ax.collections.remove(self._wframe_side) for arr in self.action_arrows: self.action_ax.artists.remove(arr) ax1 = self.action_ax # TODO make sure the actions are plotted right # main rotor direction? arr1 = ax1.arrow(0, 0, a[0], 0, **style) arr2 = ax1.arrow(0, 0, 0, a[1], **style) # side rotor throttle? arr3 = ax1.arrow(0, 1.5, a[2], 0, **style) # main rotor throttle arr4 = ax1.arrow(1.5, 0, 0, a[3], **style) self.action_arrows = (arr1, arr2, arr3, arr4) self._wframe_main = ax.plot_wireframe(coord_main[0], coord_main[1], coord_main[2], color="k") self._wframe_side = ax.plot_wireframe(coord_side[0], coord_side[1], coord_side[2], color="k") ax.set_aspect("equal") lim = 5 # self.MAX_POS ax.set_xlim(-lim, lim) ax.set_ylim(-lim, lim) ax.set_zlim(-lim, lim) ax.view_init(elev=-135) self.domain_fig.canvas.draw() class HelicopterHover(HelicopterHoverExtended): """ .. warning:: This domain has an internal hidden state, as it actually is a POMDP. Besides the 12-dimensional observable state, there is an internal state saved as ``self.hidden_state_`` (time and long-term noise which simulated gusts of wind). be aware of this state if you use this class to produce samples which are not in order Implementation of a simulator that models one of the Stanford autonomous helicopters (an XCell Tempest helicopter) in the flight regime close to hover. 
Adapted from the `RL-Community Java Implementation <http://library.rl-community.org/wiki/Helicopter_(Java)>`_ **STATE:** The state of the helicopter is described by a 12-dimensional vector with the following entries: * 0: xerr [helicopter x-coord position - desired x-coord position] -- helicopter's x-axis points forward * 1: yerr [helicopter y-coord position - desired y-coord position] -- helicopter's y-axis points to the right * 2: zerr [helicopter z-coord position - desired z-coord position] -- helicopter's z-axis points down * 3: u [forward velocity] * 4: v [sideways velocity (to the right)] * 5: w [downward velocity] * 6: p [angular rate around helicopter's x axis] * 7: q [angular rate around helicopter's y axis] * 8: r [angular rate around helicopter's z axis] * 9-11: orientation of the world in the heli system as quaterion **REFERENCE:** .. seealso:: Abbeel, P., Ganapathi, V. & Ng, A. Learning vehicular dynamics, with application to modeling helicopters. Advances in Neural Information Systems (2006). """ episodeCap = 6000 MAX_POS = 20. # m MAX_VEL = 10. # m/s MAX_ANG_RATE = 4 * np.pi MAX_ANG = 1. WIND_MAX = 5. continuous_dims = np.arange(12) statespace_limits = np.array([[-MAX_POS, MAX_POS]] * 3 + [[-MAX_VEL, MAX_VEL]] * 3 + [[-MAX_ANG_RATE, MAX_ANG_RATE]] * 3 + [[-MAX_ANG, MAX_ANG]] * 3) #full_state_ = np.zeros((20)) def s0(self): #self.hidden_state_ = np.zeros((8)) #self.hidden_state_[0] = 1. s_full, term, p_actions = super(HelicopterHover, self).s0() s, _ = self._split_state(s_full) return s, term, p_actions def _split_state(self, s): s_observable = np.zeros((12)) s_observable[:9] = s[:9] s_observable[9:12] = s[10:13] s_hidden = np.zeros((8)) s_hidden[0] = s[9] s_hidden[1:] = s[13:] return s_observable, s_hidden def step(self, a): #s_extended = self._augment_state(s) r, st, term, p_actions = super(HelicopterHover, self).step(a) st, _ = self._split_state(st) return (r, st, term, p_actions)
bsd-3-clause
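The helicopter domain above changes reference frames with quaternion products through rlpy.Tools.transformations: _in_body_coord computes the vector part of q * (0, p) * conj(q). The sketch below reproduces that rotation in plain numpy with quaternions in (w, x, y, z) order; it is a generic rotate-by-quaternion helper, not the domain's code, and note that the domain stores the inverse (world-to-body) quaternion and therefore conjugates q when mapping back to world coordinates.

```python
import numpy as np

def quat_multiply(q1, q0):
    """Hamilton product of two quaternions in (w, x, y, z) order."""
    w1, x1, y1, z1 = q1
    w0, x0, y0, z0 = q0
    return np.array([
        w1*w0 - x1*x0 - y1*y0 - z1*z0,
        w1*x0 + x1*w0 + y1*z0 - z1*y0,
        w1*y0 - x1*z0 + y1*w0 + z1*x0,
        w1*z0 + x1*y0 - y1*x0 + z1*w0,
    ])

def quat_conjugate(q):
    w, x, y, z = q
    return np.array([w, -x, -y, -z])

def rotate(p, q):
    """Rotate vector p by unit quaternion q: vector part of q * (0, p) * conj(q)."""
    qp = np.concatenate(([0.0], p))
    return quat_multiply(quat_multiply(q, qp), quat_conjugate(q))[1:]

# A 90-degree rotation about the z axis maps the x axis onto the y axis.
angle = np.pi / 2
q = np.array([np.cos(angle / 2), 0.0, 0.0, np.sin(angle / 2)])
print(rotate(np.array([1.0, 0.0, 0.0]), q))   # approx [0, 1, 0]
```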
kgullikson88/GSSP_Analyzer
gsspy/fitting.py
1
19991
from __future__ import print_function, division, absolute_import import numpy as np import matplotlib.pyplot as plt import os import sys import subprocess from astropy.io import fits from astropy import time import DataStructures from ._utils import combine_orders, read_grid_points, ensure_dir from .analyzer import GSSP_Analyzer import logging import glob home = os.environ['HOME'] GSSP_EXE = '{}/Applications/GSSP/GSSP_single/GSSP_single'.format(home) GSSP_ABUNDANCE_TABLES = '{}/Applications/GSSPAbundance_Tables/'.format(home) GSSP_MODELS = '/media/ExtraSpace/GSSP_Libraries/LLmodels/' class GSSP_Fitter(object): teff_minstep = 100 logg_minstep = 0.1 feh_minstep = 0.1 vsini_minstep = 10 vmicro_minstep = 0.1 def __init__(self, filename, gssp_exe=None, abund_tab=None, models_dir=None): """ A python wrapper to the GSSP code (must already be installed) Parameters: =========== filename: string The filename of the (flattened) fits spectrum to fit. gssp_exe: string (optional) The full path to the gssp executable file abund_tab: string (optional) The full path to the directory containing GSSP abundance tables. models_dir: string: The full path to the directory containing GSSP atmosphere models. Methods: ========== fit: Fit the parameters """ if gssp_exe is None: gssp_exe = GSSP_EXE if abund_tab is None: abund_tab = GSSP_ABUNDANCE_TABLES if models_dir is None: models_dir = GSSP_MODELS # Read in the file and combine the orders orders = self._read_fits_file(filename) combined = combine_orders(orders) #TODO: Cross-correlate the data to get it close. GSSP might have trouble with huge RVs... # Get the object name/date header = fits.getheader(filename) star = header['OBJECT'] date = header['DATE-OBS'] try: jd = time.Time(date, format='isot', scale='utc').jd except TypeError: jd = time.Time('{}T{}'.format(date, header['UT']), format='isot', scale='utc').jd # Save the data to an ascii file output_basename = '{}-{}'.format(star.replace(' ', ''), jd) np.savetxt('data_sets/{}.txt'.format(output_basename), np.transpose((combined.x, combined.y)), fmt='%.10f') # Save some instance variables self.data = combined self.jd = jd self.starname = star self.output_basename = output_basename self.gssp_exe = os.path.abspath(gssp_exe) self.abundance_table = abund_tab self.model_dir = models_dir self.gssp_gridpoints = read_grid_points(models_dir) def _run_gssp(self, teff_lims=(7000, 30000), teff_step=1000, logg_lims=(3.0, 4.5), logg_step=0.5, feh_lims=(-0.5, 0.5), feh_step=0.5, vsini_lims=(50, 350), vsini_step=50, vmicro_lims=(1, 5), vmicro_step=1, R=80000, ncores=1): """ Coarsely fit the parameters Teff, log(g), and [Fe/H]. """ # First, make sure the inputs are reasonable. 
teff_step = max(teff_step, self.teff_minstep) logg_step = max(logg_step, self.logg_minstep) feh_step = max(feh_step, self.feh_minstep) vsini_step = max(vsini_step, self.vsini_minstep) vmicro_step = max(vmicro_step, self.vmicro_minstep) teff_lims = (min(teff_lims), max(teff_lims)) logg_lims = (min(logg_lims), max(logg_lims)) feh_lims = (min(feh_lims), max(feh_lims)) vsini_lims = (min(vsini_lims), max(vsini_lims)) vmicro_lims = (min(vmicro_lims), max(vmicro_lims)) teff_lims, logg_lims, feh_lims = self._check_grid_limits(teff_lims, logg_lims, feh_lims) # Make the input file for GSSP inp_file=self._make_input_file(teff_lims=teff_lims, teff_step=teff_step, logg_lims=logg_lims, logg_step=logg_step, feh_lims=feh_lims, feh_step=feh_step, vsini_lims=vsini_lims, vsini_step=vsini_step, vmicro_lims=vmicro_lims, vmicro_step=vmicro_step, resolution=R) # Run GSSP subprocess.check_call(['mpirun', '-n', '{}'.format(ncores), '{}'.format(self.gssp_exe), '{}'.format(inp_file)]) # Move the output directory to a new name that won't be overridden output_dir = '{}_output'.format(self.output_basename) ensure_dir(output_dir) for f in glob.glob('output_files/*'): subprocess.check_call(['mv', f, '{}/'.format(output_dir)]) return def fit(self, teff_lims=(7000, 30000), teff_step=1000, logg_lims=(3.0, 4.5), logg_step=0.5, feh_lims=(-0.5, 0.5), feh_step=0.5, vsini_lims=(50, 350), vsini_step=50, vmicro_lims=(1, 5), vmicro_step=1, R=80000, ncores=1, refine=True): """ Fit the stellar parameters with GSSP Parameters: ============= par_lims: iterable with (at least) two objects The limits on the given parameter. 'par' can be one of: 1. teff: The effective temperature 2. logg: The surface gravity 3. feh: The metallicity [Fe/H] 4. vsini: The rotational velocity 5. vmicro: The microturbulent velocity The default values are a very large, very course grid. Consider refining based on spectral type first! par_step: float The initial step size to take in the given parameter. 'par' can be from the same list as above. R: float The spectrograph resolving power (lambda/delta-lambda) ncores: integer, default=1 The number of cores to use in the GSSP run. refine: boolean Should we run GSSP again with a smaller grid after the initial fit? If yes, the best answers will probably be better. 
Returns: ========= A pd.Series object with the best parameters """ # Run GSSP self._run_gssp(teff_lims=teff_lims, teff_step=teff_step, logg_lims=logg_lims, logg_step=logg_step, feh_lims=feh_lims, feh_step=feh_step, vsini_lims=vsini_lims, vsini_step=vsini_step, vmicro_lims=vmicro_lims, vmicro_step=vmicro_step, R=R, ncores=ncores) # Look at the output and save the figures output_dir = '{}_output'.format(self.output_basename) best_pars, figs = GSSP_Analyzer(output_dir).estimate_best_parameters() for par in figs.keys(): fig = figs[par] fig.savefig(os.path.join(output_dir, '{}_course.pdf'.format(par))) plt.close('all') if not refine: return best_pars # If we get here, we should restrict the grid near the # best solution and fit again teff_lims = self._get_refined_limits(lower=best_pars['1sig_CI_lower_Teff'], upper=best_pars['1sig_CI_upper_Teff'], values=self.gssp_gridpoints.teff) logg_lims = self._get_refined_limits(lower=best_pars['1sig_CI_lower_logg'], upper=best_pars['1sig_CI_upper_logg'], values=self.gssp_gridpoints.logg) feh_lims = self._get_refined_limits(lower=best_pars['1sig_CI_lower_feh'], upper=best_pars['1sig_CI_upper_feh'], values=self.gssp_gridpoints.feh) vsini_lower = best_pars.best_vsini*(1-1.5) + 1.5*best_pars['1sig_CI_lower_vsini'] vsini_upper = best_pars.best_vsini*(1-1.5) + 1.5*best_pars['1sig_CI_upper_vsini'] vsini_lims = (max(10, vsini_lower), min(400, vsini_upper)) vsini_step = max(self.vsini_minstep, (vsini_lims[1] - vsini_lims[0])/10) vmicro_lims = (best_pars.micro_turb, best_pars.micro_turb) # Rename the files in the output directory so they don't get overwritten file_list = ['CCF.dat', 'Chi2_table.dat', 'Observed_spectrum.dat', 'Synthetic_best_fit.rgs'] ensure_dir(os.path.join(output_dir, 'course_output', '')) for f in file_list: original_fname = os.path.join(output_dir, f) new_fname = os.path.join(output_dir, 'course_output', f) subprocess.check_call(['mv', original_fname, new_fname]) # Run GSSP on the refined grid self._run_gssp(teff_lims=teff_lims, teff_step=self.teff_minstep, logg_lims=logg_lims, logg_step=self.logg_minstep, feh_lims=feh_lims, feh_step=self.feh_minstep, vsini_lims=vsini_lims, vsini_step=round(vsini_step), vmicro_lims=vmicro_lims, vmicro_step=vmicro_step, R=R, ncores=ncores) best_pars, figs = GSSP_Analyzer(output_dir).estimate_best_parameters() for par in figs.keys(): fig = figs[par] fig.savefig(os.path.join(output_dir, '{}_fine.pdf'.format(par))) fig.close() return best_pars def _check_grid_limits_old(self, teff_lims, logg_lims, feh_lims): df = self.gssp_gridpoints[['teff', 'logg', 'feh']].drop_duplicates() # First, check if the limits are do-able lower = df.loc[(df.teff <= teff_lims[0]) & (df.logg <= logg_lims[0]) & (df.feh <= feh_lims[0])] upper = df.loc[(df.teff >= teff_lims[1]) & (df.logg >= logg_lims[1]) & (df.feh >= feh_lims[1])] if len(upper) >= 1 and len(lower) >= 1: return teff_lims, logg_lims, feh_lims # If we get here, there is a problem... # Check temperature first: if not (len(df.loc[df.teff <= teff_lims[0]]) >= 1 and len(df.loc[df.teff >= teff_lims[1]]) >= 1): # Temperature grid is no good. 
low_teff, high_teff = df.teff.min(), df.teff.max() print('The temperature grid is not available in the model library!') print('You wanted temperatures from {} - {}'.format(*teff_lims)) print('The model grid extends from {} - {}'.format(low_teff, high_teff)) new_teff_lims = (max(low_teff, teff_lims[0]), min(high_teff, teff_lims[1])) print('Resetting temperature limits to {} - {}'.format(*new_teff_lims)) return self._check_grid_limits(new_teff_lims, logg_lims, feh_lims) # Check log(g) next: teff_df = df.loc[(df.teff >= teff_lims[0]) & (df.teff <= teff_lims[1])] if not (len(teff_df.loc[df.logg <= logg_lims[0]]) >= 1 and len(teff_df.loc[df.logg >= logg_lims[1]]) >= 1): # Temperature grid is no good. low_logg, high_logg = df.logg.min(), df.logg.max() print('The log(g) grid is not available in the model library!') print('You wanted log(g) from {} - {}'.format(*logg_lims)) print('The model grid extends from {} - {}'.format(low_logg, high_logg)) new_logg_lims = (max(low_logg, logg_lims[0]), min(high_logg, logg_lims[1])) print('Resetting log(g) limits to {} - {}'.format(*new_logg_lims)) return self._check_grid_limits(teff_lims, new_logg_lims, feh_lims) # Finally, check [Fe/H]: subset_df = df.loc[(df.teff >= teff_lims[0]) & (df.teff <= teff_lims[1]) * (df.logg >= logg_lims[0]) & (df.logg <= logg_lims[1])] if not (len(subset_df.loc[df.feh <= feh_lims[0]]) >= 1 and len(subset_df.loc[df.feh >= feh_lims[1]]) >= 1): # Temperature grid is no good. low_feh, high_feh = df.feh.min(), df.feh.max() print('The [Fe/H] grid is not available in the model library!') print('You wanted [Fe/H] from {} - {}'.format(*feh_lims)) print('The model grid extends from {} - {}'.format(low_feh, high_feh)) new_feh_lims = (max(low_feh, feh_lims[0]), min(high_feh, feh_lims[1])) print('Resetting [Fe/H] limits to {} - {}'.format(*new_feh_lims)) return self._check_grid_limits(teff_lims, logg_lims, new_feh_lims) # We should never get here raise ValueError('Something weird happened while checking limits!') def _check_grid_limits(self, teff_lims, logg_lims, feh_lims): df = self.gssp_gridpoints[['teff', 'logg', 'feh']].drop_duplicates() # First, check if the limits are do-able as is lower = df.loc[(df.teff == teff_lims[0]) & (df.feh == feh_lims[0])] upper = df.loc[(df.teff == teff_lims[1]) & (df.feh == feh_lims[1])] if (lower.logg.min() <= logg_lims[0] and lower.logg.max() >= logg_lims[1] and upper.logg.min() <= logg_lims[0] and upper.logg.max() >= logg_lims[1]): return teff_lims, logg_lims, feh_lims # If we get here, there is a problem... 
# Check temperature first: low_teff, high_teff = df.teff.min(), df.teff.max() if low_teff > teff_lims[0] or high_teff < teff_lims[1]: print('The temperature grid is not available in the model library!') print('You wanted temperatures from {} - {}'.format(*teff_lims)) print('The model grid extends from {} - {}'.format(low_teff, high_teff)) new_teff_lims = (max(low_teff, teff_lims[0]), min(high_teff, teff_lims[1])) print('Resetting temperature limits to {} - {}'.format(*new_teff_lims)) return self._check_grid_limits(new_teff_lims, logg_lims, feh_lims) # Check [Fe/H] next subset_df = df.loc[(df.teff >= teff_lims[0]) & (df.teff <= teff_lims[1])] low_feh, high_feh = subset_df.feh.min(), subset_df.feh.max() if low_feh > feh_lims[0] or high_feh < feh_lims[1]: print('The [Fe/H] grid is not available in the model library!') print('You wanted [Fe/H] from {} - {}'.format(*feh_lims)) print('The model grid extends from {} - {}'.format(low_feh, high_feh)) new_feh_lims = (max(low_feh, feh_lims[0]), min(high_feh, feh_lims[1])) print('Resetting [Fe/H] limits to {} - {}'.format(*new_feh_lims)) return self._check_grid_limits(teff_lims, logg_lims, new_feh_lims) # Finally, check log(g) subset_df = subset_df.loc[(subset_df.feh >= feh_lims[0]) & (subset_df.feh <= feh_lims[1])] low_logg, high_logg = subset_df.logg.min(), subset_df.logg.max() if low_logg > logg_lims[0] or high_logg < logg_lims[1]: print('The log(g) grid is not available in the model library!') print('You wanted log(g) from {} - {}'.format(*logg_lims)) print('The model grid extends from {} - {}'.format(low_logg, high_logg)) new_logg_lims = (max(low_logg, logg_lims[0]), min(high_logg, logg_lims[1])) print('Resetting log(g) limits to {} - {}'.format(*new_logg_lims)) return self._check_grid_limits(teff_lims, new_logg_lims, feh_lims) # We should never get here raise ValueError('Something weird happened while checking limits!') def _get_refined_limits(self, lower, upper, values): """ Get the items in the 'values' array that are just less than lower and just more than upper. """ unique_values = sorted(np.unique(values)) l_idx = np.searchsorted(unique_values, lower, side='left') r_idx = np.searchsorted(unique_values, upper, side='right') if l_idx > 0: l_idx -= 1 if r_idx < len(unique_values) - 1: r_idx += 1 return unique_values[l_idx], unique_values[r_idx] def _read_fits_file(self, fname): orders = [] hdulist = fits.open(fname) for i, hdu in enumerate(hdulist[1:]): xypt = DataStructures.xypoint(x=hdu.data['wavelength'], y=hdu.data['flux'], cont=hdu.data['continuum'], err=hdu.data['error']) xypt.x *= 10 #Convert from nanometers to angstrom orders.append(xypt) return orders def _make_input_file(self, teff_lims, teff_step, logg_lims, logg_step, feh_lims, feh_step, vsini_lims, vsini_step, vmicro_lims, vmicro_step, resolution): """ Make the input file for the given star """ output_string = '{:.1f} {:.0f} {:.1f}\n'.format(teff_lims[0], teff_step, teff_lims[-1]) output_string += '{:.1f} {:.1f} {:.1f}\n'.format(logg_lims[0], logg_step, logg_lims[1]) output_string += '{:.1f} {:.1f} {:.1f}\n'.format(vmicro_lims[0], vmicro_step, vmicro_lims[1]) output_string += '{:.1f} {:.1f} {:.1f}\n'.format(vsini_lims[0], vsini_step, vsini_lims[1]) output_string += "skip 0.03 0.02 0.07 !dilution factor\n" output_string += 'skip {:.1f} {:.1f} {:.1f}\n'.format(feh_lims[0], feh_step, feh_lims[1]) output_string += 'He 0.04 0.005 0.06 ! 
Individual abundance\n' output_string += '0.0 {:.0f}\n'.format(resolution) output_string += '{}\n{}\n'.format(self.abundance_table, self.model_dir) output_string += '2 1 !atmosphere model vmicro and mass\n' output_string += 'ST ! model atmosphere chemical composition flag\n' dx = self.data.x[1] - self.data.x[0] output_string += '1 {:.5f} fit\n'.format(dx) output_string += 'data_sets/{}.txt\n'.format(self.output_basename) output_string += '0.5 0.99 0.0 adjust ! RV determination stuff\n' xmin, xmax = self.data.x[0]-1, self.data.x[-1]+1 output_string += '{:.1f} {:.1f}\n'.format(xmin, xmax) outfilename = '{}.inp'.format(self.output_basename) with open(outfilename, 'w') as outfile: outfile.write(output_string) return outfilename
mit
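The grid-refinement step in _get_refined_limits above snaps a coarse-run confidence interval outward to neighbouring points of the discrete model grid using np.searchsorted. The helper below isolates that logic with a hypothetical Teff grid (the real grid points come from the GSSP model library); the final clamp on the upper index is an added safety guard, not in the original.

```python
import numpy as np

def refine_limits(lower, upper, grid_values):
    """Expand (lower, upper) outward to neighbouring points of a discrete grid."""
    grid = np.array(sorted(set(grid_values)))
    l_idx = np.searchsorted(grid, lower, side='left')
    r_idx = np.searchsorted(grid, upper, side='right')
    if l_idx > 0:
        l_idx -= 1                          # step one grid point below the lower bound
    if r_idx < len(grid) - 1:
        r_idx += 1                          # ...and one above the upper bound
    r_idx = min(r_idx, len(grid) - 1)       # guard against running off the grid (added here)
    return grid[l_idx], grid[r_idx]

# Hypothetical Teff grid (K) and a 1-sigma interval from a coarse run.
teff_grid = np.arange(7000, 13001, 1000)
lo, hi = refine_limits(9350, 9800, teff_grid)
print(int(lo), int(hi))   # 9000 11000
```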
tomka/CATMAID
django/applications/catmaid/control/useranalytics.py
2
20452
# -*- coding: utf-8 -*- from datetime import timedelta, datetime from dateutil import parser as dateparser import io import logging import numpy as np import pytz from typing import Any, Dict, List, Tuple from django.db import connection from django.http import HttpRequest, HttpResponse from django.utils import timezone from django.shortcuts import get_object_or_404 from django.views.decorators.cache import never_cache from catmaid.control.common import get_request_bool from catmaid.control.authentication import requires_user_role from catmaid.models import Connector, Project, Treenode, Review, UserRole logger = logging.getLogger(__name__) try: import matplotlib # Use a noninteractive backend since most CATMAID instances are headless. matplotlib.use('svg') import matplotlib.pyplot as plt from matplotlib.dates import DateFormatter, DayLocator from pylab import figure from matplotlib.backends.backend_svg import FigureCanvasSVG except ImportError: logger.warning("CATMAID was unable to load the matplotlib module. " "User analytics will not be available") class Bout(object): """ Represents one bout, based on a list of events. The first event ist the start date/time, the last event the end. """ def __init__(self, start, end=None): self.events = [start] if end: self.events.append(end) def addEvent(self, e): """ Increments the event counter. """ self.events.append(e) @property def nrEvents(self): return len(self.events) @property def start(self): return self.events[0] @property def end(self): return self.events[-1] def __str__(self): return "Bout with %s events [%s, %s]" % \ (self.nrEvents, self.start, self.end) @never_cache @requires_user_role(UserRole.Browse) def plot_useranalytics(request:HttpRequest, project_id) -> HttpResponse: """ Creates an SVG image containing different plots for analzing the performance of individual users over time. """ time_zone = pytz.utc userid = request.GET.get('userid', None) if not (userid and userid.strip()): raise ValueError("Need user ID") project = get_object_or_404(Project, pk=project_id) if project_id else None all_writes = get_request_bool(request.GET, 'all_writes', False) maxInactivity = int(request.GET.get('max_inactivity', 3)) # Get the start date for the query, defaulting to 7 days ago. start_date = request.GET.get('start', None) if start_date: start_date = dateparser.parse(start_date) start_date = time_zone.localize(start_date) else: with timezone.override(time_zone): start_date = timezone.now() - timedelta(7) # Get the end date for the query, defaulting to now. end_date = request.GET.get('end', None) if end_date: end_date = dateparser.parse(end_date) end_date = time_zone.localize(end_date) else: with timezone.override(time_zone): end_date = timezone.now() # The API is inclusive and should return stats for the end date as # well. The actual query is easier with an exclusive end and therefore # the end date is set to the beginning of the next day. end_date = end_date + timedelta(days=1) if request.user.is_superuser or \ project and request.user.has_perm('can_browse', project): f = generateReport( userid, project_id, maxInactivity, start_date, end_date, all_writes ) else: f = generateErrorImage('You lack permissions to view this report.') # Use raw text rather than SVG fonts or pathing. 
plt.rcParams['svg.fonttype'] = 'none' buf = io.BytesIO() plt.savefig(buf, format='svg') return HttpResponse(buf.getvalue(), content_type='image/svg+xml') def eventTimes(user_id, project_id, start_date, end_date, all_writes=True) -> Dict[str, Any]: """ Returns a tuple containing a list of tree node edition times, connector edition times and tree node review times within the date range specified where the editor/reviewer is the given user. """ dr = (start_date, end_date) tns = Treenode.objects.filter( editor_id=user_id, edition_time__range=dr) cns = Connector.objects.filter( editor_id=user_id, edition_time__range=dr) rns = Review.objects.filter( reviewer_id=user_id, review_time__range=dr) if project_id: tns = tns.filter(project_id=project_id) cns = cns.filter(project_id=project_id) rns = rns.filter(project_id=project_id) tns = tns.values_list('edition_time', flat=True) cns = cns.values_list('edition_time', flat=True) rns = rns.values_list('review_time', flat=True) events = { 'treenode_events': list(tns), 'connector_events': list(cns), 'review_events': list(rns) } if all_writes: if project_id: params:Tuple[str, ...] = (start_date, end_date, user_id, project_id) project_filter = "AND project_id = %s" else: params = (start_date, end_date, user_id) project_filter = "" # Query transaction log. This makes this feature only useful of history # tracking is available. cursor = connection.cursor() cursor.execute(f""" SELECT execution_time FROM catmaid_transaction_info WHERE execution_time >= %s AND execution_time <= %s AND user_id = %s {project_filter} """, params) events['write_events'] = [r[0] for r in cursor.fetchall()] return events def eventsPerInterval(times, start_date, end_date, interval='day') -> Tuple[np.ndarray, List]: """ Creates a histogram of how many events fall into all intervals between <start_data> and <end_date>. The interval type can be day, hour and halfhour. Returned is a tuple containing two elemens: the histogram and a time axis, labeling every bin. """ if interval=='day': intervalsPerDay = 1 secondsPerInterval = 86400 elif interval=='hour': intervalsPerDay = 24 secondsPerInterval = 3600 elif interval=='halfhour': intervalsPerDay = 48 secondsPerInterval = 1800 else: raise ValueError('Interval options are day, hour, or halfhour') # Generate axis daycount = (end_date - start_date).days dt = timedelta(0, secondsPerInterval) timeaxis = [start_date + n*dt for n in range(intervalsPerDay * daycount)] # Calculate bins timebins = np.zeros(intervalsPerDay * daycount) intervalsPerSecond = 1.0 / secondsPerInterval for t in times: i = int((t - start_date).total_seconds() * intervalsPerSecond) timebins[i] += 1 return timebins, timeaxis def activeTimes(alltimes, gapThresh): """ Goes through the sorted array of time differences between all events stored in <alltimes>. If two events are closer together than <gapThresh> minutes, they are counted as events within one bout. A tuple containing a list of bout start dates as well as a list with total numbers of events for each bout is returned. """ # Sort all events and create a list of (time) differences between them alltimes.sort() dts = np.diff(alltimes) # Threshold between to events to be counted as separate bouts (seconds) threshold = 60 * gapThresh # Indicates whether we are currently in a bout and since we haven't even # looked at the first event, we are initially not. 
bout = None # Go through all events for i, e in enumerate(alltimes): if i > 0 and dts[i-1].total_seconds() < threshold: # Increment current bout's event counter and continue with the # next element as long as the time difference to the next # element is below our threshold. bout.addEvent(e) # type: ignore # mypy cannot prove bout will not be None continue else: # Return current bout (not available in first iteration) and create # a new one. if bout: yield bout bout = Bout(e) # Return last bout, if it hasn't been returned, yet if bout: yield bout def activeTimesPerDay(active_bouts) -> Tuple[Any, List]: """ Creates a tuple containing the active time in hours for every day between the first event of the first bout and the last event of the last bout as well as a list with the date for every day. """ # Return right away if there are no bouts if not active_bouts: return [], [] # Find first event of first bout daystart = active_bouts[0].start.replace( hour=0, minute=0, second=0, microsecond=0) # Find last event of last bout dayend = active_bouts[-1].end # Get total number of between first event and last event numdays = (dayend - daystart).days + 1 # Create a list of dates for every day between first and last event timeaxis = [daystart.date() + timedelta(d) for d in range(numdays)] # Calculate the netto active time for each day net_active_time = np.array(np.zeros(numdays)) for bout in active_bouts: active_time = (bout.end - bout.start).total_seconds() net_active_time[(bout.start - daystart).days] += active_time # Return a tuple containing the active time for every # day in hours and the list of days. return np.divide(net_active_time, 3600), timeaxis def singleDayEvents(alltimes, start_hour, end_hour) -> Tuple[Any, List]: alltimes.sort() timeaxis = [n for n in np.add(start_hour,range(end_hour-start_hour+1))] activity = np.zeros(end_hour-start_hour+1) for a in alltimes: if a.hour >= start_hour: if a.hour < end_hour: activity[a.hour-start_hour] += 1 return np.true_divide(activity, (alltimes[-1] - alltimes[0]).days), timeaxis def singleDayActiveness(activebouts, increment, start_hour, end_hour) -> Tuple[Any, Any]: """ Returns a ... for all bouts between <start_hour> and <end_hour> of the day. """ # Return right away, when there are no bouts given if not activebouts: return [], [] # Make sure 60 can be cleanly devided by <incement> if np.mod(60, increment) > 0: raise ValueError('Increments must divide 60 evenly') # Some constants stepsPerHour = 60 / increment hoursConsidered = (end_hour - start_hour) + 1 daysConsidered = (activebouts[-1].end - activebouts[0].start).days + 1 # Get start of current day starttime = timezone.now().replace(hour=start_hour,minute=0,second=0,microsecond=0) # Create time axis list with entry for every <increment> minutes between # <start_hour> and <end_hour>. timeaxis = [starttime + timedelta(0, 0, 0, 0, n * increment) \ for n in range(stepsPerHour * hoursConsidered)] # Loop through all days considered to find number of weekend days weekendCorrection = 0 for d in range(daysConsidered): # TODO: Why is 0 and 6 used for comparison? saturday = (activebouts[0].start + timedelta(d)).isoweekday() == 0 sunday = (activebouts[0].start + timedelta(d)).isoweekday() == 6 if saturday or sunday: weekendCorrection += 1 # Initialize list for minutes per period with zeros durPerPeriod = np.zeros(stepsPerHour * hoursConsidered) for bout in activebouts: # Ignore bouts what start after requested <end_hour> or end before # requested <start_hour>. 
if bout.start.hour > end_hour: continue elif bout.end.hour < start_hour: continue # Crop start and end times of every valid bout to request period elif bout.start.hour < start_hour: bout.start = bout.start.replace(hour=start_hour,minute=0,second=0,microsecond=0) elif bout.end.hour > end_hour: bout.end = bout.end.replace(hour=end_hour,minute=0,second=0,microsecond=0) # Go through every sub bout, defined by periods if <increment> minutes, # and store the number of minutes for every time-fraction considered. for subbout in splitBout(bout,increment): subboutSeconds = (subbout.end - subbout.start).total_seconds() i = stepsPerHour * (subbout.start.hour - start_hour) + \ subbout.start.minute / increment durPerPeriod[i] += np.true_divide(subboutSeconds, 60) # Divide each period (in seconds) by ? n = increment * (daysConsidered - weekendCorrection) durations = np.true_divide(durPerPeriod, n) # Return a tuple containing a list durations and a list of timepoints return durations, timeaxis def splitBout(bout,increment) -> List[Bout]: """ Splits one bout in periods of <increment> minutes. """ if np.mod(60, increment) > 0: raise RuntimeError('Increments must divide 60 evenly') boutListOut = [] currtime = bout.start nexttime = bout.start while nexttime < bout.end: basemin = increment * ( currtime.minute / increment ) nexttime = currtime.replace(minute=0,second=0,microsecond=0) + timedelta(0,0,0,0,basemin+increment) if nexttime > bout.end: nexttime = bout.end boutListOut.append(Bout(currtime, nexttime)) currtime = nexttime return boutListOut def generateErrorImage(msg) -> "matplotlib.figure.Figure": """ Creates an empty image (based on image nr. 1) and adds a message to it. """ fig = plt.figure(1, figsize=(6,6)) fig.clf() fig.suptitle(msg) return fig def generateReport( user_id, project_id, activeTimeThresh, start_date, end_date, all_writes=True ) -> "matplotlib.figure.Figure": """ nts: node times cts: connector times rts: review times """ events = eventTimes(user_id, project_id, start_date, end_date, all_writes) nts = events['treenode_events'] cts = events['connector_events'] rts = events['review_events'] # If no nodes have been found, return an image with a descriptive text. if len(nts) == 0: return generateErrorImage("No tree nodes were edited during the " + "defined period if time.") annotationEvents, ae_timeaxis = eventsPerInterval( nts + cts, start_date, end_date ) reviewEvents, re_timeaxis = eventsPerInterval( rts, start_date, end_date ) if all_writes: write_events = events['write_events'] other_write_events = write_events writeEvents, we_timeaxis = eventsPerInterval(other_write_events, start_date, end_date) else: other_write_events = [] activeBouts = list(activeTimes( nts+cts+rts+other_write_events, activeTimeThresh )) netActiveTime, at_timeaxis = activeTimesPerDay( activeBouts ) dayformat = DateFormatter('%b %d') fig = plt.figure(figsize=(9.6, 8)) # Top left plot: created and edited nodes per day ax1 = plt.subplot2grid((2,2), (0,0)) # If other writes should be shown, draw accumulated write bar first. This # makes the regular bar draw over it, so that only the difference is # visible, which is exactly what we want. 
if all_writes: we = ax1.bar(we_timeaxis, writeEvents, color='#00AA00', align='edge') an = ax1.bar(ae_timeaxis, annotationEvents, color='#0000AA', align='edge') rv = ax1.bar(re_timeaxis, reviewEvents, bottom=annotationEvents, color='#AA0000', align='edge') ax1.set_xlim((start_date,end_date)) if all_writes: ax1.legend( (we, an, rv), ('Other changes','Annotated', 'Reviewed'), loc=2) ax1.set_ylabel('Nodes and changes') else: ax1.legend( (an, rv), ('Annotated', 'Reviewed'), loc=2 ) ax1.set_ylabel('Nodes') yl = ax1.get_yticklabels() plt.setp(yl, fontsize=10) ax1.xaxis.set_major_formatter(dayformat) xl = ax1.get_xticklabels() plt.setp(xl, rotation=30, fontsize=10) ax1.set_title('Edit events', fontsize=10) # Bottom left plot: net active time per day ax2 = plt.subplot2grid((2,2), (1,0)) ax2.bar(at_timeaxis, netActiveTime, color='k', align='edge') ax2.set_xlim((start_date,end_date)) ax2.set_ylabel('Hours') yl = ax2.get_yticklabels() plt.setp(yl, fontsize=10) ax2.xaxis.set_major_formatter(dayformat) xl = ax2.get_xticklabels() plt.setp(xl, rotation=30, fontsize=10) ax2.set_title('Net daily active time', fontsize=10) """ ax3 = fig.add_subplot(223) ax3 = eventsPerIntervalPerDayPlot(ax3, rts+nts+cts, start_date, end_date, 30 ) """ # Right column plot: bouts over days ax4 = plt.subplot2grid((2,2), (0,1), rowspan=2) ax4 = dailyActivePlotFigure(activeBouts, ax4, start_date, end_date) yl = ax4.get_yticklabels() plt.setp(yl, fontsize=10) ax4.xaxis.set_major_formatter(dayformat) xl = ax4.get_xticklabels() plt.setp(xl, rotation=30, fontsize=10) ax4.set_title('Active Bouts', fontsize=10) yl = ax4.get_yticklabels() plt.setp(yl, fontsize=10) ax4.set_ylabel('Time (24 hr)') fig.set_tight_layout(True) return fig def dailyActivePlotFigure(activebouts, ax:"matplotlib.axes.Axes", start_date, end_date) -> "matplotlib.axes.Axes": """ Draws a plot of all bouts during each day between <start_date> and <end_date> to the plot given by <ax>. """ # Y axis: Draw a line for each two hours in a day and set ticks accordingly for i in range(2, 24, 2): ax.axhline(i, color='#AAAAAA', linestyle = ':') ax.axhspan(8, 18, facecolor='#999999', alpha=0.25) ax.set_yticks(range(0, 25, 2)) # X axis: Ticks and labels for every day ax.xaxis.set_major_locator(DayLocator()) # Draw all bouts for bout in activebouts: # Ignore bouts that span accross midnight # TODO: Draw midnight spanning bouts, too. 
if bout.start.day == bout.end.day: isodate = bout.start.isocalendar() ax.bar( bout.start.replace(hour=0, minute=0, second=0, microsecond=0), np.true_divide((bout.end-bout.start).total_seconds(), 3600), bottom=bout.start.hour + bout.start.minute/60.0 + bout.start.second/3600.0, alpha=0.5, color='#0000AA', align='edge', edgecolor="k") # Set Axis limits ax.set_ylim((0, 24)) ax.invert_yaxis() ax.set_xlim((start_date, end_date)) return ax def eventsPerIntervalPerDayPlot(ax, times, start_date, end_date, interval=60) -> "matplotlib.axes.Axes": if np.mod(24 * 60, interval) > 0: raise ValueError('Interval in minutes must divide the day evenly') daycount = (end_date-start_date).days timebins = {} for i in range(daycount): timebins[i] = np.zeros(24 * 60 / interval) dayList = [] daylabels = [] for i in range(daycount): day = start_date + timedelta( i ) dayList.append( day ) daylabels.append( str(day.month) + '/' + str(day.day) ) timeaxis = [i for i in range(24 * 60 / interval )] timelabels = [] for i in range(int(24 * 60 / 30)): if np.mod(i,2)==0: timelabels.append( str(i/2) + ':00' ) else: timelabels.append( str( (i-1)/2 ) + ':30' ) for t in times: timebins[np.floor((t-start_date).days)][ np.floor(np.divide(t.hour*60+t.minute, interval)) ] += 1 meandat = np.zeros(len(timebins[0])) ignoredDays = 0 ind = 0 cm = plt.get_cmap('jet',len(timebins)) dats = [] for dat in timebins.values(): if np.sum(dat)==0: ignoredDays += 1 else: tmp, = ax.plot(timeaxis, dat, marker='s', linestyle='-.',alpha=0.5, color=cm(ind)) dats.append(tmp) meandat += dat ind += 1 meandat = np.divide(meandat, daycount-ignoredDays) tmp, = ax.plot( timeaxis, meandat, color='k', linewidth=4, linestyle='-') dats.append(tmp) daylabels.append('Mean') ax.set_xticks(timeaxis) ax.set_xticklabels(timelabels) xl = ax.get_xticklabels() plt.setp(xl, rotation=30, fontsize=10) yl = ax.get_yticklabels() plt.setp(yl, fontsize=10) ax.set_ylabel('Events',fontsize=10) ax.set_xlim(8 * 60 / interval, 19 * 60 / interval) ax.legend(dats,daylabels,loc=2,frameon=False) return ax
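# A minimal, self-contained sketch (not part of CATMAID itself) showing how the bout
# grouping above behaves: events closer together than <gapThresh> minutes end up in the
# same Bout, a larger gap starts a new one. The timestamps and the 3 minute threshold
# below are made up purely for illustration.
if __name__ == '__main__':
    from datetime import datetime, timedelta
    t0 = datetime(2020, 1, 1, 10, 0)
    # Three events within two minutes of each other, then one more after a 30 minute gap.
    example_events = [t0, t0 + timedelta(minutes=1), t0 + timedelta(minutes=2),
                      t0 + timedelta(minutes=32)]
    for example_bout in activeTimes(example_events, gapThresh=3):
        # Expected output: one bout with 3 events, then one single-event bout.
        print(example_bout)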
gpl-3.0
enriquecoronadozu/HMPy
src/borrar/modificar/hmpy.py
1
6228
#!/usr/bin/env python """@See preprocessed data """ from numpy import* import matplotlib.pyplot as plt from GestureModel import* from Creator import* from Classifier import* def plotResults(gr_points,gr_sig, b_points,b_sig,name_model): from scipy import linalg import matplotlib.pyplot as plt gr_points = gr_points.transpose() b_points = b_points.transpose() gr_sigma = [] b_sigma = [] n,m = gr_points.shape maximum = zeros((m)) minimum = zeros((m)) x = arange(0,m,1) for i in range(m): gr_sigma.append(gr_sig[i*3:i*3+3]) b_sigma.append(b_sig[i*3:i*3+3]) for i in range(m): sigma = 3.*linalg.sqrtm(gr_sigma[i]) maximum[i] = gr_points[0,i]+ sigma[0,0]; minimum[i] = gr_points[0,i]- sigma[0,0]; fig2 = plt.figure() import matplotlib.pyplot as plt plt.fill_between(x, maximum, minimum,lw=2, alpha=0.5 ) plt.plot(x, gr_points[0]) plt.savefig(name_model+ "_gravity_x_axis.png") for i in range(m): sigma = 3.*linalg.sqrtm(gr_sigma[i]) maximum[i] = gr_points[1,i]+ sigma[1,1]; minimum[i] = gr_points[1,i]- sigma[1,1]; fig3 = plt.figure() plt.fill_between(x, maximum, minimum,lw=2, alpha=0.5 ) plt.plot(x, gr_points[1]) plt.savefig(name_model+ "_gravity_y_axis.png") for i in range(m): sigma = 3.*linalg.sqrtm(gr_sigma[i]) maximum[i] = gr_points[2,i]+ sigma[2,2]; minimum[i] = gr_points[2,i]- sigma[2,2]; fig3 = plt.figure() import matplotlib.pyplot as plt plt.fill_between(x, maximum, minimum,lw=2, alpha=0.5 ) plt.plot(x, gr_points[2]) plt.savefig(name_model+ "_gravity_z_axis.png") for i in range(m): sigma = 3.*linalg.sqrtm(b_sigma[i]) maximum[i] = b_points[0,i]+ sigma[0,0]; minimum[i] = b_points[0,i]- sigma[0,0]; fig4 = plt.figure() import matplotlib.pyplot as plt plt.fill_between(x, maximum, minimum,lw=2, alpha=0.5 ) plt.plot(x, b_points[0]) plt.savefig(name_model+ "_body_x_axis.png") for i in range(m): sigma = 3.*linalg.sqrtm(b_sigma[i]) maximum[i] = b_points[1,i]+ sigma[1,1]; minimum[i] = b_points[1,i]- sigma[1,1]; fig5 = plt.figure() import matplotlib.pyplot as plt plt.fill_between(x, maximum, minimum,lw=2, alpha=0.5 ) plt.plot(x, b_points[1]) plt.savefig(name_model+ "_body_axis.png") for i in range(m): sigma = 3.*linalg.sqrtm(b_sigma[i]) maximum[i] = b_points[2,i]+ sigma[2,2]; minimum[i] = b_points[2,i]- sigma[2,2]; fig6 = plt.figure() import matplotlib.pyplot as plt plt.fill_between(x, maximum, minimum,lw=2, alpha=0.5 ) plt.plot(x, b_points[2]) plt.savefig(name_model+ "_body_z_axis.png") #NOTE: Add path def newModel(name,files): g = Creator() #Read the data g.ReadFiles(files,[]) g.CreateDatasets_Acc() g.ObtainNumberOfCluster() gravity = g.gravity K_gravity = g.K_gravity body = g.body K_body = g.K_body # 2) define the number of points to be used in GMR # (current settings allow for CONSTANT SPACING only) numPoints = amax(gravity[0,:]); scaling_factor = 10/10; numGMRPoints = math.ceil(numPoints*scaling_factor); # 3) perform Gaussian Mixture Modelling and Regression to retrieve the # expected curve and associated covariance matrices for each feature gr_points, gr_sigma = g.GetExpected(gravity,K_gravity,numGMRPoints) b_points, b_sigma = g.GetExpected(body,K_body,numGMRPoints) savetxt(name+"MuGravity.txt", gr_points,fmt='%.12f') savetxt(name+"SigmaGravity.txt", gr_sigma,fmt='%.12f') savetxt(name+"MuBody.txt", b_points,fmt='%.12f') savetxt(name+"SigmaBody.txt", b_sigma,fmt='%.12f') def loadModel(file_name, th=1, plot=True): #Load files gr_points = loadtxt(file_name+"MuGravity.txt") gr_sigma = loadtxt(file_name+"SigmaGravity.txt") b_points = loadtxt(file_name+"MuBody.txt") b_sigma = loadtxt(file_name+"SigmaBody.txt") 
#Add model gm = GestureModel() gm.addModel("gravity",gr_points, gr_sigma,th) gm.addModel("body",b_points, b_sigma,th) if plot == True: plotResults(gr_points,gr_sigma, b_points,b_sigma,file_name) return gm name_models = ['A','B','S1','S2'] num_samples = [10,14,9,10] th = [25,20,10,65] create_models = False list_files = [] #Create a list of the list of files for each model print "Defining files" i = 0 for name in name_models: files = [] for k in range(1,num_samples[i]+1): files.append('Models/' + name + '/data/mod('+ str(k) + ').txt') list_files.append(files) i = i + 1 #Create the models and save the list of files for calculating the weights if(create_models == True): print "Creating models" i = 0 for model in name_models: print list_files[i] newModel(model,list_files[i]) i = i + 1 list_models = [] print "Loading models" #Load the models for j in range(len(name_models)): #For the moment don't put True if there are more than 2 models in Ubuntu gm = loadModel(name_models[j],th[j],False) list_models.append(gm) print "Calculating weights" #Used to calculate the weights v0 = Classifier() for j in range(len(name_models)): print "\nFor model " + name_models[j] + ":" w_g, w_b = v0.calculateW(list_files[j],list_models[j]) list_models[j].addWeight("gravity",w_g) list_models[j].addWeight("body",w_b) print "\n Init classifiers" l_class = [] for j in range(len(name_models)): l_class.append(Classifier()) print "Give the model to each classifier" for j in range(len(name_models)): l_class[j].classify(list_models[j]) print "Validation" sfile = "validation/mix3.txt" import matplotlib.pyplot as plt fig = plt.figure() for j in range(len(name_models)): poss = l_class[j].validate_from_file(sfile, ',') m,n = poss.shape x = arange(0,m,1) plt.plot(x, poss,'o',label= name_models[j]) plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.) plt.savefig("result.png") print "Finish ..."
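# A compact sketch (illustrative only, not called above) of the pattern repeated six times
# in plotResults(): for every GMR point take the matrix square root of its 3x3 covariance
# block and use one diagonal entry as a +/- 3-sigma envelope around the mean curve. It
# assumes the (3, m) mean layout plotResults() works with after its transpose and the
# stacked covariance array (three rows per GMR point) that it slices with [i*3:i*3+3].
def gmr_envelope(points, sigmas, axis):
    """Return (lower, upper) 3-sigma envelopes along one axis (0=x, 1=y, 2=z)."""
    from numpy import zeros
    from scipy import linalg
    m = points.shape[1]
    lower = zeros((m))
    upper = zeros((m))
    for i in range(m):
        s = 3.*linalg.sqrtm(sigmas[i*3:i*3+3])
        lower[i] = points[axis, i] - s[axis, axis]
        upper[i] = points[axis, i] + s[axis, axis]
    return lower, upper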
gpl-3.0
mkuai/underwater
src/flow-monitor/examples/wifi-olsr-flowmon.py
108
7439
# -*- Mode: Python; -*- # Copyright (c) 2009 INESC Porto # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation; # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # Authors: Gustavo Carneiro <gjc@inescporto.pt> import sys import ns.applications import ns.core import ns.flow_monitor import ns.internet import ns.mobility import ns.network import ns.olsr import ns.wifi try: import ns.visualizer except ImportError: pass DISTANCE = 100 # (m) NUM_NODES_SIDE = 3 def main(argv): cmd = ns.core.CommandLine() cmd.NumNodesSide = None cmd.AddValue("NumNodesSide", "Grid side number of nodes (total number of nodes will be this number squared)") cmd.Results = None cmd.AddValue("Results", "Write XML results to file") cmd.Plot = None cmd.AddValue("Plot", "Plot the results using the matplotlib python module") cmd.Parse(argv) wifi = ns.wifi.WifiHelper.Default() wifiMac = ns.wifi.NqosWifiMacHelper.Default() wifiPhy = ns.wifi.YansWifiPhyHelper.Default() wifiChannel = ns.wifi.YansWifiChannelHelper.Default() wifiPhy.SetChannel(wifiChannel.Create()) ssid = ns.wifi.Ssid("wifi-default") wifi.SetRemoteStationManager("ns3::ArfWifiManager") wifiMac.SetType ("ns3::AdhocWifiMac", "Ssid", ns.wifi.SsidValue(ssid)) internet = ns.internet.InternetStackHelper() list_routing = ns.internet.Ipv4ListRoutingHelper() olsr_routing = ns.olsr.OlsrHelper() static_routing = ns.internet.Ipv4StaticRoutingHelper() list_routing.Add(static_routing, 0) list_routing.Add(olsr_routing, 100) internet.SetRoutingHelper(list_routing) ipv4Addresses = ns.internet.Ipv4AddressHelper() ipv4Addresses.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0")) port = 9 # Discard port(RFC 863) onOffHelper = ns.applications.OnOffHelper("ns3::UdpSocketFactory", ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.0.0.1"), port))) onOffHelper.SetAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate("100kbps"))) onOffHelper.SetAttribute("OnTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=1]")) onOffHelper.SetAttribute("OffTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0]")) addresses = [] nodes = [] if cmd.NumNodesSide is None: num_nodes_side = NUM_NODES_SIDE else: num_nodes_side = int(cmd.NumNodesSide) for xi in range(num_nodes_side): for yi in range(num_nodes_side): node = ns.network.Node() nodes.append(node) internet.Install(ns.network.NodeContainer(node)) mobility = ns.mobility.ConstantPositionMobilityModel() mobility.SetPosition(ns.core.Vector(xi*DISTANCE, yi*DISTANCE, 0)) node.AggregateObject(mobility) devices = wifi.Install(wifiPhy, wifiMac, node) ipv4_interfaces = ipv4Addresses.Assign(devices) addresses.append(ipv4_interfaces.GetAddress(0)) for i, node in enumerate(nodes): destaddr = addresses[(len(addresses) - 1 - i) % len(addresses)] #print i, destaddr onOffHelper.SetAttribute("Remote", ns.network.AddressValue(ns.network.InetSocketAddress(destaddr, port))) app = onOffHelper.Install(ns.network.NodeContainer(node)) urv = 
ns.core.UniformRandomVariable() app.Start(ns.core.Seconds(urv.GetValue(20, 30))) #internet.EnablePcapAll("wifi-olsr") flowmon_helper = ns.flow_monitor.FlowMonitorHelper() #flowmon_helper.SetMonitorAttribute("StartTime", ns.core.TimeValue(ns.core.Seconds(31))) monitor = flowmon_helper.InstallAll() monitor = flowmon_helper.GetMonitor() monitor.SetAttribute("DelayBinWidth", ns.core.DoubleValue(0.001)) monitor.SetAttribute("JitterBinWidth", ns.core.DoubleValue(0.001)) monitor.SetAttribute("PacketSizeBinWidth", ns.core.DoubleValue(20)) ns.core.Simulator.Stop(ns.core.Seconds(44.0)) ns.core.Simulator.Run() def print_stats(os, st): print >> os, " Tx Bytes: ", st.txBytes print >> os, " Rx Bytes: ", st.rxBytes print >> os, " Tx Packets: ", st.txPackets print >> os, " Rx Packets: ", st.rxPackets print >> os, " Lost Packets: ", st.lostPackets if st.rxPackets > 0: print >> os, " Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets) print >> os, " Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets-1)) print >> os, " Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1 if 0: print >> os, "Delay Histogram" for i in range(st.delayHistogram.GetNBins () ): print >> os, " ",i,"(", st.delayHistogram.GetBinStart (i), "-", \ st.delayHistogram.GetBinEnd (i), "): ", st.delayHistogram.GetBinCount (i) print >> os, "Jitter Histogram" for i in range(st.jitterHistogram.GetNBins () ): print >> os, " ",i,"(", st.jitterHistogram.GetBinStart (i), "-", \ st.jitterHistogram.GetBinEnd (i), "): ", st.jitterHistogram.GetBinCount (i) print >> os, "PacketSize Histogram" for i in range(st.packetSizeHistogram.GetNBins () ): print >> os, " ",i,"(", st.packetSizeHistogram.GetBinStart (i), "-", \ st.packetSizeHistogram.GetBinEnd (i), "): ", st.packetSizeHistogram.GetBinCount (i) for reason, drops in enumerate(st.packetsDropped): print " Packets dropped by reason %i: %i" % (reason, drops) #for reason, drops in enumerate(st.bytesDropped): # print "Bytes dropped by reason %i: %i" % (reason, drops) monitor.CheckForLostPackets() classifier = flowmon_helper.GetClassifier() if cmd.Results is None: for flow_id, flow_stats in monitor.GetFlowStats(): t = classifier.FindFlow(flow_id) proto = {6: 'TCP', 17: 'UDP'} [t.protocol] print "FlowID: %i (%s %s/%s --> %s/%i)" % \ (flow_id, proto, t.sourceAddress, t.sourcePort, t.destinationAddress, t.destinationPort) print_stats(sys.stdout, flow_stats) else: print monitor.SerializeToXmlFile(cmd.Results, True, True) if cmd.Plot is not None: import pylab delays = [] for flow_id, flow_stats in monitor.GetFlowStats(): tupl = classifier.FindFlow(flow_id) if tupl.protocol == 17 and tupl.sourcePort == 698: continue delays.append(flow_stats.delaySum.GetSeconds() / flow_stats.rxPackets) pylab.hist(delays, 20) pylab.xlabel("Delay (s)") pylab.ylabel("Number of Flows") pylab.show() return 0 if __name__ == '__main__': sys.exit(main(sys.argv))
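# Note on the traffic pattern set up in main(): each node i sends its OnOff flow to the
# node at the mirrored position in the address list, destaddr = addresses[(N - 1 - i) % N].
# For the default 3x3 grid (N = 9) that gives the pairs
#   0->8, 1->7, 2->6, 3->5, 4->4, 5->3, 6->2, 7->1, 8->0
# so the centre node sends to itself and every other flow has to traverse the grid.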
gpl-2.0
natefoo/tools-iuc
tools/spyboat/output_report.py
12
8730
""" Produces plots and a summary html 'headless' """ import logging import os import matplotlib import matplotlib.pyplot as ppl import spyboat.plotting as spyplot ppl.switch_backend('Agg') matplotlib.rcParams["text.usetex"] = False logger = logging.getLogger(__name__) # figure resolution DPI = 250 def produce_snapshots(input_movie, results, frame, Wkwargs, img_path="."): """ Takes the *input_movie* and the *results* dictionary from spyboat.processing.run_parallel and produces phase, period and amplitude snapshot png's. For the period snapshot also the period range is needed, hence the analysis dictionary 'Wkwargs' also gets passed. The output files name pattern is: [input, phase, period, amplitude]_frame{frame}.png and the storage location in *img_path*. These get picked up by 'create_html' """ spyplot.input_snapshot(input_movie[frame]) fig = ppl.gcf() out_path = os.path.join(img_path, f"input_frame{frame}.png") fig.savefig(out_path, dpi=DPI) ppl.close(fig) spyplot.phase_snapshot(results["phase"][frame]) fig = ppl.gcf() out_path = os.path.join(img_path, f"phase_frame{frame}.png") fig.savefig(out_path, dpi=DPI) ppl.close(fig) spyplot.period_snapshot( results["period"][frame], Wkwargs["Tmin"], Wkwargs["Tmax"], time_unit="a.u." ) fig = ppl.gcf() out_path = os.path.join(img_path, f"period_frame{frame}.png") fig.savefig(out_path, dpi=DPI) ppl.close(fig) spyplot.amplitude_snapshot(results["amplitude"][frame]) fig = ppl.gcf() out_path = os.path.join(img_path, f"amplitude_frame{frame}.png") fig.savefig(out_path, dpi=DPI) ppl.close(fig) logger.info(f"Produced 4 snapshots for frame {frame}..") def produce_distr_plots(results, Wkwargs, img_path="."): """ Output file names are: period_distr.png, power_distr.png and phase_distr.png """ spyplot.period_distr_dynamics(results["period"], Wkwargs) fig = ppl.gcf() out_path = os.path.join(img_path, "period_distr.png") fig.savefig(out_path, dpi=DPI) spyplot.power_distr_dynamics(results["power"], Wkwargs) fig = ppl.gcf() out_path = os.path.join(img_path, "power_distr.png") fig.savefig(out_path, dpi=DPI) spyplot.phase_coherence_dynamics(results["phase"], Wkwargs) fig = ppl.gcf() out_path = os.path.join(img_path, "phase_distr.png") fig.savefig(out_path, dpi=DPI) logger.info("Produced 3 distribution plots..") def create_html(frame_nums, par_str, html_fname="OutputReport.html"): """ The html generated assumes the respective png's have been created with 'produce_snapshots' and 'produce_distr_plots' and can be found at the cwd (that's how Galaxy works..) """ # -- create a gallery for every frame in frame_nums -- galleries = "" for frame_num in frame_nums: new_gal = f""" <div class="FrameSlides"> <h3 style="text-align:center; color=#363333"> Frame Nr. 
{frame_num} </h3> <div class="snapshot_gallery"> <figure class=”snapshot_gallery__item snapshot_gallery__item--1" style="margin: 0 0"> <img src="input_frame{frame_num}.png" alt="The Input" class="snapshot_gallery__img"> </figure> <figure class=”snapshot_gallery__item snapshot_gallery__item--2" style="margin: 0 0"> <img src="phase_frame{frame_num}.png" alt="Phase" class="snapshot_gallery__img"> </figure> <figure class=”snapshot_gallery__item snapshot_gallery__item--3" style="margin: 0 0"> <img src="period_frame{frame_num}.png" alt="Period" class="snapshot_gallery__img"> </figure> <figure class=”snapshot_gallery__item snapshot_gallery__item--4" style="margin: 0 0"> <img src="amplitude_frame{frame_num}.png" alt="Amplitude" class="snapshot_gallery__img"> </figure> </div> </div> """ galleries += new_gal parameter_cells = '' for line in par_str.split('\n'): # last str is empty.. if not line: break par_name, par_val = line.split('->') parameter_cells += f''' <tr> <td>{par_name}</td> <td>{par_val}</td> </tr>''' html_string = f""" <html> <!-- this file got automatically created by 'output_report.py' --> <title>SpyBOAT Output Report</title> <head> <!-- that doesn't work with galaxy.. --> <!--link rel="stylesheet" href="styles.css"--> <style type="text/css"> body{{ margin:10 100; background:whitesmoke; }} p{{ text-align: center; margin-top: 0.05cm; margin-bottom: .05cm; color:#2c2e2e; }} .center{{ text-align: center; display: block; margin-left: auto; margin-right: auto; width: 100%;}} /* matplotlib output at 1600x1200 */ .snapshot_gallery {{ margin: 0 0; text-align: center; display: grid; grid-template-columns: repeat(2,1fr); grid-template-rows: repeat(2,27vw); grid-gap: 5px; }} .snapshot_gallery__img {{ width: 100%; height: 100%; object-fit: contain; margin-top: 5px; margin-bottom: 15px; }} .subheader{{ text-align:center; font-size: 160%; color:#363333;}} .centerimg{{ text-align: center; width: 65%; max-width: 400px; display: block; padding: 10px; margin-left: auto; margin-right: auto; }} .div_distr{{ text-align: center; border-radius: 25px; margin-top: 1cm; margin: auto; margin-bottom: 0.5cm; background-color: #cce1e3; max-width: 550px; }} .partable{{ width: 70%; margin-left: auto; margin-right: auto; }} tr, td{{ color:#2c2e2e; font-size: 110%; }} </style> </head> <body> <h1 style="text-align:center; color:#363333">SpyBOAT Results Report</h1> <hr style="width:70%"> <h1 class="subheader"> Spatial Summary Statistics </h1> <div class="div_distr"> <img src="period_distr.png" alt="Period" class="centerimg"> <p> Median and quartiles of the estimated periods for each frame </p> </div> <div class="div_distr"> <img src="power_distr.png" alt="Period" class="centerimg"> <p> Median and quartiles of the ridge wavelet power for each frame </p> </div> <div class="div_distr"> <img src="phase_distr.png" alt="Period" class="centerimg"> <p> Kuramoto order parameter for the phases estimated for each frame </p> </div> <h1 class="subheader"> Output Movie Snapshots </h1> <!-- trigger the javascript at the end---> <div class="center"> <button class="w3-button" onclick="plusDivs(-1)">&#10094; Prev</button> <button class="w3-button" onclick="plusDivs(1)">Next &#10095;</button> </div> <!-- defines all elements of the "FrameSlides" class ---> {galleries} </div> <h1 class="subheader"> Parameters </h1> <div class="div_distr"> <table border = "1" class="partable"> <tr> <th>Name</th> <th>Value</th> </tr> {parameter_cells} </table> </div> <!-- javascript with escaped '{{'---> <script> var slideIndex = 1; showDivs(slideIndex); 
function plusDivs(n) {{ showDivs(slideIndex += n); }} function showDivs(n) {{ var i; var x = document.getElementsByClassName("FrameSlides"); if (n > x.length) {{slideIndex = 1}} if (n < 1) {{slideIndex = x.length}} ; for (i = 0; i < x.length; i++) {{ x[i].style.display = "none"; }} x[slideIndex-1].style.display = "block"; }} </script> </body> </html> """ with open(html_fname, "w") as OUT: OUT.write(html_string) logger.info("Created html report") return html_string # for local testing # create_html([0,20], 'par1 -> val1\n verylongpar2 -> val2')
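# A hypothetical sketch of how the helpers in this module are meant to be chained. The
# names input_movie, results and Wkwargs are assumptions standing in for the input stack,
# the output of spyboat.processing.run_parallel and the analysis settings; none of them
# are defined in this file.
#
# results = spyboat.processing.run_parallel(input_movie, ...)  # holds 'phase', 'period', 'amplitude', 'power'
# Wkwargs = {'Tmin': 20, 'Tmax': 40}                            # period range shown in the period snapshots
# for frame in (0, 20):
#     produce_snapshots(input_movie, results, frame, Wkwargs)
# produce_distr_plots(results, Wkwargs)
# create_html([0, 20], 'Tmin->20\nTmax->40\n')                  # one 'name->value' line per parameter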
mit
mlee92/Programming
Econ/supply_demand_elasticity/demand_elasticity.py
2
1413
# Elasticity of demand is a measure of how strongly consumers respond to a change in the price of a good # Formally, % change in demand / % change in price # Problem: Graph the histogram of average-elasticity for a linear-demand good with random coefficients (a, b) import random import matplotlib.pyplot as plt import numpy as np SIM = 1000; UNIT_RANGE = range(1, 50) AVGS = list() COEF = [0, 0] def generate_coefficients(): global COEF a = random.randint(1, 25) b = random.randint(a*50, 25*50) COEF = [a, b] def price(unit): return COEF[1] - COEF[0]*unit def graph_price(): x = np.linspace(1,50,50) y = price(x) plt.plot(x, y) plt.show() def elasticity(d1, d2): cPrice = price(d2) - price(d1) cDemand = d2 - d1 pPrice = cPrice / price(d1) pDemand = cDemand / d1 return abs(pDemand / pPrice) def simulate(): global AVGS, COEF, UNIT_RANGE generate_coefficients() elast_list = list() for i in UNIT_RANGE: for j in UNIT_RANGE: if(i != j): elast_list.append(elasticity(i, j)) mu = np.mean(elast_list) print(COEF, mu) AVGS.append(mu) def init(): for i in range(0, SIM): simulate() init() print(SIM) plt.hist(AVGS) plt.show()
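# Worked example of the formula implemented above (made-up coefficients, independent of
# the random simulation): with a = 2, b = 100 the demand curve is P = 100 - 2Q.
# Moving from Q1 = 10 (P1 = 80) to Q2 = 11 (P2 = 78):
#   % change in demand = (11 - 10) / 10   =  0.100
#   % change in price  = (78 - 80) / 80   = -0.025
#   elasticity         = |0.100 / -0.025| =  4.0
# The same number falls out of the functions above:
COEF = [2, 100]
print(elasticity(10, 11))  # -> 4.0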
gpl-2.0
mkocka/galaxytea
modeling/domcek/plots.py
1
4294
import matplotlib.pyplot as plt from numpy import * ###List of variables # r_in [10**10 cm] innder radius # r_out [10**10 cm] outer radius # step [10**10 cm] step of plot # alfa [] parameter of accretion # M_16 [10**16 g.s**(-1)] accretion flow # m_1 [solar mass] mass of compact object # R_hv [10**10 cm] radius of compact object # R_10 [10**10 cm] distance from compact object # f numerical factor ###List of computed parameters # Surface density [g.cm**(-2)] (sigma) # Height [cm] (H) # Density [g.cm**(-3)] (rho) # Central disc temeprature [K] (T_c) # Opacity [] (tau) # viscosity [cm**2.s**(-1)] (nu) # radial velocity towards center [cm.s**(-1)] (v_r) ###function solutions parameters # parameter 1 r_in # parameter 2 r_out # parameter 3 step # parameter 4 alfa # parameter 5 M_16 # parameter 6 m_1 # parameter 7 R_hv def solutions(r_in,r_out,step,alfa,M_16,m_1,R_hv): #defining lists list_function = arange(r_in,r_out,step) R_10_l,surface_density_l,height_l,density_l,Fx = ([] for i in range(5)) temperature_l,opacity_l,viscosity_l,radial_velocity_l = ([] for i in range(4)) #computation and appending to lists for R_10 in list_function: f=(1-((R_hv)/(R_10))**(1.0/2))**(1.0/4) surface_density = 5.2*alfa**(-4.0/5)*M_16**(7.0/10)*m_1**(1.0/4)*R_10**(-3.0/4)*f**(14.0/5) height = 1.7*10**8*alfa**(-1.0/10)*M_16**(3.0/20)*m_1**(-3.0/8)*R_10**(9.0/8)*f**(3.0/5) density = 3.1*10**(-8)*alfa**(-7.0/10)*M_16**(11.0/20)*m_1**(5.0/8)*R_10**(-15.0/8)*f**(11.0/5) temperature = 1.4*10**4*alfa**(-1.0/5)*M_16**(3.0/10)*m_1**(1.0/4)*R_10**(-3.0/4)*f**(6.0/5) opacity = 190*alfa**(-4.0/5)*M_16**(1.0/5)*f**(4.0/5) viscosity = 1.8*10**14*alfa**(4.0/5)*M_16**(3.0/10)*m_1**(-1.0/4)*R_10**(3.0/4)*f**(6.0/5) radial_velocity = 2.7*10**4*alfa**(4.0/5)*M_16**(3.0/10)*m_1**(-1.0/4)*R_10**(-1.0/4)*f**(-14.0/5) R_10_l.append(R_10) surface_density_l.append(surface_density) height_l.append(height) density_l.append(density) temperature_l.append(temperature) opacity_l.append(opacity) viscosity_l.append(viscosity) radial_velocity_l.append(radial_velocity) Fx.append(f) #transformation R_10 to kolimeters R_km = [ x / 10**(-4) for x in R_10_l] return R_km, surface_density_l, height_l, density_l,temperature_l,opacity_l,viscosity_l,radial_velocity_l,Fx #for definitions of parameters look up r_in =1.0001*10**(-4) r_out =10**(-2) step = 10**(-6) alfa = 0.5 M_16 = 63 m_1 = 1.5 R_hv = 1.0*10**(-4) lists=solutions(r_in,r_out,step,alfa,M_16,m_1,R_hv) print 30*"-" print "Used parameter values" print 30*"-" print "innder radius:", 10*".",r_in, 10*".", "[10$^{10}$ cm]" print "outer radius:", 10*".", r_out, 10*".", "[10$^{10}$ cm]" print "step of plot:", 10*".", step, 10*".", "[10$^{10}$ cm]" print "parameter of accretion alfa:", 10*".", alfa print "accretion flow:", 10*".", M_16, 10*".", "[10$^6$ g.s${-1)}$]" print "mass of compact object:", 10*".", m_1, 10*".", "[solar mass]" print "radius of compact object:", 10*".", R_hv, 10*".", "[10$^{10}$ cm]" plt.plot(lists[0], lists[1]) plt.title('surface density') plt.xlabel('radius [km]') plt.ylabel('surface density [g.cm$^{-2}$] ') plt.grid() plt.savefig("surface density") plt.gcf().clear() plt.plot(lists[0], lists[2]) plt.title('height') plt.xlabel('radius [km]') plt.ylabel('height [cm] ') plt.grid() plt.savefig("height") plt.gcf().clear() plt.plot(lists[0], lists[3]) plt.title('density') plt.xlabel('radius [km]') plt.ylabel('density [g.cm$^{-3}$] ') plt.grid() plt.savefig("density") plt.gcf().clear() plt.plot(lists[0], lists[4]) plt.title('temperature') plt.xlabel('radius [km]') plt.ylabel('temperature 
[K] ') plt.grid() plt.savefig("temperature") plt.gcf().clear() plt.plot(lists[0], lists[5]) plt.title('opacity') plt.xlabel('radius [km]') plt.ylabel('opacity ') plt.grid() plt.savefig("opacity") plt.gcf().clear() plt.plot(lists[0], lists[6]) plt.title('viscosity') plt.xlabel('radius [km]') plt.ylabel('viscosity [cm$^{2}$.s$^{-1}$] ') plt.grid() plt.savefig("viscosity") plt.gcf().clear() plt.plot(lists[0], lists[7]) plt.title('radial velocity') plt.xlabel('radius [km]') plt.ylabel('radial velocity [cm.s$^{-1}$] ') plt.grid() plt.savefig("radial velocity") plt.gcf().clear()
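# The plots above index the 9-tuple returned by solutions() by position; for readability
# the same result can be unpacked by name (same ordering as the return statement above,
# purely illustrative):
(R_km_l, surface_density_l, height_l, density_l, temperature_l,
 opacity_l, viscosity_l, radial_velocity_l, f_l) = lists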
mit
kasperschmidt/TDOSE
tdose_extract_spectra.py
1
43824
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = import numpy as np import sys import astropy.io.fits as afits import collections import tdose_utilities as tu import tdose_extract_spectra as tes import tdose_build_mock_cube as tbmc import pdb import scipy.ndimage.filters as snf import matplotlib as mpl mpl.use('Agg') # prevent pyplot from opening window; enables closing ssh session with detached screen running TDOSE import matplotlib.pyplot as plt # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = def extract_spectra(model_cube_file,source_association_dictionary=None,nameext='tdose_spectrum',outputdir='./',clobber=False, variance_cube_file=None,variance_cube_ext='ERROR',source_model_cube_file=None,source_cube_ext='DATA', model_cube_ext='DATA',layer_scale_ext='WAVESCL',data_cube_file=None,verbose=True): """ Assemble the spectra determined by the wavelength layer scaling of the normalized models when generating the source model cube --- INPUT --- model_cube_file Model cube to base extraction on (using header info and layer scales) source_association_dictionary Source association dictionary defining what sources should be combined into objects (individual spectra). nameext The name extension to use for saved spectra outputdir Directory to save spectra to clobber Overwrite spectra if they already exists variance_cube_file File containing variance cube of data to be used to estimate nois on 1D spectrum variance_cube_ext Extension of variance cube to use source_model_cube_file The source model cube defining the individual sources source_cube_ext Extension of source model cube file that contins source models model_cube_ext Extension of model cube file that contains model layer_scale_ext Extension of model cube file that contains the layer scales data_cube_file File containing original data cube used for extraction of aperture spectra verbose --- EXAMPLE OF USE --- """ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if verbose: print(' - Loading data needed for spectral assembly') model_cube = afits.open(model_cube_file)[model_cube_ext].data model_cube_hdr = afits.open(model_cube_file)[model_cube_ext].header layer_scale_arr = afits.open(model_cube_file)[layer_scale_ext].data if variance_cube_file is not None: stddev_cube = np.sqrt(afits.open(variance_cube_file)[variance_cube_ext].data) # turn varinace into standard deviation source_model_cube = afits.open(source_model_cube_file)[source_cube_ext].data else: stddev_cube = None source_model_cube = None Nsources = layer_scale_arr.shape[0] # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if data_cube_file is not None: if verbose: print(' - Loading data cube ') data_cube = afits.open(data_cube_file)[model_cube_ext].data else: data_cube = None # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if source_association_dictionary is None: if verbose: print(' - Building default source association dictionary ' \ '(determining what sources are combined into objects), i.e., one source per object ') sourcIDs_dic = collections.OrderedDict() for oo in np.arange(int(Nsources)): sourcIDs_dic[str(oo)] = [oo] else: sourcIDs_dic = source_association_dictionary Nobj = len(list(sourcIDs_dic.keys())) if verbose: print(' - Found '+str(Nobj)+' objects to generate spectra for in source_association_dictionary ') # - 
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if verbose: print(' - Assembling wavelength vector for spectra ') wavelengths = np.arange(model_cube_hdr['NAXIS3'])*model_cube_hdr['CD3_3']+model_cube_hdr['CRVAL3'] # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - specfiles = [] for oo, key in enumerate(sourcIDs_dic.keys()): obj_cube_hdr = model_cube_hdr.copy() try: specid = str("%.10d" % int(key)) except: specid = str(key) specname = outputdir+nameext+'_'+specid+'.fits' specfiles.append(specname) sourceIDs = sourcIDs_dic[key] obj_cube_hdr.append(('OBJID ',specid ,'ID of object'),end=True) obj_cube_hdr.append(('SRCIDS ',str(sourceIDs) ,'IDs of sources combined in object'),end=True) if verbose: infostr = ' - Extracting spectrum '+str("%6.f" % (oo+1))+' / '+str("%6.f" % Nobj) sys.stdout.write("%s\r" % infostr) sys.stdout.flush() sourceoutput = tes.extract_spectrum(sourceIDs,layer_scale_arr,wavelengths,noise_cube=stddev_cube, source_model_cube=source_model_cube, data_cube=data_cube, specname=specname,obj_cube_hdr=obj_cube_hdr,clobber=clobber,verbose=True) if verbose: print('\n - Done extracting spectra. Returning list of fits files generated') return specfiles # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = def extract_spectrum(sourceIDs,layer_scale_arr,wavelengths,noise_cube=None,source_model_cube=None, specname='tdose_extract_spectra_extractedspec.fits',obj_cube_hdr=None,data_cube=None, clobber=False,verbose=True): """ Extracting a spectrum based on the layer scale image from the model cube provided a list of sources to combine. Noise is estimated from the noise cube (of the data) If all layer_scales are 1 a data_cube for the extractions is expected --- INPUT --- sourceIDs The source IDs to combine into spectrum layer_scale_arr Layer scale array (or image) produced when generating the model cube fractional flux belonging to the source in each pixel wavelengths Wavelength vector to use for extracted 1D spectrum. noise_cube Cube with uncertainties (sqrt(variance)) of data cube to be used for estimating 1D uncertainties To estimate S/N and 1D noise, providing a source model cube is required source_model_cube Source model cube containing the model cube for each individual source seperately Needed in order to estimate noise from noise-cube specname Name of file to save spectrum to obj_cube_hdr Provide a template header to save the object cube (from combining the individual source cube) as an extension to the extracted spectrum data_cube In case all layers scales are 1, it is assumed that the source_model_cube contains a mask for the spectral extraction, which will then be performed on this data_cube. 
clobber To overwrite existing files set clobber=True verbose Toggle verbosity --- EXAMPLE OF USE --- """ # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if verbose: print(' - Checking shape of wavelengths and layer_scale_arr') if wavelengths.shape[0] != layer_scale_arr.shape[1]: sys.exit(' ---> Shape of wavelength vector ('+str(wavelengths.shape)+ ') and wavelength dimension of layer scale array ('+ layer_scale_arr.shape[1].shape+') do not match.') else: if verbose: print(' dimensions match; proceeding...') # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if verbose: print(' - Checking all sources have spectra in layer_scale_arr') maxsource = np.max(sourceIDs) if maxsource >= layer_scale_arr.shape[0]: sys.exit(' ---> Sources in list '+str(str(sourceIDs))+ ' not available among '+str(layer_scale_arr.shape[0])+' sources in layer_scale_arr.') else: if verbose: print(' All sources exist in layer_scale_arr; proceeding...') # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if verbose: print(' - Assembling object spectrum from source scaling') source_ent = np.asarray(sourceIDs).astype(int) if (layer_scale_arr == 1).all(): if verbose: print(' - All layer scales are 1; assuming source model cube contain mask for spectral extraction') object_cube = np.sum(np.abs(source_model_cube[source_ent,:,:]),axis=0) if data_cube is None: sys.exit(' ---> Did not find a data cube to extrac spectra from as expected') object_mask = (object_cube == 0) # masking all zeros in object mask invalid_mask = np.ma.masked_invalid(data_cube).mask comb_mask = (invalid_mask | object_mask) spec_1D_masked = np.sum(np.sum( np.ma.array(data_cube,mask=comb_mask) ,axis=1),axis=1) spec_1D = spec_1D_masked.filled(fill_value=0.0) if noise_cube is not None: if verbose: print(' Calculating noise as d_spec_k = sqrt( SUMij d_pix_ij**2 ), i.e., as the sqrt of variances summed') invalid_mask_noise = np.ma.masked_invalid(noise_cube).mask comb_mask = (comb_mask | invalid_mask_noise) variance_1D_masked = np.ma.array(noise_cube,mask=comb_mask)**2 noise_1D_masked = np.sqrt( np.sum( np.sum( variance_1D_masked, axis=1), axis=1) ) noise_1D = noise_1D_masked.filled(fill_value=np.nan) if verbose: print(' Generating S/N vector') SN_1D = spec_1D / noise_1D else: if verbose: print(' - No "noise_cube" provided. Setting all errors and S/N values to NaN') SN_1D = np.zeros(spec_1D.shape)*np.NaN noise_1D = np.zeros(spec_1D.shape)*np.NaN else: if verbose: print(' - Some layer scales are different from 1; hence assembling spectra using layer scales') if len(source_ent) < 1: spec_1D = layer_scale_arr[source_ent,:] else: spec_1D = np.sum( layer_scale_arr[source_ent,:],axis=0) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if noise_cube is not None: if verbose: print(' - Estimate S/N at each wavelength for 1D spectrum (see Eq. 16 of Kamann+2013)') if verbose: print(' Estimating fraction of flux in each pixel wrt. 
total flux in each layer') object_cube = np.sum((source_model_cube[source_ent,:,:,:]),axis=0) # summing source models for all source IDs fluxfrac_cube_sents = np.zeros(source_model_cube.shape[1:]) for sent in source_ent: object_cube_sent = np.sum((source_model_cube[[sent],:,:,:]),axis=0) # getting source model for model 'sent' fluxscale1D_sent = layer_scale_arr[sent,:] fluxfrac_cube_sent = object_cube_sent / fluxscale1D_sent[:,None,None] fluxfrac_cube_sents = fluxfrac_cube_sents + fluxfrac_cube_sent fluxfrac_cube = fluxfrac_cube_sents / len(source_ent) # renormalizing flux-fraction cube if verbose: print(' Defining pixel mask (ignoring NaN pixels) ') #+\ # 'and pixels with <'+str(fluxfrac_min)+' of total pixel flux in model cube) ' # pix_mask = (fluxfrac_cube < fluxfrac_min) invalid_mask1 = np.ma.masked_invalid(fluxfrac_cube).mask invalid_mask2 = np.ma.masked_invalid(noise_cube).mask # combining mask making sure all individual mask pixels have True for it to be true in combined mask comb_mask = (invalid_mask1 | invalid_mask2) # | pix_mask if verbose: print(' Calculating noise propogated as d_spec_k = 1/sqrt( SUMij (fluxfrac_ij**2 / d_pix_ij**2) )') squared_ratio = np.ma.array(fluxfrac_cube,mask=comb_mask)**2 / np.ma.array(noise_cube,mask=comb_mask)**2 inv_noise_masked = np.sqrt( np.sum( np.sum( squared_ratio, axis=1), axis=1) ) noise_1D = (1.0/inv_noise_masked).filled(fill_value=0.0) if verbose: print(' Generating S/N vector') SN_1D = spec_1D / noise_1D else: if verbose: print(' - No "noise_cube" provided. Setting all errors and S/N values to NaN') SN_1D = np.zeros(spec_1D.shape)*np.NaN noise_1D = np.zeros(spec_1D.shape)*np.NaN # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if verbose: print(' - Saving extracted 1D spectrum and source cube to \n '+specname) mainHDU = afits.PrimaryHDU() # primary HDU # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - c1 = afits.Column(name='wave', format='D', unit='ANGSTROMS', array=wavelengths) c2 = afits.Column(name='flux', format='D', unit='', array=spec_1D) c3 = afits.Column(name='fluxerror', format='D', unit='', array=noise_1D) c4 = afits.Column(name='s2n', format='D', unit='', array=SN_1D) coldefs = afits.ColDefs([c1,c2,c3,c4]) th = afits.BinTableHDU.from_columns(coldefs) # creating default header # writing hdrkeys:'---KEY--', '----------------MAX LENGTH COMMENT-------------' th.header.append(('EXTNAME ','SPEC1D' ,'cube containing source'),end=True) head = th.header tbHDU = afits.BinTableHDU.from_columns(coldefs, header=head) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if obj_cube_hdr is not None: objHDU = afits.ImageHDU(object_cube) for hdrkey in list(obj_cube_hdr.keys()): if not hdrkey in list(objHDU.header.keys()): objHDU.header.append((hdrkey,obj_cube_hdr[hdrkey],obj_cube_hdr.comments[hdrkey]),end=True) try: objHDU.header.append(('EXTNAMEC',objHDU.header['EXTNAME'] ,'EXTNAME of original source cube'),end=True) del objHDU.header['EXTNAME'] except: pass objHDU.header.append(('EXTNAME ','SOURCECUBE' ,'cube containing source'),end=True) hdulist = afits.HDUList([mainHDU,tbHDU,objHDU]) else: hdulist = afits.HDUList([mainHDU,tbHDU]) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - hdulist.writeto(specname, overwrite=clobber) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - return wavelengths, spec_1D, 
noise_1D, object_cube # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = def extract_spectra_viasourcemodelcube(datacube,sourcemodelcube,wavelengths,speclist,specids='None',outputdir='./', noisecube=False,sourcemodel_hdr='None',verbose=True): """ Wrapper for tes.extract_spectrum_viasourcemodelcube() to extract mutliple spectra --- INPUT ---- datacube Datacube to extract spectra from sourcemodelcube Cube containing the source models for each object used as "extraction cube" Dimensions should be [Nsources,datacube.shape] wavelengths Wavelength vector to use for extracted 1D spectrum. speclist List of spectra to extract. Indexes corresponding to the source models in the sourcemodlecube specids List of IDs to use in naming of output for source models referred to in "speclist" outputdir Directory to store spectra to noisecube Cube with uncertainties (sqrt(variance)) of data cube to be used in extraction souremodel_hdr If not 'None' provide a basic fits header for the source model cubes extracted and they will be appended to the individual output fits file containing the extracted spectra. verbose Toggle verbosity --- EXAMPLE OF USE --- """ if verbose: print(' - Check that source models indicated are present in source model cube ') specnames = [] Nmodels = sourcemodelcube.shape[0] maxobj = np.max(speclist) if maxobj >= Nmodels: sys.exit(' ---> Object model "'+str(maxobj)+'" is not included in source model cube (models start at 0)') else: if verbose: print(' All object models appear to be included in the '+str(Nmodels)+' source models found in cube') if datacube.shape != sourcemodelcube[0].shape: sys.exit(' ---> Shape of datacube ('+str(datacube.shape)+') and shape of source models ('+ sourcemodelcube[0].shape+') do not match.') sourcemodel_sum = np.sum(sourcemodelcube,axis=0) for ss, spec in enumerate(speclist): if specids == 'None': specid = spec else: specid = specids[ss] specname = outputdir+'tdose_spectrum_'+str("%.12d" % specid)+'.fits' specnames.append(specname) sourcemodel = sourcemodelcube[spec,:,:,:] sourceweights = sourcemodel/sourcemodel_sum # fractional flux of model for given source in each pixel sourcemodel_hdr.append(('OBJMODEL',spec ,'Source model number in parent source model cube'),end=True) sourcemodel_hdr.append(('OBJID ',specid ,'ID of source'),end=True) if verbose: infostr = ' - Extracting spectrum '+str("%6.f" % (spec+1))+' / '+str("%6.f" % len(speclist)) sys.stdout.write("%s\r" % infostr) sys.stdout.flush() sourceoutput = tes.extract_spectrum_viasourcemodelcube(datacube,sourceweights,wavelengths,specname=specname, noisecube=noisecube,spec1Dmethod='sum', sourcecube_hdr=sourcemodel_hdr,verbose=verbose) if verbose: print('\n - Done extracting spectra. Returning list of fits files containing spectra') return specnames # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = def extract_spectrum_viasourcemodelcube(datacube,sourceweights,wavelengths, specname='tdose_extract_spectra_extractedspec.fits', noisecube=None,spec1Dmethod='sum',sourcecube_hdr='None',verbose=True): """ Extracting a spectrum from a data cube given a source model (cube) to be used as 'extraction cube' --- INPUT --- datacube Datacube to extract spectra from sourceweights Weights from source model to use as "extraction cube". The weights should contain the fractional flux belonging to the source in each pixel wavelengths Wavelength vector to use for extracted 1D spectrum. 
specname Name of spectrum to generate noisecube Cube with uncertainties (sqrt(variance)) of data cube to be used in extraction spec1Dmethod Method used to extract 1D spectrum from source cube with sourcecube_hdr If not 'None' provide a fits header for the source cube and it ill be appended to the output fits file. verbose Toggle verbosity --- EXAMPLE OF USE --- """ if verbose: print(' - Checking shape of data and source model cubes') if datacube.shape != sourceweights.shape: sys.exit(' ---> Shape of datacube ('+str(datacube.shape)+') and source weights ('+ sourceweights.shape+') do not match.') else: if verbose: print(' dimensions match; proceeding with extraction ') # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if verbose: print(' - Applying weights to "datacube" to obtain source cube ') sourcecube = datacube*sourceweights if noisecube is not None: if verbose: print(' - Using "noisecube" for error propagation ') datanoise = noisecube else: if verbose: print(' - No "noisecube" provided. Setting all errors to 1') datanoise = np.ones(datacube.shape) if verbose: print(' - Assuming uncertainty on source weights equals the datanoise when propgating errors') sourceweights_err = datanoise sourcecube_err = sourcecube * np.sqrt( (datanoise/datacube)**2 + (sourceweights_err/sourceweights)**2 ) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if verbose: print(' - Generating 1D spectrum from source cube via:') spec_wave = wavelengths maskinvalid = np.ma.masked_invalid(sourcecube * sourcecube_err).mask if spec1Dmethod == 'sum': if verbose: print(' Simple summation of fluxes in sourcecube.') spec_flux = np.sum(np.sum(np.ma.array(sourcecube,mask=maskinvalid),axis=1),axis=1).filled() if verbose: print(' Errors are propagated as sum of squares.') spec_err = np.sqrt( np.sum( np.sum(np.ma.array(sourcecube_err,mask=maskinvalid)**2,axis=1),axis=1) ).filled() elif spec1Dmethod == 'sum_SNweight': pdb.set_trace() else: sys.exit(' ---> The chosen spec1Dmethod ('+str(spec1Dmethod)+') is invalid') # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if verbose: print(' - Saving extracted 1D spectrum and source cube to \n '+specname) mainHDU = afits.PrimaryHDU() # primary HDU # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - c1 = afits.Column(name='wave', format='D', unit='ANGSTROMS', array=spec_wave) c2 = afits.Column(name='flux', format='D', unit='', array=spec_flux) c3 = afits.Column(name='fluxerror', format='D', unit='', array=spec_err) coldefs = afits.ColDefs([c1,c2,c3]) th = afits.BinTableHDU.from_columns(coldefs) # creating default header # writing hdrkeys:'---KEY--', '----------------MAX LENGTH COMMENT-------------' th.header.append(('EXTNAME ','SPEC1D' ,'cube containing source'),end=True) th.header.append(('SPECMETH' , spec1Dmethod ,'Method used for spectral extraction'),end=True) head = th.header tbHDU = afits.BinTableHDU.from_columns(coldefs, header=head) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if sourcecube_hdr != 'None': sourceHDU = afits.ImageHDU(sourcecube) # default HDU with default minimal header for hdrkey in list(sourcecube_hdr.keys()): if not hdrkey in list(sourceHDU.header.keys()): sourceHDU.header.append((hdrkey,sourcecube_hdr[hdrkey],sourcecube_hdr.comments[hdrkey]),end=True) sourceHDU.header.append(('EXTNAME ','SOURCECUBE' ,'cube containing 
source'),end=True) hdulist = afits.HDUList([mainHDU,tbHDU,sourceHDU]) else: hdulist = afits.HDUList([mainHDU,tbHDU]) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - hdulist.writeto(specname, overwrite=True) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - return sourcecube, sourcecube_err, spec_wave, spec_flux, spec_err # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = def plot_1Dspecs(filelist,plotname='./tdose_1Dspectra.pdf',colors=None,labels=None,plotSNcurve=False, tdose_wavecol='wave',tdose_fluxcol='flux',tdose_errcol='fluxerror', simsources=None,simsourcefile='/Users/kschmidt/work/TDOSE/mock_cube_sourcecat161213_all.fits', sim_cube_dim=None,comparisonspecs=None,comp_colors=['blue'],comp_labels=None, comp_wavecol='WAVE_AIR',comp_fluxcol='FLUX',comp_errcol='FLUXERR', xrange=None,yrange=None,showspecs=False,shownoise=True, skyspecs=None,sky_colors=['red'],sky_labels=['sky'], sky_wavecol='lambda',sky_fluxcol='data',sky_errcol='stat', showlinelists=None,linelistcolors=['gray'],smooth=0,ylog=False, plotratio=False, verbose=True,pubversion=False): """ Plots of multiple 1D spectra --- INPUT --- filelist List of spectra filenames to plot plotname Name of plot to generate colors Colors of the spectra in filelist to use labels Labels of the spectra in filelist to use plotSNcurve Show signal-to-noise curve instead of flux spectra tdose_wavecol Wavelength column of the spectra in filelist tdose_fluxcol Flux column of the spectra in filelist tdose_errcol Flux error column of the spectra in filelist simsources To plot simulated sources provide ids here simsourcefile Source file with simulated sources to plot sim_cube_dim Dimensions of simulated cubes comparisonspecs To plot comparison spectra provide the filenames of those here comp_colors Colors of the spectra in comparisonspecs list to use comp_labels Labels of the spectra in comparisonspecs list to use comp_wavecol Wavelength column of the spectra in comparisonspecs list comp_fluxcol Flux column of the spectra in comparisonspecs list comp_errcol Flux error column of the spectra in comparisonspecs list xrange Xrange of plot yrange Yrange of plot showspecs To show plot instead of storing it to disk set showspecs=True shownoise To add noise envelope around spectrum set shownoise=True skyspecs To plot sky spectra provide the filenames of those here sky_colors Colors of the spectra in skyspecs list to use sky_labels Labels of the spectra in skyspecs list to use sky_wavecol Wavelength column of the spectra in skyspecs list sky_fluxcol Flux column of the spectra in skyspecs list sky_errcol Flux error column of the spectra in skyspecs list showlinelists To show line lists provide a list of arrays of dimension (Nlines,2) where each row in the arrays contains [waveobs, name], where 'waveobs' is the observed wavelengths and 'name' is a string with the name of each of the Nlines postions to mark on the spectrum. linelistcolors List of colors for line lists provided in showlinelists smooth To smooth the spectra, provide sigma of the 1D gaussian smoothing kernel to apply. For smooth = 0, no smoothing is performed. ylog To plot y-axis in log scale set to true plotratio To plot the ratio between the main spectrum and the comparison spectra instead of the actual spectra, set this keyword to true. 
verbose Toggle verbosity pubversion Generate more publication friendly version of figure """ if len(filelist) == 1: if verbose: print(' - Plotting data from '+filelist[0]) else: if verbose: print(' - Plotting data from filelist ') if pubversion: fig = plt.figure(figsize=(6, 3)) fig.subplots_adjust(wspace=0.1, hspace=0.1,left=0.15, right=0.95, bottom=0.18, top=0.83) Fsize = 12 else: fig = plt.figure(figsize=(10, 3)) fig.subplots_adjust(wspace=0.1, hspace=0.1,left=0.06, right=0.81, bottom=0.15, top=0.95) Fsize = 10 # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - # Looking for flux units in spectra bunit = 'BUNIT FLUX' # Default BUNIT for unitspec in filelist: if bunit == 'BUNIT FLUX': try: sourcecubehdr = afits.open(unitspec)['SOURCECUBE'].header bunit = sourcecubehdr['BUNIT'] except: try: # Backwards compatibility to TDOSE v2.0 extractions sourcecubehdr = afits.open(unitspec)[2].header bunit = sourcecubehdr['BUNIT'] except: pass if bunit == 'BUNIT FLUX': if verbose: print(' - Did not find BUNIT in SOURCECUBE header for any spectra in filelist - are they not from TDOSE?') if bunit == '10**(-20)*erg/s/cm**2/Angstrom': # Making bunit LaTeXy for MUSE-Wide BUNIT format bunit = '1e-20 erg/s/cm$^2$/\AA' else: bunit = '$'+bunit+'$' # minimizing pronlems with LaTeXing plot axes # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - lthick = 1 plt.rc('text', usetex=True) plt.rc('font', family='serif',size=Fsize) plt.rc('xtick', labelsize=Fsize) plt.rc('ytick', labelsize=Fsize) plt.clf() plt.ioff() #plt.title(plotname.split('TDOSE 1D spectra'),fontsize=Fsize) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - for ff, specfile in enumerate(filelist): specdat = afits.open(specfile)[1].data if colors is None: spec_color = None else: spec_color = colors[ff] if labels is None: spec_label = specfile else: spec_label = labels[ff] if xrange is not None: goodent = np.where((specdat[tdose_wavecol] > xrange[0]) & (specdat[tdose_wavecol] < xrange[1]))[0] if goodent == []: if verbose: print(' - The chosen xrange is not covered by the input spectrum. 
Plotting full spectrum') goodent = np.arange(len(specdat[tdose_wavecol])) else: goodent = np.arange(len(specdat[tdose_wavecol])) if plotSNcurve: try: s2ndat = specdat['s2n'][goodent] except: s2ndat = specdat[tdose_fluxcol][goodent]/specdat[tdose_errcol][goodent] if smooth > 0: s2ndat = snf.gaussian_filter(s2ndat, smooth) if not plotratio: plt.plot(specdat[tdose_wavecol][goodent],s2ndat,color=spec_color,lw=lthick, label=spec_label) ylabel = 'S/N' else: plt.plot(specdat[tdose_wavecol][goodent],s2ndat/s2ndat,color=spec_color,lw=lthick, label=None) ylabel = 'S/N ratio' #plotname = plotname.replace('.pdf','_S2N.pdf') else: fillalpha = 0.30 fluxdat = specdat[tdose_fluxcol][goodent] errlow = specdat[tdose_fluxcol][goodent]-specdat[tdose_errcol][goodent] errhigh = specdat[tdose_fluxcol][goodent]+specdat[tdose_errcol][goodent] if smooth > 0: fluxdat = snf.gaussian_filter(fluxdat, smooth) errlow = snf.gaussian_filter(errlow, smooth) errhigh = snf.gaussian_filter(errhigh, smooth) if smooth > 0: fluxdat = snf.gaussian_filter(fluxdat, smooth) if not plotratio: if shownoise: plt.fill_between(specdat[tdose_wavecol][goodent],errlow,errhigh, alpha=fillalpha,color=spec_color) plt.plot(specdat[tdose_wavecol][goodent],fluxdat, color=spec_color,lw=lthick, label=spec_label) ylabel = tdose_fluxcol else: plt.plot(specdat[tdose_wavecol][goodent],fluxdat/fluxdat, color=spec_color,lw=lthick, label=None) ylabel = tdose_fluxcol+' ratio ' # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if simsources is not None: sim_total = np.zeros(len(specdat[tdose_wavecol])) for sourcenumber in simsources: sourcedat = afits.open(simsourcefile)[1].data xpos = sourcedat['xpos'][sourcenumber] ypos = sourcedat['ypos'][sourcenumber] fluxscale = sourcedat['fluxscale'][sourcenumber] sourcetype = sourcedat['sourcetype'][sourcenumber] spectype = sourcedat['spectype'][sourcenumber] sourcecube = tbmc.gen_source_cube([ypos,xpos],fluxscale,sourcetype,spectype,cube_dim=sim_cube_dim, verbose=verbose,showsourceimgs=False) simspec = np.sum( np.sum(sourcecube, axis=1), axis=1) sim_total = sim_total + simspec if smooth > 0: simspec = snf.gaussian_filter(simspec, smooth) plt.plot(specdat[tdose_wavecol],simspec,'--',color='black',lw=lthick) plt.plot(specdat[tdose_wavecol],sim_total,'--',color='black',lw=lthick, label='Sim. spectrum: \nsimsource='+str(simsources)) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if comparisonspecs is not None: for cc, comparisonspec in enumerate(comparisonspecs): compdat = afits.open(comparisonspec)[1].data if xrange is not None: goodent = np.where((compdat[comp_wavecol] > xrange[0]) & (compdat[comp_wavecol] < xrange[1]))[0] if goodent == []: if verbose: print(' - The chosen xrange is not covered by the comparison spectrum. 
Plotting full spectrum') goodent = np.arange(len(compdat[comp_wavecol])) else: goodent = np.arange(len(compdat[comp_wavecol])) if comp_colors is None: comp_color = None else: comp_color = comp_colors[cc] if comp_labels is None: comp_label = comparisonspec else: comp_label = comp_labels[cc] if plotSNcurve: s2ncompdat = compdat[comp_fluxcol][goodent]/compdat[comp_errcol][goodent] if smooth > 0: s2ncompdat = snf.gaussian_filter(s2ncompdat, smooth) if not plotratio: plt.plot(compdat[comp_wavecol][goodent],s2ncompdat, color=comp_color,lw=lthick, label=comp_label) else: plt.plot(compdat[comp_wavecol][goodent],s2ndat/s2ncompdat, color=comp_color,lw=lthick, label=comp_label) else: fillalpha = 0.30 fluxcompdat = compdat[comp_fluxcol][goodent] errlow = compdat[comp_fluxcol][goodent]-compdat[comp_errcol][goodent] errhigh = compdat[comp_fluxcol][goodent]+compdat[comp_errcol][goodent] if smooth > 0: fluxcompdat = snf.gaussian_filter(fluxcompdat, smooth) errlow = snf.gaussian_filter(errlow, smooth) errhigh = snf.gaussian_filter(errhigh, smooth) if not plotratio: if shownoise: plt.fill_between(compdat[comp_wavecol][goodent],errlow,errhigh, alpha=fillalpha,color=comp_color) plt.plot(compdat[comp_wavecol][goodent],fluxcompdat, color=comp_color,lw=lthick, label=comp_label) else: plt.plot(compdat[comp_wavecol][goodent],fluxdat/fluxcompdat, color=comp_color,lw=lthick, label=comp_label) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if skyspecs is not None: for ss, skyspec in enumerate(skyspecs): skydat = afits.open(skyspec)[1].data if xrange is not None: goodent = np.where((skydat[sky_wavecol] > xrange[0]) & (skydat[sky_wavecol] < xrange[1]))[0] if goodent == []: if verbose: print(' - The chosen xrange is not covered by the sky spectrum. 
Plotting full spectrum') goodent = np.arange(len(skydat[sky_wavecol])) else: goodent = np.arange(len(skydat[sky_wavecol])) if sky_colors is None: sky_color = None else: sky_color = sky_colors[ss] if sky_labels is None: sky_label = skyspec else: sky_label = sky_labels[ss] if plotSNcurve: s2nsky = skydat[sky_fluxcol][goodent]/skydat[sky_errcol][goodent] if smooth > 0: s2nsky = snf.gaussian_filter(s2nsky, smooth) plt.plot(skydat[sky_wavecol][goodent],s2nsky, color=sky_color,lw=lthick, label=sky_label) else: fillalpha = 0.30 fluxsky = skydat[sky_fluxcol][goodent] errlow = skydat[sky_fluxcol][goodent]-skydat[sky_errcol][goodent] errhigh = skydat[sky_fluxcol][goodent]+skydat[sky_errcol][goodent] if smooth > 0: fluxsky = snf.gaussian_filter(fluxsky, smooth) errlow = snf.gaussian_filter(errlow, smooth) errhigh = snf.gaussian_filter(errhigh, smooth) if shownoise: plt.fill_between(skydat[sky_wavecol][goodent],errlow,errhigh, alpha=fillalpha,color=sky_color) plt.plot(skydat[sky_wavecol][goodent],fluxsky, color=sky_color,lw=lthick, label=sky_label) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if xrange is None: xvals = [4800,9300] else: xvals = xrange plt.plot(xvals,[0,0],'--k',lw=lthick) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - plt.xlabel('Wavelength [\AA]', fontsize=Fsize) if pubversion: if plotSNcurve: ylabel = 'Signal-to-Noise' else: ylabel = 'Flux ['+str(bunit)+']' if plotratio: ylabel = ylabel+' ratio' plt.ylabel(ylabel, fontsize=Fsize) if ylog: plt.yscale('log') if yrange is not None: plt.ylim(yrange) if xrange is not None: plt.xlim(xrange) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if showlinelists is not None: for sl, showlinelist in enumerate(showlinelists): ymin, ymax = plt.ylim() xmin, xmax = plt.xlim() for ww, wave in enumerate(showlinelist[:,0]): wave = float(wave) if (wave < xmax) & (wave > xmin): plt.plot([wave,wave],[ymin,ymax],linestyle='--',color=linelistcolors[sl],lw=lthick) plt.text(wave,ymin+1.03*np.abs([ymax-ymin]),showlinelist[:,1][ww],color=linelistcolors[sl], fontsize=Fsize-2., ha='center') # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if pubversion: leg = plt.legend(fancybox=True, loc='upper center',prop={'size':Fsize-2},ncol=4,numpoints=1, bbox_to_anchor=(0.44, 1.27)) # add the legend else: leg = plt.legend(fancybox=True, loc='upper right',prop={'size':Fsize},ncol=1,numpoints=1, bbox_to_anchor=(1.25, 1.03)) # add the legend leg.get_frame().set_alpha(0.7) if showspecs: if verbose: print(' Showing plot (not saving to file)') plt.show() else: if verbose: print(' Saving plot to',plotname) plt.savefig(plotname) plt.clf() plt.close('all') # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = def plot_histograms(datavectors,plotname='./tdose_cubehist.pdf',colors=None,labels=None,bins=None, xrange=None,yrange=None,verbose=True,norm=True,ylog=True): """ Plot histograms of a set of data vectors. --- INPUT --- datavectors Set of data vectors to plot histograms of plotname Name of plot to generate colors Colors to use for histograms labels Labels for the data vectors bins Bins to use for histograms. 
Can be generated with np.arange(minval,maxval+binwidth,binwidth) xrange Xrange of plot yrange Yrange of plot verbose Toggle verbosity norm Noramlize the histograms ylog Use a logarithmic y-axes when plotting """ Ndat = len(datavectors) if verbose: print(' - Plotting histograms of N = '+str(Ndat)+' data vectors') if colors is None: colors = ['blue']*Ndat if labels is None: labels = ['data vector no. '+str(ii+1) for ii in np.arange(Ndat)] if bins is None: bins = np.arange(-100,102,2) fig = plt.figure(figsize=(10, 3)) fig.subplots_adjust(wspace=0.1, hspace=0.1,left=0.08, right=0.81, bottom=0.1, top=0.95) Fsize = 10 lthick = 1 plt.rc('text', usetex=True) plt.rc('font', family='serif',size=Fsize) plt.rc('xtick', labelsize=Fsize) plt.rc('ytick', labelsize=Fsize) plt.clf() plt.ioff() #plt.title(plotname.split('TDOSE 1D spectra'),fontsize=Fsize) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - for dd, datavec in enumerate(datavectors): hist = plt.hist(datavec[~np.isnan(datavec)],color=colors[dd],bins=bins,histtype="step",lw=lthick, label=labels[dd],normed=norm) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if yrange is None: yvals = [1e-5,1e8] else: yvals = yrange plt.plot([0,0],yvals,'--k',lw=lthick) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - plt.xlabel('', fontsize=Fsize) plt.ylabel('\#', fontsize=Fsize) if yrange is not None: plt.ylim(yrange) if xrange is not None: plt.xlim(xrange) if ylog: plt.yscale('log') leg = plt.legend(fancybox=True, loc='upper right',prop={'size':Fsize},ncol=1,numpoints=1, bbox_to_anchor=(1.25, 1.03)) # add the legend leg.get_frame().set_alpha(0.7) if verbose: print(' Saving plot to',plotname) plt.savefig(plotname) plt.clf() plt.close('all') # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
mit
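The extraction routine above collapses a weighted source cube into a 1D spectrum by summing over the two spatial axes and propagating per-voxel errors in quadrature. Below is a minimal stand-alone sketch of that 'sum' method in plain numpy; it is not the TDOSE API, and the (wavelength, y, x) axis ordering and helper name are assumptions for illustration.

import numpy as np

def extract_sum_spectrum(sourcecube, sourcecube_err):
    # Mask voxels that are NaN/inf in either the data or the error cube
    mask = ~np.isfinite(sourcecube) | ~np.isfinite(sourcecube_err)
    cube = np.ma.array(sourcecube, mask=mask)
    cube_err = np.ma.array(sourcecube_err, mask=mask)
    # Simple summation over the spatial axes; errors add in quadrature
    spec_flux = np.sum(np.sum(cube, axis=1), axis=1).filled(np.nan)
    spec_err = np.sqrt(np.sum(np.sum(cube_err**2, axis=1), axis=1)).filled(np.nan)
    return spec_flux, spec_err

# Toy cube: 100 wavelength layers of 5x5 pixels
rng = np.random.default_rng(1)
flux, err = extract_sum_spectrum(rng.normal(1.0, 0.1, (100, 5, 5)),
                                 np.full((100, 5, 5), 0.1))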
abimannans/scikit-learn
examples/linear_model/plot_logistic_path.py
349
1195
#!/usr/bin/env python """ ================================= Path with L1- Logistic Regression ================================= Computes path on IRIS dataset. """ print(__doc__) # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # License: BSD 3 clause from datetime import datetime import numpy as np import matplotlib.pyplot as plt from sklearn import linear_model from sklearn import datasets from sklearn.svm import l1_min_c iris = datasets.load_iris() X = iris.data y = iris.target X = X[y != 2] y = y[y != 2] X -= np.mean(X, 0) ############################################################################### # Demo path functions cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3) print("Computing regularization path ...") start = datetime.now() clf = linear_model.LogisticRegression(C=1.0, penalty='l1', tol=1e-6) coefs_ = [] for c in cs: clf.set_params(C=c) clf.fit(X, y) coefs_.append(clf.coef_.ravel().copy()) print("This took ", datetime.now() - start) coefs_ = np.array(coefs_) plt.plot(np.log10(cs), coefs_) ymin, ymax = plt.ylim() plt.xlabel('log(C)') plt.ylabel('Coefficients') plt.title('Logistic Regression Path') plt.axis('tight') plt.show()
bsd-3-clause
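A hedged sketch of the same regularization-path idea against a current scikit-learn, where an L1 penalty requires the 'liblinear' or 'saga' solver; l1_min_c still returns the smallest C at which any coefficient becomes non-zero. The grid size of 16 is an arbitrary illustration choice.

import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.svm import l1_min_c

X, y = load_iris(return_X_y=True)
X, y = X[y != 2], y[y != 2]      # binary problem, as in the example above
X = X - X.mean(axis=0)

cs = l1_min_c(X, y, loss='log') * np.logspace(0, 3, 16)
clf = LogisticRegression(penalty='l1', solver='liblinear', tol=1e-6)
coefs = []
for c in cs:
    clf.set_params(C=c)
    clf.fit(X, y)
    coefs.append(clf.coef_.ravel().copy())
coefs = np.array(coefs)          # shape (len(cs), n_features)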
alee156/clviz
prototype/connectivity.py
2
8155
#!/usr/bin/env python #-*- coding:utf-8 -*- import matplotlib from matplotlib import pyplot as plt import numpy as np from numpy import linalg as LA import cv2 import math import plotly from plotly.graph_objs import * from plotly.offline import download_plotlyjs, init_notebook_mode, iplot from plotly import tools import time import collections as col from collections import OrderedDict import ast from ndreg import * import ndio.remote.neurodata as neurodata import nibabel as nib import networkx as nx import re import pandas as pd import requests import json import seaborn as sns import csv, gc from sklearn.manifold import spectral_embedding as se import scipy.sparse as sp plotly.offline.init_notebook_mode() def spec_clust(graphx, num_components): """ Function for doing the spectral embedding. :param graphx: :param num_components: :return: """ adj_mat = nx.adjacency_matrix(graphx) # result = se(adj_mat, n_components=num_components, drop_first=True) result = se(adj_mat, n_components=num_components, drop_first=False) return result def add_to_dict(d, region, index): if region in d: d[region].append(index) else: d[region] = [index] return d def get_adj_mat(regions_path): points = np.genfromtxt(regions_path, delimiter=',') x_dim = np.max(points[:, 0]) y_dim = np.max(points[:, 1]) z_dim = np.max(points[:, 2]) am = SparseMatrix(x_dim, y_dim, z_dim) for point in points: am.add(tuple(point[0:3]), point[4]) return am def get_dict_real(g, se_result, regions_path): nodes = g.nodes() points = np.genfromtxt(regions_path, delimiter=',') orig_dict = OrderedDict() d = {} sparse_mat = get_adj_mat(regions_path) for index, node in enumerate(nodes): s = g.node[node]['attr'] point = ast.literal_eval(s) region = sparse_mat.get(tuple(point)) # if region == -1: # # error # print 'FUCK' add_to_dict(d, region, index) for point in points: region = point[4] # if region in orig_dict: # orig_dict[region] = np.vstack((orig_dict[region], point[0:3])) # else: # orig_dict[region] = np.array([point[0:3]]) add_to_dict(orig_dict, region, point[0:3]) se_regions_nodes = {} se_regions = {} for key, value in d.iteritems(): index_list = value nodes_arr = np.array(nodes) pt_names = nodes_arr[index_list] se_pts = se_result[index_list] nodes_to_se = dict(zip(pt_names, se_pts)) # maps from node names to embedded point coordinates se_regions_nodes[key] = nodes_to_se se_regions[key] = se_pts return se_regions, orig_dict, se_regions_nodes def create_connectivity_graph(orig_avg_dict, se_avg_dict, max_dist=0.02): g = nx.Graph() for key, avg in se_avg_dict.iteritems(): for key2, avg2 in se_avg_dict.iteritems(): avg_np = np.array(avg) avg2_np = np.array(avg2) diff = np.linalg.norm(avg_np - avg2_np) diff = max_dist if diff > max_dist else diff g.add_edge(key, key2, weight=diff) # Setting the coordinate attribute for each region node to the average of that region. for key, avg in orig_avg_dict.iteritems(): g.node[key]['attr'] = avg return g def get_connectivity_hard(eig_dict, orig_dict=None, max_dist=0.02): """ Uses create_connectivity_graph. :param eig_dict: :param orig_dict: :return: """ eigenvector_index = 1 # the second smallest eigenvector avg_dict = {} orig_avg_dict = OrderedDict() # dict that maps from region to most connected region con_dict = OrderedDict() orig_con_dict = OrderedDict() if orig_dict != None: # Getting the original averages. 
for key, region in orig_dict.iteritems(): tmp_x = [] tmp_y = [] y_vals = [] for j in range(len(region)): y_vals.append(region[j]) y_vals = np.array(y_vals) x_avg = np.mean(y_vals[:, 0]) y_avg = np.mean(y_vals[:, 1]) z_avg = np.mean(y_vals[:, 2]) orig_avg_dict[key] = [x_avg, y_avg, z_avg] # avg = np.mean(y_vals) # orig_avg_dict[key] = avg # print 'orignal averages' # print orig_avg_dict # Getting connectivity for original points. for key, avg in orig_avg_dict.iteritems(): min_key = '' min_diff = float('inf') for key2, avg2 in orig_avg_dict.iteritems(): if key2 == key: continue avg_np = np.array(avg) avg2_np = np.array(avg2) diff = np.linalg.norm(avg_np - avg2_np) if diff < min_diff: min_diff = diff min_key = key2 orig_con_dict[float(key)] = [float(min_key), min_diff] # Getting the average first 2 eigenvector components for each of the regions for key, region in eig_dict.iteritems(): # print(key) y_vals = [] for j in range(len(region)): y_vals.append(region[j]) y_vals = np.array(y_vals) x_avg = np.mean(y_vals[:, 0]) y_avg = np.mean(y_vals[:, 1]) z_avg = np.mean(y_vals[:, 2]) avg_dict[key] = [x_avg, y_avg, z_avg] # print('getcon avg_dict') # print(avg_dict) # Computing connectivity between regions using the distance between averages for key, avg in avg_dict.iteritems(): min_key = '' min_diff = float('inf') for key2, avg2 in avg_dict.iteritems(): if key2 == key: continue avg_np = np.array(avg) avg2_np = np.array(avg2) diff = np.linalg.norm(avg_np - avg2_np) if diff < min_diff: min_diff = diff min_key = key2 con_dict[float(key)] = [float(min_key), min_diff] con_dict = OrderedDict(sorted(con_dict.items())) orig_con_dict = OrderedDict(sorted(orig_con_dict.items())) g = create_connectivity_graph(orig_avg_dict, avg_dict, max_dist) if orig_dict == None: return con_dict else: return con_dict, orig_con_dict, g class SparseMatrix: def __init__(self, x, y, z): # self._max_index = 0 x_dim = x y_dim = y z_dim = z self._vector = {} def add(self, index, value): # vector starts at index one, because it reads from the file and the file # always has the index of the features start at 1 self._vector[index] = value # if index > self._max_index: # self._max_index = index def get(self, index): # if the index doesn't exist in the dict, return 0 because it's sparse anyways if index in self._vector: return self._vector[index] return -1 def get_sparse_matrix(self): return self._vector # return self._vector.keys() # def get_full_vector(self, size=None): # """ Returns a full vector of features as a numpy array. """ # size = (self._max_index + 1) if size == None else size # full_vector = np.zeros(size) # 0 indexed # for key, value in self._vector.iteritems(): # full_vector[key] = value # return full_vector def __str__(self): return str(self._vector) def plot_con_mat(con_adj_mat, output_path=None, show=False): title = 'Connectivity Heatmap' data = [ Heatmap( z = con_adj_mat, # x = con_graph.nodes(), # y = con_graph.nodes() ) ] layout = Layout( title = title, xaxis=dict(title='region'), yaxis=dict(title='region') ) fig = Figure(data=data, layout=layout) if show: iplot(fig) if output_path != None: plotly.offline.plot(fig, filename=output_path) return fig
apache-2.0
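The spec_clust helper above boils down to scikit-learn's spectral_embedding applied to a networkx adjacency matrix. A small self-contained sketch on a toy graph (the karate-club graph stands in for the CLARITY data; the choice of 3 components is illustrative):

import networkx as nx
from sklearn.manifold import spectral_embedding

g = nx.karate_club_graph()
adjacency = nx.adjacency_matrix(g).astype(float)
coords = spectral_embedding(adjacency, n_components=3, drop_first=False)
print(coords.shape)   # (number of nodes, 3): one embedded point per node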
eshasharma/mase
src/old/lib.py
13
4501
from __future__ import print_function, unicode_literals from __future__ import absolute_import, division """ # Lib: Standard Utilities Standard imports: used everywhere. ## Code Standards Narrow code (52 chars, max); use ``i'', not ``self'', set indent to two characters, In a repo (or course). Markdown comments (which means we can do tricks like auto-generating this documentation from comments in the file). Not Python3, but use Python3 headers. good reseraoiuces for advance people: Norving's infrenqencly asked questions David Isaacon's Pything tips, tricks, and Hacks.http://www.siafoo.net/article/52 Environemnt that supports matplotlib, scikitlearn. Easy to get there. Old school: install linux. New school: install virtualbox. Newer school: work online. To checn if you ahve a suseful envorunment, try the following (isntall pip, matpolotlib, scikitlearn) Learn Python. Learn tdd Attitude to coding. not code byt"set yourself up to et rapid feedback on some issue" """ import random, pprint, re, datetime, time,traceback from contextlib import contextmanager import pprint,sys """ Unit test engine, inspired by Kent Beck. """ def ok(*lst): for one in lst: unittest(one) return one class unittest: tries = fails = 0 # tracks the record so far @staticmethod def score(): t = unittest.tries f = unittest.fails return "# TRIES= %s FAIL= %s %%PASS = %s%%" % ( t,f,int(round(t*100/(t+f+0.001)))) def __init__(i,test): unittest.tries += 1 try: test() except Exception,e: unittest.fails += 1 i.report(e,test) def report(i,e,test): print(traceback.format_exc()) print(unittest.score(),':',test.__name__, e) """ Simple container class (offers simple initialization). """ class o: def __init__(i,**d) : i + d def __add__(i,d) : i.__dict__.update(d) def __setitem__(i,k,v) : i.__dict__[k] = v def __getitem__(i,k) : return i.__dict__[k] def __repr__(i) : return str(i.items()) def items(i,x=None) : x = x or i if isinstance(x,o): return [(k,i.items(v)) for k,v in x.__dict__.values() if not k[0] == "_" ] else: return x """ The settings system. 
""" the = o() def setting(f): name = f.__name__ def wrapper(**d): tmp = f() tmp + d the[name] = tmp return tmp wrapper() return wrapper @setting def LIB(): return o( seed = 1, has = o(decs = 3, skip="_", wicked=True), show = o(indent=2, width=80) ) #------------------------------------------------- r = random.random any = random.choice seed = random.seed isa = isinstance def lt(x,y): return x < y def gt(x,y): return x > y def first(lst): return lst[0] def last(lst): return lst[-1] def shuffle(lst): random.shuffle(lst) return lst def ntiles(lst, tiles=[0.1,0.3,0.5,0.7,0.9], norm=False, f=3): if norm: lo,hi = lst[0], lst[-1] lst= g([(x - lo)/(hi-lo+0.0001) for x in lst],f) at = lambda x: lst[ int(len(lst)*x) ] lst = [ at(tile) for tile in tiles ] return lst def say(*lst): sys.stdout.write(', '.join(map(str,lst))) sys.stdout.flush() def g(lst,f=3): return map(lambda x: round(x,f),lst) #------------------------------------------------- def show(x, indent=None, width=None): print(pprint.pformat(has(x), indent= indent or the.LIB.show.indent, width = width or the.LIB.show.width)) def cache(f): name = f.__name__ def wrapper(i): i._cache = i._cache or {} key = (name, i.id) if key in i._cache: x = i._cache[key] else: x = f(i) # sigh, gonna have to call it i._cache[key] = x # ensure ache holds 'c' return x return wrapper @contextmanager def duration(): t1 = time.time() yield t2 = time.time() print("\n" + "-" * 72) print("# Runtime: %.3f secs" % (t2-t1)) def use(x,**y): return (x,y) @contextmanager def settings(*usings): for (using, override) in usings: using(**override) yield for (using,_) in usings: using() @contextmanager def study(what,*usings): print("\n#" + "-" * 50, "\n#", what, "\n#", datetime.datetime.now().strftime( "%Y-%m-%d %H:%M:%S")) for (using, override) in usings: using(**override) seed(the.LIB.seed) show(the) with duration(): yield for (using,_) in usings: using()
unlicense
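The 'o' container above is written for Python 2. A minimal Python 3 sketch of the same attribute-style record, kept deliberately small and not matching the original API exactly:

class o:
    def __init__(self, **d):
        self.__dict__.update(d)

    def __repr__(self):
        keep = {k: v for k, v in self.__dict__.items() if not k.startswith("_")}
        return "o(%s)" % ", ".join("%s=%r" % kv for kv in keep.items())

# Nested settings records, as in the LIB() example above
the = o(seed=1, show=o(indent=2, width=80))
print(the.show.width)  # 80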
droundy/deft
papers/thesis-kirstie/figs/plot_LJ_Potential.py
1
1142
#!/usr/bin/python3
# RUN this program from the directory it is listed in
# with command ./plot_LJ_Potential.py

from scipy import special
import numpy as np
import matplotlib.pyplot as plt
import math

# Plot WCA Potential vs r
# R=1/1.781797436  # for a sigma=1 DOESN'T WORK!! graph wrong shape!
R = 1/1.781797436
epsilon = 1
sigma = 1
# print(sigma)

# r = np.linspace(.1, 2*R, 200)
# r = np.linspace(.9, 4, 200)  # SAVE!!! for plotting r
r = np.linspace(.9, 2.5, 200)
r_dless = sigma/r  # plot dimensionless quantity!

sigma_over_r_to_pow6 = (r_dless)*(r_dless)*(r_dless)*(r_dless)*(r_dless)*(r_dless)

# V = 4*epsilon*(sigma_over_r_to_pow6*sigma_over_r_to_pow6 - sigma_over_r_to_pow6) + epsilon  # WCA potential
# V = 4*epsilon*(sigma_over_r_to_pow6*sigma_over_r_to_pow6 - sigma_over_r_to_pow6)  # LJ potential but looks like WCA
V = 4*epsilon*(sigma_over_r_to_pow6*sigma_over_r_to_pow6 - sigma_over_r_to_pow6)  # LJ potential

plt.plot(1/r_dless, V)
plt.xlim(right=2.5)
plt.ylim(top=V.max())
plt.xlabel(r'r/$\sigma$')
# plt.xlabel('r')
plt.ylabel(r'V(r)/$\epsilon$')
plt.title('Lennard-Jones Potential')
# plt.legend()
plt.savefig("LJ_Potential.pdf")
# plt.show()
gpl-2.0
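Two textbook checks on the Lennard-Jones curve plotted above: the potential crosses zero at r = sigma and reaches its minimum, of depth -epsilon, at r = 2**(1/6) * sigma. A plain numpy sketch:

import numpy as np

def lj(r, epsilon=1.0, sigma=1.0):
    sr6 = (sigma / r) ** 6
    return 4.0 * epsilon * (sr6 ** 2 - sr6)

r_min = 2.0 ** (1.0 / 6.0)
print(np.isclose(lj(1.0), 0.0))     # True: V(sigma) = 0
print(np.isclose(lj(r_min), -1.0))  # True: V(r_min) = -epsilon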
FowlerLab/Enrich2
enrich2/seqlib.py
1
15885
from __future__ import print_function import logging import os.path import pandas as pd import numpy as np from collections import OrderedDict from matplotlib.backends.backend_pdf import PdfPages import sys from .plots import counts_plot from .storemanager import StoreManager, fix_filename, ELEMENT_LABELS class SeqLib(StoreManager): """ Abstract class for handling count data from a single sequencing library. """ # Note: the following block is referenced by line number above # When adding new messages, update the documentation line numbers also! filter_messages = OrderedDict( [ ("min quality", "single-base quality"), ("avg quality", "average quality"), ("max N", "excess N bases"), ("chastity", "not chaste"), ("remove unresolvable", "unresolvable mismatch"), ("merge failure", "unable to merge reads"), ("total", "total"), ] ) store_suffix = "lib" def __init__(self): StoreManager.__init__(self) self.logger = logging.getLogger("{}.{}".format(__name__, self.__class__)) self.timepoint = None self.counts_file = None self.report_filtered = None self._filters = dict() self.filter_stats = dict() self.default_filters = dict() self.default_filters.update({"min quality": 0}) self.default_filters.update({"max N": sys.maxsize}) self.default_filters.update({"avg quality": 0}) self.default_filters.update({"chastity": False}) @property def filters(self): return self._filters @filters.setter def filters(self, config_filters): """ Set up the filters dictionary using the options selected in *config_filters*, filling in missing entries with defaults. """ self._filters.clear() self._filters.update(self.default_filters) unused = list() for key in config_filters: if key in self._filters: if config_filters[key] is not None: self._filters[key] = config_filters[key] else: unused.append(key) if len(unused) > 0: self.logger.warning( "Unused filter parameters ({})" "".format(", ".join(unused)) ) self.filter_stats.clear() for key in self._filters: self.filter_stats[key] = 0 self.filter_stats["total"] = 0 def serialize_filters(self): """ Return a dictionary of filtering options that have non-default values. """ cfg = dict() for key in self.filters.keys(): if self.filters[key] != self.default_filters[key]: cfg[key] = self.filters[key] return cfg def _children(self): """ These objects have no children. Returns ``None``. """ return None def add_child(self, child): """ No children, raises an AttributeError. """ raise AttributeError("SeqLib objects do not support adding children") def remove_child_id(self, tree_id): """ No children, raises an AttributeError. """ raise AttributeError("SeqLib objects do not support removing children") def validate(self): """ Validates paramaters for a configured SeqLib. Currently does nothing. """ pass def has_wt_sequence(self): """ Returns whether or not the object has a wild type sequence. Returns ``False`` unless overloaded by a derived class (such as :py:class:`~seqlib.seqlib.VariantSeqLib`). """ return False def configure(self, cfg): """ Set up the object using the config object *cfg*, usually derived from a ``.json`` file. 
""" StoreManager.configure(self, cfg) self.logger = logging.getLogger( "{}.{} - {}".format(__name__, self.__class__.__name__, self.name) ) try: self.timepoint = int(cfg["timepoint"]) if "report filtered reads" in cfg: self.report_filtered = cfg["report filtered reads"] else: self.report_filtered = False if "counts file" in cfg: self.counts_file = cfg["counts file"] else: self.counts_file = None except KeyError as key: raise KeyError( "Missing required config value {key}" "".format(key=key), self.name ) except ValueError as value: raise ValueError( "Invalid parameter value {value}" "".format(value=value), self.name ) def serialize(self): """ Format this object (and its children) as a config object suitable for dumping to a config file. """ cfg = StoreManager.serialize(self) cfg["timepoint"] = self.timepoint cfg["report filtered reads"] = self.report_filtered if self.counts_file is not None: cfg["counts file"] = self.counts_file return cfg def calculate(self): """ Pure virtual method that defines how the data are counted. """ raise NotImplementedError("must be implemented by subclass") def report_filtered_read(self, fq, filter_flags): """ Write the :py:class:`~fqread.FQRead` object *fq* to the ``DEBUG`` log. The dictionary *filter_flags* contains ``True`` values for each filtering option that applies to *fq*. Keys in *filter_flags* are converted to messages using the ``SeqLib.filter_messages`` dictionary. """ self.logger.debug( "Filtered read ({messages})\n{read!s}".format( messages=", ".join( SeqLib.filter_messages[x] for x in filter_flags if filter_flags[x] ), name=self.name, read=fq, ) ) def save_counts(self, label, df_dict, raw): """ Convert the counts in the dictionary *df_dict* into a DataFrame object and save it to the data store. If *raw* is ``True``, the counts are stored under ``"/raw/label/counts"``; else ``"/main/label/counts"``. """ if len(df_dict.keys()) == 0: raise ValueError("Failed to count {} [{}]".format(label, self.name)) df = pd.DataFrame.from_dict(df_dict, orient="index", dtype=np.int32) df.columns = ["count"] df.sort_values("count", ascending=False, inplace=True) self.logger.info( "Counted {n} {label} ({u} unique)".format( n=df["count"].sum(), u=len(df.index), label=label ) ) if raw: key = "/raw/{}/counts".format(label) else: key = "/main/{}/counts".format(label) self.store.put(key, df, format="table", data_columns=df.columns) del df def save_filtered_counts(self, label, query): """ Filter the counts in ``"/raw/label/counts"`` using the *query* string and store the result in ``"/main/label/counts"`` For more information on building query strings, see http://pandas.pydata.org/pandas-docs/stable/io.html#querying-a-table """ self.logger.info("Converting raw {} counts to main counts".format(label)) raw_table = "/raw/{}/counts".format(label) main_table = "/main/{}/counts".format(label) self.map_table(source=raw_table, destination=main_table, source_query=query) self.logger.info( "Counted {n} {label} ({u} unique) after query".format( n=self.store[main_table]["count"].sum(), u=len(self.store[main_table].index), label=label, ) ) def report_filter_stats(self): """ Create report file for the number of filtered reads. The report file is located in the output directory, named ``SeqLibName.filter.txt``. It contains the number of reads filtered for each category, plus the total number filtered. .. note:: Reads are checked for all quality-based criteria before \ filtering. 
""" with open( os.path.join(self.output_dir, fix_filename(self.name) + ".filter.txt"), "w" ) as handle: for key in sorted( self.filter_stats, key=self.filter_stats.__getitem__, reverse=True ): if key != "total" and self.filter_stats[key] > 0: print( SeqLib.filter_messages[key], self.filter_stats[key], sep="\t", file=handle, ) print("total", self.filter_stats["total"], sep="\t", file=handle) self.logger.info("Wrote filtering statistics") def save_filter_stats(self): """ Save a DataFrame containing the number of filtered reads under ``'/raw/filter'``. This DataFrame contains the same information as ``report_filter_stats`` """ df = pd.DataFrame(index=SeqLib.filter_messages.values(), columns=["count"]) for key in self.filter_stats.keys(): if self.filter_stats[key] > 0 or key == "total": df.loc[SeqLib.filter_messages[key], "count"] = self.filter_stats[key] df.dropna(inplace=True) self.store.put( "/raw/filter", df.astype(int), format="table", data_columns=df.columns ) def read_quality_filter(self, fq): """ Check the quality of the FQRead object *fq*. Checks ``'chastity'``, ``'min quality'``, ``'avg quality'``, ``'max N'``, and ``'remove unresolvable'``. Counts failed reads for later output and reports the filtered read if desired. Returns ``True`` if the read passes all filters, else ``False``. """ filter_flags = dict() for key in self.filters: filter_flags[key] = False if self.filters["chastity"]: if not fq.is_chaste(): self.filter_stats["chastity"] += 1 filter_flags["chastity"] = True if self.filters["min quality"] > 0: if fq.min_quality() < self.filters["min quality"]: self.filter_stats["min quality"] += 1 filter_flags["min quality"] = True if self.filters["avg quality"] > 0: if fq.mean_quality() < self.filters["avg quality"]: self.filter_stats["avg quality"] += 1 filter_flags["avg quality"] = True if self.filters["max N"] >= 0: if fq.sequence.upper().count("N") > self.filters["max N"]: self.filter_stats["max N"] += 1 filter_flags["max N"] = True if "remove unresolvable" in self.filters: # OverlapSeqLib only if self.filters["remove unresolvable"]: if "X" in fq.sequence: self.filter_stats["remove unresolvable"] += 1 filter_flags["remove unresolvable"] = True # update total and report if failed if any(filter_flags.values()): self.filter_stats["total"] += 1 if self.report_filtered: self.report_filtered_read(fq, filter_flags) return False else: return True def make_plots(self): """ Make plots that are shared by all :py:class:`~seqlib.seqlib.SeqLib` objects. Creates counts histograms for all labels. """ if self.plots_requested: self.logger.info("Creating plots") pdf = PdfPages(os.path.join(self.plot_dir, "counts.pdf")) for label in self.labels: counts_plot(self, label, pdf, log=True) counts_plot(self, label, pdf, log=False) pdf.close() def write_tsv(self): """ Write each table from the store to its own tab-separated file. Files are written to a ``tsv`` directory in the default output location. File names are the HDF5 key with ``'_'`` substituted for ``'/'``. """ if self.tsv_requested: self.logger.info("Generating tab-separated output files") for k in self.store.keys(): self.write_table_tsv(k) def counts_from_file_h5(self, fname): """ If an HDF store containing raw counts has been specified, open the store, copy those counts into this store, and close the counts store. Copies all tables in the ``'/raw'`` group along with their metadata. 
""" store = pd.HDFStore(fname) self.logger.info( "Using existing HDF5 data store '{}' for raw data" "".format(fname) ) # this could probably be much more efficient, but the PyTables docs # don't explain copying subsets of files adequately raw_keys = [key for key in store.keys() if key.startswith("/raw/")] if len(raw_keys) == 0: raise ValueError( "No raw counts found in '{}' [{}]" "".format(fname, self.name) ) else: for k in raw_keys: # copy the data table raw = store[k] self.store.put(k, raw, format="table", data_columns=raw.columns) # copy the metadata self.set_metadata(k, self.get_metadata(k, store=store), update=False) self.logger.info("Copied raw data '{}'".format(k)) store.close() def counts_from_file_tsv(self, fname): """ If a counts file in tsv format has been specified, read the counts into a new dataframe and save as raw counts. """ df = pd.read_table(fname, sep="\t", header=0, index_col=0) if df.columns != ["count"]: raise ValueError( "Invalid column names for counts file [{}]" "".format(self.name) ) if len(df) == 0: raise ValueError("Empty counts file [{}]".format(self.name)) label = None for elem in ELEMENT_LABELS: if elem in self.labels: label = elem break if label is None: raise ValueError("No valid element labels [{}]".format(self.name)) key = "/raw/{}/counts".format(label) self.store.put(key, df, format="table", data_columns=df.columns, dtype=np.int32) def counts_from_file(self, fname): """Get raw counts from a counts file instead of FASTQ_ file. The ``'/raw/<element>/counts'`` table will be populated using the given input file. The input file should be a two-column file readable by ``pandas`` as a series or two-column dataframe or an Enrich2 HDF5 file. If the input file is a two-column file, the index will be checked using the SeqLib's ``validate_index()`` method. If the input file is an HDF5 file, the entire set of ``'/raw'`` tables will be copied over, with the metadata intact. """ if not os.path.exists(fname): raise IOError("Counts file '{}' not found [{}]" "".format(fname, self.name)) elif os.path.splitext(fname)[-1].lower() in (".h5"): self.counts_from_file_h5(self.counts_file) elif os.path.splitext(fname)[-1].lower() in (".txt", ".tsv", ".csv"): self.counts_from_file_tsv(self.counts_file) else: raise ValueError( "Unrecognized counts file extension for '{}' " "[{}]".format(fname, self.name) )
bsd-3-clause
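A toy sketch of the per-read bookkeeping that SeqLib.read_quality_filter implements: each filter category gets a boolean flag, per-category counters accumulate, and a read that fails anything increments the total exactly once. The names and thresholds here are illustrative, not the Enrich2 API.

filters = {"min quality": 20, "max N": 2, "chastity": False}
filter_stats = {key: 0 for key in filters}
filter_stats["total"] = 0

def check_read(mean_quality, n_count, chaste=True):
    flags = {
        "min quality": mean_quality < filters["min quality"],
        "max N": n_count > filters["max N"],
        "chastity": filters["chastity"] and not chaste,
    }
    for key, failed in flags.items():
        filter_stats[key] += int(failed)
    if any(flags.values()):
        filter_stats["total"] += 1     # counted once, however many filters failed
        return False
    return True

check_read(mean_quality=30, n_count=0)   # passes
check_read(mean_quality=10, n_count=5)   # fails two filters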
q1ang/scikit-learn
examples/preprocessing/plot_function_transformer.py
161
1949
""" ========================================================= Using FunctionTransformer to select columns ========================================================= Shows how to use a function transformer in a pipeline. If you know your dataset's first principle component is irrelevant for a classification task, you can use the FunctionTransformer to select all but the first column of the PCA transformed data. """ import matplotlib.pyplot as plt import numpy as np from sklearn.cross_validation import train_test_split from sklearn.decomposition import PCA from sklearn.pipeline import make_pipeline from sklearn.preprocessing import FunctionTransformer def _generate_vector(shift=0.5, noise=15): return np.arange(1000) + (np.random.rand(1000) - shift) * noise def generate_dataset(): """ This dataset is two lines with a slope ~ 1, where one has a y offset of ~100 """ return np.vstack(( np.vstack(( _generate_vector(), _generate_vector() + 100, )).T, np.vstack(( _generate_vector(), _generate_vector(), )).T, )), np.hstack((np.zeros(1000), np.ones(1000))) def all_but_first_column(X): return X[:, 1:] def drop_first_component(X, y): """ Create a pipeline with PCA and the column selector and use it to transform the dataset. """ pipeline = make_pipeline( PCA(), FunctionTransformer(all_but_first_column), ) X_train, X_test, y_train, y_test = train_test_split(X, y) pipeline.fit(X_train, y_train) return pipeline.transform(X_test), y_test if __name__ == '__main__': X, y = generate_dataset() plt.scatter(X[:, 0], X[:, 1], c=y, s=50) plt.show() X_transformed, y_transformed = drop_first_component(*generate_dataset()) plt.scatter( X_transformed[:, 0], np.zeros(len(X_transformed)), c=y_transformed, s=50, ) plt.show()
bsd-3-clause
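The same column-dropping pipeline written against a current scikit-learn is sketched below (train_test_split now lives in sklearn.model_selection; FunctionTransformer itself is unchanged). The random toy data replaces the example's two-line dataset.

import numpy as np
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer

def all_but_first_column(X):
    return X[:, 1:]

X = np.random.RandomState(0).normal(size=(100, 3))
pipeline = make_pipeline(PCA(), FunctionTransformer(all_but_first_column))
X_reduced = pipeline.fit_transform(X)
print(X_reduced.shape)   # (100, 2): first principal component dropped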
khkaminska/scikit-learn
examples/ensemble/plot_forest_importances.py
241
1761
""" ========================================= Feature importances with forests of trees ========================================= This examples shows the use of forests of trees to evaluate the importance of features on an artificial classification task. The red bars are the feature importances of the forest, along with their inter-trees variability. As expected, the plot suggests that 3 features are informative, while the remaining are not. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn.datasets import make_classification from sklearn.ensemble import ExtraTreesClassifier # Build a classification task using 3 informative features X, y = make_classification(n_samples=1000, n_features=10, n_informative=3, n_redundant=0, n_repeated=0, n_classes=2, random_state=0, shuffle=False) # Build a forest and compute the feature importances forest = ExtraTreesClassifier(n_estimators=250, random_state=0) forest.fit(X, y) importances = forest.feature_importances_ std = np.std([tree.feature_importances_ for tree in forest.estimators_], axis=0) indices = np.argsort(importances)[::-1] # Print the feature ranking print("Feature ranking:") for f in range(10): print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]])) # Plot the feature importances of the forest plt.figure() plt.title("Feature importances") plt.bar(range(10), importances[indices], color="r", yerr=std[indices], align="center") plt.xticks(range(10), indices) plt.xlim([-1, 10]) plt.show()
bsd-3-clause
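Impurity-based importances like those plotted above can favour high-cardinality features; newer scikit-learn releases also ship a model-agnostic alternative. A hedged sketch using permutation importance on the same kind of synthetic task (sample sizes and n_repeats are arbitrary):

from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.inspection import permutation_importance

X, y = make_classification(n_samples=500, n_features=10, n_informative=3,
                           random_state=0, shuffle=False)
forest = ExtraTreesClassifier(n_estimators=100, random_state=0).fit(X, y)
result = permutation_importance(forest, X, y, n_repeats=5, random_state=0)
for i in result.importances_mean.argsort()[::-1][:3]:
    print("feature %d: %.3f" % (i, result.importances_mean[i]))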
ndingwall/scikit-learn
sklearn/metrics/_plot/tests/test_plot_det_curve.py
11
2224
import pytest import numpy as np from numpy.testing import assert_allclose from sklearn.datasets import load_iris from sklearn.linear_model import LogisticRegression from sklearn.metrics import det_curve from sklearn.metrics import plot_det_curve @pytest.fixture(scope="module") def data(): return load_iris(return_X_y=True) @pytest.fixture(scope="module") def data_binary(data): X, y = data return X[y < 2], y[y < 2] @pytest.mark.parametrize( "response_method", ["predict_proba", "decision_function"] ) @pytest.mark.parametrize("with_sample_weight", [True, False]) @pytest.mark.parametrize("with_strings", [True, False]) def test_plot_det_curve( pyplot, response_method, data_binary, with_sample_weight, with_strings ): X, y = data_binary pos_label = None if with_strings: y = np.array(["c", "b"])[y] pos_label = "c" if with_sample_weight: rng = np.random.RandomState(42) sample_weight = rng.randint(1, 4, size=(X.shape[0])) else: sample_weight = None lr = LogisticRegression() lr.fit(X, y) viz = plot_det_curve( lr, X, y, alpha=0.8, sample_weight=sample_weight, ) y_pred = getattr(lr, response_method)(X) if y_pred.ndim == 2: y_pred = y_pred[:, 1] fpr, fnr, _ = det_curve( y, y_pred, sample_weight=sample_weight, pos_label=pos_label, ) assert_allclose(viz.fpr, fpr) assert_allclose(viz.fnr, fnr) assert viz.estimator_name == "LogisticRegression" # cannot fail thanks to pyplot fixture import matplotlib as mpl # noqal assert isinstance(viz.line_, mpl.lines.Line2D) assert viz.line_.get_alpha() == 0.8 assert isinstance(viz.ax_, mpl.axes.Axes) assert isinstance(viz.figure_, mpl.figure.Figure) assert viz.line_.get_label() == "LogisticRegression" expected_pos_label = 1 if pos_label is None else pos_label expected_ylabel = ( f"False Negative Rate (Positive label: {expected_pos_label})" ) expected_xlabel = ( f"False Positive Rate (Positive label: {expected_pos_label})" ) assert viz.ax_.get_ylabel() == expected_ylabel assert viz.ax_.get_xlabel() == expected_xlabel
bsd-3-clause
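A minimal sketch of the function exercised by these tests: det_curve sweeps the decision threshold and returns matched false positive and false negative rate arrays.

from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import det_curve

X, y = load_iris(return_X_y=True)
X, y = X[y < 2], y[y < 2]                     # binary subset, as in the fixture
scores = LogisticRegression().fit(X, y).decision_function(X)
fpr, fnr, thresholds = det_curve(y, scores)
print(fpr.shape == fnr.shape == thresholds.shape)   # True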
bloyl/mne-python
tutorials/inverse/30_mne_dspm_loreta.py
3
5666
""" .. _tut-inverse-methods: Source localization with MNE/dSPM/sLORETA/eLORETA ================================================= The aim of this tutorial is to teach you how to compute and apply a linear minimum-norm inverse method on evoked/raw/epochs data. """ import os.path as op import numpy as np import matplotlib.pyplot as plt import mne from mne.datasets import sample from mne.minimum_norm import make_inverse_operator, apply_inverse ############################################################################### # Process MEG data data_path = sample.data_path() raw_fname = op.join(data_path, 'MEG', 'sample', 'sample_audvis_filt-0-40_raw.fif') raw = mne.io.read_raw_fif(raw_fname) # already has an average reference events = mne.find_events(raw, stim_channel='STI 014') event_id = dict(aud_l=1) # event trigger and conditions tmin = -0.2 # start of each epoch (200ms before the trigger) tmax = 0.5 # end of each epoch (500ms after the trigger) raw.info['bads'] = ['MEG 2443', 'EEG 053'] baseline = (None, 0) # means from the first instant to t = 0 reject = dict(grad=4000e-13, mag=4e-12, eog=150e-6) epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True, picks=('meg', 'eog'), baseline=baseline, reject=reject) ############################################################################### # Compute regularized noise covariance # ------------------------------------ # For more details see :ref:`tut-compute-covariance`. noise_cov = mne.compute_covariance( epochs, tmax=0., method=['shrunk', 'empirical'], rank=None, verbose=True) fig_cov, fig_spectra = mne.viz.plot_cov(noise_cov, raw.info) ############################################################################### # Compute the evoked response # --------------------------- # Let's just use the MEG channels for simplicity. evoked = epochs.average().pick('meg') evoked.plot(time_unit='s') evoked.plot_topomap(times=np.linspace(0.05, 0.15, 5), ch_type='mag', time_unit='s') ############################################################################### # It's also a good idea to look at whitened data: evoked.plot_white(noise_cov, time_unit='s') del epochs, raw # to save memory ############################################################################### # Inverse modeling: MNE/dSPM on evoked and raw data # ------------------------------------------------- # Here we first read the forward solution. You will likely need to compute # one for your own data -- see :ref:`tut-forward` for information on how # to do it. fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-oct-6-fwd.fif' fwd = mne.read_forward_solution(fname_fwd) ############################################################################### # Next, we make an MEG inverse operator. inverse_operator = make_inverse_operator( evoked.info, fwd, noise_cov, loose=0.2, depth=0.8) del fwd # You can write it to disk with:: # # >>> from mne.minimum_norm import write_inverse_operator # >>> write_inverse_operator('sample_audvis-meg-oct-6-inv.fif', # inverse_operator) ############################################################################### # Compute inverse solution # ------------------------ # We can use this to compute the inverse solution and obtain source time # courses: method = "dSPM" snr = 3. lambda2 = 1. 
/ snr ** 2 stc, residual = apply_inverse(evoked, inverse_operator, lambda2, method=method, pick_ori=None, return_residual=True, verbose=True) ############################################################################### # Visualization # ------------- # We can look at different dipole activations: fig, ax = plt.subplots() ax.plot(1e3 * stc.times, stc.data[::100, :].T) ax.set(xlabel='time (ms)', ylabel='%s value' % method) ############################################################################### # Examine the original data and the residual after fitting: fig, axes = plt.subplots(2, 1) evoked.plot(axes=axes) for ax in axes: ax.texts = [] for line in ax.lines: line.set_color('#98df81') residual.plot(axes=axes) ############################################################################### # Here we use peak getter to move visualization to the time point of the peak # and draw a marker at the maximum peak vertex. # sphinx_gallery_thumbnail_number = 9 vertno_max, time_max = stc.get_peak(hemi='rh') subjects_dir = data_path + '/subjects' surfer_kwargs = dict( hemi='rh', subjects_dir=subjects_dir, clim=dict(kind='value', lims=[8, 12, 15]), views='lateral', initial_time=time_max, time_unit='s', size=(800, 800), smoothing_steps=10) brain = stc.plot(**surfer_kwargs) brain.add_foci(vertno_max, coords_as_verts=True, hemi='rh', color='blue', scale_factor=0.6, alpha=0.5) brain.add_text(0.1, 0.9, 'dSPM (plus location of maximal activation)', 'title', font_size=14) # The documentation website's movie is generated with: # brain.save_movie(..., tmin=0.05, tmax=0.15, interpolation='linear', # time_dilation=20, framerate=10, time_viewer=True) ############################################################################### # There are many other ways to visualize and work with source data, see # for example: # # - :ref:`tut-viz-stcs` # - :ref:`ex-morph-surface` # - :ref:`ex-morph-volume` # - :ref:`ex-vector-mne-solution` # - :ref:`tut-dipole-orientations` # - :ref:`tut-mne-fixed-free` # - :ref:`examples using apply_inverse # <sphx_glr_backreferences_mne.minimum_norm.apply_inverse>`.
bsd-3-clause
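The only free parameter in the inverse call above is the regularisation, tied to an assumed SNR of the evoked data. A one-line reminder of the relation (snr = 3 is the conventional choice for averaged evoked responses; snr = 1 is typically used for raw or single-epoch data):

snr = 3.0
lambda2 = 1.0 / snr ** 2   # = 0.111..., the value passed to apply_inverse above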
bderembl/mitgcm_configs
eddy_airsea/analysis/ode_wave.py
1
1112
#!/usr/bin/env python

import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate

plt.ion()

f0 = 1e-4
u0 = 1.0
R0 = 40e3  # radius
vmax = -1.0  # m/s


def v1(rr):
    v = -vmax*rr/R0*np.exp(-0.5*(rr/R0)**2)
    # v = -vmax*np.tanh(rr/R0)/(np.cosh(rr/R0))**2/(np.tanh(1.0)/(np.cosh(1.0))**2)
    return v


def dv1(rr):
    v = -vmax/R0*np.exp(-0.5*(rr/R0)**2)*(1-(rr/R0)**2)
    # v = -vmax*2/R0*np.tanh(rr/R0)/((np.cosh(rr/R0))**2)*(1/(np.cosh(rr/R0))**2 - (np.tanh(rr/R0))**2)/(np.tanh(1.0)/(np.cosh(1.0))**2)
    return v


def f(r, t):
    omega = np.sqrt((dv1(r)+v1(r)/r + f0)*(2*v1(r)/r + f0))
    return u0*np.sin(omega*t)


si_r = 30
si_t = 30000

r0 = np.linspace(1, 5*R0, si_r)
t = np.linspace(0, si_t/f0/1000, si_t)

ra = np.zeros((si_t, si_r))
for ni in range(0, si_r):
    ra[:, ni] = integrate.odeint(f, r0[ni], t).squeeze()

plt.figure()
plt.plot(t*f0/(2*np.pi), ra/R0, 'k', linewidth=1)
plt.xlabel(r'$tf/2\pi$')
plt.ylabel(r'$r_p/R_0$')
plt.xlim([np.min(t*f0/(2*np.pi)), np.max(t*f0/(2*np.pi))])
plt.ylim([np.min(ra/R0), 1.05*np.max(ra/R0)])
plt.savefig("ode_k0.pdf", bbox_inches='tight')
mit
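The integrand above encodes the local oscillation frequency of a parcel displaced in a vortex with azimuthal velocity v(r), omega**2 = (dv/dr + v/r + f)(2*v/r + f). A small sketch checking the limiting case: when the vortex velocity vanishes, the frequency reduces to the inertial frequency f (the numbers below are illustrative, not taken from the configuration).

import numpy as np

f0 = 1e-4   # Coriolis parameter, 1/s

def omega(r, v, dvdr, f=f0):
    return np.sqrt((dvdr + v / r + f) * (2.0 * v / r + f))

print(np.isclose(omega(r=1e4, v=0.0, dvdr=0.0), f0))   # True: pure inertial oscillation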
arjoly/scikit-learn
examples/gaussian_process/plot_gpc_iris.py
81
2231
""" ===================================================== Gaussian process classification (GPC) on iris dataset ===================================================== This example illustrates the predicted probability of GPC for an isotropic and anisotropic RBF kernel on a two-dimensional version for the iris-dataset. The anisotropic RBF kernel obtains slightly higher log-marginal-likelihood by assigning different length-scales to the two feature dimensions. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import datasets from sklearn.gaussian_process import GaussianProcessClassifier from sklearn.gaussian_process.kernels import RBF # import some data to play with iris = datasets.load_iris() X = iris.data[:, :2] # we only take the first two features. y = np.array(iris.target, dtype=int) h = .02 # step size in the mesh kernel = 1.0 * RBF([1.0]) gpc_rbf_isotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y) kernel = 1.0 * RBF([1.0, 1.0]) gpc_rbf_anisotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y) # create a mesh to plot in x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) titles = ["Isotropic RBF", "Anisotropic RBF"] plt.figure(figsize=(10, 5)) for i, clf in enumerate((gpc_rbf_isotropic, gpc_rbf_anisotropic)): # Plot the predicted probabilities. For that, we will assign a color to # each point in the mesh [x_min, m_max]x[y_min, y_max]. plt.subplot(1, 2, i + 1) Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape((xx.shape[0], xx.shape[1], 3)) plt.imshow(Z, extent=(x_min, x_max, y_min, y_max), origin="lower") # Plot also the training points plt.scatter(X[:, 0], X[:, 1], c=np.array(["r", "g", "b"])[y]) plt.xlabel('Sepal length') plt.ylabel('Sepal width') plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.xticks(()) plt.yticks(()) plt.title("%s, LML: %.3f" % (titles[i], clf.log_marginal_likelihood(clf.kernel_.theta))) plt.tight_layout() plt.show()
bsd-3-clause
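A compact sketch of the estimator used above: a Gaussian process classifier with an RBF kernel whose length-scales are optimised during fit. Passing a single length-scale gives the isotropic kernel; a list with one entry per feature gives the anisotropic variant compared in the example.

from sklearn.datasets import load_iris
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF

X, y = load_iris(return_X_y=True)
gpc = GaussianProcessClassifier(kernel=1.0 * RBF(length_scale=1.0))
gpc.fit(X[:, :2], y)                      # first two features, as in the example
print(gpc.kernel_)                        # fitted kernel with learned length-scale
print(gpc.predict_proba(X[:2, :2]))       # per-class probabilities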
LinErinG/foxsi-smex
pyfoxsi/src/pyfoxsi/response/response.py
4
8272
""" Response is a module to handle the response of the FOXSI telescopes """ from __future__ import absolute_import import pandas as pd import numpy as np import warnings import os import matplotlib.pyplot as plt import astropy.units as u from scipy import interpolate import pyfoxsi import h5py __all__ = ['Response', 'Material'] class Response(object): """An object which provides the FOXSI telescope response Parameters ---------- shutter_state : int, default 0 A number representing the state of the shutter (0 - no shutter, 1 - thin shutter, 2 - thick shutter) configuration : int, default 1 Choose the optics configuration 1 : 15 meters 2 : 10 meters 3 modules 3 : 10 meters 2 modules Examples -------- >>> from pyfoxsi.response import Response >>> resp = Response() >>> resp1 = Response(shutter_state=1) """ def __init__(self, shutter_state=0, configuration=1): path = os.path.dirname(pyfoxsi.__file__) for i in np.arange(3): path = os.path.dirname(path) path = os.path.join(path, 'data/') filename = 'effective_area_per_module.csv' effarea_file = os.path.join(path, filename) optics_effective_area = pd.read_csv(effarea_file, index_col=0, skiprows=4) optics_effective_area = optics_effective_area[optics_effective_area.columns[configuration-1]] if configuration == 1: pyfoxsi.focal_length = 15 * u.m pyfoxsi.number_of_telescopes = 3 elif configuration == 2: pyfoxsi.focal_length = 10 * u.m pyfoxsi.number_of_telescopes = 3 elif configuration == 3: pyfoxsi.focal_length = 10 * u.m pyfoxsi.number_of_telescopes = 2 self.optics_effective_area = pd.DataFrame(dict(total=optics_effective_area.copy(), module=optics_effective_area.copy())) # find what shells are missing #shell_numbers = np.array(self._eff_area_per_shell.columns, np.uint) #missing_shells = np.setdiff1d(shell_numbers, pyfoxsi.shell_ids) # remove the missing shells self.__number_of_telescopes = 1 #for missing_shell in missing_shells: # self._eff_area_per_shell.drop(str(missing_shell), 1, inplace=True) # now add the effective area of all of the shells together #self.optics_effective_area = pd.DataFrame({'module': self._eff_area_per_shell.sum(axis=1), 'total': self._eff_area_per_shell.sum(axis=1)}) self.effective_area = pd.DataFrame(dict(total=self.optics_effective_area['total'].copy(), module=self.optics_effective_area['module'].copy())) self.number_of_telescopes = pyfoxsi.number_of_telescopes self._set_default_optical_path() if shutter_state > 0: self.__optical_path.append(Material('al', pyfoxsi.shutters_thickness[shutter_state])) self.__shutter_state = shutter_state self._add_optical_path_to_effective_area() def plot(self, axes=None): """Plot the effective area""" if axes is None: axes = plt.gca() a = self.effective_area.plot(axes=axes) axes.set_title(pyfoxsi.mission_title + ' ' + str(self.number_of_telescopes) + 'x ' + 'Shutter State ' + str(self.shutter_state)) axes.set_ylabel('Effective area [cm$^2$]') axes.set_xlabel('Energy [keV]') def _set_default_optical_path(self): self.__optical_path = [Material('mylar', pyfoxsi.blanket_thickness), Material(pyfoxsi.detector_material, pyfoxsi.detector_thickness)] @property def number_of_telescopes(self): """The total number of telescope modules""" return self.__number_of_telescopes @number_of_telescopes.setter def number_of_telescopes(self, x): self.optics_effective_area['total'] = self.optics_effective_area['total'] / self.__number_of_telescopes * x self.__number_of_telescopes = x @property def optical_path(self): """The materials in the optical path including the detector""" return self.__optical_path 
@optical_path.setter def optical_path(self, x): self.optical_path = x self._add_optical_path_to_effective_area() @property def shutter_state(self): """The shutter state, allowed values are 0, 1, 2""" return self.__shutter_state @shutter_state.setter def shutter_state(self, x): raise AttributeError('Cannot change shutter state. Create new object with desired shutter state') def _add_optical_path_to_effective_area(self): """Add the effect of the optical path to the effective area""" energies = np.array(self.optics_effective_area.index) # Remove 10% of flux due to spiders factor = np.ones_like(energies) * 0.9 # Apply all of the materials in the optical path to factor for material in self.optical_path: print(material.name) if material.name == pyfoxsi.detector_material: # if it is the detector than we want the absorption factor *= material.absorption(energies) else: factor *= material.transmission(energies) self.effective_area['factor'] = factor self.effective_area['total'] = factor * self.optics_effective_area['total'] self.effective_area['module'] = factor * self.optics_effective_area['module'] class Material(object): """An object which provides the optical properties of a material in x-rays Parameters ---------- material : str A string representing a material (e.g. cdte, be, mylar, si) thickness : `astropy.units.Quantity` The thickness of the material in the optical path. Examples -------- >>> from pyfoxsi.response import Material >>> import astropy.units as u >>> detector = Material('cdte', 500 * u.um) >>> thermal_blankets = Material('mylar', 0.5 * u.mm) """ def __init__(self, material, thickness): self.name = material self.thickness = thickness path = os.path.dirname(pyfoxsi.__file__) for i in np.arange(3): path = os.path.dirname(path) path = os.path.join(path, 'data/') filename = 'mass_attenuation_coefficient.hdf5' data_file = os.path.join(path, filename) h = h5py.File(data_file, 'r') data = h[self.name] self._source_data = data self.density = u.Quantity(self._source_data.attrs['density'], self._source_data.attrs['density unit']) data_energy_kev = np.log10(self._source_data[0,:] * 1000) data_attenuation_coeff = np.log10(self._source_data[1,:]) self._f = interpolate.interp1d(data_energy_kev, data_attenuation_coeff, bounds_error=False, fill_value=0.0) self._mass_attenuation_coefficient_func = lambda x: 10 ** self._f(np.log10(x)) def __repr__(self): """Returns a human-readable representation.""" return '<Material ' + str(self.name) + ' ' + str(self.thickness) + '>' def transmission(self, energy): """Provide the transmission fraction (0 to 1). Parameters ---------- energy : `astropy.units.Quantity` An array of energies in keV """ coefficients = self._mass_attenuation_coefficient_func(energy) * u.cm ** 2 / u.gram transmission = np.exp(- coefficients * self.density * self.thickness) return transmission def absorption(self, energy): """Provides the absorption fraction (0 to 1). Parameters ---------- energy : `astropy.units.Quantity` An array of energies in keV. """ return 1 - self.transmission(energy) def plot(self, axes=None): if axes is None: axes = plt.gca() energies = np.arange(1, 60) axes.plot(energies, self.transmission(energies), label='Transmission') axes.plot(energies, self.absorption(energies), label='Absorption') axes.set_ylim(0, 1.2) axes.legend() axes.set_title(self.name + ' ' + str(self.thickness)) axes.set_xlabel('Energy [keV]')
mit
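The Material class in the preceding record computes X-ray transmission from a tabulated mass attenuation coefficient via the Beer-Lambert law. A minimal standalone sketch of that calculation follows, assuming a few approximate aluminium attenuation values typed in by hand instead of the package's HDF5 tables; the numbers are illustrative only.

import numpy as np
import astropy.units as u

# Approximate mass attenuation coefficients for aluminium at a few energies
# (illustrative values; a real calculation would read them from a table).
energies_kev = np.array([5.0, 10.0, 20.0, 30.0])
mu_over_rho = np.array([193.4, 26.23, 3.442, 1.128]) * u.cm**2 / u.g
density = 2.70 * u.g / u.cm**3
thickness = 100 * u.um

# Beer-Lambert law: T = exp(-(mu/rho) * rho * x), A = 1 - T
transmission = np.exp(-(mu_over_rho * density * thickness).decompose())
absorption = 1 - transmission
print(transmission)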
qtproject/pyside-pyside
doc/inheritance_diagram.py
10
12497
# -*- coding: utf-8 -*- r""" sphinx.ext.inheritance_diagram ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Defines a docutils directive for inserting inheritance diagrams. Provide the directive with one or more classes or modules (separated by whitespace). For modules, all of the classes in that module will be used. Example:: Given the following classes: class A: pass class B(A): pass class C(A): pass class D(B, C): pass class E(B): pass .. inheritance-diagram: D E Produces a graph like the following: A / \ B C / \ / E D The graph is inserted as a PNG+image map into HTML and a PDF in LaTeX. :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. :copyright: Copyright 2010-2011 by the PySide team. :license: BSD, see LICENSE for details. """ import os import re import sys import inspect try: from hashlib import md5 except ImportError: from md5 import md5 from docutils import nodes from docutils.parsers.rst import directives from sphinx.ext.graphviz import render_dot_html, render_dot_latex from sphinx.util.compat import Directive class_sig_re = re.compile(r'''^([\w.]*\.)? # module names (\w+) \s* $ # class/final module name ''', re.VERBOSE) class InheritanceException(Exception): pass class InheritanceGraph(object): """ Given a list of classes, determines the set of classes that they inherit from all the way to the root "object", and then is able to generate a graphviz dot graph from them. """ def __init__(self, class_names, currmodule, show_builtins=False, parts=0): """ *class_names* is a list of child classes to show bases from. If *show_builtins* is True, then Python builtins will be shown in the graph. """ self.class_names = class_names classes = self._import_classes(class_names, currmodule) self.class_info = self._class_info(classes, show_builtins, parts) if not self.class_info: raise InheritanceException('No classes found for ' 'inheritance diagram') def _import_class_or_module(self, name, currmodule): """ Import a class using its fully-qualified *name*. """ try: path, base = class_sig_re.match(name).groups() except (AttributeError, ValueError): raise InheritanceException('Invalid class or module %r specified ' 'for inheritance diagram' % name) fullname = (path or '') + base path = (path and path.rstrip('.') or '') # two possibilities: either it is a module, then import it try: __import__(fullname) todoc = sys.modules[fullname] except ImportError: # else it is a class, then import the module if not path: if currmodule: # try the current module path = currmodule else: raise InheritanceException( 'Could not import class %r specified for ' 'inheritance diagram' % base) try: __import__(path) todoc = getattr(sys.modules[path], base) except (ImportError, AttributeError): raise InheritanceException( 'Could not import class or module %r specified for ' 'inheritance diagram' % (path + '.' + base)) # If a class, just return it if inspect.isclass(todoc): return [todoc] elif inspect.ismodule(todoc): classes = [] for cls in todoc.__dict__.values(): if inspect.isclass(cls) and cls.__module__ == todoc.__name__: classes.append(cls) return classes raise InheritanceException('%r specified for inheritance diagram is ' 'not a class or module' % name) def _import_classes(self, class_names, currmodule): """Import a list of classes.""" classes = [] for name in class_names: classes.extend(self._import_class_or_module(name, currmodule)) return classes def _class_info(self, classes, show_builtins, parts): """Return name and bases for all classes that are ancestors of *classes*. 
*parts* gives the number of dotted name parts that is removed from the displayed node names. """ all_classes = {} builtins = __builtins__.values() def recurse(cls): if not show_builtins and cls in builtins: return nodename = self.class_name(cls, parts) fullname = self.class_name(cls, 0) baselist = [] all_classes[cls] = (nodename, fullname, baselist) for base in cls.__bases__: if not show_builtins and base in builtins: continue if base.__name__ == "Object" and base.__module__ == "Shiboken": continue baselist.append(self.class_name(base, parts)) if base not in all_classes: recurse(base) for cls in classes: recurse(cls) return all_classes.values() def class_name(self, cls, parts=0): """Given a class object, return a fully-qualified name. This works for things I've tested in matplotlib so far, but may not be completely general. """ module = cls.__module__ if module == '__builtin__': fullname = cls.__name__ else: fullname = '%s.%s' % (module, cls.__name__) if parts == 0: return fullname name_parts = fullname.split('.') return '.'.join(name_parts[-parts:]) def get_all_class_names(self): """ Get all of the class names involved in the graph. """ return [fullname for (_, fullname, _) in self.class_info] # These are the default attrs for graphviz default_graph_attrs = { 'rankdir': 'LR', 'size': '"8.0, 12.0"', } default_node_attrs = { 'shape': 'box', 'fontsize': 10, 'height': 0.25, 'fontname': 'Vera Sans, DejaVu Sans, Liberation Sans, ' 'Arial, Helvetica, sans', 'style': '"setlinewidth(0.5)"', } default_edge_attrs = { 'arrowsize': 0.5, 'style': '"setlinewidth(0.5)"', } def _format_node_attrs(self, attrs): return ','.join(['%s=%s' % x for x in attrs.items()]) def _format_graph_attrs(self, attrs): return ''.join(['%s=%s;\n' % x for x in attrs.items()]) def generate_dot(self, name, urls={}, env=None, graph_attrs={}, node_attrs={}, edge_attrs={}): """ Generate a graphviz dot graph from the classes that were passed in to __init__. *name* is the name of the graph. *urls* is a dictionary mapping class names to HTTP URLs. *graph_attrs*, *node_attrs*, *edge_attrs* are dictionaries containing key/value pairs to pass on as graphviz properties. """ g_attrs = self.default_graph_attrs.copy() n_attrs = self.default_node_attrs.copy() e_attrs = self.default_edge_attrs.copy() g_attrs.update(graph_attrs) n_attrs.update(node_attrs) e_attrs.update(edge_attrs) if env: g_attrs.update(env.config.inheritance_graph_attrs) n_attrs.update(env.config.inheritance_node_attrs) e_attrs.update(env.config.inheritance_edge_attrs) res = [] res.append('digraph %s {\n' % name) res.append(self._format_graph_attrs(g_attrs)) for name, fullname, bases in self.class_info: # Write the node this_node_attrs = n_attrs.copy() url = urls.get(fullname) if url is not None: this_node_attrs['URL'] = '"%s"' % url res.append(' "%s" [%s];\n' % (name, self._format_node_attrs(this_node_attrs))) # Write the edges for base_name in bases: res.append(' "%s" -> "%s" [%s];\n' % (base_name, name, self._format_node_attrs(e_attrs))) res.append('}\n') return ''.join(res) class inheritance_diagram(nodes.General, nodes.Element): """ A docutils node to use as a placeholder for the inheritance diagram. """ pass class InheritanceDiagram(Directive): """ Run when the inheritance_diagram directive is first encountered. 
""" has_content = False required_arguments = 1 optional_arguments = 0 final_argument_whitespace = True option_spec = { 'parts': directives.nonnegative_int, } def run(self): node = inheritance_diagram() node.document = self.state.document env = self.state.document.settings.env class_names = self.arguments[0].split() class_role = env.get_domain('py').role('class') # Store the original content for use as a hash node['parts'] = self.options.get('parts', 0) node['content'] = ', '.join(class_names) # Create a graph starting with the list of classes try: graph = InheritanceGraph( class_names, env.temp_data.get('py:module'), parts=node['parts']) except InheritanceException, err: return [node.document.reporter.warning(err.args[0], line=self.lineno)] # Create xref nodes for each target of the graph's image map and # add them to the doc tree so that Sphinx can resolve the # references to real URLs later. These nodes will eventually be # removed from the doctree after we're done with them. for name in graph.get_all_class_names(): refnodes, x = class_role( 'class', ':class:`%s`' % name, name, 0, self.state) node.extend(refnodes) # Store the graph object so we can use it to generate the # dot file later node['graph'] = graph return [node] def get_graph_hash(node): return md5(node['content'] + str(node['parts'])).hexdigest()[-10:] def html_visit_inheritance_diagram(self, node): """ Output the graph for HTML. This will insert a PNG with clickable image map. """ graph = node['graph'] graph_hash = get_graph_hash(node) name = 'inheritance%s' % graph_hash # Create a mapping from fully-qualified class names to URLs. urls = {} for child in node: if child.get('refuri') is not None: urls[child['reftitle']] = child.get('refuri') elif child.get('refid') is not None: urls[child['reftitle']] = '#' + child.get('refid') dotcode = graph.generate_dot(name, urls, env=self.builder.env) render_dot_html(self, node, dotcode, [], 'inheritance', 'inheritance', alt='Inheritance diagram of ' + node['content']) raise nodes.SkipNode def latex_visit_inheritance_diagram(self, node): """ Output the graph for LaTeX. This will insert a PDF. """ graph = node['graph'] graph_hash = get_graph_hash(node) name = 'inheritance%s' % graph_hash dotcode = graph.generate_dot(name, env=self.builder.env, graph_attrs={'size': '"6.0,6.0"'}) render_dot_latex(self, node, dotcode, [], 'inheritance') raise nodes.SkipNode def skip(self, node): raise nodes.SkipNode def setup(app): app.setup_extension('sphinx.ext.graphviz') app.add_node( inheritance_diagram, latex=(latex_visit_inheritance_diagram, None), html=(html_visit_inheritance_diagram, None), text=(skip, None), man=(skip, None)) app.add_directive('inheritance-diagram', InheritanceDiagram) app.add_config_value('inheritance_graph_attrs', {}, False), app.add_config_value('inheritance_node_attrs', {}, False), app.add_config_value('inheritance_edge_attrs', {}, False),
lgpl-2.1
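The InheritanceGraph class above walks each class's __bases__ recursively and emits a graphviz dot graph. A self-contained sketch of the same idea, independent of Sphinx and docutils; the node attributes and builtin-filtering rule here are simplified stand-ins, not the extension's defaults.

def class_tree_dot(*classes, show_builtins=False):
    """Return a graphviz dot string for the inheritance tree of *classes*."""
    edges, seen = [], set()

    def recurse(cls):
        if cls in seen or (not show_builtins and cls.__module__ == 'builtins'):
            return
        seen.add(cls)
        for base in cls.__bases__:
            if not show_builtins and base.__module__ == 'builtins':
                continue
            edges.append((base.__name__, cls.__name__))
            recurse(base)

    for cls in classes:
        recurse(cls)
    lines = ['digraph inheritance {', '  rankdir=LR;', '  node [shape=box];']
    lines += ['  "%s" -> "%s";' % e for e in edges]
    lines.append('}')
    return '\n'.join(lines)

class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass

print(class_tree_dot(D))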
sonnyhu/scikit-learn
examples/svm/plot_separating_hyperplane_unbalanced.py
329
1850
""" ================================================= SVM: Separating hyperplane for unbalanced classes ================================================= Find the optimal separating hyperplane using an SVC for classes that are unbalanced. We first find the separating plane with a plain SVC and then plot (dashed) the separating hyperplane with automatically correction for unbalanced classes. .. currentmodule:: sklearn.linear_model .. note:: This example will also work by replacing ``SVC(kernel="linear")`` with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour such as that of a SVC with a linear kernel. For example try instead of the ``SVC``:: clf = SGDClassifier(n_iter=100, alpha=0.01) """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm #from sklearn.linear_model import SGDClassifier # we create 40 separable points rng = np.random.RandomState(0) n_samples_1 = 1000 n_samples_2 = 100 X = np.r_[1.5 * rng.randn(n_samples_1, 2), 0.5 * rng.randn(n_samples_2, 2) + [2, 2]] y = [0] * (n_samples_1) + [1] * (n_samples_2) # fit the model and get the separating hyperplane clf = svm.SVC(kernel='linear', C=1.0) clf.fit(X, y) w = clf.coef_[0] a = -w[0] / w[1] xx = np.linspace(-5, 5) yy = a * xx - clf.intercept_[0] / w[1] # get the separating hyperplane using weighted classes wclf = svm.SVC(kernel='linear', class_weight={1: 10}) wclf.fit(X, y) ww = wclf.coef_[0] wa = -ww[0] / ww[1] wyy = wa * xx - wclf.intercept_[0] / ww[1] # plot separating hyperplanes and samples h0 = plt.plot(xx, yy, 'k-', label='no weights') h1 = plt.plot(xx, wyy, 'k--', label='with weights') plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired) plt.legend() plt.axis('tight') plt.show()
bsd-3-clause
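The weighted fit in the example above hard-codes class_weight={1: 10}; scikit-learn can also derive the weights from class frequencies with class_weight='balanced', which uses the heuristic n_samples / (n_classes * count). A short sketch on the same kind of unbalanced data; the resulting weights are shown in the comment.

import numpy as np
from sklearn import svm

rng = np.random.RandomState(0)
X = np.r_[1.5 * rng.randn(1000, 2), 0.5 * rng.randn(100, 2) + [2, 2]]
y = np.array([0] * 1000 + [1] * 100)

# 'balanced' reweights each class by n_samples / (n_classes * n_class_samples)
counts = np.bincount(y)
print(dict(enumerate(len(y) / (len(counts) * counts))))   # -> {0: 0.55, 1: 5.5}

wclf = svm.SVC(kernel='linear', class_weight='balanced')
wclf.fit(X, y)
print(wclf.coef_, wclf.intercept_)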
ChanderG/scipy
doc/source/conf.py
40
10928
# -*- coding: utf-8 -*- import sys, os, re # Check Sphinx version import sphinx if sphinx.__version__ < "1.1": raise RuntimeError("Sphinx 1.1 or newer required") needs_sphinx = '1.1' # ----------------------------------------------------------------------------- # General configuration # ----------------------------------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. sys.path.insert(0, os.path.abspath('../sphinxext')) sys.path.insert(0, os.path.abspath(os.path.dirname(__file__))) extensions = ['sphinx.ext.autodoc', 'sphinx.ext.mathjax', 'numpydoc', 'sphinx.ext.intersphinx', 'sphinx.ext.coverage', 'sphinx.ext.autosummary', 'scipyoptdoc'] # Determine if the matplotlib has a recent enough version of the # plot_directive. try: from matplotlib.sphinxext import plot_directive except ImportError: use_matplotlib_plot_directive = False else: try: use_matplotlib_plot_directive = (plot_directive.__version__ >= 2) except AttributeError: use_matplotlib_plot_directive = False if use_matplotlib_plot_directive: extensions.append('matplotlib.sphinxext.plot_directive') else: raise RuntimeError("You need a recent enough version of matplotlib") # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General substitutions. project = 'SciPy' copyright = '2008-2014, The Scipy community' # The default replacements for |version| and |release|, also used in various # other places throughout the built documents. import scipy version = re.sub(r'\.dev-.*$', r'.dev', scipy.__version__) release = scipy.__version__ print "Scipy (VERSION %s)" % (version,) # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # The reST default role (used for this markup: `text`) to use for all documents. default_role = "autolink" # List of directories, relative to source directories, that shouldn't be searched # for source files. exclude_dirs = [] # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = False # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. 
pygments_style = 'sphinx' # ----------------------------------------------------------------------------- # HTML output # ----------------------------------------------------------------------------- themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme') if os.path.isdir(themedir): html_theme = 'scipy' html_theme_path = [themedir] if 'scipyorg' in tags: # Build for the scipy.org website html_theme_options = { "edit_link": True, "sidebar": "right", "scipy_org_logo": True, "rootlinks": [("http://scipy.org/", "Scipy.org"), ("http://docs.scipy.org/", "Docs")] } else: # Default build html_theme_options = { "edit_link": False, "sidebar": "left", "scipy_org_logo": False, "rootlinks": [] } html_logo = '_static/scipyshiny_small.png' html_sidebars = {'index': 'indexsidebar.html'} else: # Build without scipy.org sphinx theme present if 'scipyorg' in tags: raise RuntimeError("Get the scipy-sphinx-theme first, " "via git submodule init & update") else: html_style = 'scipy_fallback.css' html_logo = '_static/scipyshiny_small.png' html_sidebars = {'index': 'indexsidebar.html'} html_title = "%s v%s Reference Guide" % (project, version) html_static_path = ['_static'] html_last_updated_fmt = '%b %d, %Y' html_additional_pages = {} html_use_modindex = True html_copy_source = False html_file_suffix = '.html' htmlhelp_basename = 'scipy' pngmath_use_preview = True pngmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent'] # ----------------------------------------------------------------------------- # LaTeX output # ----------------------------------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, document class [howto/manual]). _stdauthor = 'Written by the SciPy community' latex_documents = [ ('index', 'scipy-ref.tex', 'SciPy Reference Guide', _stdauthor, 'manual'), # ('user/index', 'scipy-user.tex', 'SciPy User Guide', # _stdauthor, 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. latex_preamble = r''' \usepackage{amsmath} \DeclareUnicodeCharacter{00A0}{\nobreakspace} % In the parameters etc. sections, align uniformly, and adjust label emphasis \usepackage{expdlist} \let\latexdescription=\description \let\endlatexdescription=\enddescription \renewenvironment{description}% {\begin{latexdescription}[\setleftmargin{60pt}\breaklabel\setlabelstyle{\bfseries\itshape}]}% {\end{latexdescription}} % Make Examples/etc section headers smaller and more compact \makeatletter \titleformat{\paragraph}{\normalsize\normalfont\bfseries\itshape}% {\py@NormalColor}{0em}{\py@NormalColor}{\py@NormalColor} \titlespacing*{\paragraph}{0pt}{1ex}{0pt} \makeatother % Save vertical space in parameter lists and elsewhere \makeatletter \renewenvironment{quote}% {\list{}{\topsep=0pt% \parsep \z@ \@plus\p@}% \item\relax}% {\endlist} \makeatother % Fix footer/header \renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}} \renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}} ''' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. 
latex_use_modindex = False # ----------------------------------------------------------------------------- # Intersphinx configuration # ----------------------------------------------------------------------------- intersphinx_mapping = { 'http://docs.python.org/dev': None, 'http://docs.scipy.org/doc/numpy': None, } # ----------------------------------------------------------------------------- # Numpy extensions # ----------------------------------------------------------------------------- # If we want to do a phantom import from an XML file for all autodocs phantom_import_file = 'dump.xml' # Generate plots for example sections numpydoc_use_plots = True # ----------------------------------------------------------------------------- # Autosummary # ----------------------------------------------------------------------------- if sphinx.__version__ >= "0.7": import glob autosummary_generate = glob.glob("*.rst") # ----------------------------------------------------------------------------- # Coverage checker # ----------------------------------------------------------------------------- coverage_ignore_modules = r""" """.split() coverage_ignore_functions = r""" test($|_) (some|all)true bitwise_not cumproduct pkgload generic\. """.split() coverage_ignore_classes = r""" """.split() coverage_c_path = [] coverage_c_regexes = {} coverage_ignore_c_items = {} #------------------------------------------------------------------------------ # Plot #------------------------------------------------------------------------------ plot_pre_code = """ import numpy as np np.random.seed(123) """ plot_include_source = True plot_formats = [('png', 96), 'pdf'] plot_html_show_formats = False import math phi = (math.sqrt(5) + 1)/2 font_size = 13*72/96.0 # 13 px plot_rcparams = { 'font.size': font_size, 'axes.titlesize': font_size, 'axes.labelsize': font_size, 'xtick.labelsize': font_size, 'ytick.labelsize': font_size, 'legend.fontsize': font_size, 'figure.figsize': (3*phi, 3), 'figure.subplot.bottom': 0.2, 'figure.subplot.left': 0.2, 'figure.subplot.right': 0.9, 'figure.subplot.top': 0.85, 'figure.subplot.wspace': 0.4, 'text.usetex': False, } if not use_matplotlib_plot_directive: import matplotlib matplotlib.rcParams.update(plot_rcparams) # ----------------------------------------------------------------------------- # Source code links # ----------------------------------------------------------------------------- import inspect from os.path import relpath, dirname for name in ['sphinx.ext.linkcode', 'linkcode', 'numpydoc.linkcode']: try: __import__(name) extensions.append(name) break except ImportError: pass else: print "NOTE: linkcode extension not found -- no links to source generated" def linkcode_resolve(domain, info): """ Determine the URL corresponding to Python object """ if domain != 'py': return None modname = info['module'] fullname = info['fullname'] submod = sys.modules.get(modname) if submod is None: return None obj = submod for part in fullname.split('.'): try: obj = getattr(obj, part) except: return None try: fn = inspect.getsourcefile(obj) except: fn = None if not fn: try: fn = inspect.getsourcefile(sys.modules[obj.__module__]) except: fn = None if not fn: return None try: source, lineno = inspect.getsourcelines(obj) except: lineno = None if lineno: linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1) else: linespec = "" fn = relpath(fn, start=dirname(scipy.__file__)) if 'dev' in scipy.__version__: return "http://github.com/scipy/scipy/blob/master/scipy/%s%s" % ( fn, linespec) else: return 
"http://github.com/scipy/scipy/blob/v%s/scipy/%s%s" % ( scipy.__version__, fn, linespec)
bsd-3-clause
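The conf.py above defines linkcode_resolve, which maps a documented object to a GitHub URL with a #Lstart-Lend anchor derived from inspect.getsourcelines. A stripped-down sketch of that lookup for an arbitrary object follows; the URL template in the comment is illustrative, not the project's actual link format.

import inspect
from os.path import dirname, relpath

def github_linespec(obj, package):
    """Return (relative_path, '#Lstart-Lend') for *obj* inside *package*."""
    fn = inspect.getsourcefile(obj)
    source, lineno = inspect.getsourcelines(obj)
    linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
    return relpath(fn, start=dirname(package.__file__)), linespec

import json
path, lines = github_linespec(json.dumps, json)
# e.g. join with "https://github.com/<org>/<repo>/blob/<ref>/" (illustrative)
print(path, lines)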
rob-nn/open_gait_analytics
api/oga_api/ml/basic_cmac.py
2
6480
import oga_api.ml.cmac as cmac import numpy as np import matplotlib.pyplot as plt import oga_api.physics.cinematic as c class BasicCMAC(cmac.CMAC): def __init__(self, trajectories, pos_angles, time_frame, markers, angles, activations, output, num_iterations): self._num_iterations = num_iterations confs = [] conf = None data_set = None for marker in markers: if 'xCheckedForInput' in marker and marker['xCheckedForInput'] and 'qx'in marker: data = c.get_vectorial_velocities(trajectories[marker['index'], 0, :], time_frame) conf = cmac.SignalConfiguration(data.min(), data.max(), marker['qx'], marker['description']) if conf != None: confs.append(conf) if data_set == None: data_set = np.reshape(data, (len(data), 1)) else: data_set = np.concatenate((data_set, np.reshape(data, (len(data), 1))), axis=1) if 'yCheckedForInput' in marker and marker['yCheckedForInput'] and 'qy'in marker: data = c.get_vectorial_velocities(trajectories[marker['index'], 1, :], time_frame) conf = cmac.SignalConfiguration(data.min(), data.max(), marker['qy'], marker['description']) if conf != None: confs.append(conf) if data_set == None: data_set = np.reshape(data, (len(data), 1)) else: data_set = np.concatenate((data_set, np.reshape(data, (len(data), 1))), axis=1) if 'zCheckedForInput' in marker and marker['zCheckedForInput'] and 'qz'in marker: data = c.get_vectorial_velocities(trajectories[marker['index'], 2, :], time_frame) conf = cmac.SignalConfiguration(data.min(), data.max(), marker['qz'], marker['description']) if conf != None: confs.append(conf) if data_set == None: data_set = np.reshape(data, (len(data), 1)) else: data_set = np.concatenate((data_set, np.reshape(data, (len(data), 1))), axis=1) super(BasicCMAC, self).__init__(confs, activations) if data_set == None: raise ParameterInvalid('No data do process') if len(confs) == 0: raise ParameterInvalid('No input valid input sginal') self._data_set = data_set self._get_output_data(output, trajectories, pos_angles, time_frame) self._generate_data_for_training_and_test() @property def data_in(self): return self._data_in @property def data_in_test(self): return self._data_in_test @property def data_set(self): return self._data_set @property def out_data(self): return self._out_data @property def data_out(self): return self._data_out @property def data_out_test(self): return self._data_out_test def _get_output_data(self, output, trajectories, pos_angles, time_frame): if output['type'] == 0: #Marker component = 0 if output['component'] =='x': component = 0 elif output['component'] == 'y': component = 1 else: component == 2 # component == z self._out_data = trajectories[output['_id'], component, :] else: #1 Angle #import pdb; pdb.set_trace() angle = pos_angles[int(output['_id'])] origin = trajectories[int(angle['origin']), 0:3, :] component_a = trajectories[int(angle['component_a']), 0:3, :] component_b = trajectories[int(angle['component_b']), 0:3, :] if output['component'] == 'a': # angle self._out_data = c.get_angles(origin.T, component_a.T, component_b.T) else: # v - angular velocities self._out_data = c.calc_angular_velocities(origin.T, component_a.T, component_b.T, time_frame) #import pdb; pdb.set_trace() def _generate_data_for_training_and_test(self): data_in = None data_in_test = None data_out = np.array([]) data_out_test = np.array([]) for i in np.arange(self._data_set.shape[0]): if i % 2 == 0: if data_in == None: data_in = np.reshape(self._data_set[i,:], (1, self._data_set.shape[1])) else: data_in = np.concatenate((data_in, np.reshape(self._data_set[i,:], (1, 
self._data_set.shape[1])))) data_out = np.append(data_out, np.array([self._out_data[i]])) else: if data_in_test == None: data_in_test = np.reshape(self._data_set[i,:], (1, self._data_set.shape[1])) else: data_in_test = np.concatenate((data_in_test, np.reshape(self._data_set[i,:], (1, self._data_set.shape[1])))) data_out_test = np.append(data_out_test, np.array([self._out_data[i]])) self._data_in = data_in self._data_in_test = data_in_test self._data_out = data_out self._data_out_test = data_out_test def train(self): if self._num_iterations < 1: raise ParameterInvalid('Number of iterations must be greater than 1') t = cmac.Train(self, self._data_in, self._data_out, 1, self._num_iterations) t.train() self.t = t def fire_all(self, inputs): result = [] for data in inputs: result.append(self.fire(data)) return np.array(result) def fire_test(self): return self.fire_all(self._data_in_test) """ def plot_aproximation(self, time = None): real = self._data_test aproximations = self.fire_test) if time == None: t = arange(0, real.shape[0]) * (1./315.) else: t = time plt.figure() plt.plot(self.t.E) plt.figure() plt.hold(True) p1 = plt.plot(t.tolist(), real, 'b', linewidth=4) p2 = plt.plot(t.tolist(), aproximation, 'r', linewidth=2) plt.xlabel('t (sec.)', fontsize=15) plt.ylabel('Angular Velocities (rads/sec.)', fontsize=15) plt.legend(['Human Knee', 'CMAC Prediction']) plt.show() """ class ParameterInvalid(BaseException): def __init__(self, description): self._description = description @property def description(self): return self._description
mit
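_generate_data_for_training_and_test in the record above interleaves samples, sending even-indexed rows to the training set and odd-indexed rows to the test set one row at a time. The same split can be expressed with NumPy slicing; a small sketch with hypothetical array names and shapes:

import numpy as np

data_set = np.random.rand(20, 4)   # hypothetical input signals, one row per frame
out_data = np.random.rand(20)      # hypothetical target signal

# even-indexed frames -> training set, odd-indexed frames -> test set
data_in, data_out = data_set[0::2], out_data[0::2]
data_in_test, data_out_test = data_set[1::2], out_data[1::2]

assert data_in.shape[0] + data_in_test.shape[0] == data_set.shape[0]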
Knewton/lentil
lentil/viztools.py
2
9752
""" Module for visualizing skill embeddings @author Siddharth Reddy <sgr45@cornell.edu> """ import logging import matplotlib from matplotlib import pyplot as plt import numpy as np from . import models _logger = logging.getLogger(__name__) def plot_embedding( model, timestep=-1, show_students=True, show_assessments=True, show_lessons=None, show_prereqs=None, show_concepts=None, show_student_ids=False, show_assessment_ids=False, show_lesson_ids=False, show_concept_ids=False, id_padding_x=0.01, id_padding_y=0.01, alpha=0.5, size=20, title='', show_legend=True, force_invariant_axis_limits=True, axis_limit_padding=0.1, show_pass_rates=False, x_axis_limits=None, y_axis_limits=None): """ Plot students, assessments, lessons, and prereqs in a two-dimensional skill embedding Students, assessments, prereqs = points Lessons = vectors See nb/toy_examples.ipynb for example invocations :param EmbeddingModel model: A skill embedding model :param int timestep: A timestep. By default, timestep=-1 => latest snapshot :param float id_padding_x: Padding between object and id along x-axis :param float id_padding_y: Padding between object and id along y-axis :param float alpha: Alpha level for scatterplot points' color :param int size: Size of scatterplot points :param str|None title: Title of plot :param bool show_legend: True => show legend in upper left corner False => do not show legend :param bool force_invariant_axis_limits: True => plot will have same axes limits regardless of timestep, False => plot may have different axes limits depending on timestep :param float axis_limit_padding: Padding for axis limits (to prevent points from being stuck at the edges of the plot) :param bool show_pass_rates: True => color assessments by pass rate, False => don't color assessments :param list[int,int]|None x_axis_limits: [x_min, x_max] :param list[int,int]|None y_axis_limits: [y_min, y_max] """ if model.embedding_dimension != 2: raise ValueError('Invalid embedding dimension!') if timestep<-1 or timestep>=model.history.duration(): raise ValueError('Invalid timestep!') if size<=0: raise ValueError('Invalid scatterplot point size!') if axis_limit_padding<0: raise ValueError('Invalid axis limit padding!') if show_lessons is None: show_lessons = model.using_lessons if show_prereqs is None: show_prereqs = model.using_prereqs if show_lessons and not model.using_lessons: raise ValueError( 'Cannot show lessons because model does not use lessons!') if show_prereqs and not model.using_prereqs: raise ValueError( 'Cannot show prereqs because model does not use prereqs!') if show_concepts and not model.using_graph_prior: raise ValueError( 'Cannot show concepts because model does not use a graph prior!') if show_student_ids and not show_students: raise ValueError('Cannot show student_ids without students!') if show_assessment_ids and not show_assessments: raise ValueError('Cannot show assessment_ids without assessments!') if show_lesson_ids and not show_lessons and not show_prereqs: raise ValueError('Cannot show lesson_ids without lessons and/or prereqs!') if show_pass_rates and not show_assessments: raise ValueError('Cannot show pass rates without assessments!') if show_concept_ids and not show_concepts: raise ValueError('Cannot show concept_ids without concepts!') if show_pass_rates and model.history.num_students() > 1: _logger.warning('Showing pass rates for more than one student!') _, ax = plt.subplots() if show_students: student_embeddings_x = model.student_embeddings[:, 0, timestep] student_embeddings_y = 
model.student_embeddings[:, 1, timestep] ax.scatter( student_embeddings_x, student_embeddings_y, alpha=alpha, marker='o', s=size, label='student') if show_student_ids: for student_id in model.history.iter_students(): student_idx = model.history.idx_of_student_id(student_id) student_x = student_embeddings_x[student_idx] student_y = student_embeddings_y[student_idx] student_id_x = student_x + id_padding_x student_id_y = student_y + id_padding_y ax.annotate(student_id, xy=( student_x, student_y), xytext=( student_id_x, student_id_y)) if show_assessments: assessment_embeddings_x = model.assessment_embeddings[:, 0] assessment_embeddings_y = model.assessment_embeddings[:, 1] if show_pass_rates: num_assessments = model.history.num_assessments() pass_rates = [model.history.assessment_pass_rate( model.history.id_of_assessment_idx( i), timestep if timestep!=-1 else None) for i in xrange( num_assessments)] ax.scatter( assessment_embeddings_x, assessment_embeddings_y, c=pass_rates, alpha=alpha, marker='s', s=size, label='assessment', cmap=matplotlib.cm.cool) else: ax.scatter( assessment_embeddings_x, assessment_embeddings_y, alpha=alpha, marker='s', s=size, label='assessment') if show_assessment_ids: for assessment_id in model.history.iter_assessments(): assessment_idx = model.history.idx_of_assessment_id(assessment_id) assessment_x = assessment_embeddings_x[assessment_idx] assessment_y = assessment_embeddings_y[assessment_idx] assessment_id_x = assessment_x + id_padding_x assessment_id_y = assessment_y + id_padding_y ax.annotate(assessment_id, xy=( assessment_x, assessment_y), xytext=( assessment_id_x, assessment_id_y)) if show_concepts: concept_embeddings_x = model.concept_embeddings[:, 0] concept_embeddings_y = model.concept_embeddings[:, 1] ax.scatter( concept_embeddings_x, concept_embeddings_y, alpha=alpha, marker='^', s=size, label='concept') if show_concept_ids: for concept_id, concept_idx in model.graph.idx_of_concept_id.iteritems(): concept_x = concept_embeddings_x[concept_idx] concept_y = concept_embeddings_y[concept_idx] concept_id_x = concept_x + id_padding_x concept_id_y = concept_y + id_padding_y ax.annotate(concept_id, xy=( concept_x, concept_y), xytext=( concept_id_x, concept_id_y)) if show_lessons: if model.using_prereqs and show_prereqs: prereq_embeddings_x = model.prereq_embeddings[:, 0] prereq_embeddings_y = model.prereq_embeddings[:, 1] else: prereq_embeddings_x = prereq_embeddings_y = [0] * ( model.history.num_lessons()) lesson_embeddings_x = model.lesson_embeddings[:, 0] lesson_embeddings_y = model.lesson_embeddings[:, 1] ax.quiver( prereq_embeddings_x, prereq_embeddings_y, lesson_embeddings_x, lesson_embeddings_y, pivot='tail') if show_lesson_ids: for lesson_id in model.history.iter_lessons(): lesson_idx = model.history.idx_of_lesson_id(lesson_id) lesson_x = prereq_embeddings_x[lesson_idx] if model.using_prereqs else 0 lesson_y = prereq_embeddings_y[lesson_idx] if model.using_prereqs else 0 lesson_id_x = lesson_x + id_padding_x lesson_id_y = lesson_y + id_padding_y ax.annotate(lesson_id, xy=( lesson_x, lesson_y), xytext=( lesson_id_x, lesson_id_y)) if show_legend: ax.legend(loc='upper left') if force_invariant_axis_limits: x = [] y = [] if show_students: x += np.unique(model.student_embeddings[:, 0, :]).tolist() y += np.unique(model.student_embeddings[:, 1, :]).tolist() if show_assessments: x += np.unique(model.assessment_embeddings[:, 0]).tolist() y += np.unique(model.assessment_embeddings[:, 1]).tolist() if show_lessons: x += np.unique(model.lesson_embeddings[:, 0] + ( 
model.prereq_embeddings[:, 0] if show_prereqs else 0)).tolist() y += np.unique(model.lesson_embeddings[:, 1] + ( model.prereq_embeddings[:, 1] if show_prereqs else 0)).tolist() if show_concepts: x += np.unique(model.concept_embeddings[:, 0]).tolist() y += np.unique(model.concept_embeddings[:, 1]).tolist() ax.set_xlim([min(x)-axis_limit_padding, max(x)+axis_limit_padding]) ax.set_ylim([min(y)-axis_limit_padding, max(y)+axis_limit_padding]) if x_axis_limits is not None: ax.set_xlim(x_axis_limits) if y_axis_limits is not None: ax.set_ylim(y_axis_limits) if title is None: title = 'Latent Skill Space' ax.set_title(title) ax.set_xlabel('Skill 1') ax.set_ylabel('Skill 2') plt.show()
apache-2.0
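plot_embedding above labels each scatter point by placing its id a small padded offset away from the point with ax.annotate. A minimal matplotlib sketch of that labelling pattern, detached from the lentil model objects (the embedding coordinates and ids are made up):

import matplotlib.pyplot as plt
import numpy as np

xy = np.random.rand(5, 2)              # hypothetical 2D embeddings
ids = ['s%d' % i for i in range(5)]
id_padding_x = id_padding_y = 0.01

fig, ax = plt.subplots()
ax.scatter(xy[:, 0], xy[:, 1], alpha=0.5, marker='o', s=20, label='student')
for (x, y), label in zip(xy, ids):
    # offset the text slightly so it does not sit on top of the marker
    ax.annotate(label, xy=(x, y), xytext=(x + id_padding_x, y + id_padding_y))
ax.legend(loc='upper left')
plt.show()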
jbloom/epitopefinder
scripts/epitopefinder_plotdistributioncomparison.py
1
3447
#!python """Script for plotting distributions of epitopes per site for two sets of sites. Uses matplotlib. Designed to analyze output of epitopefinder_getepitopes.py. Written by Jesse Bloom.""" import os import sys import random import epitopefinder.io import epitopefinder.plot def main(): """Main body of script.""" random.seed(1) # seed random number generator in case P values are being computed if not epitopefinder.plot.PylabAvailable(): raise ImportError("Cannot import matplotlib / pylab, which are required by this script.") # output is written to out, currently set to standard out out = sys.stdout out.write("Beginning execution of epitopefinder_plotdistributioncomparison.py\n") # read input file and parse arguments args = sys.argv[1 : ] if len(args) != 1: raise IOError("Script must be called with exactly one argument specifying the input file") infilename = sys.argv[1] if not os.path.isfile(infilename): raise IOError("Failed to find infile %s" % infilename) d = epitopefinder.io.ParseInfile(open(infilename)) out.write("\nRead input arguments from %s\n" % infilename) out.write('Read the following key / value pairs:\n') for (key, value) in d.iteritems(): out.write("%s %s\n" % (key, value)) plotfile = epitopefinder.io.ParseStringValue(d, 'plotfile').strip() epitopesbysite1_list = [] epitopesbysite2_list = [] for (xlist, xf) in [(epitopesbysite1_list, 'epitopesfile1'), (epitopesbysite2_list, 'epitopesfile2')]: epitopesfile = epitopefinder.io.ParseFileList(d, xf) if len(epitopesfile) != 1: raise ValueError("%s specifies more than one file" % xf) epitopesfile = epitopesfile[0] for line in open(epitopesfile).readlines()[1 : ]: if not (line.isspace() or line[0] == '#'): (site, n) = line.split(',') (site, n) = (int(site), int(n)) xlist.append(n) if not xlist: raise ValueError("%s failed to specify information for any sites" % xf) set1name = epitopefinder.io.ParseStringValue(d, 'set1name') set2name = epitopefinder.io.ParseStringValue(d, 'set2name') title = epitopefinder.io.ParseStringValue(d, 'title').strip() if title.upper() in ['NONE', 'FALSE']: title = None pvalue = epitopefinder.io.ParseStringValue(d, 'pvalue') if pvalue.upper() in ['NONE', 'FALSE']: pvalue = None pvaluewithreplacement = None else: pvalue = int(pvalue) pvaluewithreplacement = epitopefinder.io.ParseBoolValue(d, 'pvaluewithreplacement') if pvalue < 1: raise ValueError("pvalue must be >= 1") if len(epitopesbysite2_list) >= len(epitopesbysite1_list): raise ValueError("You cannot use pvalue since epitopesbysite2_list is not a subset of epitopesbysite1_list -- it does not contain fewer sites with specified epitope counts.") ymax = None if 'ymax' in d: ymax = epitopefinder.io.ParseFloatValue(d, 'ymax') out.write('\nNow creating the plot file %s\n' % plotfile) epitopefinder.plot.PlotDistributionComparison(epitopesbysite1_list, epitopesbysite2_list, set1name, set2name, plotfile, 'number of epitopes', 'fraction of sites', title, pvalue, pvaluewithreplacement, ymax=ymax) out.write("\nScript is complete.\n") if __name__ == '__main__': main() # run the script
gpl-3.0
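The script above reads per-site epitope counts from CSV lines of the form "site,count", skipping the header, comments and blank lines. A compact parser sketch that mirrors that loop; the file name in the usage comment is hypothetical.

def read_epitope_counts(path):
    """Return the list of per-site epitope counts from a site,count CSV."""
    counts = []
    with open(path) as f:
        for line in f.readlines()[1:]:          # skip the header line
            if line.isspace() or line.startswith('#'):
                continue
            site, n = line.split(',')
            counts.append(int(n))
    return counts

# counts = read_epitope_counts('epitopesbysite1.csv')  # hypothetical file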
balazssimon/ml-playground
udemy/lazyprogrammer/deep-reinforcement-learning-python/mountaincar/q_learning.py
1
6102
# This takes 4min 30s to run in Python 2.7 # But only 1min 30s to run in Python 3.5! # # Note: gym changed from version 0.7.3 to 0.8.0 # MountainCar episode length is capped at 200 in later versions. # This means your agent can't learn as much in the earlier episodes # since they are no longer as long. import gym import os import sys import numpy as np import matplotlib import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D from gym import wrappers from datetime import datetime from sklearn.pipeline import FeatureUnion from sklearn.preprocessing import StandardScaler from sklearn.kernel_approximation import RBFSampler from sklearn.linear_model import SGDRegressor # SGDRegressor defaults: # loss='squared_loss', penalty='l2', alpha=0.0001, # l1_ratio=0.15, fit_intercept=True, n_iter=5, shuffle=True, # verbose=0, epsilon=0.1, random_state=None, learning_rate='invscaling', # eta0=0.01, power_t=0.25, warm_start=False, average=False # Inspired by https://github.com/dennybritz/reinforcement-learning class FeatureTransformer: def __init__(self, env, n_components=500): observation_examples = np.array([env.observation_space.sample() for x in range(10000)]) scaler = StandardScaler() scaler.fit(observation_examples) # Used to converte a state to a featurizes represenation. # We use RBF kernels with different variances to cover different parts of the space featurizer = FeatureUnion([ ("rbf1", RBFSampler(gamma=5.0, n_components=n_components)), ("rbf2", RBFSampler(gamma=2.0, n_components=n_components)), ("rbf3", RBFSampler(gamma=1.0, n_components=n_components)), ("rbf4", RBFSampler(gamma=0.5, n_components=n_components)) ]) example_features = featurizer.fit_transform(scaler.transform(observation_examples)) self.dimensions = example_features.shape[1] self.scaler = scaler self.featurizer = featurizer def transform(self, observations): # print "observations:", observations scaled = self.scaler.transform(observations) # assert(len(scaled.shape) == 2) return self.featurizer.transform(scaled) # Holds one SGDRegressor for each action class Model: def __init__(self, env, feature_transformer, learning_rate): self.env = env self.models = [] self.feature_transformer = feature_transformer for i in range(env.action_space.n): model = SGDRegressor(learning_rate=learning_rate) model.partial_fit(feature_transformer.transform( [env.reset()] ), [0]) self.models.append(model) def predict(self, s): X = self.feature_transformer.transform([s]) result = np.stack([m.predict(X) for m in self.models]).T assert(len(result.shape) == 2) return result def update(self, s, a, G): X = self.feature_transformer.transform([s]) assert(len(X.shape) == 2) self.models[a].partial_fit(X, [G]) def sample_action(self, s, eps): # eps = 0 # Technically, we don't need to do epsilon-greedy # because SGDRegressor predicts 0 for all states # until they are updated. This works as the # "Optimistic Initial Values" method, since all # the rewards for Mountain Car are -1. 
if np.random.random() < eps: return self.env.action_space.sample() else: return np.argmax(self.predict(s)) # returns a list of states_and_rewards, and the total reward def play_one(model, env, eps, gamma): observation = env.reset() done = False totalreward = 0 iters = 0 while not done and iters < 10000: action = model.sample_action(observation, eps) prev_observation = observation observation, reward, done, info = env.step(action) # update the model next = model.predict(observation) # assert(next.shape == (1, env.action_space.n)) G = reward + gamma*np.max(next[0]) model.update(prev_observation, action, G) totalreward += reward iters += 1 return totalreward def plot_cost_to_go(env, estimator, num_tiles=20): x = np.linspace(env.observation_space.low[0], env.observation_space.high[0], num=num_tiles) y = np.linspace(env.observation_space.low[1], env.observation_space.high[1], num=num_tiles) X, Y = np.meshgrid(x, y) # both X and Y will be of shape (num_tiles, num_tiles) Z = np.apply_along_axis(lambda _: -np.max(estimator.predict(_)), 2, np.dstack([X, Y])) # Z will also be of shape (num_tiles, num_tiles) fig = plt.figure(figsize=(10, 5)) ax = fig.add_subplot(111, projection='3d') surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=matplotlib.cm.coolwarm, vmin=-1.0, vmax=1.0) ax.set_xlabel('Position') ax.set_ylabel('Velocity') ax.set_zlabel('Cost-To-Go == -V(s)') ax.set_title("Cost-To-Go Function") fig.colorbar(surf) plt.show() def plot_running_avg(totalrewards): N = len(totalrewards) running_avg = np.empty(N) for t in range(N): running_avg[t] = totalrewards[max(0, t-100):(t+1)].mean() plt.plot(running_avg) plt.title("Running Average") plt.show() def main(show_plots=True): env = gym.make('MountainCar-v0') ft = FeatureTransformer(env) model = Model(env, ft, "constant") gamma = 0.99 if 'monitor' in sys.argv: filename = os.path.basename(__file__).split('.')[0] monitor_dir = './' + filename + '_' + str(datetime.now()) env = wrappers.Monitor(env, monitor_dir) N = 300 totalrewards = np.empty(N) for n in range(N): # eps = 1.0/(0.1*n+1) eps = 0.1*(0.97**n) if n == 199: print("eps:", eps) # eps = 1.0/np.sqrt(n+1) totalreward = play_one(model, env, eps, gamma) totalrewards[n] = totalreward if (n + 1) % 100 == 0: print("episode:", n, "total reward:", totalreward) print("avg reward for last 100 episodes:", totalrewards[-100:].mean()) print("total steps:", -totalrewards.sum()) if show_plots: plt.plot(totalrewards) plt.title("Rewards") plt.show() plot_running_avg(totalrewards) # plot the optimal state-value function plot_cost_to_go(env, model) if __name__ == '__main__': # for i in range(10): # main(show_plots=False) main()
apache-2.0
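play_one in the record above performs the Q-learning update with the TD target G = r + gamma * max_a' Q(s', a'), applied as one partial_fit step on a per-action SGDRegressor. An isolated sketch of that update on random feature vectors; the feature dimension, action count and learning rate are made-up placeholders.

import numpy as np
from sklearn.linear_model import SGDRegressor

n_features, n_actions, gamma = 8, 3, 0.99
models = [SGDRegressor(learning_rate='constant', eta0=0.01) for _ in range(n_actions)]
x0 = np.random.rand(1, n_features)
for m in models:                      # initialise so predict() works before learning
    m.partial_fit(x0, [0.0])

def q_update(x, action, reward, x_next):
    # TD target: reward plus discounted value of the greedy next action
    q_next = np.array([m.predict(x_next)[0] for m in models])
    G = reward + gamma * q_next.max()
    models[action].partial_fit(x, [G])

q_update(np.random.rand(1, n_features), 1, -1.0, np.random.rand(1, n_features))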
CINPLA/exana
exana/tracking/fields.py
1
32391
import numpy as np def spatial_rate_map(x, y, t, spike_train, binsize=0.01, box_xlen=1, box_ylen=1, mask_unvisited=True, convolve=True, return_bins=False, smoothing=0.02): """Divide a 2D space in bins of size binsize**2, count the number of spikes in each bin and divide by the time spent in respective bins. The map can then be convolved with a gaussian kernel of size csize determined by the smoothing factor, binsize and box_xlen. Parameters ---------- spike_train : neo.SpikeTrain x : float 1d vector of x positions y : float 1d vector of y positions t : float 1d vector of times at x, y positions binsize : float spatial binsize box_xlen : quantities scalar in m side length of quadratic box mask_unvisited: bool mask bins which has not been visited by nans convolve : bool convolve the rate map with a 2D Gaussian kernel Returns ------- out : rate map if return_bins = True out : rate map, xbins, ybins """ if not all([len(var) == len(var2) for var in [x,y,t] for var2 in [x,y,t]]): raise ValueError('x, y, t must have same number of elements') if box_xlen < x.max() or box_ylen < y.max(): raise ValueError('box length must be larger or equal to max path length') from decimal import Decimal as dec decimals = 1e10 remainderx = dec(float(box_xlen)*decimals) % dec(float(binsize)*decimals) remaindery = dec(float(box_ylen)*decimals) % dec(float(binsize)*decimals) if remainderx != 0 or remaindery != 0: raise ValueError('the remainder should be zero i.e. the ' + 'box length should be an exact multiple ' + 'of the binsize') # interpolate one extra timepoint t_ = np.append(t, t[-1] + np.median(np.diff(t))) spikes_in_bin, _ = np.histogram(spike_train, t_) time_in_bin = np.diff(t_) xbins = np.arange(0, box_xlen + binsize, binsize) ybins = np.arange(0, box_ylen + binsize, binsize) ix = np.digitize(x, xbins, right=True) iy = np.digitize(y, ybins, right=True) spike_pos = np.zeros((xbins.size, ybins.size)) time_pos = np.zeros((xbins.size, ybins.size)) for n in range(len(x)): spike_pos[ix[n], iy[n]] += spikes_in_bin[n] time_pos[ix[n], iy[n]] += time_in_bin[n] # correct for shifting of map spike_pos = spike_pos[1:, 1:] time_pos = time_pos[1:, 1:] with np.errstate(divide='ignore', invalid='ignore'): rate = np.divide(spike_pos, time_pos) if convolve: rate[np.isnan(rate)] = 0. # for convolution from astropy.convolution import Gaussian2DKernel, convolve_fft csize = (box_xlen / binsize) * smoothing kernel = Gaussian2DKernel(csize) rate = convolve_fft(rate, kernel) # TODO edge correction if mask_unvisited: was_in_bin = np.asarray(time_pos, dtype=bool) rate[np.invert(was_in_bin)] = np.nan if return_bins: return rate.T, xbins, ybins else: return rate.T def gridness(rate_map, box_xlen, box_ylen, return_acorr=False, step_size=0.1, method='iter', return_masked_acorr=False): '''Calculates gridness of a rate map. Calculates the normalized autocorrelation (A) of a rate map B where A is given as A = 1/n\Sum_{x,y}(B - \bar{B})^{2}/\sigma_{B}^{2}. Further, the Pearsson's product-moment correlation coefficients is calculated between A and A_{rot} rotated 30 and 60 degrees. Finally the gridness is calculated as the difference between the minimum of coefficients at 60 degrees and the maximum of coefficients at 30 degrees i.e. gridness = min(r60) - max(r30). If the method 'iter' is chosen: In order to focus the analysis on symmetry of A the the central and the outer part of the gridness is maximized by increasingly mask A at steps of ``step_size``. 
If the method 'puncture' is chosen: This is the standard way of calculating gridness, by masking the central autocorrelation bump, in addition to rounding the map. See examples. Parameters ---------- rate_map : numpy.ndarray box_xlen : float side length of quadratic box step_size : float step size in masking, only applies to the method "iter" return_acorr : bool return autocorrelation map or not return_masked_acorr : bool return masked autocorrelation map or not method : 'iter' or 'puncture' Returns ------- out : gridness, (autocorrelation map, masked autocorrelation map) Examples -------- >>> from exana.tracking.tools import make_test_grid_rate_map >>> import matplotlib.pyplot as plt >>> rate_map, pos = make_test_grid_rate_map() >>> iter_score = gridness(rate_map, box_xlen=1, box_ylen=1, method='iter') >>> print('%.2f' % iter_score) 1.39 >>> puncture_score = gridness(rate_map, box_xlen=1, box_ylen=1, method='puncture') >>> print('%.2f' % puncture_score) 0.96 .. plot:: import matplotlib.pyplot as plt import numpy as np from exana.tracking.tools import make_test_grid_rate_map from exana.tracking import gridness import matplotlib.pyplot as plt rate_map, _ = make_test_grid_rate_map() fig, axs = plt.subplots(2, 2) g1, acorr, m_acorr1 = gridness(rate_map, box_xlen=1, box_ylen=1, return_acorr=True, return_masked_acorr=True, method='iter') g2, m_acorr2 = gridness(rate_map, box_xlen=1, box_ylen=1, return_masked_acorr=True, method='puncture') mats = [rate_map, m_acorr1, acorr, m_acorr2] titles = ['Rate map', 'Masked acorr "iter", gridness = %.2f' % g1, 'Autocorrelation', 'Masked acorr "puncture", gridness = %.2f' % g2] for ax, mat, title in zip(axs.ravel(), mats, titles): ax.imshow(mat) ax.set_title(title) plt.tight_layout() plt.show() ''' import numpy.ma as ma from exana.misc.tools import fftcorrelate2d from exana.tracking.tools import gaussian2D from scipy.optimize import curve_fit tmp_map = rate_map.copy() tmp_map[~np.isfinite(tmp_map)] = 0 acorr = fftcorrelate2d(tmp_map, tmp_map, mode='full', normalize=True) rows, cols = acorr.shape b_x = np.linspace(- box_xlen / 2., box_xlen / 2., rows) b_y = np.linspace(- box_ylen / 2., box_ylen / 2., cols) B_x, B_y = np.meshgrid(b_x, b_y) if method == 'iter': if return_masked_acorr: m_acorrs = [] gridscores = [] for outer in np.arange(box_xlen / 4, box_xlen / 2, step_size): m_acorr = ma.masked_array( acorr, mask=np.sqrt(B_x**2 + B_y**2) > outer) for inner in np.arange(0, box_xlen / 4, step_size): m_acorr = ma.masked_array( m_acorr, mask=np.sqrt(B_x**2 + B_y**2) < inner) r30, r60 = rotate_corr(m_acorr) gridscores.append(np.min(r60) - np.max(r30)) if return_masked_acorr: m_acorrs.append(m_acorr) gridscore = max(gridscores) if return_masked_acorr: m_acorr = m_acorrs[gridscores.index(gridscore)] elif method == 'puncture': # round picture edges _gaussian = lambda pos, a, s: gaussian2D(a, pos[0], pos[1], 0, 0, s).ravel() p0 = (max(acorr.ravel()), min(box_xlen, box_ylen) / 100) popt, pcov = curve_fit(_gaussian, (B_x, B_y), acorr.ravel(), p0=p0) m_acorr = ma.masked_array( acorr, mask=np.sqrt(B_x**2 + B_y**2) > min(box_xlen, box_ylen) / 2) m_acorr = ma.masked_array( m_acorr, mask=np.sqrt(B_x**2 + B_y**2) < popt[1]) r30, r60 = rotate_corr(m_acorr) gridscore = float(np.min(r60) - np.max(r30)) if return_acorr and return_masked_acorr: return gridscore, acorr, m_acorr if return_masked_acorr: return gridscore, m_acorr if return_acorr: return gridscore, acorr # acorrs[grids.index(max(grids))] else: return gridscore def rotate_corr(acorr): from exana.misc.tools import 
masked_corrcoef2d from scipy.ndimage.interpolation import rotate angles = range(30, 180+30, 30) corr = [] # Rotate and compute correlation coefficient for angle in angles: rot_acorr = rotate(acorr, angle, reshape=False) corr.append(masked_corrcoef2d(rot_acorr, acorr)[0, 1]) r60 = corr[1::2] r30 = corr[::2] return r30, r60 def occupancy_map(x, y, t, binsize=0.01, box_xlen=1, box_ylen=1, mask_unvisited=True, convolve=True, return_bins=False, smoothing=0.02): '''Divide a 2D space in bins of size binsize**2, count the time spent in each bin. The map can be convolved with a gaussian kernel of size csize determined by the smoothing factor, binsize and box_xlen. Parameters ---------- x : array 1d vector of x positions y : array 1d vector of y positions t : array 1d vector of times at x, y positions binsize : float spatial binsize box_xlen : float side length of quadratic box mask_unvisited: bool mask bins which has not been visited by nans convolve : bool convolve the rate map with a 2D Gaussian kernel Returns ------- occupancy_map : numpy.ndarray if return_bins = True out : occupancy_map, xbins, ybins ''' if not all([len(var) == len(var2) for var in [ x, y, t] for var2 in [x, y, t]]): raise ValueError('x, y, t must have same number of elements') if box_xlen < x.max() or box_ylen < y.max(): raise ValueError( 'box length must be larger or equal to max path length') from decimal import Decimal as dec decimals = 1e10 remainderx = dec(float(box_xlen)*decimals) % dec(float(binsize)*decimals) remaindery = dec(float(box_ylen)*decimals) % dec(float(binsize)*decimals) if remainderx != 0 or remaindery != 0: raise ValueError('the remainder should be zero i.e. the ' + 'box length should be an exact multiple ' + 'of the binsize') # interpolate one extra timepoint t_ = np.array(t.tolist() + [t.max() + np.median(np.diff(t))]) time_in_bin = np.diff(t_) xbins = np.arange(0, box_xlen + binsize, binsize) ybins = np.arange(0, box_ylen + binsize, binsize) ix = np.digitize(x, xbins, right=True) iy = np.digitize(y, ybins, right=True) time_pos = np.zeros((xbins.size, ybins.size)) for n in range(len(x) - 1): time_pos[ix[n], iy[n]] += time_in_bin[n] # correct for shifting of map since digitize returns values at right edges time_pos = time_pos[1:, 1:] if convolve: rate[np.isnan(rate)] = 0. # for convolution from astropy.convolution import Gaussian2DKernel, convolve_fft csize = (box_xlen / binsize) * smoothing kernel = Gaussian2DKernel(csize) rate = convolve_fft(rate, kernel) # TODO edge correction if mask_unvisited: was_in_bin = np.asarray(time_pos, dtype=bool) rate[np.invert(was_in_bin)] = np.nan if return_bins: return rate.T, xbins, ybins else: return rate.T def nvisits_map(x, y, t, binsize=0.01, box_xlen=1, box_ylen=1, return_bins=False): '''Divide a 2D space in bins of size binsize**2, count the number of visits in each bin. The map can be convolved with a gaussian kernel of size determined by the smoothing factor, binsize and box_xlen. 
Parameters ---------- x : array 1d vector of x positions y : array 1d vector of y positions t : array 1d vector of times at x, y positions binsize : float spatial binsize box_xlen : float side length of quadratic box Returns ------- nvisits_map : numpy.ndarray if return_bins = True out : nvisits_map, xbins, ybins ''' if not all([len(var) == len(var2) for var in [ x, y, t] for var2 in [x, y, t]]): raise ValueError('x, y, t must have same number of elements') if box_xlen < x.max() or box_ylen < y.max(): raise ValueError( 'box length must be larger or equal to max path length') from decimal import Decimal as dec decimals = 1e10 remainderx = dec(float(box_xlen)*decimals) % dec(float(binsize)*decimals) remaindery = dec(float(box_ylen)*decimals) % dec(float(binsize)*decimals) if remainderx != 0 or remaindery != 0: raise ValueError('the remainder should be zero i.e. the ' + 'box length should be an exact multiple ' + 'of the binsize') xbins = np.arange(0, box_xlen + binsize, binsize) ybins = np.arange(0, box_ylen + binsize, binsize) ix = np.digitize(x, xbins, right=True) iy = np.digitize(y, ybins, right=True) nvisits_map = np.zeros((xbins.size, ybins.size)) for n in range(len(x)): if n == 0: nvisits_map[ix[n], iy[n]] = 1 else: if ix[n-1] != ix[n] or iy[n-1] != iy[n]: nvisits_map[ix[n], iy[n]] += 1 # correct for shifting of map since digitize returns values at right edges nvisits_map = nvisits_map[1:, 1:] if return_bins: return nvisits_map.T, xbins, ybins else: return nvisits_map.T def spatial_rate_map_1d(x, t, spike_train, binsize=0.01, track_len=1, mask_unvisited=True, convolve=True, return_bins=False, smoothing=0.02): """Take x coordinates of linear track data, divide in bins of binsize, count the number of spikes in each bin and divide by the time spent in respective bins. The map can then be convolved with a gaussian kernel of size csize determined by the smoothing factor, binsize and box_xlen. Parameters ---------- spike_train : array x : array 1d vector of x positions t : array 1d vector of times at x, y positions binsize : float spatial binsize box_xlen : float side length of quadratic box mask_unvisited: bool mask bins which has not been visited by nans convolve : bool convolve the rate map with a 2D Gaussian kernel Returns ------- out : rate map if return_bins = True out : rate map, xbins """ if not all([len(var) == len(var2) for var in [x, t] for var2 in [x, t]]): raise ValueError('x, t must have same number of elements') if track_len < x.max(): raise ValueError('track length must be\ larger or equal to max path length') from decimal import Decimal as dec decimals = 1e10 remainderx = dec(float(track_len)*decimals) % dec(float(binsize)*decimals) if remainderx != 0: raise ValueError('the remainder should be zero i.e. the ' + 'box length should be an exact multiple ' + 'of the binsize') # interpolate one extra timepoint t_ = np.array(t.tolist() + [t.max() + np.median(np.diff(t))]) spikes_in_bin, _ = np.histogram(spike_train, t_) time_in_bin = np.diff(t_) xbins = np.arange(0, track_len + binsize, binsize) ix = np.digitize(x, xbins, right=True) spike_pos = np.zeros(xbins.size) time_pos = np.zeros(xbins.size) for n in range(len(x)): spike_pos[ix[n]] += spikes_in_bin[n] time_pos[ix[n]] += time_in_bin[n] # correct for shifting of map since digitize returns values at right edges spike_pos = spike_pos[1:] time_pos = time_pos[1:] with np.errstate(divide='ignore', invalid='ignore'): rate = np.divide(spike_pos, time_pos) if convolve: rate[np.isnan(rate)] = 0. 
# for convolution from astropy.convolution import Gaussian2DKernel, convolve_fft csize = (track_len / binsize) * smoothing kernel = Gaussian2DKernel(csize) rate = convolve_fft(rate, kernel) # TODO edge correction if mask_unvisited: was_in_bin = np.asarray(time_pos, dtype=bool) rate[np.invert(was_in_bin)] = np.nan if return_bins: return rate.T, xbins else: return rate.T def separate_fields(rate_map, laplace_thrsh=0, center_method='maxima', cutoff_method='none', box_xlen=1, box_ylen=1, index=False): """Separates fields using the laplacian to identify fields separated by a negative second derivative. Parameters ---------- rate_map : np 2d array firing rate in each bin laplace_thrsh : float value of laplacian to separate fields by relative to the minima. Should be on the interval 0 to 1, where 0 cuts off at 0 and 1 cuts off at min(laplace(rate_map)). Default 0. center_method : string method to find field centers. Valid options = ['center_of_mass', 'maxima','gaussian_fit'] cutoff_method (optional) : string or function function to exclude small fields. If local field value of function is lower than global function value, the field is excluded. Valid string_options = ['median', 'mean','none']. index : bool, default False return bump center values as index or xy-pos Returns ------- fields : numpy array, shape like rate_map. contains areas all filled with same value, corresponding to fields in rate_map. The values are in range(1,nFields + 1), sorted by size of the field (sum of all field values). 0 elsewhere. n_field : int field count bump_centers : (n_field x 2) np ndarray Coordinates of field centers """ cutoff_functions = {'mean':np.mean, 'median':np.median, 'none':None} if not callable(cutoff_method): try: cutoff_func = cutoff_functions[cutoff_method] except KeyError: msg = "invalid cutoff_method flag '%s'" % cutoff_method raise ValueError(msg) else: cutoff_func = cutoff_method from scipy import ndimage l = ndimage.laplace(rate_map) l[l>laplace_thrsh*np.min(l)] = 0 # Labels areas of the laplacian not connected by values > 0. fields, n_fields = ndimage.label(l) # index 0 is the background indx = np.arange(1,n_fields+1) # Use cutoff method to remove unwanted fields if cutoff_method != 'none': try: total_value = cutoff_func(fields) except: print('Unexpected error, cutoff_func doesnt like the input:') raise field_values = ndimage.labeled_comprehension(rate_map, fields, indx, cutoff_func, float, 0) try: is_field = field_values >= total_value except: print('cutoff_func return_values doesnt want to compare:') raise if np.sum(is_field) == 0: return np.zeros(rate_map.shape), 0, np.array([[],[]]) for i in indx: if not is_field[i-1]: fields[fields == i] = 0 n_fields = ndimage.label(fields, output=fields) indx = np.arange(1,n_fields + 1) # Sort by largest mean sizes = ndimage.labeled_comprehension(rate_map, fields, indx, np.mean, float, 0) size_sort = np.argsort(sizes)[::-1] new = np.zeros_like(fields) for i in np.arange(n_fields): new[fields == size_sort[i]+1] = i+1 fields = new bc = get_bump_centers(rate_map,labels=fields,ret_index=index,indices=indx,method=center_method, units=box_xlen.units) # TODO exclude fields where maxima is on the edge of the field? 
return fields, n_fields, bc def get_bump_centers(rate_map, labels, ret_index=False, indices=None, method='maxima', units=1): """Finds center of fields at labels.""" from scipy import ndimage if method not in ['maxima','center_of_mass','gaussian_fit']: msg = "invalid center_method flag '%s'" % method raise ValueError(msg) if indices is None: indices = np.arange(1,np.max(labels)+1) if method == 'maxima': bc = ndimage.maximum_position(rate_map, labels=labels, index=indices) elif method == 'center_of_mass': bc = ndimage.center_of_mass(rate_map, labels=labels, index=indices) elif method == 'gaussian_fit': from exana.tracking.tools import fit_gauss_asym bc = np.zeros((len(indices),2)) import matplotlib.pyplot as plt for i in indices: r = rate_map.copy() r[labels != i] = 0 popt = fit_gauss_asym(r, return_data=False) # TODO Find out which axis is x and which is y bc[i-1] = (popt[2],popt[1]) if ret_index: msg = 'ret_index not implemented for gaussian fit' raise NotImplementedError(msg) if not ret_index and not method=='gaussian_fit': bc = (bc + np.array((0.5,0.5)))/rate_map.shape return np.array(bc)*units def find_avg_dist(rate_map, thrsh = 0, plot=False): """Uses autocorrelation and separate_fields to find average distance between bumps. Is dependent on high gridness to get separate bumps in the autocorrelation Parameters ---------- rate_map : np 2d array firing rate in each bin thrsh (optional) : float, default 0 cutoff value for the laplacian of the autocorrelation function. Should be a negative number. Gives better separation if bumps are connected by "bridges" or saddles where the laplacian is negative. plot (optional) : bool, default False plot acorr and the separated acorr, with bump centers Returns ------- avg_dist : float relative units from 0 to 1 of the box size """ from scipy.ndimage import maximum_position from exana.misc.tools import fftcorrelate2d # autocorrelate. Returns array (2x - 1) the size of rate_map acorr = fftcorrelate2d(rate_map,rate_map, mode = 'full', normalize = True) #acorr[acorr<0] = 0 # TODO Fix this f, nf, bump_centers = separate_fields(acorr,laplace_thrsh=thrsh, center_method='maxima',cutoff_method='median') # TODO Find a way to find valid value for # thrsh, or remove. bump_centers = np.array(bump_centers) # find dists from center in (autocorrelation)relative units (from 0 to 1) distances = np.linalg.norm(bump_centers - (0.5,0.5), axis = 1) dist_sort = np.argsort(distances) distances = distances[dist_sort] # use maximum 6 closest values except center value avg_dist = np.median(distances[1:7]) # correct for difference in shapes avg_dist *= acorr.shape[0]/rate_map.shape[0] # = 1.98 # TODO : raise warning if too big difference between points if plot: import matplotlib.pyplot as plt fig,[ax1,ax2] = plt.subplots(1,2) ax1.imshow(acorr,extent = (0,1,0,1),origin='lower') ax1.scatter(*(bump_centers[:,::-1].T)) ax2.imshow(f,extent = (0,1,0,1),origin='lower') ax2.scatter(*(bump_centers[:,::-1].T)) return avg_dist def fit_hex(bump_centers, avg_dist=None, plot_bumps = False, method='best'): """Fits a hex grid to a given set of bumps. Uses the three bumps most Parameters ---------- bump_centers : Nx2 np.array x,y positions of bump centers, x,y /in (0,1) avg_dist (optional): float average spacing between bumps plot_bumps (optional): bool if True, plots at the three bumps most likely to be in correct hex-position to the current matplotlib axes. method (optional): string, valid options: ['closest', 'best'] method to find angle from neighboring bumps. 
'closest' uses six bumps nearest to center bump 'best' uses the two bumps nearest to avg_dist Returns ------- displacement : float distance of bump closest to the center in meters orientation : float orientation of hexagon (in degrees) """ valid_methods = ['closest', 'best'] if method not in valid_methods: msg = "invalid method flag '%s'" % method raise ValueError(msg) bump_centers = np.array(bump_centers) # sort by distance to center d = np.linalg.norm(bump_centers - (0.5,0.5), axis=1) d_sort = np.argsort(d) dist_sorted = bump_centers[d_sort] center_bump = dist_sorted[0]; others = dist_sorted[1:] displacement = d[d_sort][0] # others distances to center bumps relpos = others - center_bump reldist = np.linalg.norm(relpos, axis=1) if method == 'closest': # get 6 closest bumps rel_sort = np.argsort(reldist) closest = others[rel_sort][:6] relpos = relpos[rel_sort][:6] elif method == 'best': # get 2 bumps such that /sum_{i\neqj}(\abs{r_i-r_j}-avg_ist)^2 is minimized squares = 1e32*np.ones((others.shape[0], others.shape[0])) for i in range(len(relpos)): for j in range(i,len(relpos)): rel1 = (reldist[i] - avg_dist)**2 rel2 = (reldist[j] - avg_dist)**2 rel3 = (np.linalg.norm(relpos[i]-relpos[j]) - avg_dist)**2 squares[i,j] = rel1 + rel2 + rel3 rel_slice = np.unravel_index(np.argmin(squares), squares.shape) rel_slice = np.array(rel_slice) #rel_sort = np.argsort(np.abs(reldist-avg_dist)) closest = others[rel_slice] relpos = relpos[rel_slice] # sort by angle a = np.arctan2(relpos[:,1], relpos[:,0])%(2*np.pi) a_sort = np.argsort(a) # extract lowest angle and convert to degrees orientation = a[a_sort][0] *180/np.pi # hex grid is symmetric under rotations of 60deg orientation %= 60 if plot_bumps: import matplotlib.pyplot as plt ax=plt.gca() i = 1 xmin, xmax = ax.get_xlim() ymin, ymax = ax.get_ylim() dx = xmax-xmin; dy = ymax - ymin closest = closest[a_sort] edges = [center_bump] if method == 'best' else [] edges += [c for c in closest] edges = np.array(edges)*(dx,dy) + (xmin, ymin) poly = plt.Polygon(edges, alpha=0.5,color='r') ax.add_artist(poly) return displacement, orientation def calculate_grid_geometry(rate_map, plot_fields=False, **kwargs): """Calculates quantitative information about grid field. Find bump centers, bump spacing, center diplacement and hexagon orientation Parameters ---------- rate_map : np 2d array firing rate in each bin plot_fields : if True, plots the field labels with field centers to the current matplotlib ax. Default False thrsh : float, default 0 see find_avg_dist() center_method : string, valid options: ['maxima', 'center_of_mass'] default: 'center_of_mass' see separate_fields() method : string, valid options: ['closest', 'best'] see fit_hex() Returns ------- bump_centers : 2d np.array x,y positions of bump centers avg_dist : float average spacing between bumps, \in [0,1] displacement : float distance of bump closest to the center orientation : float orientation of hexagon (in degrees) Examples -------- >>> import numpy as np >>> rate_map = np.zeros((5,5)) >>> pos = np.array([ [0,2], ... [1,0],[1,4], ... [2,2], ... [3,0],[3,4], ... [4,2]]) >>> for(i,j) in pos: ... rate_map[i,j] = 1 ... >>> result = calculate_grid_geometry(rate_map) """ # TODO add back the following when it is correct # (array([[0.5, 0.9], # [0.9, 0.7], # [0.1, 0.7], # [0.5, 0.5], # [0.9, 0.3], # [0.1, 0.3], # [0.5, 0.1]]) * m, 0.4472135954999579, 0.0, 26.565051177077983) from scipy.ndimage import mean, center_of_mass # TODO: smooth data? 
# smooth_rate_map = lambda x:x # rate_map = smooth_rate_map(rate_map) center_method = kwargs.pop('center_method',None) if center_method: fields, nfields, bump_centers = separate_fields(rate_map, center_method=center_method) else: fields, nfields, bump_centers = separate_fields(rate_map) if bump_centers.size == 0: import warnings msg = 'couldnt find bump centers, returning None' warnings.warn(msg, RuntimeWarning, stacklevel=2) return None,None,None,None, sh = np.array(rate_map.shape) if plot_fields: print(fields) import matplotlib.pyplot as plt x=np.linspace(0,1,sh[0]+1) y=np.linspace(0,1,sh[1]+1) x,y = np.meshgrid(x,y) ax = plt.gca() print('nfields: ',nfields) plt.pcolormesh(x,y, fields) # switch from row-column to x-y bump_centers = bump_centers[:,::-1] thrsh = kwargs.pop('thrsh', None) if thrsh: avg_dist = find_avg_dist(rate_map, thrsh) else: avg_dist = find_avg_dist(rate_map) displacement, orientation = fit_hex(bump_centers, avg_dist, plot_bumps=plot_fields, **kwargs) return bump_centers, avg_dist, displacement, orientation class RandomDisplacementBounds(object): """random displacement with bounds""" def __init__(self, xmin, xmax, stepsize=0.5): self.xmin = np.array(xmin) self.xmax = np.array(xmax) self.stepsize = stepsize def __call__(self, x): """take a random step but ensure the new position is within the bounds""" while True: # this could be done in a much more clever way, but it will work for example purposes xnew = x + (self.xmax-self.xmin)*np.random.uniform(-self.stepsize, self.stepsize, np.shape(x)) if np.all(xnew < self.xmax) and np.all(xnew > self.xmin): break return xnew def optimize_sep_fields(rate_map,step = 0.04, niter=40, T = 1.0, method = 'SLSQP', glob=True, x0 = [0.065,0.1],callback=None): """Optimizes the separation of the fields by minimizing an error function Parameters ---------- rate_map : method : valid methods=['L-BFGS-B', 'TNC', 'SLSQP'] x0 : list initial values for smoothing smoothing and laplace_thrsh Returns -------- res : Result of the optimization. Contains smoothing and laplace_thrsh in attribute res.x""" from scipy import optimize from exana.tracking.tools import separation_error_func as err_func valid_methods = ['L-BFGS-B', 'TNC', 'SLSQP'] if method not in valid_methods: raise ValueError('invalid method flag %s' %method) rate_map[np.isnan(rate_map)] = 0. method = 'SLSQP' xmin = [0.025, 0] xmax = [0.2, 1] bounds = [(low,high) for low,high in zip(xmin,xmax)] obj_func = lambda args: err_func(args[0], args[1], rate_map) if glob: take_step = RandomDisplacementBounds(xmin, xmax,stepsize=step) minimizer_kwargs = dict(method=method, bounds=bounds) res = optimize.basinhopping(obj_func, x0, niter=niter, T = T, minimizer_kwargs=minimizer_kwargs, take_step=take_step,callback=callback) else: res = optimize.minimize(obj_func, x0, method=method, bounds = bounds, options={'disp': True}) return res if __name__ == "__main__": import doctest doctest.testmod()
gpl-3.0
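The occupancy_map and nvisits_map functions above both reduce to the same np.digitize-and-accumulate step. Below is a minimal, self-contained sketch of that step on synthetic positions; the random-walk data, bin size and box size are illustrative assumptions, not part of the library:

import numpy as np

rng = np.random.RandomState(0)
n_samples, binsize, box_xlen, box_ylen = 5000, 0.05, 1.0, 1.0
t = np.linspace(0, 600, n_samples)                        # seconds
x = np.cumsum(rng.normal(0, 0.01, n_samples)) % box_xlen  # toy random-walk positions
y = np.cumsum(rng.normal(0, 0.01, n_samples)) % box_ylen

# one extra timepoint so every sample has a dwell time, as in occupancy_map
time_in_bin = np.diff(np.r_[t, t[-1] + np.median(np.diff(t))])

xbins = np.arange(0, box_xlen + binsize, binsize)
ybins = np.arange(0, box_ylen + binsize, binsize)
ix = np.digitize(x, xbins, right=True)
iy = np.digitize(y, ybins, right=True)

occupancy = np.zeros((xbins.size, ybins.size))
for k in range(n_samples):
    occupancy[ix[k], iy[k]] += time_in_bin[k]
occupancy = occupancy[1:, 1:]   # digitize returns right-edge indices; shift as above

print("%.1f seconds binned into a %s map" % (occupancy.sum(), occupancy.shape))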
lrp/tftools
tftools.py
2
9758
# tftools.py: Utilities for optimizing transfer function excitation signals # Copyright (C) 2013 Larry Price # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from __future__ import division import numpy as np import scipy.signal as sig from matplotlib.mlab import csd, psd import sys def tukeywin(m, a=0.5): ''' Produces a tukey window a = overlap parameter between 0 returns a square window and 1 returns a Hann(ing) window m = number of points in the window see, e.g., https://en.wikipedia.org/wiki/Window_function#Tukey_window ''' if a <= 0: return np.ones(m) elif a >= 1: return np.hanning(m) x = np.linspace(0, 1, m) w = np.ones_like(x) w[x < a/2] = (1 + np.cos(2*np.pi/a * (x[x < a/2] - a/2) )) / 2 w[x >= (1 - a/2)] = (1 + np.cos(2*np.pi/a * (x[x >= (1 - a/2)] - 1 + a/2))) / 2 return w def getTF(exc,resp,tmeas,Fs,Nfft,padto=None): """ compute transfer function along with coherence and SNR. uses PSD/CSD method with 50% overlapping windows returns f, TF, Coherence, SNR exc = excitation signal resp = system response tmeas = duration of mesurement in seconds Fs = sample rate (Hz) Nfft = number of data points to be used in each block for fft padto = pad to this many points """ N = 1.89 * tmeas / (Nfft / Fs) Sx, f = psd(exc,NFFT=Nfft,Fs=Fs,noverlap=int(Nfft/2),pad_to=padto) Sy = psd(resp,NFFT=Nfft,Fs=Fs,noverlap=int(Nfft/2),pad_to=padto)[0] Sxy = csd(exc,resp,NFFT=Nfft,Fs=Fs,noverlap=int(Nfft/2),pad_to=padto)[0] Cxy = (Sxy * np.conj(Sxy)) / (Sx * Sy) snr = np.sqrt(Cxy * 2 * N / (1 - Cxy) ) return f, Sxy / Sx, Cxy, snr def fishersfzpk(ltisys,w,Sn): """ create the single-frequency fisher matrix for a transfer function in zpk form, i.e. 
__ ||_i (w - z_i) H(w) = k -------------------- __ ||_i (w - p_i) *** the excitation signal is assumed to be a sum of sines *** *** the denominator must be monic *** arguments: ltisys = a scipy.signal.lti instance of the transfer function w = (angular) frequencies of interest (in rad/s) Sn = PSD of the noise as an array (same length as w) returns: an NxN numpy array with each value being an array of len(w) (the collection of all single frequency Fisher matrices at frequencies w) """ ###FIXME: add some basic error handling #tf is in terms of iw, not w s = 1j*w #create the transfer function #use the lower case w because scipy puts the i in the transfer function for us tf = ltisys.freqresp(w)[1] #do the magnitude squared here once and for all tfmagsq = tf * np.conj(tf) #get the number of parameters if ltisys.gain == 1: N = len(ltisys.zeros) + len(ltisys.poles) else: N = len(ltisys.zeros) + len(ltisys.poles) + 1 #take all the derivatives Dz = np.zeros([len(ltisys.zeros),len(s)],dtype=np.complex128) Dp = np.zeros([len(ltisys.poles),len(s)],dtype=np.complex128) for i,z in enumerate(ltisys.zeros): Dz[i] = -1 / (s - z) for i,p in enumerate(ltisys.poles): Dp[i] = 1 / (s - p) #check for unity gain and the absence of zeros if ltisys.gain == 1 and ltisys.zeros.size: deez = np.vstack((Dz,Dp)) elif ltisys.gain == 1 and not ltisys.zeros.size: deez = Dp elif ltisys.gain != 1 and ltisys.zeros.size: deez = np.vstack((np.vstack((Dz,Dp)), 1/ltisys.gain[0] * np.ones(len(s)))) else: deez = np.vstack((Dp,1/ltisys.gain[0] * np.ones(len(s)))) #put it together to make the fisher matrix fisher = np.zeros([N,N,len(w)],dtype=np.float64) for i in range(N): for j in range(N): fisher[i][j] = 0.5 * tfmagsq * np.real(np.conj(deez[i])*deez[j]) / Sn #all done return fisher def fishersfab(ltisys,w,Sn): """ create the single-frequency fisher matrix for a transfer function as a rational function, i.e. Sum_0^N b_i s^i H(w) = -------------------- 1 + Sum_1^M a_i s^i *** the excitation signal is assumed to be a sum of sines *** *** the denominator must be monic (it's enforced, so no worries)*** arguments: ltisys = instance of scipy.signal.lti w = frequencies of interest Sn = PSD of the noise as an array (same length as w) returns: an NxN numpy array with each value being an array of len(w) (the collection of all single frequency Fisher matrices at frequencies w) NB: you have to take the transpose of the result if you want to, say compute the determinant via numpy.linalg.det """ ###FIXME: add some basic error handling #tf is in terms of iw, not w s = 1j*w #get the tf in the right form a,b = lti2ab(ltisys) #create the numerator and denominator of the tf if b.size: numer = np.sum(np.array([ b[i] * s**i for i in range(len(b))]),axis=0) else: #i don't think this is even possible unless the tf is just a pass-through numer = np.ones(len(s)) denom = np.sum(np.array([ a[i] * s**(i+1) for i in range(len(a))]),axis=0) + np.ones(len(s)) #number of parameters N = len(a) + len(b) #take all the derivatives deez = np.zeros([N,len(w)],dtype=np.complex128) for i in range(N): #derivative wrt denominator #funky numbering because denom is monic (demonic?) 
if i < len(a): deez[i] = - s**(i+1) * numer / denom**2 #derivative wrt numerator else: deez[i] = s**(i-len(a)) / denom #put it together to make the fisher matrix fisher = np.zeros([N,N,len(w)],dtype=np.float64) for i in range(N): for j in range(N): fisher[i][j] = 0.5 * np.real(np.conj(deez[i])*deez[j]) / Sn #all done return fisher def fishersf(ltisys,w,Sn,usezpk=False): """ convenience function to select between zpk (default) and ab form for computing the fisher matrix """ if usezpk is True: return fishersfzpk(ltisys,w,Sn) else: return fishersfab(ltisys,w,Sn) def fisherdesign(fmsf,Sx): """ compute the fisher matrix associated with the design Sx uses the Sx and the single frequency fisher matrix """ return np.sum(fmsf*Sx,axis=2) def dispersion(fmdesign,fmsf): """ compute the dispersion from the single frequency and design fisher matrices """ return np.trace(np.dot(np.linalg.inv(fmdesign),fmsf)) def lti2ab(ltisys): """ convenience function to convert scipy.signal.lti instance to a,b suitable for fisher matrix calculation ltisys is an instance of scipy.signal.lti returns a,b """ b = ltisys.num a = ltisys.den #fancy array slicing to reverse the order (::-1) and remove the first element of a (1:) return a[::-1][1:] / a[-1], b[::-1] / a[-1] def findfreqs(ltisys,Sn,w,nfreqs=None,usezpk=False): """ find best frequencies for optimal design (brute force method) arguments: ltisys = instance of scipy.signal.lti w = (angular) frequencies of interest nfreqs = # of frequencies to return. default is 3 x #parameters usezpk = boolean for indicating form of the transfer function returns: wopt = array of optimal frequencies to use fisherf = single-frequency fisher matrix evaluated at wopt (basically input for design optimization) """ #get the number of parameters and put the transfer function in the right form if usezpk is True: #number of parameters for zpk representation if ltisys.gain == 1: nparm = len(ltisys.zeros) + len(ltisys.poles) else: nparm = len(ltisys.zeros) + len(ltisys.poles) + 1 else: #using ab form a,b = lti2ab(ltisys) #number of parameters nparm = len(a) + len(b) #set the number of frequencies if nfreqs is None: nfreqs = 3 * nparm if nfreqs < 2 * nparm: raise ValueError('Must specify an nfreqs at least twice as large as the number of parameters!') sys.exit(0) fmsf = fishersf(ltisys,w,Sn,usezpk=usezpk) thesefreqs = np.sort(np.argsort(np.linalg.det(fmsf.T))[-nfreqs:]) return w[thesefreqs], fmsf.T[thesefreqs].T def optdesign(ltisys,w,usezpk=False,fmsf=None,Sn=None,tol=None,maxit=10000): """ compute the optimal design, Sx arguments: ltisys = instance of scipy.signal.lti w = the frequencies to optimize over tol = if max(dispersion - nparam) < tol, then iteration ceases. 
if tol isn't specified then iteration continues until maxit maxit = maximum number of iterations to perform returns a tuple containing: Sx = optimal design as a numpy array max(dispersion - nparam) """ #FIXME: add some error handling if fmsf is None and Sn is None: raise ValueError('Must specify Sn to compute Fisher!') sys.exit(1) #get the number of parameters and put the transfer function in the right form if usezpk is True: #number of parameters for zpk representation if ltisys.gain == 1: nparm = len(ltisys.zeros) + len(ltisys.poles) else: nparm = len(ltisys.zeros) + len(ltisys.poles) + 1 else: #using ab form a,b = lti2ab(ltisys) #number of parameters nparm = len(a) + len(b) #compute the single frequency fisher matrix if fmsf is None: fmsf = fishersf(ltisys,w,Sn,usezpk=usezpk) #initial design #normalized to one with all the power evenly distributed #don't worry about phases for now...FIXME: optimize phases Sx = np.ones(len(w)) / len (w) #compute the dispersion disp = dispersion(fisherdesign(fmsf,Sx),fmsf) for i in range(maxit): if tol is not None: if np.max(disp - nparm) < tol: break else: Sx *= (disp / nparm) disp = dispersion(fisherdesign(fmsf,Sx),fmsf) return Sx, np.max(disp - nparm)
gpl-3.0
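getTF above estimates the transfer function as Sxy/Sxx from 50%-overlapping windowed spectra. A hedged sketch of the same estimator, using scipy.signal.welch and scipy.signal.csd in place of the matplotlib.mlab helpers, with an assumed first-order Butterworth filter standing in for the system under test:

import numpy as np
import scipy.signal as sig

Fs, Nfft = 1024.0, 1024
rng = np.random.RandomState(1)
exc = rng.normal(size=int(60 * Fs))                  # 60 s of broadband excitation
b, a = sig.butter(1, 0.1)                            # toy system under test (assumption)
resp = sig.lfilter(b, a, exc) + 0.01 * rng.normal(size=exc.size)

f, Sxx = sig.welch(exc, fs=Fs, nperseg=Nfft, noverlap=Nfft // 2)
_, Sxy = sig.csd(exc, resp, fs=Fs, nperseg=Nfft, noverlap=Nfft // 2)
H = Sxy / Sxx                                        # estimated transfer function
print(np.abs(H[:5]))                                 # should follow the Butterworth magnitude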
lthurlow/Network-Grapher
proj/external/matplotlib-1.2.1/doc/mpl_examples/api/histogram_path_demo.py
6
1464
""" This example shows how to use a path patch to draw a bunch of rectangles. The technique of using lots of Rectangle instances, or the faster method of using PolyCollections, were implemented before we had proper paths with moveto/lineto, closepoly etc in mpl. Now that we have them, we can draw collections of regularly shaped objects with homogeous properties more efficiently with a PathCollection. This example makes a histogram -- its more work to set up the vertex arrays at the outset, but it should be much faster for large numbers of objects """ import numpy as np import matplotlib.pyplot as plt import matplotlib.patches as patches import matplotlib.path as path fig = plt.figure() ax = fig.add_subplot(111) # histogram our data with numpy data = np.random.randn(1000) n, bins = np.histogram(data, 50) # get the corners of the rectangles for the histogram left = np.array(bins[:-1]) right = np.array(bins[1:]) bottom = np.zeros(len(left)) top = bottom + n # we need a (numrects x numsides x 2) numpy array for the path helper # function to build a compound path XY = np.array([[left,left,right,right], [bottom,top,top,bottom]]).T # get the Path object barpath = path.Path.make_compound_path_from_polys(XY) # make a patch out of it patch = patches.PathPatch(barpath, facecolor='blue', edgecolor='gray', alpha=0.8) ax.add_patch(patch) # update the view limits ax.set_xlim(left[0], right[-1]) ax.set_ylim(bottom.min(), top.max()) plt.show()
mit
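For comparison with the compound-path approach in the example above, the same histogram can be drawn with plt.bar, which creates one Rectangle artist per bin; simpler to write, but the point of the demo is that a single PathPatch scales better for large numbers of bars:

import numpy as np
import matplotlib.pyplot as plt

data = np.random.randn(1000)
n, bins = np.histogram(data, 50)

fig, ax = plt.subplots()
ax.bar(bins[:-1], n, width=np.diff(bins), align='edge',
       facecolor='blue', edgecolor='gray', alpha=0.8)
ax.set_xlim(bins[0], bins[-1])
plt.show()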
mfitzp/padua
setup.py
1
1035
from setuptools import setup, find_packages version = '0.1.16' setup( name='padua', version=version, url='http://github.com/mfitzp/padua', author='Martin Fitzpatrick', author_email='martin.fitzpatrick@gmail.com', description='A Python interface for Proteomic Data Analysis, working with MaxQuant & Perseus outputs', license='MIT', packages=find_packages(), include_package_data=True, classifiers=[ 'Development Status :: 2 - Pre-Alpha', 'Intended Audience :: Developers', 'Topic :: Desktop Environment', 'Topic :: Software Development :: Build Tools', 'Topic :: Software Development :: Widget Sets', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.4' ], install_requires=[ 'numpy', 'scipy', 'matplotlib', 'pandas', 'statsmodels', 'matplotlib-venn', 'scikit-learn', 'requests', 'requests_toolbelt', 'adjustText' ] )
bsd-2-clause
ilo10/scikit-learn
examples/cluster/plot_agglomerative_clustering_metrics.py
402
4492
""" Agglomerative clustering with different metrics =============================================== Demonstrates the effect of different metrics on the hierarchical clustering. The example is engineered to show the effect of the choice of different metrics. It is applied to waveforms, which can be seen as high-dimensional vector. Indeed, the difference between metrics is usually more pronounced in high dimension (in particular for euclidean and cityblock). We generate data from three groups of waveforms. Two of the waveforms (waveform 1 and waveform 2) are proportional one to the other. The cosine distance is invariant to a scaling of the data, as a result, it cannot distinguish these two waveforms. Thus even with no noise, clustering using this distance will not separate out waveform 1 and 2. We add observation noise to these waveforms. We generate very sparse noise: only 6% of the time points contain noise. As a result, the l1 norm of this noise (ie "cityblock" distance) is much smaller than it's l2 norm ("euclidean" distance). This can be seen on the inter-class distance matrices: the values on the diagonal, that characterize the spread of the class, are much bigger for the Euclidean distance than for the cityblock distance. When we apply clustering to the data, we find that the clustering reflects what was in the distance matrices. Indeed, for the Euclidean distance, the classes are ill-separated because of the noise, and thus the clustering does not separate the waveforms. For the cityblock distance, the separation is good and the waveform classes are recovered. Finally, the cosine distance does not separate at all waveform 1 and 2, thus the clustering puts them in the same cluster. """ # Author: Gael Varoquaux # License: BSD 3-Clause or CC-0 import matplotlib.pyplot as plt import numpy as np from sklearn.cluster import AgglomerativeClustering from sklearn.metrics import pairwise_distances np.random.seed(0) # Generate waveform data n_features = 2000 t = np.pi * np.linspace(0, 1, n_features) def sqr(x): return np.sign(np.cos(x)) X = list() y = list() for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]): for _ in range(30): phase_noise = .01 * np.random.normal() amplitude_noise = .04 * np.random.normal() additional_noise = 1 - 2 * np.random.rand(n_features) # Make the noise sparse additional_noise[np.abs(additional_noise) < .997] = 0 X.append(12 * ((a + amplitude_noise) * (sqr(6 * (t + phi + phase_noise))) + additional_noise)) y.append(i) X = np.array(X) y = np.array(y) n_clusters = 3 labels = ('Waveform 1', 'Waveform 2', 'Waveform 3') # Plot the ground-truth labelling plt.figure() plt.axes([0, 0, 1, 1]) for l, c, n in zip(range(n_clusters), 'rgb', labels): lines = plt.plot(X[y == l].T, c=c, alpha=.5) lines[0].set_label(n) plt.legend(loc='best') plt.axis('tight') plt.axis('off') plt.suptitle("Ground truth", size=20) # Plot the distances for index, metric in enumerate(["cosine", "euclidean", "cityblock"]): avg_dist = np.zeros((n_clusters, n_clusters)) plt.figure(figsize=(5, 4.5)) for i in range(n_clusters): for j in range(n_clusters): avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j], metric=metric).mean() avg_dist /= avg_dist.max() for i in range(n_clusters): for j in range(n_clusters): plt.text(i, j, '%5.3f' % avg_dist[i, j], verticalalignment='center', horizontalalignment='center') plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2, vmin=0) plt.xticks(range(n_clusters), labels, rotation=45) plt.yticks(range(n_clusters), labels) plt.colorbar() 
plt.suptitle("Interclass %s distances" % metric, size=18) plt.tight_layout() # Plot clustering results for index, metric in enumerate(["cosine", "euclidean", "cityblock"]): model = AgglomerativeClustering(n_clusters=n_clusters, linkage="average", affinity=metric) model.fit(X) plt.figure() plt.axes([0, 0, 1, 1]) for l, c in zip(np.arange(model.n_clusters), 'rgbk'): plt.plot(X[model.labels_ == l].T, c=c, alpha=.5) plt.axis('tight') plt.axis('off') plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20) plt.show()
bsd-3-clause
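A tiny sketch of the claim made in that example's docstring: cosine distance is invariant to scaling of a waveform, while a sparse noise spike costs relatively less under cityblock (l1) than under euclidean (l2). The toy vectors are arbitrary assumptions:

import numpy as np
from sklearn.metrics import pairwise_distances

a = np.array([[1.0, 2.0, 3.0, 4.0]])
b = 3.0 * a                                  # proportional waveform
c = a + np.array([[0.0, 0.0, 5.0, 0.0]])     # one sparse noise spike

for metric in ("cosine", "euclidean", "cityblock"):
    d_ab = pairwise_distances(a, b, metric=metric)[0, 0]
    d_ac = pairwise_distances(a, c, metric=metric)[0, 0]
    print("%-9s  d(a, 3a)=%.3f   d(a, a+spike)=%.3f" % (metric, d_ab, d_ac))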
ldirer/scikit-learn
examples/classification/plot_lda_qda.py
32
5381
""" ==================================================================== Linear and Quadratic Discriminant Analysis with covariance ellipsoid ==================================================================== This example plots the covariance ellipsoids of each class and decision boundary learned by LDA and QDA. The ellipsoids display the double standard deviation for each class. With LDA, the standard deviation is the same for all the classes, while each class has its own standard deviation with QDA. """ print(__doc__) from scipy import linalg import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl from matplotlib import colors from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis ############################################################################### # colormap cmap = colors.LinearSegmentedColormap( 'red_blue_classes', {'red': [(0, 1, 1), (1, 0.7, 0.7)], 'green': [(0, 0.7, 0.7), (1, 0.7, 0.7)], 'blue': [(0, 0.7, 0.7), (1, 1, 1)]}) plt.cm.register_cmap(cmap=cmap) ############################################################################### # generate datasets def dataset_fixed_cov(): '''Generate 2 Gaussians samples with the same covariance matrix''' n, dim = 300, 2 np.random.seed(0) C = np.array([[0., -0.23], [0.83, .23]]) X = np.r_[np.dot(np.random.randn(n, dim), C), np.dot(np.random.randn(n, dim), C) + np.array([1, 1])] y = np.hstack((np.zeros(n), np.ones(n))) return X, y def dataset_cov(): '''Generate 2 Gaussians samples with different covariance matrices''' n, dim = 300, 2 np.random.seed(0) C = np.array([[0., -1.], [2.5, .7]]) * 2. X = np.r_[np.dot(np.random.randn(n, dim), C), np.dot(np.random.randn(n, dim), C.T) + np.array([1, 4])] y = np.hstack((np.zeros(n), np.ones(n))) return X, y ############################################################################### # plot functions def plot_data(lda, X, y, y_pred, fig_index): splot = plt.subplot(2, 2, fig_index) if fig_index == 1: plt.title('Linear Discriminant Analysis') plt.ylabel('Data with fixed covariance') elif fig_index == 2: plt.title('Quadratic Discriminant Analysis') elif fig_index == 3: plt.ylabel('Data with varying covariances') tp = (y == y_pred) # True Positive tp0, tp1 = tp[y == 0], tp[y == 1] X0, X1 = X[y == 0], X[y == 1] X0_tp, X0_fp = X0[tp0], X0[~tp0] X1_tp, X1_fp = X1[tp1], X1[~tp1] alpha = 0.5 # class 0: dots plt.plot(X0_tp[:, 0], X0_tp[:, 1], 'o', alpha=alpha, color='red') plt.plot(X0_fp[:, 0], X0_fp[:, 1], '*', alpha=alpha, color='#990000') # dark red # class 1: dots plt.plot(X1_tp[:, 0], X1_tp[:, 1], 'o', alpha=alpha, color='blue') plt.plot(X1_fp[:, 0], X1_fp[:, 1], '*', alpha=alpha, color='#000099') # dark blue # class 0 and 1 : areas nx, ny = 200, 100 x_min, x_max = plt.xlim() y_min, y_max = plt.ylim() xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx), np.linspace(y_min, y_max, ny)) Z = lda.predict_proba(np.c_[xx.ravel(), yy.ravel()]) Z = Z[:, 1].reshape(xx.shape) plt.pcolormesh(xx, yy, Z, cmap='red_blue_classes', norm=colors.Normalize(0., 1.)) plt.contour(xx, yy, Z, [0.5], linewidths=2., colors='k') # means plt.plot(lda.means_[0][0], lda.means_[0][1], 'o', color='black', markersize=10) plt.plot(lda.means_[1][0], lda.means_[1][1], 'o', color='black', markersize=10) return splot def plot_ellipse(splot, mean, cov, color): v, w = linalg.eigh(cov) u = w[0] / linalg.norm(w[0]) angle = np.arctan(u[1] / u[0]) angle = 180 * angle / np.pi # convert to degrees # filled Gaussian at 2 standard deviation 
ell = mpl.patches.Ellipse(mean, 2 * v[0] ** 0.5, 2 * v[1] ** 0.5, 180 + angle, facecolor=color, edgecolor='yellow', linewidth=2, zorder=2) ell.set_clip_box(splot.bbox) ell.set_alpha(0.5) splot.add_artist(ell) splot.set_xticks(()) splot.set_yticks(()) def plot_lda_cov(lda, splot): plot_ellipse(splot, lda.means_[0], lda.covariance_, 'red') plot_ellipse(splot, lda.means_[1], lda.covariance_, 'blue') def plot_qda_cov(qda, splot): plot_ellipse(splot, qda.means_[0], qda.covariances_[0], 'red') plot_ellipse(splot, qda.means_[1], qda.covariances_[1], 'blue') ############################################################################### for i, (X, y) in enumerate([dataset_fixed_cov(), dataset_cov()]): # Linear Discriminant Analysis lda = LinearDiscriminantAnalysis(solver="svd", store_covariance=True) y_pred = lda.fit(X, y).predict(X) splot = plot_data(lda, X, y, y_pred, fig_index=2 * i + 1) plot_lda_cov(lda, splot) plt.axis('tight') # Quadratic Discriminant Analysis qda = QuadraticDiscriminantAnalysis(store_covariances=True) y_pred = qda.fit(X, y).predict(X) splot = plot_data(qda, X, y, y_pred, fig_index=2 * i + 2) plot_qda_cov(qda, splot) plt.axis('tight') plt.suptitle('Linear Discriminant Analysis vs Quadratic Discriminant Analysis') plt.show()
bsd-3-clause
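A minimal sketch of fitting the two estimators used in that example on toy Gaussian blobs; the blob parameters and query point are arbitrary assumptions:

import numpy as np
from sklearn.discriminant_analysis import (LinearDiscriminantAnalysis,
                                            QuadraticDiscriminantAnalysis)

rng = np.random.RandomState(0)
X = np.r_[rng.randn(100, 2), 2.0 * rng.randn(100, 2) + [3.0, 3.0]]
y = np.r_[np.zeros(100), np.ones(100)]

lda = LinearDiscriminantAnalysis().fit(X, y)
qda = QuadraticDiscriminantAnalysis().fit(X, y)
print("LDA training accuracy:", lda.score(X, y))
print("QDA training accuracy:", qda.score(X, y))
print("LDA P(class 1 | [2, 2]):", lda.predict_proba([[2.0, 2.0]])[0, 1])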
trmznt/genaf
genaf/views/utils/plot.py
1
3274
# general plot / graphics utility using matplotlib from genaf.views.tools import * from matplotlib import pyplot as plt from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas from matplotlib.figure import Figure import pandas import io, base64 @roles( PUBLIC ) def index(request): # check if not request.GET.get('_method', None) in [ '_exec', '_dfexec' ]: pform, jscode = create_form( request ) return render_to_response('genaf:templates/utils/index.mako', { 'title': 'Plotting Utility', 'html': pform, 'code': jscode, }, request = request ) if request.GET.get('method') == '_dfexec': df = parse_df(request.GET.get('dfdata')) else: df = parse_textdata(request.GET.get('textdata')) plot_type = request.GET.get('plot_type') if plot_type == 'B': html, jscode = column_chart(df) elif plot_type == 'S': return error_page(request, 'Scatter plot not implemented yet') elif plot_type == 'P': html, jscode = pie_chart(df) return render_to_response('genaf:templates/utils/index.mako', { 'title': 'Plot', 'html': html, 'code': jscode, }, request = request ) def create_form(request): """ return html, jscode """ pform = form(name='plotform', action='#') pform.add( fieldset(name='data')[ input_textarea('textdata', label='Data'), ], fieldset(name='options')[ input_select(name='plot_type', label='Plot type', value='B', options = [ ('B', 'Bar (vertical) / column chart'), ('S', 'Scatter x,y plot'), ('P', 'Pie chart'), ] ), ], fieldset()[ submit_bar('Create plot', '_exec')] ) return (pform, '') def parse_textdata(textdata): """ parse data, with the first line as header, and consecutive lines as data """ header, content = textdata.split('\n', 1) columns = [ x.strip() for x in header.split('|') ] buff = io.StringIO(content) dataframe = pandas.read_table(buff, header=None, names = columns) return dataframe def save_figure(canvas): figfile = io.BytesIO() canvas.print_figure(figfile) figfile.seek(0) figdata_png = figfile.getvalue() figdata_png = base64.b64encode(figdata_png).decode('ASCII') fig_html = literal('<img src="data:image/png;base64,%s" >' % figdata_png) return fig_html,'' def column_chart(df): """ creates column (vertical bar) chart """ fig = Figure() canvas = FigureCanvas(fig) ax = fig.add_subplot(111) ax.bar(df.index, df.iloc[:,1], align='center') ax.set_xlabel(df.columns[0]) ax.set_xticks(df.index) ax.set_xticklabels(df.iloc[:,0], rotation='vertical') ax.set_ylabel(df.columns[1]) fig.tight_layout() return save_figure(canvas) def pie_chart(df): fig = Figure() canvas = FigureCanvas(fig) ax = fig.add_subplot(111, aspect=1) ax.pie( df.iloc[:,1], labels = df.iloc[:,0], counterclock=False, startangle=90 ) ax.set_xlabel(df.columns[0]) fig.tight_layout() return save_figure(canvas)
lgpl-3.0
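The save_figure helper above is the standard Agg-to-base64 pattern for embedding a matplotlib figure in HTML. A stand-alone sketch of that pattern (the bar data is an arbitrary assumption):

import io, base64
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure

fig = Figure()
canvas = FigureCanvas(fig)
ax = fig.add_subplot(111)
ax.bar([0, 1, 2], [3, 1, 2], align='center')

buf = io.BytesIO()
canvas.print_figure(buf)                       # writes PNG by default
encoded = base64.b64encode(buf.getvalue()).decode('ascii')
img_tag = '<img src="data:image/png;base64,%s">' % encoded
print(img_tag[:64] + '...')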
badlogicmanpreet/nupic
examples/opf/clients/hotgym/anomaly/one_gym/nupic_anomaly_output.py
49
9450
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ Provides two classes with the same signature for writing data out of NuPIC models. (This is a component of the One Hot Gym Anomaly Tutorial.) """ import csv from collections import deque from abc import ABCMeta, abstractmethod from nupic.algorithms import anomaly_likelihood # Try to import matplotlib, but we don't have to. try: import matplotlib matplotlib.use('TKAgg') import matplotlib.pyplot as plt import matplotlib.gridspec as gridspec from matplotlib.dates import date2num, DateFormatter except ImportError: pass WINDOW = 300 HIGHLIGHT_ALPHA = 0.3 ANOMALY_HIGHLIGHT_COLOR = 'red' WEEKEND_HIGHLIGHT_COLOR = 'yellow' ANOMALY_THRESHOLD = 0.9 class NuPICOutput(object): __metaclass__ = ABCMeta def __init__(self, name): self.name = name self.anomalyLikelihoodHelper = anomaly_likelihood.AnomalyLikelihood() @abstractmethod def write(self, timestamp, value, predicted, anomalyScore): pass @abstractmethod def close(self): pass class NuPICFileOutput(NuPICOutput): def __init__(self, *args, **kwargs): super(NuPICFileOutput, self).__init__(*args, **kwargs) self.outputFiles = [] self.outputWriters = [] self.lineCount = 0 headerRow = [ 'timestamp', 'kw_energy_consumption', 'prediction', 'anomaly_score', 'anomaly_likelihood' ] outputFileName = "%s_out.csv" % self.name print "Preparing to output %s data to %s" % (self.name, outputFileName) self.outputFile = open(outputFileName, "w") self.outputWriter = csv.writer(self.outputFile) self.outputWriter.writerow(headerRow) def write(self, timestamp, value, predicted, anomalyScore): if timestamp is not None: anomalyLikelihood = self.anomalyLikelihoodHelper.anomalyProbability( value, anomalyScore, timestamp ) outputRow = [timestamp, value, predicted, anomalyScore, anomalyLikelihood] self.outputWriter.writerow(outputRow) self.lineCount += 1 def close(self): self.outputFile.close() print "Done. Wrote %i data lines to %s." 
% (self.lineCount, self.name) def extractWeekendHighlights(dates): weekendsOut = [] weekendSearch = [5, 6] weekendStart = None for i, date in enumerate(dates): if date.weekday() in weekendSearch: if weekendStart is None: # Mark start of weekend weekendStart = i else: if weekendStart is not None: # Mark end of weekend weekendsOut.append(( weekendStart, i, WEEKEND_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA )) weekendStart = None # Cap it off if we're still in the middle of a weekend if weekendStart is not None: weekendsOut.append(( weekendStart, len(dates)-1, WEEKEND_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA )) return weekendsOut def extractAnomalyIndices(anomalyLikelihood): anomaliesOut = [] anomalyStart = None for i, likelihood in enumerate(anomalyLikelihood): if likelihood >= ANOMALY_THRESHOLD: if anomalyStart is None: # Mark start of anomaly anomalyStart = i else: if anomalyStart is not None: # Mark end of anomaly anomaliesOut.append(( anomalyStart, i, ANOMALY_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA )) anomalyStart = None # Cap it off if we're still in the middle of an anomaly if anomalyStart is not None: anomaliesOut.append(( anomalyStart, len(anomalyLikelihood)-1, ANOMALY_HIGHLIGHT_COLOR, HIGHLIGHT_ALPHA )) return anomaliesOut class NuPICPlotOutput(NuPICOutput): def __init__(self, *args, **kwargs): super(NuPICPlotOutput, self).__init__(*args, **kwargs) # Turn matplotlib interactive mode on. plt.ion() self.dates = [] self.convertedDates = [] self.value = [] self.allValues = [] self.predicted = [] self.anomalyScore = [] self.anomalyLikelihood = [] self.actualLine = None self.predictedLine = None self.anomalyScoreLine = None self.anomalyLikelihoodLine = None self.linesInitialized = False self._chartHighlights = [] fig = plt.figure(figsize=(16, 10)) gs = gridspec.GridSpec(2, 1, height_ratios=[3, 1]) self._mainGraph = fig.add_subplot(gs[0, 0]) plt.title(self.name) plt.ylabel('KW Energy Consumption') plt.xlabel('Date') self._anomalyGraph = fig.add_subplot(gs[1]) plt.ylabel('Percentage') plt.xlabel('Date') # Maximizes window mng = plt.get_current_fig_manager() mng.resize(*mng.window.maxsize()) plt.tight_layout() def initializeLines(self, timestamp): print "initializing %s" % self.name anomalyRange = (0.0, 1.0) self.dates = deque([timestamp] * WINDOW, maxlen=WINDOW) self.convertedDates = deque( [date2num(date) for date in self.dates], maxlen=WINDOW ) self.value = deque([0.0] * WINDOW, maxlen=WINDOW) self.predicted = deque([0.0] * WINDOW, maxlen=WINDOW) self.anomalyScore = deque([0.0] * WINDOW, maxlen=WINDOW) self.anomalyLikelihood = deque([0.0] * WINDOW, maxlen=WINDOW) actualPlot, = self._mainGraph.plot(self.dates, self.value) self.actualLine = actualPlot predictedPlot, = self._mainGraph.plot(self.dates, self.predicted) self.predictedLine = predictedPlot self._mainGraph.legend(tuple(['actual', 'predicted']), loc=3) anomalyScorePlot, = self._anomalyGraph.plot( self.dates, self.anomalyScore, 'm' ) anomalyScorePlot.axes.set_ylim(anomalyRange) self.anomalyScoreLine = anomalyScorePlot anomalyLikelihoodPlot, = self._anomalyGraph.plot( self.dates, self.anomalyScore, 'r' ) anomalyLikelihoodPlot.axes.set_ylim(anomalyRange) self.anomalyLikelihoodLine = anomalyLikelihoodPlot self._anomalyGraph.legend( tuple(['anomaly score', 'anomaly likelihood']), loc=3 ) dateFormatter = DateFormatter('%m/%d %H:%M') self._mainGraph.xaxis.set_major_formatter(dateFormatter) self._anomalyGraph.xaxis.set_major_formatter(dateFormatter) self._mainGraph.relim() self._mainGraph.autoscale_view(True, True, True) self.linesInitialized = True def 
highlightChart(self, highlights, chart): for highlight in highlights: # Each highlight contains [start-index, stop-index, color, alpha] self._chartHighlights.append(chart.axvspan( self.convertedDates[highlight[0]], self.convertedDates[highlight[1]], color=highlight[2], alpha=highlight[3] )) def write(self, timestamp, value, predicted, anomalyScore): # We need the first timestamp to initialize the lines at the right X value, # so do that check first. if not self.linesInitialized: self.initializeLines(timestamp) anomalyLikelihood = self.anomalyLikelihoodHelper.anomalyProbability( value, anomalyScore, timestamp ) self.dates.append(timestamp) self.convertedDates.append(date2num(timestamp)) self.value.append(value) self.allValues.append(value) self.predicted.append(predicted) self.anomalyScore.append(anomalyScore) self.anomalyLikelihood.append(anomalyLikelihood) # Update main chart data self.actualLine.set_xdata(self.convertedDates) self.actualLine.set_ydata(self.value) self.predictedLine.set_xdata(self.convertedDates) self.predictedLine.set_ydata(self.predicted) # Update anomaly chart data self.anomalyScoreLine.set_xdata(self.convertedDates) self.anomalyScoreLine.set_ydata(self.anomalyScore) self.anomalyLikelihoodLine.set_xdata(self.convertedDates) self.anomalyLikelihoodLine.set_ydata(self.anomalyLikelihood) # Remove previous highlighted regions for poly in self._chartHighlights: poly.remove() self._chartHighlights = [] weekends = extractWeekendHighlights(self.dates) anomalies = extractAnomalyIndices(self.anomalyLikelihood) # Highlight weekends in main chart self.highlightChart(weekends, self._mainGraph) # Highlight anomalies in anomaly chart self.highlightChart(anomalies, self._anomalyGraph) maxValue = max(self.allValues) self._mainGraph.relim() self._mainGraph.axes.set_ylim(0, maxValue + (maxValue * 0.02)) self._mainGraph.relim() self._mainGraph.autoscale_view(True, scaley=False) self._anomalyGraph.relim() self._anomalyGraph.autoscale_view(True, True, True) plt.draw() def close(self): plt.ioff() plt.show() NuPICOutput.register(NuPICFileOutput) NuPICOutput.register(NuPICPlotOutput)
agpl-3.0
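Both output classes above convert raw anomaly scores into likelihoods with anomaly_likelihood.AnomalyLikelihood().anomalyProbability(value, anomalyScore, timestamp). A hedged sketch of that call on synthetic data; nupic must be installed, the values and timestamps are assumptions, and a short toy run like this may or may not push the likelihood past the 0.9 threshold mirrored from ANOMALY_THRESHOLD above:

import datetime
from nupic.algorithms import anomaly_likelihood

helper = anomaly_likelihood.AnomalyLikelihood()
start = datetime.datetime(2014, 1, 1)
for i in range(500):
    ts = start + datetime.timedelta(minutes=15 * i)
    value = 15.0 if i == 400 else 10.0          # one injected spike (assumption)
    raw_score = 0.9 if i == 400 else 0.05       # stand-in for the model's anomaly score
    likelihood = helper.anomalyProbability(value, raw_score, ts)
    if likelihood >= 0.9:
        print("anomalous point at %s, likelihood %.3f" % (ts, likelihood))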
Gillu13/scipy
scipy/optimize/_lsq/least_squares.py
3
36471
"""Generic interface for least-square minimization.""" from warnings import warn import numpy as np from numpy.linalg import norm from scipy.sparse import issparse, csr_matrix from scipy.sparse.linalg import LinearOperator from scipy.optimize import _minpack, OptimizeResult from scipy.optimize._numdiff import approx_derivative, group_columns from scipy._lib.six import string_types from .trf import trf from .dogbox import dogbox from .common import EPS, in_bounds, make_strictly_feasible TERMINATION_MESSAGES = { -1: "Improper input parameters status returned from `leastsq`", 0: "The maximum number of function evaluations is exceeded.", 1: "`gtol` termination condition is satisfied.", 2: "`ftol` termination condition is satisfied.", 3: "`xtol` termination condition is satisfied.", 4: "Both `ftol` and `xtol` termination conditions are satisfied." } FROM_MINPACK_TO_COMMON = { 0: -1, # Improper input parameters from MINPACK. 1: 2, 2: 3, 3: 4, 4: 1, 5: 0 # There are 6, 7, 8 for too small tolerance parameters, # but we guard against it by checking ftol, xtol, gtol beforehand. } def call_minpack(fun, x0, jac, ftol, xtol, gtol, max_nfev, x_scale, diff_step): n = x0.size if diff_step is None: epsfcn = EPS else: epsfcn = diff_step**2 # Compute MINPACK's `diag`, which is inverse of our `x_scale` and # ``x_scale='jac'`` corresponds to ``diag=None``. if isinstance(x_scale, string_types) and x_scale == 'jac': diag = None else: diag = 1 / x_scale full_output = True col_deriv = False factor = 100.0 if jac is None: if max_nfev is None: # n squared to account for Jacobian evaluations. max_nfev = 100 * n * (n + 1) x, info, status = _minpack._lmdif( fun, x0, (), full_output, ftol, xtol, gtol, max_nfev, epsfcn, factor, diag) else: if max_nfev is None: max_nfev = 100 * n x, info, status = _minpack._lmder( fun, jac, x0, (), full_output, col_deriv, ftol, xtol, gtol, max_nfev, factor, diag) f = info['fvec'] if callable(jac): J = jac(x) else: J = np.atleast_2d(approx_derivative(fun, x)) cost = 0.5 * np.dot(f, f) g = J.T.dot(f) g_norm = norm(g, ord=np.inf) nfev = info['nfev'] njev = info.get('njev', None) status = FROM_MINPACK_TO_COMMON[status] active_mask = np.zeros_like(x0, dtype=int) return OptimizeResult( x=x, cost=cost, fun=f, jac=J, grad=g, optimality=g_norm, active_mask=active_mask, nfev=nfev, njev=njev, status=status) def prepare_bounds(bounds, n): lb, ub = [np.asarray(b, dtype=float) for b in bounds] if lb.ndim == 0: lb = np.resize(lb, n) if ub.ndim == 0: ub = np.resize(ub, n) return lb, ub def check_tolerance(ftol, xtol, gtol): message = "{} is too low, setting to machine epsilon {}." 
if ftol < EPS: warn(message.format("`ftol`", EPS)) ftol = EPS if xtol < EPS: warn(message.format("`xtol`", EPS)) xtol = EPS if gtol < EPS: warn(message.format("`gtol`", EPS)) gtol = EPS return ftol, xtol, gtol def check_x_scale(x_scale, x0): if isinstance(x_scale, string_types) and x_scale == 'jac': return x_scale try: x_scale = np.asarray(x_scale, dtype=float) valid = np.all(np.isfinite(x_scale)) and np.all(x_scale > 0) except (ValueError, TypeError): valid = False if not valid: raise ValueError("`x_scale` must be 'jac' or array_like with " "positive numbers.") if x_scale.ndim == 0: x_scale = np.resize(x_scale, x0.shape) if x_scale.shape != x0.shape: raise ValueError("Inconsistent shapes between `x_scale` and `x0`.") return x_scale def check_jac_sparsity(jac_sparsity, m, n): if jac_sparsity is None: return None if not issparse(jac_sparsity): jac_sparsity = np.atleast_2d(jac_sparsity) if jac_sparsity.shape != (m, n): raise ValueError("`jac_sparsity` has wrong shape.") return jac_sparsity, group_columns(jac_sparsity) # Loss functions. def huber(z, rho, cost_only): mask = z <= 1 rho[0, mask] = z[mask] rho[0, ~mask] = 2 * z[~mask]**0.5 - 1 if cost_only: return rho[1, mask] = 1 rho[1, ~mask] = z[~mask]**-0.5 rho[2, mask] = 0 rho[2, ~mask] = -0.5 * z[~mask]**-1.5 def soft_l1(z, rho, cost_only): t = 1 + z rho[0] = 2 * (t**0.5 - 1) if cost_only: return rho[1] = t**-0.5 rho[2] = -0.5 * t**-1.5 def cauchy(z, rho, cost_only): rho[0] = np.log1p(z) if cost_only: return t = 1 + z rho[1] = 1 / t rho[2] = -1 / t**2 def arctan(z, rho, cost_only): rho[0] = np.arctan(z) if cost_only: return t = 1 + z**2 rho[1] = 1 / t rho[2] = -2 * z / t**2 IMPLEMENTED_LOSSES = dict(linear=None, huber=huber, soft_l1=soft_l1, cauchy=cauchy, arctan=arctan) def construct_loss_function(m, loss, f_scale): if loss == 'linear': return None if not callable(loss): loss = IMPLEMENTED_LOSSES[loss] rho = np.empty((3, m)) def loss_function(f, cost_only=False): z = (f / f_scale) ** 2 loss(z, rho, cost_only=cost_only) if cost_only: return 0.5 * f_scale ** 2 * np.sum(rho[0]) rho[0] *= f_scale ** 2 rho[2] /= f_scale ** 2 return rho else: def loss_function(f, cost_only=False): z = (f / f_scale) ** 2 rho = loss(z) if cost_only: return 0.5 * f_scale ** 2 * np.sum(rho[0]) rho[0] *= f_scale ** 2 rho[2] /= f_scale ** 2 return rho return loss_function def least_squares( fun, x0, jac='2-point', bounds=(-np.inf, np.inf), method='trf', ftol=1e-8, xtol=1e-8, gtol=1e-8, x_scale=1.0, loss='linear', f_scale=1.0, diff_step=None, tr_solver=None, tr_options={}, jac_sparsity=None, max_nfev=None, verbose=0, args=(), kwargs={}): """Solve a nonlinear least-squares problem with bounds on the variables. Given the residuals f(x) (an m-dimensional function of n variables) and the loss function rho(s) (a scalar function), `least_squares` finds a local minimum of the cost function F(x):: minimize F(x) = 0.5 * sum(rho(f_i(x)**2), i = 0, ..., m - 1) subject to lb <= x <= ub The purpose of the loss function rho(s) is to reduce the influence of outliers on the solution. Parameters ---------- fun : callable Function which computes the vector of residuals, with the signature ``fun(x, *args, **kwargs)``, i.e., the minimization proceeds with respect to its first argument. The argument ``x`` passed to this function is an ndarray of shape (n,) (never a scalar, even for n=1). It must return a 1-d array_like of shape (m,) or a scalar. x0 : array_like with shape (n,) or float Initial guess on independent variables. If float, it will be treated as a 1-d array with one element. 
jac : {'2-point', '3-point', 'cs', callable}, optional Method of computing the Jacobian matrix (an m-by-n matrix, where element (i, j) is the partial derivative of f[i] with respect to x[j]). The keywords select a finite difference scheme for numerical estimation. The scheme '3-point' is more accurate, but requires twice as much operations compared to '2-point' (default). The scheme 'cs' uses complex steps, and while potentially the most accurate, it is applicable only when `fun` correctly handles complex inputs and can be analytically continued to the complex plane. Method 'lm' always uses the '2-point' scheme. If callable, it is used as ``jac(x, *args, **kwargs)`` and should return a good approximation (or the exact value) for the Jacobian as an array_like (np.atleast_2d is applied), a sparse matrix or a `scipy.sparse.linalg.LinearOperator`. bounds : 2-tuple of array_like, optional Lower and upper bounds on independent variables. Defaults to no bounds. Each array must match the size of `x0` or be a scalar, in the latter case a bound will be the same for all variables. Use ``np.inf`` with an appropriate sign to disable bounds on all or some variables. method : {'trf', 'dogbox', 'lm'}, optional Algorithm to perform minimization. * 'trf' : Trust Region Reflective algorithm, particularly suitable for large sparse problems with bounds. Generally robust method. * 'dogbox' : dogleg algorithm with rectangular trust regions, typical use case is small problems with bounds. Not recommended for problems with rank-deficient Jacobian. * 'lm' : Levenberg-Marquardt algorithm as implemented in MINPACK. Doesn't handle bounds and sparse Jacobians. Usually the most efficient method for small unconstrained problems. Default is 'trf'. See Notes for more information. ftol : float, optional Tolerance for termination by the change of the cost function. Default is 1e-8. The optimization process is stopped when ``dF < ftol * F``, and there was an adequate agreement between a local quadratic model and the true model in the last step. xtol : float, optional Tolerance for termination by the change of the independent variables. Default is 1e-8. The exact condition depends on the `method` used: * For 'trf' and 'dogbox' : ``norm(dx) < xtol * (xtol + norm(x))`` * For 'lm' : ``Delta < xtol * norm(xs)``, where ``Delta`` is a trust-region radius and ``xs`` is the value of ``x`` scaled according to `x_scale` parameter (see below). gtol : float, optional Tolerance for termination by the norm of the gradient. Default is 1e-8. The exact condition depends on a `method` used: * For 'trf' : ``norm(g_scaled, ord=np.inf) < gtol``, where ``g_scaled`` is the value of the gradient scaled to account for the presence of the bounds [STIR]_. * For 'dogbox' : ``norm(g_free, ord=np.inf) < gtol``, where ``g_free`` is the gradient with respect to the variables which are not in the optimal state on the boundary. * For 'lm' : the maximum absolute value of the cosine of angles between columns of the Jacobian and the residual vector is less than `gtol`, or the residual vector is zero. x_scale : array_like or 'jac', optional Characteristic scale of each variable. Setting `x_scale` is equivalent to reformulating the problem in scaled variables ``xs = x / x_scale``. An alternative view is that the size of a trust region along j-th dimension is proportional to ``x_scale[j]``. Improved convergence may be achieved by setting `x_scale` such that a step of a given size along any of the scaled variables has a similar effect on the cost function. 
If set to 'jac', the scale is iteratively updated using the inverse norms of the columns of the Jacobian matrix (as described in [JJMore]_). loss : str or callable, optional Determines the loss function. The following keyword values are allowed: * 'linear' (default) : ``rho(z) = z``. Gives a standard least-squares problem. * 'soft_l1' : ``rho(z) = 2 * ((1 + z)**0.5 - 1)``. The smooth approximation of l1 (absolute value) loss. Usually a good choice for robust least squares. * 'huber' : ``rho(z) = z if z <= 1 else 2*z**0.5 - 1``. Works similarly to 'soft_l1'. * 'cauchy' : ``rho(z) = ln(1 + z)``. Severely weakens outliers influence, but may cause difficulties in optimization process. * 'arctan' : ``rho(z) = arctan(z)``. Limits a maximum loss on a single residual, has properties similar to 'cauchy'. If callable, it must take a 1-d ndarray ``z=f**2`` and return an array_like with shape (3, m) where row 0 contains function values, row 1 contains first derivatives and row 2 contains second derivatives. Method 'lm' supports only 'linear' loss. f_scale : float, optional Value of soft margin between inlier and outlier residuals, default is 1.0. The loss function is evaluated as follows ``rho_(f**2) = C**2 * rho(f**2 / C**2)``, where ``C`` is `f_scale`, and ``rho`` is determined by `loss` parameter. This parameter has no effect with ``loss='linear'``, but for other `loss` values it is of crucial importance. max_nfev : None or int, optional Maximum number of function evaluations before the termination. If None (default), the value is chosen automatically: * For 'trf' and 'dogbox' : 100 * n. * For 'lm' : 100 * n if `jac` is callable and 100 * n * (n + 1) otherwise (because 'lm' counts function calls in Jacobian estimation). diff_step : None or array_like, optional Determines the relative step size for the finite difference approximation of the Jacobian. The actual step is computed as ``x * diff_step``. If None (default), then `diff_step` is taken to be a conventional "optimal" power of machine epsilon for the finite difference scheme used [NR]_. tr_solver : {None, 'exact', 'lsmr'}, optional Method for solving trust-region subproblems, relevant only for 'trf' and 'dogbox' methods. * 'exact' is suitable for not very large problems with dense Jacobian matrices. The computational complexity per iteration is comparable to a singular value decomposition of the Jacobian matrix. * 'lsmr' is suitable for problems with sparse and large Jacobian matrices. It uses the iterative procedure `scipy.sparse.linalg.lsmr` for finding a solution of a linear least-squares problem and only requires matrix-vector product evaluations. If None (default) the solver is chosen based on the type of Jacobian returned on the first iteration. tr_options : dict, optional Keyword options passed to trust-region solver. * ``tr_solver='exact'``: `tr_options` are ignored. * ``tr_solver='lsmr'``: options for `scipy.sparse.linalg.lsmr`. Additionally ``method='trf'`` supports 'regularize' option (bool, default is True) which adds a regularization term to the normal equation, which improves convergence if the Jacobian is rank-deficient [Byrd]_ (eq. 3.4). jac_sparsity : {None, array_like, sparse matrix}, optional Defines the sparsity structure of the Jacobian matrix for finite difference estimation, its shape must be (m, n). If the Jacobian has only few non-zero elements in *each* row, providing the sparsity structure will greatly speed up the computations [Curtis]_. 
A zero entry means that a corresponding element in the Jacobian is identically zero. If provided, forces the use of the 'lsmr' trust-region solver. If None (default) then dense differencing will be used. Has no effect for the 'lm' method. verbose : {0, 1, 2}, optional Level of algorithm's verbosity: * 0 (default) : work silently. * 1 : display a termination report. * 2 : display progress during iterations (not supported by 'lm' method). args, kwargs : tuple and dict, optional Additional arguments passed to `fun` and `jac`. Both empty by default. The calling signature is ``fun(x, *args, **kwargs)`` and the same for `jac`. Returns ------- `OptimizeResult` with the following fields defined: x : ndarray, shape (n,) Solution found. cost : float Value of the cost function at the solution. fun : ndarray, shape (m,) Vector of residuals at the solution. jac : ndarray, sparse matrix or LinearOperator, shape (m, n) Modified Jacobian matrix at the solution, in the sense that J^T J is a Gauss-Newton approximation of the Hessian of the cost function. The type is the same as the one used by the algorithm. grad : ndarray, shape (n,) Gradient of the cost function at the solution. optimality : float First-order optimality measure. In unconstrained problems, it is always the uniform norm of the gradient. In constrained problems, it is the quantity which was compared with `gtol` during iterations. active_mask : ndarray of int, shape (n,) Each component shows whether a corresponding constraint is active (that is, whether a variable is at the bound): * 0 : a constraint is not active. * -1 : a lower bound is active. * 1 : an upper bound is active. Might be somewhat arbitrary for the 'trf' method as it generates a sequence of strictly feasible iterates and `active_mask` is determined within a tolerance threshold. nfev : int Number of function evaluations done. Methods 'trf' and 'dogbox' do not count function calls for numerical Jacobian approximation, as opposed to the 'lm' method. njev : int or None Number of Jacobian evaluations done. If numerical Jacobian approximation is used in the 'lm' method, it is set to None. status : int The reason for algorithm termination: * -1 : improper input parameters status returned from MINPACK. * 0 : the maximum number of function evaluations is exceeded. * 1 : `gtol` termination condition is satisfied. * 2 : `ftol` termination condition is satisfied. * 3 : `xtol` termination condition is satisfied. * 4 : Both `ftol` and `xtol` termination conditions are satisfied. message : str Verbal description of the termination reason. success : bool True if one of the convergence criteria is satisfied (`status` > 0). See Also -------- leastsq : A legacy wrapper for the MINPACK implementation of the Levenberg-Marquardt algorithm. curve_fit : Least-squares minimization applied to a curve fitting problem. Notes ----- Method 'lm' (Levenberg-Marquardt) calls a wrapper over least-squares algorithms implemented in MINPACK (lmder, lmdif). It runs the Levenberg-Marquardt algorithm formulated as a trust-region type algorithm. The implementation is based on paper [JJMore]_; it is very robust and efficient with a lot of smart tricks. It should be your first choice for unconstrained problems. Note that it doesn't support bounds. Also, it doesn't work when m < n. Method 'trf' (Trust Region Reflective) is motivated by the process of solving a system of equations, which constitutes the first-order optimality condition for a bound-constrained minimization problem as formulated in [STIR]_.
The algorithm iteratively solves trust-region subproblems augmented by a special diagonal quadratic term and with a trust-region shape determined by the distance from the bounds and the direction of the gradient. These enhancements help to avoid making steps directly into the bounds and to efficiently explore the whole space of variables. To further improve convergence, the algorithm considers search directions reflected from the bounds. To obey theoretical requirements, the algorithm keeps iterates strictly feasible. With dense Jacobians trust-region subproblems are solved by an exact method very similar to the one described in [JJMore]_ (and implemented in MINPACK). The difference from the MINPACK implementation is that a singular value decomposition of the Jacobian matrix is done once per iteration, instead of a QR decomposition and series of Givens rotation eliminations. For large sparse Jacobians a 2-d subspace approach to solving trust-region subproblems is used [STIR]_, [Byrd]_. The subspace is spanned by a scaled gradient and an approximate Gauss-Newton solution delivered by `scipy.sparse.linalg.lsmr`. When no constraints are imposed the algorithm is very similar to MINPACK and has generally comparable performance. The algorithm works quite robustly in unbounded and bounded problems, and is therefore chosen as the default algorithm. Method 'dogbox' operates in a trust-region framework, but considers rectangular trust regions as opposed to conventional ellipsoids [Voglis]_. The intersection of a current trust region and the initial bounds is again rectangular, so on each iteration a quadratic minimization problem subject to bound constraints is solved approximately by Powell's dogleg method [NumOpt]_. The required Gauss-Newton step can be computed exactly for dense Jacobians or approximately by `scipy.sparse.linalg.lsmr` for large sparse Jacobians. The algorithm is likely to exhibit slow convergence when the rank of the Jacobian is less than the number of variables. The algorithm often outperforms 'trf' in bounded problems with a small number of variables. Robust loss functions are implemented as described in [BA]_. The idea is to modify a residual vector and a Jacobian matrix on each iteration such that the computed gradient and Gauss-Newton Hessian approximation match the true gradient and Hessian approximation of the cost function. Then the algorithm proceeds in a normal way, i.e. robust loss functions are implemented as a simple wrapper over standard least-squares algorithms. .. versionadded:: 0.17.0 References ---------- .. [STIR] M. A. Branch, T. F. Coleman, and Y. Li, "A Subspace, Interior, and Conjugate Gradient Method for Large-Scale Bound-Constrained Minimization Problems," SIAM Journal on Scientific Computing, Vol. 21, Number 1, pp 1-23, 1999. .. [NR] William H. Press et al., "Numerical Recipes. The Art of Scientific Computing. 3rd edition", Sec. 5.7. .. [Byrd] R. H. Byrd, R. B. Schnabel and G. A. Shultz, "Approximate solution of the trust region problem by minimization over two-dimensional subspaces", Math. Programming, 40, pp. 247-263, 1988. .. [Curtis] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of sparse Jacobian matrices", Journal of the Institute of Mathematics and its Applications, 13, pp. 117-120, 1974. .. [JJMore] J. J. More, "The Levenberg-Marquardt Algorithm: Implementation and Theory," Numerical Analysis, ed. G. A. Watson, Lecture Notes in Mathematics 630, Springer Verlag, pp. 105-116, 1977. .. [Voglis] C. Voglis and I. E.
Lagaris, "A Rectangular Trust Region Dogleg Approach for Unconstrained and Bound Constrained Nonlinear Optimization", WSEAS International Conference on Applied Mathematics, Corfu, Greece, 2004. .. [NumOpt] J. Nocedal and S. J. Wright, "Numerical optimization, 2nd edition", Chapter 4. .. [BA] B. Triggs et. al., "Bundle Adjustment - A Modern Synthesis", Proceedings of the International Workshop on Vision Algorithms: Theory and Practice, pp. 298-372, 1999. Examples -------- In this example we find a minimum of the Rosenbrock function without bounds on independed variables. >>> def fun_rosenbrock(x): ... return np.array([10 * (x[1] - x[0]**2), (1 - x[0])]) Notice that we only provide the vector of the residuals. The algorithm constructs the cost function as a sum of squares of the residuals, which gives the Rosenbrock function. The exact minimum is at ``x = [1.0, 1.0]``. >>> from scipy.optimize import least_squares >>> x0_rosenbrock = np.array([2, 2]) >>> res_1 = least_squares(fun_rosenbrock, x0_rosenbrock) >>> res_1.x array([ 1., 1.]) >>> res_1.cost 9.8669242910846867e-30 >>> res_1.optimality 8.8928864934219529e-14 We now constrain the variables, in such a way that the previous solution becomes infeasible. Specifically, we require that ``x[1] >= 1.5``, and ``x[0]`` left unconstrained. To this end, we specify the `bounds` parameter to `least_squares` in the form ``bounds=([-np.inf, 1.5], np.inf)``. We also provide the analytic Jacobian: >>> def jac_rosenbrock(x): ... return np.array([ ... [-20 * x[0], 10], ... [-1, 0]]) Putting this all together, we see that the new solution lies on the bound: >>> res_2 = least_squares(fun_rosenbrock, x0_rosenbrock, jac_rosenbrock, ... bounds=([-np.inf, 1.5], np.inf)) >>> res_2.x array([ 1.22437075, 1.5 ]) >>> res_2.cost 0.025213093946805685 >>> res_2.optimality 1.5885401433157753e-07 Now we solve a system of equations (i.e., the cost function should be zero at a minimum) for a Broyden tridiagonal vector-valued function of 100000 variables: >>> def fun_broyden(x): ... f = (3 - x) * x + 1 ... f[1:] -= x[:-1] ... f[:-1] -= 2 * x[1:] ... return f The corresponding Jacobian matrix is sparse. We tell the algorithm to estimate it by finite differences and provide the sparsity structure of Jacobian to significantly speed up this process. >>> from scipy.sparse import lil_matrix >>> def sparsity_broyden(n): ... sparsity = lil_matrix((n, n), dtype=int) ... i = np.arange(n) ... sparsity[i, i] = 1 ... i = np.arange(1, n) ... sparsity[i, i - 1] = 1 ... i = np.arange(n - 1) ... sparsity[i, i + 1] = 1 ... return sparsity ... >>> n = 100000 >>> x0_broyden = -np.ones(n) ... >>> res_3 = least_squares(fun_broyden, x0_broyden, ... jac_sparsity=sparsity_broyden(n)) >>> res_3.cost 4.5687069299604613e-23 >>> res_3.optimality 1.1650454296851518e-11 Let's also solve a curve fitting problem using robust loss function to take care of outliers in the data. Define the model function as ``y = a + b * exp(c * t)``, where t is a predictor variable, y is an observation and a, b, c are parameters to estimate. First, define the function which generates the data with noise and outliers, define the model parameters, and generate data: >>> def gen_data(t, a, b, c, noise=0, n_outliers=0, random_state=0): ... y = a + b * np.exp(t * c) ... ... rnd = np.random.RandomState(random_state) ... error = noise * rnd.randn(t.size) ... outliers = rnd.randint(0, t.size, n_outliers) ... error[outliers] *= 10 ... ... return y + error ... 
>>> a = 0.5 >>> b = 2.0 >>> c = -1 >>> t_min = 0 >>> t_max = 10 >>> n_points = 15 ... >>> t_train = np.linspace(t_min, t_max, n_points) >>> y_train = gen_data(t_train, a, b, c, noise=0.1, n_outliers=3) Define function for computing residuals and initial estimate of parameters. >>> def fun(x, t, y): ... return x[0] + x[1] * np.exp(x[2] * t) - y ... >>> x0 = np.array([1.0, 1.0, 0.0]) Compute a standard least-squares solution: >>> res_lsq = least_squares(fun, x0, args=(t_train, y_train)) Now compute two solutions with two different robust loss functions. The parameter `f_scale` is set to 0.1, meaning that inlier residuals should not significantly exceed 0.1 (the noise level used). >>> res_soft_l1 = least_squares(fun, x0, loss='soft_l1', f_scale=0.1, ... args=(t_train, y_train)) >>> res_log = least_squares(fun, x0, loss='cauchy', f_scale=0.1, ... args=(t_train, y_train)) And finally plot all the curves. We see that by selecting an appropriate `loss` we can get estimates close to optimal even in the presence of strong outliers. But keep in mind that generally it is recommended to try 'soft_l1' or 'huber' losses first (if at all necessary) as the other two options may cause difficulties in optimization process. >>> t_test = np.linspace(t_min, t_max, n_points * 10) >>> y_true = gen_data(t_test, a, b, c) >>> y_lsq = gen_data(t_test, *res_lsq.x) >>> y_soft_l1 = gen_data(t_test, *res_soft_l1.x) >>> y_log = gen_data(t_test, *res_log.x) ... >>> import matplotlib.pyplot as plt >>> plt.plot(t_train, y_train, 'o') >>> plt.plot(t_test, y_true, 'k', linewidth=2, label='true') >>> plt.plot(t_test, y_lsq, label='linear loss') >>> plt.plot(t_test, y_soft_l1, label='soft_l1 loss') >>> plt.plot(t_test, y_log, label='cauchy loss') >>> plt.xlabel("t") >>> plt.ylabel("y") >>> plt.legend() >>> plt.show() """ if method not in ['trf', 'dogbox', 'lm']: raise ValueError("`method` must be 'trf', 'dogbox' or 'lm'.") if jac not in ['2-point', '3-point', 'cs'] and not callable(jac): raise ValueError("`jac` must be '2-point', '3-point', 'cs' or " "callable.") if tr_solver not in [None, 'exact', 'lsmr']: raise ValueError("`tr_solver` must be None, 'exact' or 'lsmr'.") if loss not in IMPLEMENTED_LOSSES and not callable(loss): raise ValueError("`loss` must be one of {0} or a callable." 
.format(IMPLEMENTED_LOSSES.keys())) if method == 'lm' and loss != 'linear': raise ValueError("method='lm' supports only 'linear' loss function.") if verbose not in [0, 1, 2]: raise ValueError("`verbose` must be in [0, 1, 2].") if len(bounds) != 2: raise ValueError("`bounds` must contain 2 elements.") if max_nfev is not None and max_nfev <= 0: raise ValueError("`max_nfev` must be None or positive integer.") x0 = np.atleast_1d(x0).astype(float) if x0.ndim > 1: raise ValueError("`x0` must have at most 1 dimension.") lb, ub = prepare_bounds(bounds, x0.shape[0]) if method == 'lm' and not np.all((lb == -np.inf) & (ub == np.inf)): raise ValueError("Method 'lm' doesn't support bounds.") if lb.shape != x0.shape or ub.shape != x0.shape: raise ValueError("Inconsistent shapes between bounds and `x0`.") if np.any(lb >= ub): raise ValueError("Each lower bound must be strictly less than each " "upper bound.") if not in_bounds(x0, lb, ub): raise ValueError("`x0` is infeasible.") x_scale = check_x_scale(x_scale, x0) ftol, xtol, gtol = check_tolerance(ftol, xtol, gtol) def fun_wrapped(x): return np.atleast_1d(fun(x, *args, **kwargs)) if method == 'trf': x0 = make_strictly_feasible(x0, lb, ub) f0 = fun_wrapped(x0) if f0.ndim != 1: raise ValueError("`fun` must return at most 1-d array_like.") if not np.all(np.isfinite(f0)): raise ValueError("Residuals are not finite in the initial point.") n = x0.size m = f0.size if method == 'lm' and m < n: raise ValueError("Method 'lm' doesn't work when the number of " "residuals is less than the number of variables.") loss_function = construct_loss_function(m, loss, f_scale) if callable(loss): rho = loss_function(f0) if rho.shape != (3, m): raise ValueError("The return value of `loss` callable has wrong " "shape.") initial_cost = 0.5 * np.sum(rho[0]) elif loss_function is not None: initial_cost = loss_function(f0, cost_only=True) else: initial_cost = 0.5 * np.dot(f0, f0) if callable(jac): J0 = jac(x0, *args, **kwargs) if issparse(J0): J0 = csr_matrix(J0) def jac_wrapped(x, _=None): return csr_matrix(jac(x, *args, **kwargs)) elif isinstance(J0, LinearOperator): def jac_wrapped(x, _=None): return jac(x, *args, **kwargs) else: J0 = np.atleast_2d(J0) def jac_wrapped(x, _=None): return np.atleast_2d(jac(x, *args, **kwargs)) else: # Estimate Jacobian by finite differences. if method == 'lm': if jac_sparsity is not None: raise ValueError("method='lm' does not support " "`jac_sparsity`.") if jac != '2-point': warn("jac='{0}' works equivalently to '2-point' " "for method='lm'.".format(jac)) J0 = jac_wrapped = None else: if jac_sparsity is not None and tr_solver == 'exact': raise ValueError("tr_solver='exact' is incompatible " "with `jac_sparsity`.") jac_sparsity = check_jac_sparsity(jac_sparsity, m, n) def jac_wrapped(x, f): J = approx_derivative(fun, x, rel_step=diff_step, method=jac, f0=f, bounds=bounds, args=args, kwargs=kwargs, sparsity=jac_sparsity) if J.ndim != 2: # J is guaranteed not sparse. 
J = np.atleast_2d(J) return J J0 = jac_wrapped(x0, f0) if J0 is not None: if J0.shape != (m, n): raise ValueError( "The return value of `jac` has wrong shape: expected {0}, " "actual {1}.".format((m, n), J0.shape)) if not isinstance(J0, np.ndarray): if method == 'lm': raise ValueError("method='lm' works only with dense " "Jacobian matrices.") if tr_solver == 'exact': raise ValueError( "tr_solver='exact' works only with dense " "Jacobian matrices.") jac_scale = isinstance(x_scale, string_types) and x_scale == 'jac' if isinstance(J0, LinearOperator) and jac_scale: raise ValueError("x_scale='jac' can't be used when `jac` " "returns LinearOperator.") if tr_solver is None: if isinstance(J0, np.ndarray): tr_solver = 'exact' else: tr_solver = 'lsmr' if method == 'lm': result = call_minpack(fun_wrapped, x0, jac_wrapped, ftol, xtol, gtol, max_nfev, x_scale, diff_step) elif method == 'trf': result = trf(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale, loss_function, tr_solver, tr_options.copy(), verbose) elif method == 'dogbox': if tr_solver == 'lsmr' and 'regularize' in tr_options: warn("The keyword 'regularize' in `tr_options` is not relevant " "for 'dogbox' method.") tr_options = tr_options.copy() del tr_options['regularize'] result = dogbox(fun_wrapped, jac_wrapped, x0, f0, J0, lb, ub, ftol, xtol, gtol, max_nfev, x_scale, loss_function, tr_solver, tr_options, verbose) result.message = TERMINATION_MESSAGES[result.status] result.success = result.status > 0 if verbose >= 1: print(result.message) print("Function evaluations {0}, initial cost {1:.4e}, final cost " "{2:.4e}, first-order optimality {3:.2e}." .format(result.nfev, initial_cost, result.cost, result.optimality)) return result
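# --- Editor's sketch (not part of the SciPy source above) --------------------
# The `loss` docstring above says a callable loss receives z = f**2 and must
# return an array of shape (3, m) holding rho(z), rho'(z) and rho''(z).  The
# snippet below re-implements the built-in 'huber' loss in that form; the
# residual function and starting point are illustrative assumptions only.
import numpy as np
from scipy.optimize import least_squares

def huber_like_loss(z):
    out = np.empty((3, z.size))
    small = z <= 1
    big = ~small
    out[0, small] = z[small]                   # rho(z) = z
    out[0, big] = 2 * np.sqrt(z[big]) - 1      # rho(z) = 2*sqrt(z) - 1
    out[1, small] = 1.0                        # rho'(z)
    out[1, big] = 1.0 / np.sqrt(z[big])
    out[2, small] = 0.0                        # rho''(z)
    out[2, big] = -0.5 * z[big] ** -1.5
    return out

def toy_residuals(x):
    return np.array([10 * (x[1] - x[0] ** 2), 1 - x[0]])

res = least_squares(toy_residuals, np.array([2.0, 2.0]), loss=huber_like_loss)
print(res.x)  # expected to lie close to [1, 1]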
bsd-3-clause
mengxn/tensorflow
tensorflow/examples/tutorials/word2vec/word2vec_basic.py
28
9485
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Basic word2vec example.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import math import os import random import zipfile import numpy as np from six.moves import urllib from six.moves import xrange # pylint: disable=redefined-builtin import tensorflow as tf # Step 1: Download the data. url = 'http://mattmahoney.net/dc/' def maybe_download(filename, expected_bytes): """Download a file if not present, and make sure it's the right size.""" if not os.path.exists(filename): filename, _ = urllib.request.urlretrieve(url + filename, filename) statinfo = os.stat(filename) if statinfo.st_size == expected_bytes: print('Found and verified', filename) else: print(statinfo.st_size) raise Exception( 'Failed to verify ' + filename + '. Can you get to it with a browser?') return filename filename = maybe_download('text8.zip', 31344016) # Read the data into a list of strings. def read_data(filename): """Extract the first file enclosed in a zip file as a list of words.""" with zipfile.ZipFile(filename) as f: data = tf.compat.as_str(f.read(f.namelist()[0])).split() return data vocabulary = read_data(filename) print('Data size', len(vocabulary)) # Step 2: Build the dictionary and replace rare words with UNK token. vocabulary_size = 50000 def build_dataset(words, n_words): """Process raw inputs into a dataset.""" count = [['UNK', -1]] count.extend(collections.Counter(words).most_common(n_words - 1)) dictionary = dict() for word, _ in count: dictionary[word] = len(dictionary) data = list() unk_count = 0 for word in words: if word in dictionary: index = dictionary[word] else: index = 0 # dictionary['UNK'] unk_count += 1 data.append(index) count[0][1] = unk_count reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys())) return data, count, dictionary, reversed_dictionary data, count, dictionary, reverse_dictionary = build_dataset(vocabulary, vocabulary_size) del vocabulary # Hint to reduce memory. print('Most common words (+UNK)', count[:5]) print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]]) data_index = 0 # Step 3: Function to generate a training batch for the skip-gram model. 
def generate_batch(batch_size, num_skips, skip_window): global data_index assert batch_size % num_skips == 0 assert num_skips <= 2 * skip_window batch = np.ndarray(shape=(batch_size), dtype=np.int32) labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32) span = 2 * skip_window + 1 # [ skip_window target skip_window ] buffer = collections.deque(maxlen=span) for _ in range(span): buffer.append(data[data_index]) data_index = (data_index + 1) % len(data) for i in range(batch_size // num_skips): target = skip_window # target label at the center of the buffer targets_to_avoid = [skip_window] for j in range(num_skips): while target in targets_to_avoid: target = random.randint(0, span - 1) targets_to_avoid.append(target) batch[i * num_skips + j] = buffer[skip_window] labels[i * num_skips + j, 0] = buffer[target] buffer.append(data[data_index]) data_index = (data_index + 1) % len(data) # Backtrack a little bit to avoid skipping words in the end of a batch data_index = (data_index + len(data) - span) % len(data) return batch, labels batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1) for i in range(8): print(batch[i], reverse_dictionary[batch[i]], '->', labels[i, 0], reverse_dictionary[labels[i, 0]]) # Step 4: Build and train a skip-gram model. batch_size = 128 embedding_size = 128 # Dimension of the embedding vector. skip_window = 1 # How many words to consider left and right. num_skips = 2 # How many times to reuse an input to generate a label. # We pick a random validation set to sample nearest neighbors. Here we limit the # validation samples to the words that have a low numeric ID, which by # construction are also the most frequent. valid_size = 16 # Random set of words to evaluate similarity on. valid_window = 100 # Only pick dev samples in the head of the distribution. valid_examples = np.random.choice(valid_window, valid_size, replace=False) num_sampled = 64 # Number of negative examples to sample. graph = tf.Graph() with graph.as_default(): # Input data. train_inputs = tf.placeholder(tf.int32, shape=[batch_size]) train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1]) valid_dataset = tf.constant(valid_examples, dtype=tf.int32) # Ops and variables pinned to the CPU because of missing GPU implementation with tf.device('/cpu:0'): # Look up embeddings for inputs. embeddings = tf.Variable( tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0)) embed = tf.nn.embedding_lookup(embeddings, train_inputs) # Construct the variables for the NCE loss nce_weights = tf.Variable( tf.truncated_normal([vocabulary_size, embedding_size], stddev=1.0 / math.sqrt(embedding_size))) nce_biases = tf.Variable(tf.zeros([vocabulary_size])) # Compute the average NCE loss for the batch. # tf.nce_loss automatically draws a new sample of the negative labels each # time we evaluate the loss. loss = tf.reduce_mean( tf.nn.nce_loss(weights=nce_weights, biases=nce_biases, labels=train_labels, inputs=embed, num_sampled=num_sampled, num_classes=vocabulary_size)) # Construct the SGD optimizer using a learning rate of 1.0. optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss) # Compute the cosine similarity between minibatch examples and all embeddings. norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True)) normalized_embeddings = embeddings / norm valid_embeddings = tf.nn.embedding_lookup( normalized_embeddings, valid_dataset) similarity = tf.matmul( valid_embeddings, normalized_embeddings, transpose_b=True) # Add variable initializer. 
init = tf.global_variables_initializer() # Step 5: Begin training. num_steps = 100001 with tf.Session(graph=graph) as session: # We must initialize all variables before we use them. init.run() print('Initialized') average_loss = 0 for step in xrange(num_steps): batch_inputs, batch_labels = generate_batch( batch_size, num_skips, skip_window) feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels} # We perform one update step by evaluating the optimizer op (including it # in the list of returned values for session.run() _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict) average_loss += loss_val if step % 2000 == 0: if step > 0: average_loss /= 2000 # The average loss is an estimate of the loss over the last 2000 batches. print('Average loss at step ', step, ': ', average_loss) average_loss = 0 # Note that this is expensive (~20% slowdown if computed every 500 steps) if step % 10000 == 0: sim = similarity.eval() for i in xrange(valid_size): valid_word = reverse_dictionary[valid_examples[i]] top_k = 8 # number of nearest neighbors nearest = (-sim[i, :]).argsort()[1:top_k + 1] log_str = 'Nearest to %s:' % valid_word for k in xrange(top_k): close_word = reverse_dictionary[nearest[k]] log_str = '%s %s,' % (log_str, close_word) print(log_str) final_embeddings = normalized_embeddings.eval() # Step 6: Visualize the embeddings. def plot_with_labels(low_dim_embs, labels, filename='tsne.png'): assert low_dim_embs.shape[0] >= len(labels), 'More labels than embeddings' plt.figure(figsize=(18, 18)) # in inches for i, label in enumerate(labels): x, y = low_dim_embs[i, :] plt.scatter(x, y) plt.annotate(label, xy=(x, y), xytext=(5, 2), textcoords='offset points', ha='right', va='bottom') plt.savefig(filename) try: # pylint: disable=g-import-not-at-top from sklearn.manifold import TSNE import matplotlib.pyplot as plt tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000) plot_only = 500 low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only, :]) labels = [reverse_dictionary[i] for i in xrange(plot_only)] plot_with_labels(low_dim_embs, labels) except ImportError: print('Please install sklearn, matplotlib, and scipy to show embeddings.')
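# Editor's note: a tiny standalone illustration (toy tokens, not the text8 corpus
# used above) of the (center, context) pairs the skip-gram batch generator draws
# from when skip_window=1 and num_skips=2.
toy_tokens = ['the', 'quick', 'brown', 'fox']
toy_window = 1
toy_pairs = []
for pos in range(toy_window, len(toy_tokens) - toy_window):
  for offset in range(-toy_window, toy_window + 1):
    if offset != 0:
      toy_pairs.append((toy_tokens[pos], toy_tokens[pos + offset]))
print(toy_pairs)
# [('quick', 'the'), ('quick', 'brown'), ('brown', 'quick'), ('brown', 'fox')]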
apache-2.0
fossdevil/Assignments
Machine Learning/Assignment3Final/ML4.py
1
3746
import numpy as np import scipy import matplotlib.pyplot as plt import random # N points in d dimensions def generatePoints(n,d): points = [] for i in range(0,n): point = np.random.normal(0,1,d); p = point**2; den = np.sqrt(sum(p)); point = list(point/den); points.append(point); return points; def interPointDistance(points,n,d): distMat = [] distance = 0; for i in range(0,n): disti = [] for j in range(0,n): distance = np.linalg.norm(list(np.asarray(points[i])-np.asarray(points[j]))); disti.append(distance); distMat.append(disti); return distMat; def projection(points,subspace,n): projPoint = [] subspacet = np.asmatrix(subspace); subspace = subspacet.T; for i in range(0,n): inv = np.linalg.inv(np.dot(subspacet,subspace)); proj = np.dot(np.dot(np.dot(subspace,inv),subspacet),points[i]); projPoint.append(proj); return projPoint; def subspaceGen(n,d): subspace = []; subv = np.zeros(d); r = np.arange(0,d); k = list(random.sample(r,n)); j = 0; for i in range(0,n): subv = np.zeros(d); subv[k[j]] = 1; j = j+1; subspace.append(subv); return subspace; n = 50; d = 200; points50 = generatePoints(n,d); distMat = interPointDistance(points50,n,d); print("Please open file \"Solution4.txt\":"); filename = "Solution4.txt" target = open(filename,'w'); target.write("The interpoint distance Matrix is as follows:\n"); for i in range(0,n): target.write(str(distMat[i])); target.write("\n"); target.write("\n"); target.write("\n"); target.write("\n"); subspaces1 = np.asmatrix(subspaceGen(1,d)); subspaces2 = np.asmatrix(subspaceGen(2,d)); subspaces3 = np.asmatrix(subspaceGen(3,d)); subspaces10 = np.asmatrix(subspaceGen(10,d)); subspaces50 = np.asmatrix(subspaceGen(50,d)); projPoint1 = projection(points50,subspaces1,n); projPoint2 = projection(points50,subspaces2,n); projPoint3 = projection(points50,subspaces3,n); projPoint10 = projection(points50,subspaces10,n); projPoint50 = projection(points50,subspaces50,n); distMat1 = interPointDistance(projPoint1,n,d); distMat2 = interPointDistance(projPoint2,n,d); distMat3 = interPointDistance(projPoint3,n,d); distMat10 = interPointDistance(projPoint10,n,d); distMat50 = interPointDistance(projPoint50,n,d); num = np.sqrt(1.0/200); diff1 = list((num*np.asmatrix(distMat))-np.asmatrix(distMat1)); num = np.sqrt(2.0/200); diff2 = list((num*np.asmatrix(distMat))-np.asmatrix(distMat2)); num = np.sqrt(3.0/200); diff3 = list((num*np.asmatrix(distMat))-np.asmatrix(distMat3)); num = np.sqrt(10.0/200); diff10 = list((num*np.asmatrix(distMat))-np.asmatrix(distMat10)); num = np.sqrt(50.0/200); diff50 = list((num*np.asmatrix(distMat))-np.asmatrix(distMat50)); target.write("Difference matrix is as follows:\n"); target.write("For k = 1"); target.write("\n"); for i in range(0,n): target.write(str(diff1[i])); target.write("\n"); target.write("\n"); target.write("\n"); target.write("\n"); target.write("For k = 2"); target.write("\n"); for i in range(0,n): target.write(str(diff2[i])); target.write("\n"); target.write("\n"); target.write("\n"); target.write("\n"); target.write("For k = 3"); target.write("\n"); for i in range(0,n): target.write(str(diff3[i])); target.write("\n"); target.write("\n"); target.write("\n"); target.write("\n"); target.write("For k = 10"); target.write("\n"); for i in range(0,n): target.write(str(diff10[i])); target.write("\n"); target.write("\n"); target.write("\n"); target.write("\n"); target.write("For k = 50"); target.write("\n"); for i in range(0,n): target.write(str(diff50[i])); target.write("\n"); target.close();
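# Editor's sketch (independent of the assignment script above): a quick numerical
# check of the scaling the script examines -- projecting unit vectors in d
# dimensions onto a random k-dimensional coordinate subspace shrinks pairwise
# distances by roughly sqrt(k/d) on average.  All names below are illustrative.
d, k, n = 200, 10, 50
pts = np.random.randn(n, d)
pts = pts / np.linalg.norm(pts, axis=1, keepdims=True)   # points on the unit sphere
axes = np.random.choice(d, k, replace=False)             # random coordinate subspace
proj = pts[:, axes]
full = np.linalg.norm(pts[:, None, :] - pts[None, :, :], axis=-1)
small = np.linalg.norm(proj[:, None, :] - proj[None, :, :], axis=-1)
mask = full > 0
print(np.mean(small[mask] / full[mask]), np.sqrt(float(k) / d))  # should be close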
mit
t00mas/datascience-python
classification/knearest.py
1
1554
import matplotlib import matplotlib.pyplot as pyplot import numpy from matplotlib.colors import ListedColormap from sklearn import neighbors, datasets def get_iris_dataset(): iris = datasets.load_iris() return iris.data[:, :2], iris.target def get_knn_classifier(X, y, n_neighbors=None): if not n_neighbors: n_neighbors = 6 classifier = neighbors.KNeighborsClassifier(n_neighbors, weights='distance') classifier.fit(X, y) return classifier, n_neighbors def get_meshgrid(X, y, h=None): if not h: h = .02 x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 return numpy.meshgrid( numpy.arange(x_min, x_max, h), numpy.arange(y_min, y_max, h)) def predict(classifier, mesh_xx, mesh_yy): Z = classifier.predict(numpy.c_[mesh_xx.ravel(), mesh_yy.ravel()]) return Z.reshape(mesh_xx.shape) def plot_classified_regions(X, y, classifier, n_neighbors): xx, yy = get_meshgrid(X, y) Z = predict(classifier, xx, yy) pyplot.figure() pyplot.pcolormesh(xx, yy, Z) # Plot also the training points cmap = ListedColormap(['#FFAAAA', '#AAFFAA','#00AAFF']) pyplot.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap, alpha=0.8) pyplot.xlim(xx.min(), xx.max()) pyplot.ylim(yy.min(), yy.max()) pyplot.title("3-Class classification (k = %i)" % (n_neighbors)) pyplot.savefig('knearest.png') X, y = get_iris_dataset() knn, n_neighbors = get_knn_classifier(X, y) plot_classified_regions(X, y, knn, n_neighbors)
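# Editor's sketch (not part of the original script): one could estimate the
# classifier's held-out accuracy before plotting the regions.  The split size and
# random_state are arbitrary assumptions; requires sklearn >= 0.18 for
# model_selection.
from sklearn.model_selection import train_test_split

X_all, y_all = get_iris_dataset()
X_tr, X_te, y_tr, y_te = train_test_split(X_all, y_all, test_size=0.25, random_state=0)
clf, k = get_knn_classifier(X_tr, y_tr)
print('held-out accuracy: %.3f' % clf.score(X_te, y_te))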
mit
AndKyr/GETELEC
python/JFplot.py
1
1648
#! /usr/bin/python import numpy as np import getelec_mod as gt from mpl_toolkits.mplot3d import Axes3D import matplotlib.pyplot as plt import matplotlib as mb font = 30 # mb.rcParams["font.family"] = "Serif" mb.rcParams["font.size"] = font mb.rcParams["axes.labelsize"] = font mb.rcParams["xtick.labelsize"] = font mb.rcParams["ytick.labelsize"] = font mb.rcParams["legend.fontsize"] = font mb.rcParams["lines.linewidth"] = 2.5 fsize = (18,10) Npoints = 256 Temps = [1.e-2, 300, 800, 1500] Xfn = np.linspace(0.12, 0.35, 256) F = 1./Xfn Jem = np.copy(F) this = gt.emission_create(W = 4.5, R = 5000., approx = 2) fig1 = plt.figure(figsize=fsize) ax1 = fig1.gca() ax1.set_xlabel(r"$1/F$ [m GV$^{-1}$]") ax1.set_ylabel(r"$J$ [A nm$^{-2}$]") colors = plt.rcParams['axes.prop_cycle'].by_key()['color'] for i in range(len(Temps)): this.Temp = Temps[i] if (this.Temp < 10.): this.approx = -1 else: this.approx = 2 for j in range(len(F)): this.F = F[j] this.cur_dens() Jem[j] = this.Jem ax1.semilogy(Xfn,Jem, label = r'T = %d K'%this.Temp) # for i in range(len(Temps)): # this.Temp = Temps[i] # if (this.Temp < 10.): # this.approx = -1 # else: # this.approx = -1 # for j in range(len(F)): # this.F = F[j] # this.cur_dens() # Jem[j] = this.Jem # ax1.semilogy(Xfn,Jem, '--', color = colors[i], label = r'T = %d K'%this.Temp) # np.savetxt("J-F.dat", np.transpose(np.array([F,Jem])), delimiter = " ") ax1.grid() ax1.legend() plt.savefig("JFplot_Tparam.svg") plt.savefig("JFplot_Tparam.png") plt.show()
gpl-3.0
ipashchenko/emcee-x
document/plots/oned.py
16
2164
import os import sys import time import numpy as np import matplotlib.pyplot as pl import h5py from multiprocessing import Pool sys.path.append(os.path.abspath(os.path.join(__file__, "..", "..", ".."))) import emcee # import acor def lnprobfn(p, icov): return -0.5 * np.dot(p, np.dot(icov, p)) def random_cov(ndim, dof=1): v = np.random.randn(ndim * (ndim + dof)).reshape((ndim + dof, ndim)) return (sum([np.outer(v[i], v[i]) for i in range(ndim + dof)]) / (ndim + dof)) _rngs = {} def _worker(args): i, outfn, nsteps = args pid = os.getpid() _random = _rngs.get(pid, np.random.RandomState(int(int(pid) + time.time()))) _rngs[pid] = _random ndim = int(np.ceil(2 ** (7 * _random.rand()))) nwalkers = 2 * ndim + 2 # nwalkers += nwalkers % 2 print ndim, nwalkers cov = random_cov(ndim) icov = np.linalg.inv(cov) ens_samp = emcee.EnsembleSampler(nwalkers, ndim, lnprobfn, args=[icov]) ens_samp.random_state = _random.get_state() pos, lnprob, state = ens_samp.run_mcmc(np.random.randn(nwalkers * ndim) .reshape([nwalkers, ndim]), nsteps) proposal = np.diag(cov.diagonal()) mh_samp = emcee.MHSampler(proposal, ndim, lnprobfn, args=[icov]) mh_samp.random_state = state mh_samp.run_mcmc(np.random.randn(ndim), nsteps) f = h5py.File(outfn) f["data"][i, :] = np.array([ndim, np.mean(ens_samp.acor), np.mean(mh_samp.acor)]) f.close() def oned(): nsteps = 10000 niter = 10 nthreads = 2 outfn = os.path.join(os.path.split(__file__)[0], "gauss_scaling.h5") print outfn f = h5py.File(outfn, "w") f.create_dataset("data", (niter, 3), "f") f.close() pool = Pool(nthreads) pool.map(_worker, [(i, outfn, nsteps) for i in range(niter)]) f = h5py.File(outfn) data = f["data"][...] f.close() pl.clf() pl.plot(data[:, 0], data[:, 1], "ks", alpha=0.5) pl.plot(data[:, 0], data[:, 2], ".k", alpha=0.5) pl.savefig(os.path.join(os.path.split(__file__)[0], "gauss_scaling.png")) if __name__ == "__main__": oned()
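# Editor's sketch (mirroring the calls in the script above, with illustrative
# sizes): measure the mean integrated autocorrelation time of the ensemble
# sampler on a single random Gaussian target, without the multiprocessing and
# HDF5 machinery.
ndim = 5
nwalkers = 2 * ndim + 2
icov = np.linalg.inv(random_cov(ndim))
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprobfn, args=[icov])
p0 = np.random.randn(nwalkers, ndim)
sampler.run_mcmc(p0, 10000)
print(np.mean(sampler.acor))  # mean autocorrelation time over the ndim dimensions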
mit
GGoussar/scikit-image
doc/examples/segmentation/plot_marked_watershed.py
9
1988
""" =============================== Markers for watershed transform =============================== The watershed is a classical algorithm used for **segmentation**, that is, for separating different objects in an image. Here a marker image is built from the region of low gradient inside the image. In a gradient image, the areas of high values provide barriers that help to segment the image. Using markers on the lower values will ensure that the segmented objects are found. See Wikipedia_ for more details on the algorithm. .. _Wikipedia: http://en.wikipedia.org/wiki/Watershed_(image_processing) """ from scipy import ndimage as ndi import matplotlib.pyplot as plt from skimage.morphology import watershed, disk from skimage import data from skimage.filters import rank from skimage.util import img_as_ubyte image = img_as_ubyte(data.camera()) # denoise image denoised = rank.median(image, disk(2)) # find continuous region (low gradient - # where less than 10 for this image) --> markers # disk(5) is used here to get a more smooth image markers = rank.gradient(denoised, disk(5)) < 10 markers = ndi.label(markers)[0] # local gradient (disk(2) is used to keep edges thin) gradient = rank.gradient(denoised, disk(2)) # process the watershed labels = watershed(gradient, markers) # display results fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(8, 8), sharex=True, sharey=True, subplot_kw={'adjustable':'box-forced'}) ax = axes.ravel() ax[0].imshow(image, cmap=plt.cm.gray, interpolation='nearest') ax[0].set_title("Original") ax[1].imshow(gradient, cmap=plt.cm.spectral, interpolation='nearest') ax[1].set_title("Local Gradient") ax[2].imshow(markers, cmap=plt.cm.spectral, interpolation='nearest') ax[2].set_title("Markers") ax[3].imshow(image, cmap=plt.cm.gray, interpolation='nearest') ax[3].imshow(labels, cmap=plt.cm.spectral, interpolation='nearest', alpha=.7) ax[3].set_title("Segmented") for a in ax: a.axis('off') fig.tight_layout() plt.show()
bsd-3-clause
karlnapf/kameleon-mcmc
kameleon_mcmc/tools/Visualise.py
1
5656
""" This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. Written (W) 2013 Heiko Strathmann Written (W) 2013 Dino Sejdinovic """ from kameleon_mcmc.distribution.Gaussian import Gaussian from matplotlib.patches import Ellipse from matplotlib.pyplot import imshow, ylim, xlim, contour, plot, hold, gca from numpy import linspace from numpy.linalg.linalg import eigh from numpy import zeros, array, exp, arctan2, sqrt import numpy class Visualise(object): def __init__(self): pass @staticmethod def get_plotting_arrays(distribution): bounds = distribution.get_plotting_bounds() assert(len(bounds) == 2) Xs = linspace(bounds[0][0], bounds[0][1]) Ys = linspace(bounds[1][0], bounds[1][1]) return Xs, Ys @staticmethod def visualise_distribution(distribution, Z=None, log_density=False, Xs=None, Ys=None): """ Plots the density of a given Distribution instance and plots some samples on top. """ if Xs is None or Ys is None: Xs, Ys = Visualise.get_plotting_arrays(distribution) Visualise.plot_density(distribution, Xs, Ys) if Z is not None: hold(True) Visualise.plot_data(Z) hold(False) @staticmethod def plot_density(distribution, Xs, Ys, log_domain=False): """ Plots a 2D density density - density - distribution instance to plot Xs - x values the density is evaluated at Ys - y values the density is evaluated at log_domain - if False, density will be put into exponential function """ assert(distribution.dimension == 2) D = zeros((len(Xs), len(Ys))) # compute log-density for i in range(len(Xs)): for j in range(len(Ys)): x = array([[Xs[i], Ys[j]]]) D[j, i] = distribution.log_pdf(x) if log_domain == False: D = exp(D) im = imshow(D, origin='lower') im.set_extent([Xs.min(), Xs.max(), Ys.min(), Ys.max()]) im.set_interpolation('nearest') im.set_cmap('gray') ylim([Ys.min(), Ys.max()]) xlim([Xs.min(), Xs.max()]) @staticmethod def contour_plot_density(distribution, Xs=None, Ys=None, log_domain=False): """ Contour-plots a 2D density. 
If Gaussian, plots 1.96 interval contour only density - distribution instance to plot Xs - x values the density is evaluated at Ys - y values the density is evaluated at log_domain - if False, density will be put into exponential function """ if isinstance(distribution, Gaussian) and log_domain == False: gca().add_artist(Visualise.get_gaussian_ellipse_artist(distribution)) gca().plot(distribution.mu[0], distribution.mu[1], 'r*', \ markersize=3.0, markeredgewidth=.1) return assert(distribution.dimension == 2) if Xs is None: (xmin, xmax), _ = distribution.get_plotting_bounds() Xs = linspace(xmin, xmax) if Ys is None: _, (ymin, ymax) = distribution.get_plotting_bounds() Ys = linspace(ymin, ymax) D = zeros((len(Ys), len(Xs))) # compute log-density for i in range(len(Xs)): for j in range(len(Ys)): x = array([[Xs[i], Ys[j]]]) D[j, i] = distribution.log_pdf(x) if log_domain == False: D = exp(D) contour(Xs, Ys, D, origin='lower') @staticmethod def plot_array(Xs, Ys, D): """ Plots a 2D array Xs - x values the density is evaluated at Ys - y values the density is evaluated at D - array to plot """ im = imshow(D, origin='lower') im.set_extent([Xs.min(), Xs.max(), Ys.min(), Ys.max()]) im.set_interpolation('nearest') im.set_cmap('gray') ylim([Ys.min(), Ys.max()]) xlim([Xs.min(), Xs.max()]) @staticmethod def plot_data(Z, y=None): """ Plots collection of 2D points and optionally adds a marker to one of them Z - set of row-vectors points to plot y - one point that is marked in red, might be None """ plot(Z[:, 0], Z[:, 1], '*', markersize=3.0, markeredgewidth=.1) if y is not None: plot(y[0, 0], y[0, 1], 'r*', markersize=10.0, markeredgewidth=.1) @staticmethod def get_gaussian_ellipse_artist(gaussian, nstd=1.96, linewidth=1): """ Returns an allipse artist for nstd times the standard deviation of this Gaussian """ assert(isinstance(gaussian, Gaussian)) assert(gaussian.dimension == 2) # compute eigenvalues (ordered) vals, vecs = eigh(gaussian.L.dot(gaussian.L.T)) order = vals.argsort()[::-1] vals, vecs = vals[order], vecs[:, order] theta = numpy.degrees(arctan2(*vecs[:, 0][::-1])) # width and height are "full" widths, not radius width, height = 2 * nstd * sqrt(vals) e = Ellipse(xy=gaussian.mu, width=width, height=height, angle=theta, \ edgecolor="red", fill=False, linewidth=linewidth) return e
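# Editor's sketch (grid and density invented for illustration): the static helpers
# that operate on plain arrays, such as plot_array, can be exercised without
# constructing a Distribution object.
from numpy import meshgrid
grid_x = linspace(-3, 3)
grid_y = linspace(-3, 3)
XX, YY = meshgrid(grid_x, grid_y)
density = exp(-0.5 * (XX ** 2 + YY ** 2))  # isotropic Gaussian evaluated on the grid
Visualise.plot_array(grid_x, grid_y, density)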
bsd-2-clause
johankaito/fufuka
microblog/flask/venv/lib/python2.7/site-packages/scipy/stats/_multivariate.py
17
69089
# # Author: Joris Vankerschaver 2013 # from __future__ import division, print_function, absolute_import import numpy as np import scipy.linalg from scipy.misc import doccer from scipy.special import gammaln, psi, multigammaln from scipy._lib._util import check_random_state __all__ = ['multivariate_normal', 'dirichlet', 'wishart', 'invwishart'] _LOG_2PI = np.log(2 * np.pi) _LOG_2 = np.log(2) _LOG_PI = np.log(np.pi) def _process_parameters(dim, mean, cov): """ Infer dimensionality from mean or covariance matrix, ensure that mean and covariance are full vector resp. matrix. """ # Try to infer dimensionality if dim is None: if mean is None: if cov is None: dim = 1 else: cov = np.asarray(cov, dtype=float) if cov.ndim < 2: dim = 1 else: dim = cov.shape[0] else: mean = np.asarray(mean, dtype=float) dim = mean.size else: if not np.isscalar(dim): raise ValueError("Dimension of random variable must be a scalar.") # Check input sizes and return full arrays for mean and cov if necessary if mean is None: mean = np.zeros(dim) mean = np.asarray(mean, dtype=float) if cov is None: cov = 1.0 cov = np.asarray(cov, dtype=float) if dim == 1: mean.shape = (1,) cov.shape = (1, 1) if mean.ndim != 1 or mean.shape[0] != dim: raise ValueError("Array 'mean' must be a vector of length %d." % dim) if cov.ndim == 0: cov = cov * np.eye(dim) elif cov.ndim == 1: cov = np.diag(cov) elif cov.ndim == 2 and cov.shape != (dim, dim): rows, cols = cov.shape if rows != cols: msg = ("Array 'cov' must be square if it is two dimensional," " but cov.shape = %s." % str(cov.shape)) else: msg = ("Dimension mismatch: array 'cov' is of shape %s," " but 'mean' is a vector of length %d.") msg = msg % (str(cov.shape), len(mean)) raise ValueError(msg) elif cov.ndim > 2: raise ValueError("Array 'cov' must be at most two-dimensional," " but cov.ndim = %d" % cov.ndim) return dim, mean, cov def _process_quantiles(x, dim): """ Adjust quantiles array so that last axis labels the components of each data point. """ x = np.asarray(x, dtype=float) if x.ndim == 0: x = x[np.newaxis] elif x.ndim == 1: if dim == 1: x = x[:, np.newaxis] else: x = x[np.newaxis, :] return x def _squeeze_output(out): """ Remove single-dimensional entries from array and convert to scalar, if necessary. """ out = out.squeeze() if out.ndim == 0: out = out[()] return out def _eigvalsh_to_eps(spectrum, cond=None, rcond=None): """ Determine which eigenvalues are "small" given the spectrum. This is for compatibility across various linear algebra functions that should agree about whether or not a Hermitian matrix is numerically singular and what is its numerical matrix rank. This is designed to be compatible with scipy.linalg.pinvh. Parameters ---------- spectrum : 1d ndarray Array of eigenvalues of a Hermitian matrix. cond, rcond : float, optional Cutoff for small eigenvalues. Singular values smaller than rcond * largest_eigenvalue are considered zero. If None or -1, suitable machine precision is used. Returns ------- eps : float Magnitude cutoff for numerical negligibility. """ if rcond is not None: cond = rcond if cond in [None, -1]: t = spectrum.dtype.char.lower() factor = {'f': 1E3, 'd': 1E6} cond = factor[t] * np.finfo(t).eps eps = cond * np.max(abs(spectrum)) return eps def _pinv_1d(v, eps=1e-5): """ A helper function for computing the pseudoinverse. Parameters ---------- v : iterable of numbers This may be thought of as a vector of eigenvalues or singular values. eps : float Values with magnitude no greater than eps are considered negligible. 
Returns ------- v_pinv : 1d float ndarray A vector of pseudo-inverted numbers. """ return np.array([0 if abs(x) <= eps else 1/x for x in v], dtype=float) class _PSD(object): """ Compute coordinated functions of a symmetric positive semidefinite matrix. This class addresses two issues. Firstly it allows the pseudoinverse, the logarithm of the pseudo-determinant, and the rank of the matrix to be computed using one call to eigh instead of three. Secondly it allows these functions to be computed in a way that gives mutually compatible results. All of the functions are computed with a common understanding as to which of the eigenvalues are to be considered negligibly small. The functions are designed to coordinate with scipy.linalg.pinvh() but not necessarily with np.linalg.det() or with np.linalg.matrix_rank(). Parameters ---------- M : array_like Symmetric positive semidefinite matrix (2-D). cond, rcond : float, optional Cutoff for small eigenvalues. Singular values smaller than rcond * largest_eigenvalue are considered zero. If None or -1, suitable machine precision is used. lower : bool, optional Whether the pertinent array data is taken from the lower or upper triangle of M. (Default: lower) check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. allow_singular : bool, optional Whether to allow a singular matrix. (Default: True) Notes ----- The arguments are similar to those of scipy.linalg.pinvh(). """ def __init__(self, M, cond=None, rcond=None, lower=True, check_finite=True, allow_singular=True): # Compute the symmetric eigendecomposition. # Note that eigh takes care of array conversion, chkfinite, # and assertion that the matrix is square. s, u = scipy.linalg.eigh(M, lower=lower, check_finite=check_finite) eps = _eigvalsh_to_eps(s, cond, rcond) if np.min(s) < -eps: raise ValueError('the input matrix must be positive semidefinite') d = s[s > eps] if len(d) < len(s) and not allow_singular: raise np.linalg.LinAlgError('singular matrix') s_pinv = _pinv_1d(s, eps) U = np.multiply(u, np.sqrt(s_pinv)) # Initialize the eagerly precomputed attributes. self.rank = len(d) self.U = U self.log_pdet = np.sum(np.log(d)) # Initialize an attribute to be lazily computed. self._pinv = None @property def pinv(self): if self._pinv is None: self._pinv = np.dot(self.U, self.U.T) return self._pinv _doc_default_callparams = """\ mean : array_like, optional Mean of the distribution (default zero) cov : array_like, optional Covariance matrix of the distribution (default one) allow_singular : bool, optional Whether to allow a singular covariance matrix. (Default: False) """ _doc_callparams_note = \ """Setting the parameter `mean` to `None` is equivalent to having `mean` be the zero-vector. The parameter `cov` can be a scalar, in which case the covariance matrix is the identity times that value, a vector of diagonal entries for the covariance matrix, or a two-dimensional array_like. """ _doc_random_state = """\ random_state : None or int or np.random.RandomState instance, optional If int or RandomState, use it for drawing the random variates. If None (or np.random), the global np.random state is used. Default is None. 
""" _doc_frozen_callparams = "" _doc_frozen_callparams_note = \ """See class definition for a detailed description of parameters.""" docdict_params = { '_doc_default_callparams': _doc_default_callparams, '_doc_callparams_note': _doc_callparams_note, '_doc_random_state': _doc_random_state } docdict_noparams = { '_doc_default_callparams': _doc_frozen_callparams, '_doc_callparams_note': _doc_frozen_callparams_note, '_doc_random_state': _doc_random_state } class multi_rv_generic(object): """ Class which encapsulates common functionality between all multivariate distributions. """ def __init__(self, seed=None): super(multi_rv_generic, self).__init__() self._random_state = check_random_state(seed) @property def random_state(self): """ Get or set the RandomState object for generating random variates. This can be either None or an existing RandomState object. If None (or np.random), use the RandomState singleton used by np.random. If already a RandomState instance, use it. If an int, use a new RandomState instance seeded with seed. """ return self._random_state @random_state.setter def random_state(self, seed): self._random_state = check_random_state(seed) def _get_random_state(self, random_state): if random_state is not None: return check_random_state(random_state) else: return self._random_state class multi_rv_frozen(object): """ Class which encapsulates common functionality between all frozen multivariate distributions. """ @property def random_state(self): return self._dist._random_state @random_state.setter def random_state(self, seed): self._dist._random_state = check_random_state(seed) class multivariate_normal_gen(multi_rv_generic): r""" A multivariate normal random variable. The `mean` keyword specifies the mean. The `cov` keyword specifies the covariance matrix. Methods ------- ``pdf(x, mean=None, cov=1, allow_singular=False)`` Probability density function. ``logpdf(x, mean=None, cov=1, allow_singular=False)`` Log of the probability density function. ``rvs(mean=None, cov=1, size=1, random_state=None)`` Draw random samples from a multivariate normal distribution. ``entropy()`` Compute the differential entropy of the multivariate normal. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_doc_default_callparams)s %(_doc_random_state)s Alternatively, the object may be called (as a function) to fix the mean and covariance parameters, returning a "frozen" multivariate normal random variable: rv = multivariate_normal(mean=None, cov=1, allow_singular=False) - Frozen object with the same methods but holding the given mean and covariance fixed. Notes ----- %(_doc_callparams_note)s The covariance matrix `cov` must be a (symmetric) positive semi-definite matrix. The determinant and inverse of `cov` are computed as the pseudo-determinant and pseudo-inverse, respectively, so that `cov` does not need to have full rank. The probability density function for `multivariate_normal` is .. math:: f(x) = \frac{1}{\sqrt{(2 \pi)^k \det \Sigma}} \exp\left( -\frac{1}{2} (x - \mu)^T \Sigma^{-1} (x - \mu) \right), where :math:`\mu` is the mean, :math:`\Sigma` the covariance matrix, and :math:`k` is the dimension of the space where :math:`x` takes values. .. 
versionadded:: 0.14.0 Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy.stats import multivariate_normal >>> x = np.linspace(0, 5, 10, endpoint=False) >>> y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y array([ 0.00108914, 0.01033349, 0.05946514, 0.20755375, 0.43939129, 0.56418958, 0.43939129, 0.20755375, 0.05946514, 0.01033349]) >>> fig1 = plt.figure() >>> ax = fig1.add_subplot(111) >>> ax.plot(x, y) The input quantiles can be any shape of array, as long as the last axis labels the components. This allows us for instance to display the frozen pdf for a non-isotropic random variable in 2D as follows: >>> x, y = np.mgrid[-1:1:.01, -1:1:.01] >>> pos = np.empty(x.shape + (2,)) >>> pos[:, :, 0] = x; pos[:, :, 1] = y >>> rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]]) >>> fig2 = plt.figure() >>> ax2 = fig2.add_subplot(111) >>> ax2.contourf(x, y, rv.pdf(pos)) """ def __init__(self, seed=None): super(multivariate_normal_gen, self).__init__(seed) self.__doc__ = doccer.docformat(self.__doc__, docdict_params) def __call__(self, mean=None, cov=1, allow_singular=False, seed=None): """ Create a frozen multivariate normal distribution. See `multivariate_normal_frozen` for more information. """ return multivariate_normal_frozen(mean, cov, allow_singular=allow_singular, seed=seed) def _logpdf(self, x, mean, prec_U, log_det_cov, rank): """ Parameters ---------- x : ndarray Points at which to evaluate the log of the probability density function mean : ndarray Mean of the distribution prec_U : ndarray A decomposition such that np.dot(prec_U, prec_U.T) is the precision matrix, i.e. inverse of the covariance matrix. log_det_cov : float Logarithm of the determinant of the covariance matrix rank : int Rank of the covariance matrix. Notes ----- As this function does no argument checking, it should not be called directly; use 'logpdf' instead. """ dev = x - mean maha = np.sum(np.square(np.dot(dev, prec_U)), axis=-1) return -0.5 * (rank * _LOG_2PI + log_det_cov + maha) def logpdf(self, x, mean, cov, allow_singular=False): """ Log of the multivariate normal probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_doc_default_callparams)s Returns ------- pdf : ndarray Log of the probability density function evaluated at `x` Notes ----- %(_doc_callparams_note)s """ dim, mean, cov = _process_parameters(None, mean, cov) x = _process_quantiles(x, dim) psd = _PSD(cov, allow_singular=allow_singular) out = self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank) return _squeeze_output(out) def pdf(self, x, mean, cov, allow_singular=False): """ Multivariate normal probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_doc_default_callparams)s Returns ------- pdf : ndarray Probability density function evaluated at `x` Notes ----- %(_doc_callparams_note)s """ dim, mean, cov = _process_parameters(None, mean, cov) x = _process_quantiles(x, dim) psd = _PSD(cov, allow_singular=allow_singular) out = np.exp(self._logpdf(x, mean, psd.U, psd.log_pdet, psd.rank)) return _squeeze_output(out) def rvs(self, mean=None, cov=1, size=1, random_state=None): """ Draw random samples from a multivariate normal distribution. Parameters ---------- %(_doc_default_callparams)s size : integer, optional Number of samples to draw (default 1). 
%(_doc_random_state)s Returns ------- rvs : ndarray or scalar Random variates of size (`size`, `N`), where `N` is the dimension of the random variable. Notes ----- %(_doc_callparams_note)s """ dim, mean, cov = _process_parameters(None, mean, cov) random_state = self._get_random_state(random_state) out = random_state.multivariate_normal(mean, cov, size) return _squeeze_output(out) def entropy(self, mean=None, cov=1): """ Compute the differential entropy of the multivariate normal. Parameters ---------- %(_doc_default_callparams)s Returns ------- h : scalar Entropy of the multivariate normal distribution Notes ----- %(_doc_callparams_note)s """ dim, mean, cov = _process_parameters(None, mean, cov) _, logdet = np.linalg.slogdet(2 * np.pi * np.e * cov) return 0.5 * logdet multivariate_normal = multivariate_normal_gen() class multivariate_normal_frozen(multi_rv_frozen): def __init__(self, mean=None, cov=1, allow_singular=False, seed=None): """ Create a frozen multivariate normal distribution. Parameters ---------- mean : array_like, optional Mean of the distribution (default zero) cov : array_like, optional Covariance matrix of the distribution (default one) allow_singular : bool, optional If this flag is True then tolerate a singular covariance matrix (default False). seed : None or int or np.random.RandomState instance, optional This parameter defines the RandomState object to use for drawing random variates. If None (or np.random), the global np.random state is used. If integer, it is used to seed the local RandomState instance Default is None. Examples -------- When called with the default parameters, this will create a 1D random variable with mean 0 and covariance 1: >>> from scipy.stats import multivariate_normal >>> r = multivariate_normal() >>> r.mean array([ 0.]) >>> r.cov array([[1.]]) """ self.dim, self.mean, self.cov = _process_parameters(None, mean, cov) self.cov_info = _PSD(self.cov, allow_singular=allow_singular) self._dist = multivariate_normal_gen(seed) def logpdf(self, x): x = _process_quantiles(x, self.dim) out = self._dist._logpdf(x, self.mean, self.cov_info.U, self.cov_info.log_pdet, self.cov_info.rank) return _squeeze_output(out) def pdf(self, x): return np.exp(self.logpdf(x)) def rvs(self, size=1, random_state=None): return self._dist.rvs(self.mean, self.cov, size, random_state) def entropy(self): """ Computes the differential entropy of the multivariate normal. Returns ------- h : scalar Entropy of the multivariate normal distribution """ log_pdet = self.cov_info.log_pdet rank = self.cov_info.rank return 0.5 * (rank * (_LOG_2PI + 1) + log_pdet) # Set frozen generator docstrings from corresponding docstrings in # multivariate_normal_gen and fill in default strings in class docstrings for name in ['logpdf', 'pdf', 'rvs']: method = multivariate_normal_gen.__dict__[name] method_frozen = multivariate_normal_frozen.__dict__[name] method_frozen.__doc__ = doccer.docformat(method.__doc__, docdict_noparams) method.__doc__ = doccer.docformat(method.__doc__, docdict_params) _dirichlet_doc_default_callparams = """\ alpha : array_like The concentration parameters. The number of entries determines the dimensionality of the distribution. 
""" _dirichlet_doc_frozen_callparams = "" _dirichlet_doc_frozen_callparams_note = \ """See class definition for a detailed description of parameters.""" dirichlet_docdict_params = { '_dirichlet_doc_default_callparams': _dirichlet_doc_default_callparams, '_doc_random_state': _doc_random_state } dirichlet_docdict_noparams = { '_dirichlet_doc_default_callparams': _dirichlet_doc_frozen_callparams, '_doc_random_state': _doc_random_state } def _dirichlet_check_parameters(alpha): alpha = np.asarray(alpha) if np.min(alpha) <= 0: raise ValueError("All parameters must be greater than 0") elif alpha.ndim != 1: raise ValueError("Parameter vector 'a' must be one dimensional, " + "but a.shape = %s." % str(alpha.shape)) return alpha def _dirichlet_check_input(alpha, x): x = np.asarray(x) if x.shape[0] + 1 != alpha.shape[0] and x.shape[0] != alpha.shape[0]: raise ValueError("Vector 'x' must have one entry less then the" + " parameter vector 'a', but alpha.shape = " + "%s and " % alpha.shape + "x.shape = %s." % x.shape) if x.shape[0] != alpha.shape[0]: xk = np.array([1 - np.sum(x, 0)]) if xk.ndim == 1: x = np.append(x, xk) elif xk.ndim == 2: x = np.vstack((x, xk)) else: raise ValueError("The input must be one dimensional or a two " "dimensional matrix containing the entries.") if np.min(x) < 0: raise ValueError("Each entry in 'x' must be greater or equal zero.") if np.max(x) > 1: raise ValueError("Each entry in 'x' must be smaller or equal one.") if (np.abs(np.sum(x, 0) - 1.0) > 10e-10).any(): raise ValueError("The input vector 'x' must lie within the normal " + "simplex. but sum(x)=%f." % np.sum(x, 0)) return x def _lnB(alpha): r""" Internal helper function to compute the log of the useful quotient .. math:: B(\alpha) = \frac{\prod_{i=1}{K}\Gamma(\alpha_i)}{\Gamma\left(\sum_{i=1}^{K}\alpha_i\right)} Parameters ---------- %(_dirichlet_doc_default_callparams)s Returns ------- B : scalar Helper quotient, internal use only """ return np.sum(gammaln(alpha)) - gammaln(np.sum(alpha)) class dirichlet_gen(multi_rv_generic): r""" A Dirichlet random variable. The `alpha` keyword specifies the concentration parameters of the distribution. .. versionadded:: 0.15.0 Methods ------- ``pdf(x, alpha)`` Probability density function. ``logpdf(x, alpha)`` Log of the probability density function. ``rvs(alpha, size=1, random_state=None)`` Draw random samples from a Dirichlet distribution. ``mean(alpha)`` The mean of the Dirichlet distribution ``var(alpha)`` The variance of the Dirichlet distribution ``entropy(alpha)`` Compute the differential entropy of the multivariate normal. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_dirichlet_doc_default_callparams)s %(_doc_random_state)s Alternatively, the object may be called (as a function) to fix concentration parameters, returning a "frozen" Dirichlet random variable: rv = dirichlet(alpha) - Frozen object with the same methods but holding the given concentration parameters fixed. Notes ----- Each :math:`\alpha` entry must be positive. The distribution has only support on the simplex defined by .. math:: \sum_{i=1}^{K} x_i \le 1 The probability density function for `dirichlet` is .. math:: f(x) = \frac{1}{\mathrm{B}(\boldsymbol\alpha)} \prod_{i=1}^K x_i^{\alpha_i - 1} where .. 
math:: \mathrm{B}(\boldsymbol\alpha) = \frac{\prod_{i=1}^K \Gamma(\alpha_i)} {\Gamma\bigl(\sum_{i=1}^K \alpha_i\bigr)} and :math:`\boldsymbol\alpha=(\alpha_1,\ldots,\alpha_K)`, the concentration parameters and :math:`K` is the dimension of the space where :math:`x` takes values. """ def __init__(self, seed=None): super(dirichlet_gen, self).__init__(seed) self.__doc__ = doccer.docformat(self.__doc__, dirichlet_docdict_params) def __call__(self, alpha, seed=None): return dirichlet_frozen(alpha, seed=seed) def _logpdf(self, x, alpha): """ Parameters ---------- x : ndarray Points at which to evaluate the log of the probability density function %(_dirichlet_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'logpdf' instead. """ lnB = _lnB(alpha) return - lnB + np.sum((np.log(x.T) * (alpha - 1)).T, 0) def logpdf(self, x, alpha): """ Log of the Dirichlet probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_dirichlet_doc_default_callparams)s Returns ------- pdf : ndarray Log of the probability density function evaluated at `x`. """ alpha = _dirichlet_check_parameters(alpha) x = _dirichlet_check_input(alpha, x) out = self._logpdf(x, alpha) return _squeeze_output(out) def pdf(self, x, alpha): """ The Dirichlet probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_dirichlet_doc_default_callparams)s Returns ------- pdf : ndarray The probability density function evaluated at `x`. """ alpha = _dirichlet_check_parameters(alpha) x = _dirichlet_check_input(alpha, x) out = np.exp(self._logpdf(x, alpha)) return _squeeze_output(out) def mean(self, alpha): """ Compute the mean of the dirichlet distribution. Parameters ---------- %(_dirichlet_doc_default_callparams)s Returns ------- mu : scalar Mean of the Dirichlet distribution """ alpha = _dirichlet_check_parameters(alpha) out = alpha / (np.sum(alpha)) return _squeeze_output(out) def var(self, alpha): """ Compute the variance of the dirichlet distribution. Parameters ---------- %(_dirichlet_doc_default_callparams)s Returns ------- v : scalar Variance of the Dirichlet distribution """ alpha = _dirichlet_check_parameters(alpha) alpha0 = np.sum(alpha) out = (alpha * (alpha0 - alpha)) / ((alpha0 * alpha0) * (alpha0 + 1)) return out def entropy(self, alpha): """ Compute the differential entropy of the dirichlet distribution. Parameters ---------- %(_dirichlet_doc_default_callparams)s Returns ------- h : scalar Entropy of the Dirichlet distribution """ alpha = _dirichlet_check_parameters(alpha) alpha0 = np.sum(alpha) lnB = _lnB(alpha) K = alpha.shape[0] out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum( (alpha - 1) * scipy.special.psi(alpha)) return _squeeze_output(out) def rvs(self, alpha, size=1, random_state=None): """ Draw random samples from a Dirichlet distribution. Parameters ---------- %(_dirichlet_doc_default_callparams)s size : int, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray or scalar Random variates of size (`size`, `N`), where `N` is the dimension of the random variable. 
""" alpha = _dirichlet_check_parameters(alpha) random_state = self._get_random_state(random_state) return random_state.dirichlet(alpha, size=size) dirichlet = dirichlet_gen() class dirichlet_frozen(multi_rv_frozen): def __init__(self, alpha, seed=None): self.alpha = _dirichlet_check_parameters(alpha) self._dist = dirichlet_gen(seed) def logpdf(self, x): return self._dist.logpdf(x, self.alpha) def pdf(self, x): return self._dist.pdf(x, self.alpha) def mean(self): return self._dist.mean(self.alpha) def var(self): return self._dist.var(self.alpha) def entropy(self): return self._dist.entropy(self.alpha) def rvs(self, size=1, random_state=None): return self._dist.rvs(self.alpha, size, random_state) # Set frozen generator docstrings from corresponding docstrings in # multivariate_normal_gen and fill in default strings in class docstrings for name in ['logpdf', 'pdf', 'rvs', 'mean', 'var', 'entropy']: method = dirichlet_gen.__dict__[name] method_frozen = dirichlet_frozen.__dict__[name] method_frozen.__doc__ = doccer.docformat( method.__doc__, dirichlet_docdict_noparams) method.__doc__ = doccer.docformat(method.__doc__, dirichlet_docdict_params) _wishart_doc_default_callparams = """\ df : int Degrees of freedom, must be greater than or equal to dimension of the scale matrix scale : array_like Symmetric positive definite scale matrix of the distribution """ _wishart_doc_callparams_note = "" _wishart_doc_frozen_callparams = "" _wishart_doc_frozen_callparams_note = \ """See class definition for a detailed description of parameters.""" wishart_docdict_params = { '_doc_default_callparams': _wishart_doc_default_callparams, '_doc_callparams_note': _wishart_doc_callparams_note, '_doc_random_state': _doc_random_state } wishart_docdict_noparams = { '_doc_default_callparams': _wishart_doc_frozen_callparams, '_doc_callparams_note': _wishart_doc_frozen_callparams_note, '_doc_random_state': _doc_random_state } class wishart_gen(multi_rv_generic): r""" A Wishart random variable. The `df` keyword specifies the degrees of freedom. The `scale` keyword specifies the scale matrix, which must be symmetric and positive definite. In this context, the scale matrix is often interpreted in terms of a multivariate normal precision matrix (the inverse of the covariance matrix). Methods ------- ``pdf(x, df, scale)`` Probability density function. ``logpdf(x, df, scale)`` Log of the probability density function. ``rvs(df, scale, size=1, random_state=None)`` Draw random samples from a Wishart distribution. ``entropy()`` Compute the differential entropy of the Wishart distribution. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_doc_default_callparams)s %(_doc_random_state)s Alternatively, the object may be called (as a function) to fix the degrees of freedom and scale parameters, returning a "frozen" Wishart random variable: rv = wishart(df=1, scale=1) - Frozen object with the same methods but holding the given degrees of freedom and scale fixed. See Also -------- invwishart, chi2 Notes ----- %(_doc_callparams_note)s The scale matrix `scale` must be a symmetric positive definite matrix. Singular matrices, including the symmetric positive semi-definite case, are not supported. The Wishart distribution is often denoted .. math:: W_p(\nu, \Sigma) where :math:`\nu` is the degrees of freedom and :math:`\Sigma` is the :math:`p \times p` scale matrix. 
The probability density function for `wishart` has support over positive definite matrices :math:`S`; if :math:`S \sim W_p(\nu, \Sigma)`, then its PDF is given by: .. math:: f(S) = \frac{|S|^{\frac{\nu - p - 1}{2}}}{2^{ \frac{\nu p}{2} } |\Sigma|^\frac{\nu}{2} \Gamma_p \left ( \frac{\nu}{2} \right )} \exp\left( -tr(\Sigma^{-1} S) / 2 \right) If :math:`S \sim W_p(\nu, \Sigma)` (Wishart) then :math:`S^{-1} \sim W_p^{-1}(\nu, \Sigma^{-1})` (inverse Wishart). If the scale matrix is 1-dimensional and equal to one, then the Wishart distribution :math:`W_1(\nu, 1)` collapses to the :math:`\chi^2(\nu)` distribution. .. versionadded:: 0.16.0 References ---------- .. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach", Wiley, 1983. .. [2] W.B. Smith and R.R. Hocking, "Algorithm AS 53: Wishart Variate Generator", Applied Statistics, vol. 21, pp. 341-345, 1972. Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy.stats import wishart, chi2 >>> x = np.linspace(1e-5, 8, 100) >>> w = wishart.pdf(x, df=3, scale=1); w[:5] array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ]) >>> c = chi2.pdf(x, 3); c[:5] array([ 0.00126156, 0.10892176, 0.14793434, 0.17400548, 0.1929669 ]) >>> plt.plot(x, w) The input quantiles can be any shape of array, as long as the last axis labels the components. """ def __init__(self, seed=None): super(wishart_gen, self).__init__(seed) self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params) def __call__(self, df=None, scale=None, seed=None): """ Create a frozen Wishart distribution. See `wishart_frozen` for more information. """ return wishart_frozen(df, scale, seed) def _process_parameters(self, df, scale): if scale is None: scale = 1.0 scale = np.asarray(scale, dtype=float) if scale.ndim == 0: scale = scale[np.newaxis,np.newaxis] elif scale.ndim == 1: scale = np.diag(scale) elif scale.ndim == 2 and not scale.shape[0] == scale.shape[1]: raise ValueError("Array 'scale' must be square if it is two" " dimensional, but scale.scale = %s." % str(scale.shape)) elif scale.ndim > 2: raise ValueError("Array 'scale' must be at most two-dimensional," " but scale.ndim = %d" % scale.ndim) dim = scale.shape[0] if df is None: df = dim elif not np.isscalar(df): raise ValueError("Degrees of freedom must be a scalar.") elif df < dim: raise ValueError("Degrees of freedom cannot be less than dimension" " of scale matrix, but df = %d" % df) return dim, df, scale def _process_quantiles(self, x, dim): """ Adjust quantiles array so that last axis labels the components of each data point. """ x = np.asarray(x, dtype=float) if x.ndim == 0: x = x * np.eye(dim)[:, :, np.newaxis] if x.ndim == 1: if dim == 1: x = x[np.newaxis, np.newaxis, :] else: x = np.diag(x)[:, :, np.newaxis] elif x.ndim == 2: if not x.shape[0] == x.shape[1]: raise ValueError("Quantiles must be square if they are two" " dimensional, but x.shape = %s." % str(x.shape)) x = x[:, :, np.newaxis] elif x.ndim == 3: if not x.shape[0] == x.shape[1]: raise ValueError("Quantiles must be square in the first two" " dimensions if they are three dimensional" ", but x.shape = %s." % str(x.shape)) elif x.ndim > 3: raise ValueError("Quantiles must be at most two-dimensional with" " an additional dimension for multiple" "components, but x.ndim = %d" % x.ndim) # Now we have 3-dim array; should have shape [dim, dim, *] if not x.shape[0:2] == (dim, dim): raise ValueError('Quantiles have incompatible dimensions: should' ' be %s, got %s.' 
% ((dim, dim), x.shape[0:2])) return x def _process_size(self, size): size = np.asarray(size) if size.ndim == 0: size = size[np.newaxis] elif size.ndim > 1: raise ValueError('Size must be an integer or tuple of integers;' ' thus must have dimension <= 1.' ' Got size.ndim = %s' % str(tuple(size))) n = size.prod() shape = tuple(size) return n, shape def _logpdf(self, x, dim, df, scale, log_det_scale, C): """ Parameters ---------- x : ndarray Points at which to evaluate the log of the probability density function dim : int Dimension of the scale matrix df : int Degrees of freedom scale : ndarray Scale matrix log_det_scale : float Logarithm of the determinant of the scale matrix C : ndarray Cholesky factorization of the scale matrix, lower triagular. Notes ----- As this function does no argument checking, it should not be called directly; use 'logpdf' instead. """ # log determinant of x # Note: x has components along the last axis, so that x.T has # components alone the 0-th axis. Then since det(A) = det(A'), this # gives us a 1-dim vector of determinants # Retrieve tr(scale^{-1} x) log_det_x = np.zeros(x.shape[-1]) scale_inv_x = np.zeros(x.shape) tr_scale_inv_x = np.zeros(x.shape[-1]) for i in range(x.shape[-1]): _, log_det_x[i] = self._cholesky_logdet(x[:,:,i]) scale_inv_x[:,:,i] = scipy.linalg.cho_solve((C, True), x[:,:,i]) tr_scale_inv_x[i] = scale_inv_x[:,:,i].trace() # Log PDF out = ((0.5 * (df - dim - 1) * log_det_x - 0.5 * tr_scale_inv_x) - (0.5 * df * dim * _LOG_2 + 0.5 * df * log_det_scale + multigammaln(0.5*df, dim))) return out def logpdf(self, x, df, scale): """ Log of the Wishart probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. Each quantile must be a symmetric positive definite matrix. %(_doc_default_callparams)s Returns ------- pdf : ndarray Log of the probability density function evaluated at `x` Notes ----- %(_doc_callparams_note)s """ dim, df, scale = self._process_parameters(df, scale) x = self._process_quantiles(x, dim) # Cholesky decomposition of scale, get log(det(scale)) C, log_det_scale = self._cholesky_logdet(scale) out = self._logpdf(x, dim, df, scale, log_det_scale, C) return _squeeze_output(out) def pdf(self, x, df, scale): """ Wishart probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. Each quantile must be a symmetric positive definite matrix. %(_doc_default_callparams)s Returns ------- pdf : ndarray Probability density function evaluated at `x` Notes ----- %(_doc_callparams_note)s """ return np.exp(self.logpdf(x, df, scale)) def _mean(self, dim, df, scale): """ Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'mean' instead. """ return df * scale def mean(self, df, scale): """ Mean of the Wishart distribution Parameters ---------- %(_doc_default_callparams)s Returns ------- mean : float The mean of the distribution """ dim, df, scale = self._process_parameters(df, scale) out = self._mean(dim, df, scale) return _squeeze_output(out) def _mode(self, dim, df, scale): """ Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'mode' instead. 
""" if df >= dim + 1: out = (df-dim-1) * scale else: out = None return out def mode(self, df, scale): """ Mode of the Wishart distribution Only valid if the degrees of freedom are greater than the dimension of the scale matrix. Parameters ---------- %(_doc_default_callparams)s Returns ------- mode : float or None The Mode of the distribution """ dim, df, scale = self._process_parameters(df, scale) out = self._mode(dim, df, scale) return _squeeze_output(out) if out is not None else out def _var(self, dim, df, scale): """ Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'var' instead. """ var = scale**2 diag = scale.diagonal() # 1 x dim array var += np.outer(diag, diag) var *= df return var def var(self, df, scale): """ Variance of the Wishart distribution Parameters ---------- %(_doc_default_callparams)s Returns ------- var : float The variance of the distribution """ dim, df, scale = self._process_parameters(df, scale) out = self._var(dim, df, scale) return _squeeze_output(out) def _standard_rvs(self, n, shape, dim, df, random_state): """ Parameters ---------- n : integer Number of variates to generate shape : iterable Shape of the variates to generate dim : int Dimension of the scale matrix df : int Degrees of freedom random_state : np.random.RandomState instance RandomState used for drawing the random variates. Notes ----- As this function does no argument checking, it should not be called directly; use 'rvs' instead. """ # Random normal variates for off-diagonal elements n_tril = dim * (dim-1) // 2 covariances = random_state.normal( size=n*n_tril).reshape(shape+(n_tril,)) # Random chi-square variates for diagonal elements variances = np.r_[[random_state.chisquare(df-(i+1)+1, size=n)**0.5 for i in range(dim)]].reshape((dim,) + shape[::-1]).T # Create the A matri(ces) - lower triangular A = np.zeros(shape + (dim, dim)) # Input the covariances size_idx = tuple([slice(None,None,None)]*len(shape)) tril_idx = np.tril_indices(dim, k=-1) A[size_idx + tril_idx] = covariances # Input the variances diag_idx = np.diag_indices(dim) A[size_idx + diag_idx] = variances return A def _rvs(self, n, shape, dim, df, C, random_state): """ Parameters ---------- n : integer Number of variates to generate shape : iterable Shape of the variates to generate dim : int Dimension of the scale matrix df : int Degrees of freedom scale : ndarray Scale matrix C : ndarray Cholesky factorization of the scale matrix, lower triangular. %(_doc_random_state)s Notes ----- As this function does no argument checking, it should not be called directly; use 'rvs' instead. """ random_state = self._get_random_state(random_state) # Calculate the matrices A, which are actually lower triangular # Cholesky factorizations of a matrix B such that B ~ W(df, I) A = self._standard_rvs(n, shape, dim, df, random_state) # Calculate SA = C A A' C', where SA ~ W(df, scale) # Note: this is the product of a (lower) (lower) (lower)' (lower)' # or, denoting B = AA', it is C B C' where C is the lower # triangular Cholesky factorization of the scale matrix. # this appears to conflict with the instructions in [1]_, which # suggest that it should be D' B D where D is the lower # triangular factorization of the scale matrix. 
However, it is # meant to refer to the Bartlett (1933) representation of a # Wishart random variate as L A A' L' where L is lower triangular # so it appears that understanding D' to be upper triangular # is either a typo in or misreading of [1]_. for index in np.ndindex(shape): CA = np.dot(C, A[index]) A[index] = np.dot(CA, CA.T) return A def rvs(self, df, scale, size=1, random_state=None): """ Draw random samples from a Wishart distribution. Parameters ---------- %(_doc_default_callparams)s size : integer or iterable of integers, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray Random variates of shape (`size`) + (`dim`, `dim), where `dim` is the dimension of the scale matrix. Notes ----- %(_doc_callparams_note)s """ n, shape = self._process_size(size) dim, df, scale = self._process_parameters(df, scale) # Cholesky decomposition of scale C = scipy.linalg.cholesky(scale, lower=True) out = self._rvs(n, shape, dim, df, C, random_state) return _squeeze_output(out) def _entropy(self, dim, df, log_det_scale): """ Parameters ---------- dim : int Dimension of the scale matrix df : int Degrees of freedom log_det_scale : float Logarithm of the determinant of the scale matrix Notes ----- As this function does no argument checking, it should not be called directly; use 'entropy' instead. """ return ( 0.5 * (dim+1) * log_det_scale + 0.5 * dim * (dim+1) * _LOG_2 + multigammaln(0.5*df, dim) - 0.5 * (df - dim - 1) * np.sum( [psi(0.5*(df + 1 - (i+1))) for i in range(dim)] ) + 0.5 * df * dim ) def entropy(self, df, scale): """ Compute the differential entropy of the Wishart. Parameters ---------- %(_doc_default_callparams)s Returns ------- h : scalar Entropy of the Wishart distribution Notes ----- %(_doc_callparams_note)s """ dim, df, scale = self._process_parameters(df, scale) _, log_det_scale = self._cholesky_logdet(scale) return self._entropy(dim, df, log_det_scale) def _cholesky_logdet(self, scale): """ Compute Cholesky decomposition and determine (log(det(scale)). Parameters ---------- scale : ndarray Scale matrix. Returns ------- c_decomp : ndarray The Cholesky decomposition of `scale`. logdet : scalar The log of the determinant of `scale`. Notes ----- This computation of ``logdet`` is equivalent to ``np.linalg.slogdet(scale)``. It is ~2x faster though. """ c_decomp = scipy.linalg.cholesky(scale, lower=True) logdet = 2 * np.sum(np.log(c_decomp.diagonal())) return c_decomp, logdet wishart = wishart_gen() class wishart_frozen(multi_rv_frozen): """ Create a frozen Wishart distribution. Parameters ---------- df : array_like Degrees of freedom of the distribution scale : array_like Scale matrix of the distribution seed : None or int or np.random.RandomState instance, optional This parameter defines the RandomState object to use for drawing random variates. If None (or np.random), the global np.random state is used. If integer, it is used to seed the local RandomState instance Default is None. 
""" def __init__(self, df, scale, seed=None): self._dist = wishart_gen(seed) self.dim, self.df, self.scale = self._dist._process_parameters( df, scale) self.C, self.log_det_scale = self._dist._cholesky_logdet(self.scale) def logpdf(self, x): x = self._dist._process_quantiles(x, self.dim) out = self._dist._logpdf(x, self.dim, self.df, self.scale, self.log_det_scale, self.C) return _squeeze_output(out) def pdf(self, x): return np.exp(self.logpdf(x)) def mean(self): out = self._dist._mean(self.dim, self.df, self.scale) return _squeeze_output(out) def mode(self): out = self._dist._mode(self.dim, self.df, self.scale) return _squeeze_output(out) if out is not None else out def var(self): out = self._dist._var(self.dim, self.df, self.scale) return _squeeze_output(out) def rvs(self, size=1, random_state=None): n, shape = self._dist._process_size(size) out = self._dist._rvs(n, shape, self.dim, self.df, self.C, random_state) return _squeeze_output(out) def entropy(self): return self._dist._entropy(self.dim, self.df, self.log_det_scale) # Set frozen generator docstrings from corresponding docstrings in # Wishart and fill in default strings in class docstrings for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs', 'entropy']: method = wishart_gen.__dict__[name] method_frozen = wishart_frozen.__dict__[name] method_frozen.__doc__ = doccer.docformat( method.__doc__, wishart_docdict_noparams) method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params) from numpy import asarray_chkfinite, asarray from scipy.linalg.misc import LinAlgError from scipy.linalg.lapack import get_lapack_funcs def _cho_inv_batch(a, check_finite=True): """ Invert the matrices a_i, using a Cholesky factorization of A, where a_i resides in the last two dimensions of a and the other indices describe the index i. Overwrites the data in a. Parameters ---------- a : array Array of matrices to invert, where the matrices themselves are stored in the last two dimensions. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- x : array Array of inverses of the matrices ``a_i``. See also -------- scipy.linalg.cholesky : Cholesky factorization of a matrix """ if check_finite: a1 = asarray_chkfinite(a) else: a1 = asarray(a) if len(a1.shape) < 2 or a1.shape[-2] != a1.shape[-1]: raise ValueError('expected square matrix in last two dimensions') potrf, potri = get_lapack_funcs(('potrf','potri'), (a1,)) tril_idx = np.tril_indices(a.shape[-2], k=-1) triu_idx = np.triu_indices(a.shape[-2], k=1) for index in np.ndindex(a1.shape[:-2]): # Cholesky decomposition a1[index], info = potrf(a1[index], lower=True, overwrite_a=False, clean=False) if info > 0: raise LinAlgError("%d-th leading minor not positive definite" % info) if info < 0: raise ValueError('illegal value in %d-th argument of internal' ' potrf' % -info) # Inversion a1[index], info = potri(a1[index], lower=True, overwrite_c=False) if info > 0: raise LinAlgError("the inverse could not be computed") if info < 0: raise ValueError('illegal value in %d-th argument of internal' ' potrf' % -info) # Make symmetric (dpotri only fills in the lower triangle) a1[index][triu_idx] = a1[index][tril_idx] return a1 class invwishart_gen(wishart_gen): r""" An inverse Wishart random variable. The `df` keyword specifies the degrees of freedom. 
The `scale` keyword specifies the scale matrix, which must be symmetric and positive definite. In this context, the scale matrix is often interpreted in terms of a multivariate normal covariance matrix. Methods ------- ``pdf(x, df, scale)`` Probability density function. ``logpdf(x, df, scale)`` Log of the probability density function. ``rvs(df, scale, size=1, random_state=None)`` Draw random samples from an inverse Wishart distribution. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. %(_doc_default_callparams)s %(_doc_random_state)s Alternatively, the object may be called (as a function) to fix the degrees of freedom and scale parameters, returning a "frozen" inverse Wishart random variable: rv = invwishart(df=1, scale=1) - Frozen object with the same methods but holding the given degrees of freedom and scale fixed. See Also -------- wishart Notes ----- %(_doc_callparams_note)s The scale matrix `scale` must be a symmetric positive definite matrix. Singular matrices, including the symmetric positive semi-definite case, are not supported. The inverse Wishart distribution is often denoted .. math:: W_p^{-1}(\nu, \Psi) where :math:`\nu` is the degrees of freedom and :math:`\Psi` is the :math:`p \times p` scale matrix. The probability density function for `invwishart` has support over positive definite matrices :math:`S`; if :math:`S \sim W^{-1}_p(\nu, \Sigma)`, then its PDF is given by: .. math:: f(S) = \frac{|\Sigma|^\frac{\nu}{2}}{2^{ \frac{\nu p}{2} } |S|^{\frac{\nu + p + 1}{2}} \Gamma_p \left(\frac{\nu}{2} \right)} \exp\left( -tr(\Sigma S^{-1}) / 2 \right) If :math:`S \sim W_p^{-1}(\nu, \Psi)` (inverse Wishart) then :math:`S^{-1} \sim W_p(\nu, \Psi^{-1})` (Wishart). If the scale matrix is 1-dimensional and equal to one, then the inverse Wishart distribution :math:`W_1(\nu, 1)` collapses to the inverse Gamma distribution with parameters shape = :math:`\frac{\nu}{2}` and scale = :math:`\frac{1}{2}`. .. versionadded:: 0.16.0 References ---------- .. [1] M.L. Eaton, "Multivariate Statistics: A Vector Space Approach", Wiley, 1983. .. [2] M.C. Jones, "Generating Inverse Wishart Matrices", Communications in Statistics - Simulation and Computation, vol. 14.2, pp.511-514, 1985. Examples -------- >>> import matplotlib.pyplot as plt >>> from scipy.stats import invwishart, invgamma >>> x = np.linspace(0.01, 1, 100) >>> iw = invwishart.pdf(x, df=6, scale=1) >>> iw[:3] array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03]) >>> ig = invgamma.pdf(x, 6/2., scale=1./2) >>> ig[:3] array([ 1.20546865e-15, 5.42497807e-06, 4.45813929e-03]) >>> plt.plot(x, iw) The input quantiles can be any shape of array, as long as the last axis labels the components. """ def __init__(self, seed=None): super(invwishart_gen, self).__init__(seed) self.__doc__ = doccer.docformat(self.__doc__, wishart_docdict_params) def __call__(self, df=None, scale=None, seed=None): """ Create a frozen inverse Wishart distribution. See `invwishart_frozen` for more information. """ return invwishart_frozen(df, scale, seed) def _logpdf(self, x, dim, df, scale, log_det_scale): """ Parameters ---------- x : ndarray Points at which to evaluate the log of the probability density function. dim : int Dimension of the scale matrix df : int Degrees of freedom scale : ndarray Scale matrix log_det_scale : float Logarithm of the determinant of the scale matrix Notes ----- As this function does no argument checking, it should not be called directly; use 'logpdf' instead. 
""" log_det_x = np.zeros(x.shape[-1]) #scale_x_inv = np.zeros(x.shape) x_inv = np.copy(x).T if dim > 1: _cho_inv_batch(x_inv) # works in-place else: x_inv = 1./x_inv tr_scale_x_inv = np.zeros(x.shape[-1]) for i in range(x.shape[-1]): C, lower = scipy.linalg.cho_factor(x[:,:,i], lower=True) log_det_x[i] = 2 * np.sum(np.log(C.diagonal())) #scale_x_inv[:,:,i] = scipy.linalg.cho_solve((C, True), scale).T tr_scale_x_inv[i] = np.dot(scale, x_inv[i]).trace() # Log PDF out = ((0.5 * df * log_det_scale - 0.5 * tr_scale_x_inv) - (0.5 * df * dim * _LOG_2 + 0.5 * (df + dim + 1) * log_det_x) - multigammaln(0.5*df, dim)) return out def logpdf(self, x, df, scale): """ Log of the inverse Wishart probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. Each quantile must be a symmetric positive definite matrix. %(_doc_default_callparams)s Returns ------- pdf : ndarray Log of the probability density function evaluated at `x` Notes ----- %(_doc_callparams_note)s """ dim, df, scale = self._process_parameters(df, scale) x = self._process_quantiles(x, dim) _, log_det_scale = self._cholesky_logdet(scale) out = self._logpdf(x, dim, df, scale, log_det_scale) return _squeeze_output(out) def pdf(self, x, df, scale): """ Inverse Wishart probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of `x` denoting the components. Each quantile must be a symmetric positive definite matrix. %(_doc_default_callparams)s Returns ------- pdf : ndarray Probability density function evaluated at `x` Notes ----- %(_doc_callparams_note)s """ return np.exp(self.logpdf(x, df, scale)) def _mean(self, dim, df, scale): """ Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'mean' instead. """ if df > dim + 1: out = scale / (df - dim - 1) else: out = None return out def mean(self, df, scale): """ Mean of the inverse Wishart distribution Only valid if the degrees of freedom are greater than the dimension of the scale matrix plus one. Parameters ---------- %(_doc_default_callparams)s Returns ------- mean : float or None The mean of the distribution """ dim, df, scale = self._process_parameters(df, scale) out = self._mean(dim, df, scale) return _squeeze_output(out) if out is not None else out def _mode(self, dim, df, scale): """ Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'mode' instead. """ return scale / (df + dim + 1) def mode(self, df, scale): """ Mode of the inverse Wishart distribution Parameters ---------- %(_doc_default_callparams)s Returns ------- mode : float The Mode of the distribution """ dim, df, scale = self._process_parameters(df, scale) out = self._mode(dim, df, scale) return _squeeze_output(out) def _var(self, dim, df, scale): """ Parameters ---------- dim : int Dimension of the scale matrix %(_doc_default_callparams)s Notes ----- As this function does no argument checking, it should not be called directly; use 'var' instead. 
""" if df > dim + 3: var = (df - dim + 1) * scale**2 diag = scale.diagonal() # 1 x dim array var += (df - dim - 1) * np.outer(diag, diag) var /= (df - dim) * (df - dim - 1)**2 * (df - dim - 3) else: var = None return var def var(self, df, scale): """ Variance of the inverse Wishart distribution Only valid if the degrees of freedom are greater than the dimension of the scale matrix plus three. Parameters ---------- %(_doc_default_callparams)s Returns ------- var : float The variance of the distribution """ dim, df, scale = self._process_parameters(df, scale) out = self._var(dim, df, scale) return _squeeze_output(out) if out is not None else out def _rvs(self, n, shape, dim, df, C, random_state): """ Parameters ---------- n : integer Number of variates to generate shape : iterable Shape of the variates to generate dim : int Dimension of the scale matrix df : int Degrees of freedom C : ndarray Cholesky factorization of the scale matrix, lower triagular. %(_doc_random_state)s Notes ----- As this function does no argument checking, it should not be called directly; use 'rvs' instead. """ random_state = self._get_random_state(random_state) # Get random draws A such that A ~ W(df, I) A = super(invwishart_gen, self)._standard_rvs(n, shape, dim, df, random_state) # Calculate SA = (CA)'^{-1} (CA)^{-1} ~ iW(df, scale) eye = np.eye(dim) trtrs = get_lapack_funcs(('trtrs'), (A,)) for index in np.ndindex(A.shape[:-2]): # Calculate CA CA = np.dot(C, A[index]) # Get (C A)^{-1} via triangular solver if dim > 1: CA, info = trtrs(CA, eye, lower=True) if info > 0: raise LinAlgError("Singular matrix.") if info < 0: raise ValueError('Illegal value in %d-th argument of' ' internal trtrs' % -info) else: CA = 1. / CA # Get SA A[index] = np.dot(CA.T, CA) return A def rvs(self, df, scale, size=1, random_state=None): """ Draw random samples from an inverse Wishart distribution. Parameters ---------- %(_doc_default_callparams)s size : integer or iterable of integers, optional Number of samples to draw (default 1). %(_doc_random_state)s Returns ------- rvs : ndarray Random variates of shape (`size`) + (`dim`, `dim), where `dim` is the dimension of the scale matrix. Notes ----- %(_doc_callparams_note)s """ n, shape = self._process_size(size) dim, df, scale = self._process_parameters(df, scale) # Invert the scale eye = np.eye(dim) L, lower = scipy.linalg.cho_factor(scale, lower=True) inv_scale = scipy.linalg.cho_solve((L, lower), eye) # Cholesky decomposition of inverted scale C = scipy.linalg.cholesky(inv_scale, lower=True) out = self._rvs(n, shape, dim, df, C, random_state) return _squeeze_output(out) def entropy(self): # Need to find reference for inverse Wishart entropy raise AttributeError invwishart = invwishart_gen() class invwishart_frozen(multi_rv_frozen): def __init__(self, df, scale, seed=None): """ Create a frozen inverse Wishart distribution. Parameters ---------- df : array_like Degrees of freedom of the distribution scale : array_like Scale matrix of the distribution seed : None or int or np.random.RandomState instance, optional This parameter defines the RandomState object to use for drawing random variates. If None (or np.random), the global np.random state is used. If integer, it is used to seed the local RandomState instance Default is None. 
""" self._dist = invwishart_gen(seed) self.dim, self.df, self.scale = self._dist._process_parameters( df, scale ) # Get the determinant via Cholesky factorization C, lower = scipy.linalg.cho_factor(self.scale, lower=True) self.log_det_scale = 2 * np.sum(np.log(C.diagonal())) # Get the inverse using the Cholesky factorization eye = np.eye(self.dim) self.inv_scale = scipy.linalg.cho_solve((C, lower), eye) # Get the Cholesky factorization of the inverse scale self.C = scipy.linalg.cholesky(self.inv_scale, lower=True) def logpdf(self, x): x = self._dist._process_quantiles(x, self.dim) out = self._dist._logpdf(x, self.dim, self.df, self.scale, self.log_det_scale) return _squeeze_output(out) def pdf(self, x): return np.exp(self.logpdf(x)) def mean(self): out = self._dist._mean(self.dim, self.df, self.scale) return _squeeze_output(out) if out is not None else out def mode(self): out = self._dist._mode(self.dim, self.df, self.scale) return _squeeze_output(out) def var(self): out = self._dist._var(self.dim, self.df, self.scale) return _squeeze_output(out) if out is not None else out def rvs(self, size=1, random_state=None): n, shape = self._dist._process_size(size) out = self._dist._rvs(n, shape, self.dim, self.df, self.C, random_state) return _squeeze_output(out) def entropy(self): # Need to find reference for inverse Wishart entropy raise AttributeError # Set frozen generator docstrings from corresponding docstrings in # inverse Wishart and fill in default strings in class docstrings for name in ['logpdf', 'pdf', 'mean', 'mode', 'var', 'rvs']: method = invwishart_gen.__dict__[name] method_frozen = wishart_frozen.__dict__[name] method_frozen.__doc__ = doccer.docformat( method.__doc__, wishart_docdict_noparams) method.__doc__ = doccer.docformat(method.__doc__, wishart_docdict_params)
apache-2.0
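The record above documents the generic-versus-frozen calling convention shared by multivariate_normal, dirichlet, and wishart. A minimal usage sketch of that convention follows, assuming scipy >= 0.16 (when wishart was added); the parameter values are illustrative only.

import numpy as np
from scipy.stats import multivariate_normal, dirichlet, wishart

# Generic call vs. a "frozen" object that holds its parameters fixed.
rv = multivariate_normal(mean=[0.5, -0.2], cov=[[2.0, 0.3], [0.3, 0.5]])
print(rv.pdf([0.0, 0.0]))  # same value as multivariate_normal.pdf([0.0, 0.0], mean=..., cov=...)

# Dirichlet quantiles must lie on the simplex (entries in [0, 1] summing to 1).
print(dirichlet.pdf([0.2, 0.3, 0.5], alpha=[1.0, 2.0, 3.0]))

# Wishart quantiles are positive definite matrices; df must be >= dim(scale).
samples = wishart(df=3, scale=np.eye(2)).rvs(size=2)
print(samples.shape)  # (2, 2, 2)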
harshaneelhg/scikit-learn
sklearn/ensemble/tests/test_partial_dependence.py
365
6996
""" Testing for the partial dependence module. """ import numpy as np from numpy.testing import assert_array_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import if_matplotlib from sklearn.ensemble.partial_dependence import partial_dependence from sklearn.ensemble.partial_dependence import plot_partial_dependence from sklearn.ensemble import GradientBoostingClassifier from sklearn.ensemble import GradientBoostingRegressor from sklearn import datasets # toy sample X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]] y = [-1, -1, -1, 1, 1, 1] T = [[-1, -1], [2, 2], [3, 2]] true_result = [-1, 1, 1] # also load the boston dataset boston = datasets.load_boston() # also load the iris dataset iris = datasets.load_iris() def test_partial_dependence_classifier(): # Test partial dependence for classifier clf = GradientBoostingClassifier(n_estimators=10, random_state=1) clf.fit(X, y) pdp, axes = partial_dependence(clf, [0], X=X, grid_resolution=5) # only 4 grid points instead of 5 because only 4 unique X[:,0] vals assert pdp.shape == (1, 4) assert axes[0].shape[0] == 4 # now with our own grid X_ = np.asarray(X) grid = np.unique(X_[:, 0]) pdp_2, axes = partial_dependence(clf, [0], grid=grid) assert axes is None assert_array_equal(pdp, pdp_2) def test_partial_dependence_multiclass(): # Test partial dependence for multi-class classifier clf = GradientBoostingClassifier(n_estimators=10, random_state=1) clf.fit(iris.data, iris.target) grid_resolution = 25 n_classes = clf.n_classes_ pdp, axes = partial_dependence( clf, [0], X=iris.data, grid_resolution=grid_resolution) assert pdp.shape == (n_classes, grid_resolution) assert len(axes) == 1 assert axes[0].shape[0] == grid_resolution def test_partial_dependence_regressor(): # Test partial dependence for regressor clf = GradientBoostingRegressor(n_estimators=10, random_state=1) clf.fit(boston.data, boston.target) grid_resolution = 25 pdp, axes = partial_dependence( clf, [0], X=boston.data, grid_resolution=grid_resolution) assert pdp.shape == (1, grid_resolution) assert axes[0].shape[0] == grid_resolution def test_partial_dependecy_input(): # Test input validation of partial dependence. clf = GradientBoostingClassifier(n_estimators=10, random_state=1) clf.fit(X, y) assert_raises(ValueError, partial_dependence, clf, [0], grid=None, X=None) assert_raises(ValueError, partial_dependence, clf, [0], grid=[0, 1], X=X) # first argument must be an instance of BaseGradientBoosting assert_raises(ValueError, partial_dependence, {}, [0], X=X) # Gradient boosting estimator must be fit assert_raises(ValueError, partial_dependence, GradientBoostingClassifier(), [0], X=X) assert_raises(ValueError, partial_dependence, clf, [-1], X=X) assert_raises(ValueError, partial_dependence, clf, [100], X=X) # wrong ndim for grid grid = np.random.rand(10, 2, 1) assert_raises(ValueError, partial_dependence, clf, [0], grid=grid) @if_matplotlib def test_plot_partial_dependence(): # Test partial dependence plot function. 
clf = GradientBoostingRegressor(n_estimators=10, random_state=1) clf.fit(boston.data, boston.target) grid_resolution = 25 fig, axs = plot_partial_dependence(clf, boston.data, [0, 1, (0, 1)], grid_resolution=grid_resolution, feature_names=boston.feature_names) assert len(axs) == 3 assert all(ax.has_data for ax in axs) # check with str features and array feature names fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN', ('CRIM', 'ZN')], grid_resolution=grid_resolution, feature_names=boston.feature_names) assert len(axs) == 3 assert all(ax.has_data for ax in axs) # check with list feature_names feature_names = boston.feature_names.tolist() fig, axs = plot_partial_dependence(clf, boston.data, ['CRIM', 'ZN', ('CRIM', 'ZN')], grid_resolution=grid_resolution, feature_names=feature_names) assert len(axs) == 3 assert all(ax.has_data for ax in axs) @if_matplotlib def test_plot_partial_dependence_input(): # Test partial dependence plot function input checks. clf = GradientBoostingClassifier(n_estimators=10, random_state=1) # not fitted yet assert_raises(ValueError, plot_partial_dependence, clf, X, [0]) clf.fit(X, y) assert_raises(ValueError, plot_partial_dependence, clf, np.array(X)[:, :0], [0]) # first argument must be an instance of BaseGradientBoosting assert_raises(ValueError, plot_partial_dependence, {}, X, [0]) # must be larger than -1 assert_raises(ValueError, plot_partial_dependence, clf, X, [-1]) # too large feature value assert_raises(ValueError, plot_partial_dependence, clf, X, [100]) # str feature but no feature_names assert_raises(ValueError, plot_partial_dependence, clf, X, ['foobar']) # not valid features value assert_raises(ValueError, plot_partial_dependence, clf, X, [{'foo': 'bar'}]) @if_matplotlib def test_plot_partial_dependence_multiclass(): # Test partial dependence plot function on multi-class input. clf = GradientBoostingClassifier(n_estimators=10, random_state=1) clf.fit(iris.data, iris.target) grid_resolution = 25 fig, axs = plot_partial_dependence(clf, iris.data, [0, 1], label=0, grid_resolution=grid_resolution) assert len(axs) == 2 assert all(ax.has_data for ax in axs) # now with symbol labels target = iris.target_names[iris.target] clf = GradientBoostingClassifier(n_estimators=10, random_state=1) clf.fit(iris.data, target) grid_resolution = 25 fig, axs = plot_partial_dependence(clf, iris.data, [0, 1], label='setosa', grid_resolution=grid_resolution) assert len(axs) == 2 assert all(ax.has_data for ax in axs) # label not in gbrt.classes_ assert_raises(ValueError, plot_partial_dependence, clf, iris.data, [0, 1], label='foobar', grid_resolution=grid_resolution) # label not provided assert_raises(ValueError, plot_partial_dependence, clf, iris.data, [0, 1], grid_resolution=grid_resolution)
bsd-3-clause
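A hedged sketch of the partial_dependence / plot_partial_dependence calls exercised by the tests in the record above; it assumes the old sklearn.ensemble.partial_dependence module (recent scikit-learn releases moved this functionality to sklearn.inspection).

from sklearn.datasets import load_iris
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble.partial_dependence import (partial_dependence,
                                                 plot_partial_dependence)

iris = load_iris()
clf = GradientBoostingClassifier(n_estimators=10, random_state=1)
clf.fit(iris.data, iris.target)

# For a multi-class classifier there is one curve per class:
# pdp has shape (n_classes, grid_resolution).
pdp, axes = partial_dependence(clf, [0], X=iris.data, grid_resolution=25)
print(pdp.shape)  # (3, 25)

# One-way partial dependence plots for two features; `label` selects the class.
fig, axs = plot_partial_dependence(clf, iris.data, [0, 1], label=0,
                                   grid_resolution=25)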
OpenNeuroLab/brainspell-neo
archive/sprite/brainsprite.py
2
7440
# Christian Dansereau 2016 Copyright import os import numpy as np import nibabel as nib from PIL import Image import json from nilearn.image import resample_img import hashlib, time import matplotlib.pyplot as plt from shutil import copyfile def _load_json_template(): data_file = """{ "canvas": "3Dviewer", "sprite": "spriteImg", "flagCoordinates": true, "nbSlice": { "Y": 233, "Z": 189 }, "colorBackground": "#000", "colorFont": "#FFF", "overlay": { "sprite": "overlayImg", "nbSlice": { "Y": 233, "Z": 189 }, "opacity": 0.7 }, "colorMap": { "img": "colorMap", "min": 0.2, "max": 0.66 } } """ data = json.loads(data_file) return data def _load_notebook_html(canvas_id, bkg_path, overlay_path, tmp_path, json_data): html = """ <!DOCTYPE html> <html> <head> </head> <body> <div id="div_viewer"> <canvas id="{0}"> <!-- this is the canvas that will feature the brain slices --> <img id="spriteImg" class="hidden" src="{1}"> <!-- load a hidden version of the sprite image that includes all (sagital) brain slices --> <img id="overlayImg" class="hidden" src="{2}"> <!-- another sprite image, with an overlay--> </div> <script type="text/javascript" src="{3}jquery.min.js"></script> <!-- JQuery is used in this example, line 18, but is not actually used in brainsprite.js --> <script type="text/javascript" src="{3}brainsprite.js"></script> <script> // On load: build all figures $( "{0}" ).ready(function() {{ var brain = brainsprite({4}); }}); </script> </body> </html> """ return html.format(canvas_id, bkg_path, overlay_path, tmp_path, json_data) def _loadVolume(source_file): img = nib.load(source_file) vol = img.get_data() # check if its a nii file ext = _getExt(source_file) if ext == ".nii": vol = np.swapaxes(vol, 0, 2) return vol def _getspec(vol): nx, ny, nz = vol.shape nrows = int(np.ceil(np.sqrt(nz))) ncolumns = int(np.ceil(nz / (1. * nrows))) return nrows, ncolumns, nx, ny, nz def _getExt(source_file): # Getting the extension if os.path.splitext(source_file)[1] == '.gz': extension = os.path.splitext(os.path.splitext(source_file)[0])[1] else: extension = os.path.splitext(source_file)[1] return extension def _montage(vol): nrows, ncolumns, nx, ny, nz = _getspec(vol) mosaic = np.zeros((nrows * nx, ncolumns * ny)) indx, indy = np.where(np.ones((nrows, ncolumns))) for ii in range(vol.shape[2]): # we need to flip the image in the x axis mosaic[(indx[ii] * nx):((indx[ii] + 1) * nx), (indy[ii] * ny):((indy[ii] + 1) * ny)] = vol[::-1, :, ii] return mosaic def _saveMosaic(mosaic, output_path, overlay=False, overlay_threshold=0.1): if overlay: mosaic[mosaic < overlay_threshold] = 0 im = Image.fromarray(np.uint8(plt.cm.hot(mosaic) * 255)) mask = Image.fromarray(np.uint8(mosaic > 0) * 255).convert("L") im.putalpha(mask) else: im = Image.fromarray(mosaic).convert('RGB') # if im.mode != 'RGBA': # im = im.convert('RGBA') im.save(output_path) def transform_package(img_path, output_folder, overlay_path=''): if overlay_path == '': transform(img_path, output_folder + 'bkg_mosaic.jpg', output_folder + 'params.js') else: transform(img_path, output_folder + 'bkg_mosaic.jpg', output_folder + 'params.js', overlay_path, output_folder + 'overlay_mosaic.png') def transform(source_bkg_path, out_bkg_path, out_json, source_overlay_path='', out_overlay_path='', overlay_threshold=0.1, return_json=False, overlay_interpolation='continuous'): # load data bkg_vol = _loadVolume(source_bkg_path) bkg_vol = (bkg_vol / float(bkg_vol.max())) * 255. 
# populate json params = _load_json_template() params['nbSlice']['Y'] = bkg_vol.shape[1] params['nbSlice']['Z'] = bkg_vol.shape[0] # make bkg montage save mosa_bkg = _montage(bkg_vol) _saveMosaic(mosa_bkg, out_bkg_path) if source_overlay_path != '': # load data bkimg = nib.load(source_bkg_path) overimg = nib.load(source_overlay_path) # transform slice order and resample to fit bkimg # check if its a nii file ext = _getExt(source_overlay_path) ext_bkg = _getExt(source_bkg_path) if ext == ".nii": if ext_bkg == ".mnc": bkimg.affine[:, [0, 2]] = bkimg.affine[:, [2, 0]] overimg = resample_img(overimg, bkimg.affine, bkimg.shape[::-1], interpolation=overlay_interpolation) overlay_vol = np.swapaxes(overimg.get_data(), 0, 2) else: overimg = nib.nifti1.Nifti1Image(overimg.get_data(), overimg.get_affine) overimg = resample_img(overimg, bkimg.affine, bkimg.shape) overlay_vol = overimg.get_data() # populate json params['overlay']['nbSlice']['Y'] = overlay_vol.shape[1] params['overlay']['nbSlice']['Z'] = overlay_vol.shape[0] # make overlay montage and save mosa_overlay = _montage(overlay_vol) _saveMosaic(mosa_overlay, out_overlay_path, overlay=True, overlay_threshold=overlay_threshold) else: del params['overlay'] del params['colorMap'] if out_json[-3:] == '.js': with open(out_json, 'w') as outfile: data = "var jsonParams = '" + json.dumps(params) + "';" outfile.write(data) else: with open(out_json, 'w') as outfile: data = json.dumps(params) outfile.write(data) if return_json: return json.dumps(params) def show_sprite(bkg_img, overlay_img, tmp_path): # make a tmp folder tmp_path = tmp_path + '/brainsprite_tmp/' _make_folder(tmp_path) copyfile('../brainsprite.js', tmp_path+'brainsprite.js') copyfile('../assets/jquery-1.9.1/jquery.min.js', tmp_path + 'jquery.min.js') hash = _gen_file_name() bkgimg_ = tmp_path + hash + '_bkg.jpg' overlayimg_ = tmp_path + hash + '_overlay_mosaic.png' json_data = transform(bkg_img, bkgimg_, tmp_path + hash + '_params.json', overlay_img, overlayimg_, overlay_threshold=0.3, return_json=True) json_data = json_data.replace("3Dviewer", "canvas" + hash) print json_data html_code = _load_notebook_html('canvas' + hash, 'brainsprite_tmp/' + hash + '_bkg.jpg', 'brainsprite_tmp/' + hash + '_overlay_mosaic.png', 'brainsprite_tmp/', json_data) return html_code def _make_folder(path): if not os.path.exists(path): os.makedirs(path) return True return False def _gen_file_name(): hash_ = hashlib.sha1() hash_.update(str(time.time()).encode('utf-8')) return hash_.hexdigest() def test_mosaic(): # custom path background = "test_anat.mnc.gz" overlay = "DMN.nii.gz" output_folder = "/home/cdansereau/virenv/" #background = "t2.nii.gz" #overlay = "t2_seg.nii.gz" #output_folder = "/home/cdansereau/t2/" # transform data transform(output_folder + background, output_folder + 'bkg_mosaic.jpg', output_folder + 'params.json', output_folder + overlay, output_folder + 'overlay_mosaic.png', overlay_threshold=0.3)
mit
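The _getspec/_montage helpers in the record above tile the slices of a 3D volume into a single 2D sprite image. A small self-contained sketch of that tiling, using a toy NumPy volume instead of a NIfTI file:

import numpy as np

vol = np.random.rand(4, 5, 7)               # toy volume: nx=4, ny=5, nz=7 slices
nx, ny, nz = vol.shape
nrows = int(np.ceil(np.sqrt(nz)))           # 3 rows of panels
ncolumns = int(np.ceil(nz / (1. * nrows)))  # 3 columns of panels
mosaic = np.zeros((nrows * nx, ncolumns * ny))
indx, indy = np.where(np.ones((nrows, ncolumns)))  # panel grid coordinates
for ii in range(nz):
    # each slice is flipped along x, as in _montage, then placed in its panel
    mosaic[indx[ii] * nx:(indx[ii] + 1) * nx,
           indy[ii] * ny:(indy[ii] + 1) * ny] = vol[::-1, :, ii]
print(mosaic.shape)                         # (12, 15)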
djgagne/scikit-learn
examples/svm/plot_svm_anova.py
250
2000
""" ================================================= SVM-Anova: SVM with univariate feature selection ================================================= This example shows how to perform univariate feature before running a SVC (support vector classifier) to improve the classification scores. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import svm, datasets, feature_selection, cross_validation from sklearn.pipeline import Pipeline ############################################################################### # Import some data to play with digits = datasets.load_digits() y = digits.target # Throw away data, to be in the curse of dimension settings y = y[:200] X = digits.data[:200] n_samples = len(y) X = X.reshape((n_samples, -1)) # add 200 non-informative features X = np.hstack((X, 2 * np.random.random((n_samples, 200)))) ############################################################################### # Create a feature-selection transform and an instance of SVM that we # combine together to have an full-blown estimator transform = feature_selection.SelectPercentile(feature_selection.f_classif) clf = Pipeline([('anova', transform), ('svc', svm.SVC(C=1.0))]) ############################################################################### # Plot the cross-validation score as a function of percentile of features score_means = list() score_stds = list() percentiles = (1, 3, 6, 10, 15, 20, 30, 40, 60, 80, 100) for percentile in percentiles: clf.set_params(anova__percentile=percentile) # Compute cross-validation score using all CPUs this_scores = cross_validation.cross_val_score(clf, X, y, n_jobs=1) score_means.append(this_scores.mean()) score_stds.append(this_scores.std()) plt.errorbar(percentiles, score_means, np.array(score_stds)) plt.title( 'Performance of the SVM-Anova varying the percentile of features selected') plt.xlabel('Percentile') plt.ylabel('Prediction rate') plt.axis('tight') plt.show()
bsd-3-clause
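The example above targets an older scikit-learn in which cross_validation was a top-level module; a hedged modern equivalent of the same ANOVA-then-SVC pipeline (using sklearn.model_selection) could look like this.

from sklearn import datasets, feature_selection, svm
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import Pipeline

X, y = datasets.load_digits(return_X_y=True)
clf = Pipeline([
    ('anova', feature_selection.SelectPercentile(feature_selection.f_classif,
                                                 percentile=10)),
    ('svc', svm.SVC(C=1.0)),
])
# Cross-validated accuracy of the ANOVA-filtered SVC.
print(cross_val_score(clf, X, y).mean())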
WillisXChen/django-oscar
oscar/lib/python2.7/site-packages/IPython/kernel/zmq/eventloops.py
4
8652
# encoding: utf-8 """Event loop integration for the ZeroMQ-based kernels. """ #----------------------------------------------------------------------------- # Copyright (C) 2011 The IPython Development Team # Distributed under the terms of the BSD License. The full license is in # the file COPYING, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- import os import sys # System library imports import zmq # Local imports from IPython.config.application import Application from IPython.utils import io #------------------------------------------------------------------------------ # Eventloops for integrating the Kernel into different GUIs #------------------------------------------------------------------------------ def _on_os_x_10_9(): import platform from distutils.version import LooseVersion as V return sys.platform == 'darwin' and V(platform.mac_ver()[0]) >= V('10.9') def _notify_stream_qt(kernel, stream): from IPython.external.qt_for_kernel import QtCore if _on_os_x_10_9() and kernel._darwin_app_nap: from IPython.external.appnope import nope_scope as context else: from IPython.core.interactiveshell import NoOpContext as context def process_stream_events(): while stream.getsockopt(zmq.EVENTS) & zmq.POLLIN: with context(): kernel.do_one_iteration() fd = stream.getsockopt(zmq.FD) notifier = QtCore.QSocketNotifier(fd, QtCore.QSocketNotifier.Read, kernel.app) notifier.activated.connect(process_stream_events) def loop_qt4(kernel): """Start a kernel with PyQt4 event loop integration.""" from IPython.lib.guisupport import get_app_qt4, start_event_loop_qt4 kernel.app = get_app_qt4([" "]) kernel.app.setQuitOnLastWindowClosed(False) for s in kernel.shell_streams: _notify_stream_qt(kernel, s) start_event_loop_qt4(kernel.app) def loop_qt5(kernel): """Start a kernel with PyQt5 event loop integration""" os.environ['QT_API'] = 'pyqt5' return loop_qt4(kernel) def loop_wx(kernel): """Start a kernel with wx event loop support.""" import wx from IPython.lib.guisupport import start_event_loop_wx if _on_os_x_10_9() and kernel._darwin_app_nap: # we don't hook up App Nap contexts for Wx, # just disable it outright. from IPython.external.appnope import nope nope() doi = kernel.do_one_iteration # Wx uses milliseconds poll_interval = int(1000*kernel._poll_interval) # We have to put the wx.Timer in a wx.Frame for it to fire properly. # We make the Frame hidden when we create it in the main app below. class TimerFrame(wx.Frame): def __init__(self, func): wx.Frame.__init__(self, None, -1) self.timer = wx.Timer(self) # Units for the timer are in milliseconds self.timer.Start(poll_interval) self.Bind(wx.EVT_TIMER, self.on_timer) self.func = func def on_timer(self, event): self.func() # We need a custom wx.App to create our Frame subclass that has the # wx.Timer to drive the ZMQ event loop. class IPWxApp(wx.App): def OnInit(self): self.frame = TimerFrame(doi) self.frame.Show(False) return True # The redirect=False here makes sure that wx doesn't replace # sys.stdout/stderr with its own classes. kernel.app = IPWxApp(redirect=False) # The import of wx on Linux sets the handler for signal.SIGINT # to 0. This is a bug in wx or gtk. We fix by just setting it # back to the Python default. 
import signal if not callable(signal.getsignal(signal.SIGINT)): signal.signal(signal.SIGINT, signal.default_int_handler) start_event_loop_wx(kernel.app) def loop_tk(kernel): """Start a kernel with the Tk event loop.""" try: from tkinter import Tk # Py 3 except ImportError: from Tkinter import Tk # Py 2 doi = kernel.do_one_iteration # Tk uses milliseconds poll_interval = int(1000*kernel._poll_interval) # For Tkinter, we create a Tk object and call its withdraw method. class Timer(object): def __init__(self, func): self.app = Tk() self.app.withdraw() self.func = func def on_timer(self): self.func() self.app.after(poll_interval, self.on_timer) def start(self): self.on_timer() # Call it once to get things going. self.app.mainloop() kernel.timer = Timer(doi) kernel.timer.start() def loop_gtk(kernel): """Start the kernel, coordinating with the GTK event loop""" from .gui.gtkembed import GTKEmbed gtk_kernel = GTKEmbed(kernel) gtk_kernel.start() def loop_cocoa(kernel): """Start the kernel, coordinating with the Cocoa CFRunLoop event loop via the matplotlib MacOSX backend. """ import matplotlib if matplotlib.__version__ < '1.1.0': kernel.log.warn( "MacOSX backend in matplotlib %s doesn't have a Timer, " "falling back on Tk for CFRunLoop integration. Note that " "even this won't work if Tk is linked against X11 instead of " "Cocoa (e.g. EPD). To use the MacOSX backend in the kernel, " "you must use matplotlib >= 1.1.0, or a native libtk." ) return loop_tk(kernel) from matplotlib.backends.backend_macosx import TimerMac, show # scale interval for sec->ms poll_interval = int(1000*kernel._poll_interval) real_excepthook = sys.excepthook def handle_int(etype, value, tb): """don't let KeyboardInterrupts look like crashes""" if etype is KeyboardInterrupt: io.raw_print("KeyboardInterrupt caught in CFRunLoop") else: real_excepthook(etype, value, tb) # add doi() as a Timer to the CFRunLoop def doi(): # restore excepthook during IPython code sys.excepthook = real_excepthook kernel.do_one_iteration() # and back: sys.excepthook = handle_int t = TimerMac(poll_interval) t.add_callback(doi) t.start() # but still need a Poller for when there are no active windows, # during which time mainloop() returns immediately poller = zmq.Poller() if kernel.control_stream: poller.register(kernel.control_stream.socket, zmq.POLLIN) for stream in kernel.shell_streams: poller.register(stream.socket, zmq.POLLIN) while True: try: # double nested try/except, to properly catch KeyboardInterrupt # due to pyzmq Issue #130 try: # don't let interrupts during mainloop invoke crash_handler: sys.excepthook = handle_int show.mainloop() sys.excepthook = real_excepthook # use poller if mainloop returned (no windows) # scale by extra factor of 10, since it's a real poll poller.poll(10*poll_interval) kernel.do_one_iteration() except: raise except KeyboardInterrupt: # Ctrl-C shouldn't crash the kernel io.raw_print("KeyboardInterrupt caught in kernel") finally: # ensure excepthook is restored sys.excepthook = real_excepthook # mapping of keys to loop functions loop_map = { 'qt' : loop_qt4, 'qt4': loop_qt4, 'qt5': loop_qt5, 'inline': None, 'nbagg': None, 'osx': loop_cocoa, 'wx' : loop_wx, 'tk' : loop_tk, 'gtk': loop_gtk, None : None, } def enable_gui(gui, kernel=None): """Enable integration with a given GUI""" if gui not in loop_map: e = "Invalid GUI request %r, valid ones are:%s" % (gui, loop_map.keys()) raise ValueError(e) if kernel is None: if Application.initialized(): kernel = getattr(Application.instance(), 'kernel', None) if kernel is None: 
raise RuntimeError("You didn't specify a kernel," " and no IPython Application with a kernel appears to be running." ) loop = loop_map[gui] if loop and kernel.eventloop is not None and kernel.eventloop is not loop: raise RuntimeError("Cannot activate multiple GUI eventloops") kernel.eventloop = loop
bsd-3-clause
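The wx and Tk integrations in the event-loop code above share one idea: park a toolkit timer that periodically calls the kernel's do_one_iteration() so ZMQ messages keep flowing while the GUI owns the main loop. Below is a minimal standalone sketch of that pattern, assuming only a stand-in callable for the kernel; poll_kernel and the 50 ms interval are illustrative, not part of the IPython API.

# Timer-driven polling sketch; any callable can stand in for kernel.do_one_iteration.
try:
    from tkinter import Tk      # Python 3
except ImportError:
    from Tkinter import Tk      # Python 2

def make_polling_app(poll_kernel, poll_interval_ms=50):
    """Return a hidden Tk app whose timer repeatedly calls poll_kernel()."""
    app = Tk()
    app.withdraw()                              # no window needed, only the event loop

    def on_timer():
        poll_kernel()                           # let the kernel drain pending ZMQ events
        app.after(poll_interval_ms, on_timer)   # re-arm the timer

    app.after(poll_interval_ms, on_timer)
    return app

# Usage with a hypothetical kernel stand-in:
# app = make_polling_app(lambda: None); app.mainloop()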
iamaziz/simpleai
simpleai/machine_learning/reinforcement_learning.py
5
6345
# -*- coding: utf-8 -*- from collections import defaultdict, Counter import math import random from simpleai.search.utils import argmax import pickle try: import matplotlib.pyplot as plt import numpy except: plt = None # lint:ok numpy = None # lint:ok def make_at_least_n_times(optimistic_reward, min_n): def at_least_n_times_exploration(actions, utilities, temperature, action_counter): utilities = [utilities[x] for x in actions] for i, utility in enumerate(utilities): if action_counter[actions[i]] < min_n: utilities[i] = optimistic_reward d = dict(zip(actions, utilities)) uf = lambda action: d[action] return argmax(actions, uf) return at_least_n_times_exploration def boltzmann_exploration(actions, utilities, temperature, action_counter): '''returns an action with a probability depending on utilities and temperature''' utilities = [utilities[x] for x in actions] temperature = max(temperature, 0.01) _max = max(utilities) _min = min(utilities) if _max == _min: return random.choice(actions) utilities = [math.exp(((u - _min) / (_max - _min)) / temperature) for u in utilities] probs = [u / sum(utilities) for u in utilities] i = 0 tot = probs[i] r = random.random() while i < len(actions) and r >= tot: i += 1 tot += probs[i] return actions[i] def make_exponential_temperature(initial_temperature, alpha): '''returns a function like initial / exp(n * alpha)''' def _function(n): try: return initial_temperature / math.exp(n * alpha) except OverflowError: return 0.01 return _function class PerformanceCounter(object): def __init__(self, learners, names=None): self.learners = learners for i, learner in enumerate(learners): self.update_set_reward(learner) learner.accumulated_rewards = [] learner.known_states = [] learner.temperatures = [] if names is None: learner.name = 'Learner %d' % i else: learner.name = names[i] def update_set_reward(self, learner): def set_reward(reward, terminal=False): if terminal: if len(learner.accumulated_rewards) > 0: learner.accumulated_rewards.append(learner.accumulated_rewards[-1] + reward) else: learner.accumulated_rewards.append(reward) learner.known_states.append(len(learner.Q)) learner.temperatures.append(learner.temperature_function(learner.trials)) learner.old_set_reward(reward, terminal) learner.old_set_reward = learner.set_reward learner.set_reward = set_reward def _make_plot(self, ax, data_name): for learner in self.learners: data = numpy.array(getattr(learner, data_name)) ax.plot(numpy.arange(len(data)), data, label=learner.name) nice_name = data_name.replace('_', ' ').capitalize() ax.set_title(nice_name) ax.legend() def show_statistics(self): f, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True) self._make_plot(ax1, 'accumulated_rewards') self._make_plot(ax2, 'known_states') self._make_plot(ax3, 'temperatures') plt.show() class RLProblem(object): def actions(self, state): '''Returns the actions available to perform from `state`. The returned value is an iterable over actions. 
''' raise NotImplementedError() def update_state(self, percept, agent): 'Override this method if you need to clean perception to a given agent' return percept def inverse(n): if n == 0: return 1 return 1.0 / n def state_default(): return defaultdict(int) class QLearner(object): def __init__(self, problem, temperature_function=inverse, discount_factor=1, exploration_function=boltzmann_exploration, learning_rate=inverse): self.Q = defaultdict(state_default) self.problem = problem self.discount_factor = discount_factor self.temperature_function = temperature_function self.exploration_function = exploration_function self.learning_rate = learning_rate self.last_state = None self.last_action = None self.last_reward = None self.counter = defaultdict(Counter) self.trials = 0 def set_reward(self, reward, terminal=False): self.last_reward = reward if terminal: self.trials += 1 self.Q[self.last_state][self.last_action] = reward def program(self, percept): s = self.last_state a = self.last_action state = self.problem.update_state(percept, self) actions = self.problem.actions(state) if len(actions) > 0: current_action = self.exploration_function(actions, self.Q[state], self.temperature_function(self.trials), self.counter[state]) else: current_action = None if s is not None and current_action: self.counter[s][a] += 1 self.update_rule(s, a, self.last_reward, state, current_action) self.last_state = state self.last_action = current_action return current_action def update_rule(self, s, a, r, cs, ca): raise NotImplementedError def dump(self, path): self.temperature_function = inverse with open(path, 'wb') as f: pickle.dump(self, f) @classmethod def load(self, path): with open(path, 'rb') as f: return pickle.load(f) class TDQLearner(QLearner): def update_rule(self, s, a, r, cs, ca): lr = self.learning_rate(self.counter[s][a]) self.Q[s][a] += lr * (r + self.discount_factor * max(self.Q[cs].values()) - self.Q[s][a]) class SARSALearner(QLearner): def update_rule(self, s, a, r, cs, ca): lr = self.learning_rate(self.counter[s][a]) self.Q[s][a] += lr * (r + self.discount_factor * self.Q[cs][ca] - self.Q[s][a])
mit
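The update_rule in TDQLearner above is the standard temporal-difference Q-learning step, Q(s,a) += lr * (r + gamma * max_a' Q(s',a') - Q(s,a)). Here is a tiny worked sketch with made-up states and numbers; the learner above pulls lr and gamma from its learning_rate and discount_factor settings instead of the constants used here.

from collections import defaultdict

Q = defaultdict(lambda: defaultdict(float))
learning_rate = 0.5
discount_factor = 1.0

def td_update(s, a, reward, next_state):
    # 0.0 if the next state has no known actions yet (the class above assumes it has some)
    best_next = max(Q[next_state].values()) if Q[next_state] else 0.0
    Q[s][a] += learning_rate * (reward + discount_factor * best_next - Q[s][a])

Q['s2']['left'] = 2.0                      # pretend we already value state s2
td_update('s1', 'right', reward=1.0, next_state='s2')
print(Q['s1']['right'])                    # 0.5 * (1.0 + 1.0*2.0 - 0.0) = 1.5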
2baOrNot2ba/AntPat
scripts/viewJonespat_dual.py
1
2897
#!/usr/bin/env python """A simple viewer for Jones patterns for dual-polarized representations. """ import argparse import numpy import matplotlib.pyplot as plt from antpat.reps.sphgridfun.pntsonsphere import ZenHemisphGrid from antpat.dualpolelem import DualPolElem, jones2gIXR, IXRJ2IXRM from antpat.reps.hamaker import convLOFARcc2DPE import antpat.io.filetypes as antfiles def plotJonesCanonical(theta, phi, jones, dpelemname): normalize = True dbscale = True polarplt = True IXRTYPE = 'IXR_J' # Can be IXR_J or IXR_M g, IXRJ = jones2gIXR(jones) IXRM = IXRJ2IXRM(IXRJ) if IXRTYPE == 'IXR_J': IXR = IXRJ elif IXRTYPE == 'IXR_J': IXR = IXRM else: raise RuntimeError("""Error: IXR type {} unknown. Known types are IXR_J, IXR_M.""".format(IXRTYPE)) fig = plt.figure() fig.suptitle(dpelemname) plt.subplot(121, polar=polarplt) if normalize: g_max = numpy.max(g) g = g/g_max if dbscale: g = 20*numpy.log10(g) # nrlvls = 5 # g_lvls = numpy.max(g) - 3.0*numpy.arange(nrlvls) plt.pcolormesh(phi, numpy.rad2deg(theta), g) # plt.contour( phi, numpy.rad2deg(theta), g_dress, levels = g_lvls) plt.colorbar() plt.title('Amp gain') plt.subplot(122, polar=polarplt) plt.pcolormesh(phi, numpy.rad2deg(theta), 10*numpy.log10(IXR)) plt.colorbar() plt.title('IXR_J') plt.show() def plotFFpat(): from antpat.reps.sphgridfun import tvecfun for polchan in [0, 1]: E_th = jones[:, :, polchan, 0].squeeze() E_ph = jones[:, :, polchan, 1].squeeze() tvecfun.plotvfonsph(THETA, PHI, E_th, E_ph, args.freq, vcoordlist=['Ludwig3'], projection='orthographic') if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("freq", type=float, help="Frequency in Hertz") parser.add_argument("filename", help=""" Filename of dual-polarization FF, Hamaker-Arts format, or a single-polarization FF (p-channel)""") parser.add_argument("filename_q", nargs='?', help=""" Filename of second (q-channel) single-polarization FF. """) args = parser.parse_args() if args.filename.endswith(antfiles.HamArtsuffix): hp = convLOFARcc2DPE(args.filename, [args.freq]) elif args.filename.endswith(antfiles.FEKOsuffix): hp = DualPolElem() hp.load_ffes(args.filename, args.filename_q) else: raise RuntimeError("dual-pol pattern file type not known") THETA, PHI = ZenHemisphGrid() jones = hp.getJonesAlong([args.freq], (THETA, PHI)) plotFFpat() # plotJonesCanonical(THETA, PHI, jones, os.path.basename(args.filename) # + ' (' + str(args.freq/1e6) + ' MHz)')
isc
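plotJonesCanonical above turns each Jones matrix into an amplitude gain and an IXR figure via antpat's jones2gIXR. As a rough, hedged illustration only, here is one common singular-value construction of such quantities; this is not necessarily the exact formula antpat implements, and the matrix values are invented.

import numpy

jones = numpy.array([[1.0, 0.05],
                     [0.04, 0.95]])               # made-up dual-pol response
s_max, s_min = numpy.linalg.svd(jones, compute_uv=False)   # singular values, descending
gain = numpy.sqrt((s_max**2 + s_min**2) / 2.0)    # an RMS-style amplitude gain
kappa = s_max / s_min                             # condition number of the Jones matrix
ixr_j = ((kappa + 1.0) / (kappa - 1.0))**2        # condition-number based IXR estimate
print(gain, 10*numpy.log10(ixr_j))                # IXR in dB, as plotted above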
metaml/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/lines.py
69
48233
""" This module contains all the 2D line class which can draw with a variety of line styles, markers and colors. """ # TODO: expose cap and join style attrs from __future__ import division import numpy as np from numpy import ma from matplotlib import verbose import artist from artist import Artist from cbook import iterable, is_string_like, is_numlike, ls_mapper, dedent,\ flatten from colors import colorConverter from path import Path from transforms import Affine2D, Bbox, TransformedPath, IdentityTransform from matplotlib import rcParams # special-purpose marker identifiers: (TICKLEFT, TICKRIGHT, TICKUP, TICKDOWN, CARETLEFT, CARETRIGHT, CARETUP, CARETDOWN) = range(8) # COVERAGE NOTE: Never called internally or from examples def unmasked_index_ranges(mask, compressed = True): warnings.warn("Import this directly from matplotlib.cbook", DeprecationWarning) # Warning added 2008/07/22 from matplotlib.cbook import unmasked_index_ranges as _unmasked_index_ranges return _unmasked_index_ranges(mask, compressed=compressed) def segment_hits(cx, cy, x, y, radius): """ Determine if any line segments are within radius of a point. Returns the list of line segments that are within that radius. """ # Process single points specially if len(x) < 2: res, = np.nonzero( (cx - x)**2 + (cy - y)**2 <= radius**2 ) return res # We need to lop the last element off a lot. xr,yr = x[:-1],y[:-1] # Only look at line segments whose nearest point to C on the line # lies within the segment. dx,dy = x[1:]-xr, y[1:]-yr Lnorm_sq = dx**2+dy**2 # Possibly want to eliminate Lnorm==0 u = ( (cx-xr)*dx + (cy-yr)*dy )/Lnorm_sq candidates = (u>=0) & (u<=1) #if any(candidates): print "candidates",xr[candidates] # Note that there is a little area near one side of each point # which will be near neither segment, and another which will # be near both, depending on the angle of the lines. The # following radius test eliminates these ambiguities. point_hits = (cx - x)**2 + (cy - y)**2 <= radius**2 #if any(point_hits): print "points",xr[candidates] candidates = candidates & ~(point_hits[:-1] | point_hits[1:]) # For those candidates which remain, determine how far they lie away # from the line. px,py = xr+u*dx,yr+u*dy line_hits = (cx-px)**2 + (cy-py)**2 <= radius**2 #if any(line_hits): print "lines",xr[candidates] line_hits = line_hits & candidates points, = point_hits.ravel().nonzero() lines, = line_hits.ravel().nonzero() #print points,lines return np.concatenate((points,lines)) class Line2D(Artist): """ A line - the line can have both a solid linestyle connecting all the vertices, and a marker at each vertex. Additionally, the drawing of the solid line is influenced by the drawstyle, eg one can create "stepped" lines in various styles. """ lineStyles = _lineStyles = { # hidden names deprecated '-' : '_draw_solid', '--' : '_draw_dashed', '-.' : '_draw_dash_dot', ':' : '_draw_dotted', 'None' : '_draw_nothing', ' ' : '_draw_nothing', '' : '_draw_nothing', } _drawStyles_l = { 'default' : '_draw_lines', 'steps-mid' : '_draw_steps_mid', 'steps-pre' : '_draw_steps_pre', 'steps-post' : '_draw_steps_post', } _drawStyles_s = { 'steps' : '_draw_steps_pre', } drawStyles = {} drawStyles.update(_drawStyles_l) drawStyles.update(_drawStyles_s) markers = _markers = { # hidden names deprecated '.' 
: '_draw_point', ',' : '_draw_pixel', 'o' : '_draw_circle', 'v' : '_draw_triangle_down', '^' : '_draw_triangle_up', '<' : '_draw_triangle_left', '>' : '_draw_triangle_right', '1' : '_draw_tri_down', '2' : '_draw_tri_up', '3' : '_draw_tri_left', '4' : '_draw_tri_right', 's' : '_draw_square', 'p' : '_draw_pentagon', '*' : '_draw_star', 'h' : '_draw_hexagon1', 'H' : '_draw_hexagon2', '+' : '_draw_plus', 'x' : '_draw_x', 'D' : '_draw_diamond', 'd' : '_draw_thin_diamond', '|' : '_draw_vline', '_' : '_draw_hline', TICKLEFT : '_draw_tickleft', TICKRIGHT : '_draw_tickright', TICKUP : '_draw_tickup', TICKDOWN : '_draw_tickdown', CARETLEFT : '_draw_caretleft', CARETRIGHT : '_draw_caretright', CARETUP : '_draw_caretup', CARETDOWN : '_draw_caretdown', 'None' : '_draw_nothing', ' ' : '_draw_nothing', '' : '_draw_nothing', } filled_markers = ('o', '^', 'v', '<', '>', 's', 'd', 'D', 'h', 'H', 'p', '*') zorder = 2 validCap = ('butt', 'round', 'projecting') validJoin = ('miter', 'round', 'bevel') def __str__(self): if self._label != "": return "Line2D(%s)"%(self._label) elif hasattr(self, '_x') and len(self._x) > 3: return "Line2D((%g,%g),(%g,%g),...,(%g,%g))"\ %(self._x[0],self._y[0],self._x[0],self._y[0],self._x[-1],self._y[-1]) elif hasattr(self, '_x'): return "Line2D(%s)"\ %(",".join(["(%g,%g)"%(x,y) for x,y in zip(self._x,self._y)])) else: return "Line2D()" def __init__(self, xdata, ydata, linewidth = None, # all Nones default to rc linestyle = None, color = None, marker = None, markersize = None, markeredgewidth = None, markeredgecolor = None, markerfacecolor = None, antialiased = None, dash_capstyle = None, solid_capstyle = None, dash_joinstyle = None, solid_joinstyle = None, pickradius = 5, drawstyle = None, **kwargs ): """ Create a :class:`~matplotlib.lines.Line2D` instance with *x* and *y* data in sequences *xdata*, *ydata*. The kwargs are :class:`~matplotlib.lines.Line2D` properties: %(Line2D)s See :meth:`set_linestyle` for a decription of the line styles, :meth:`set_marker` for a description of the markers, and :meth:`set_drawstyle` for a description of the draw styles. 
""" Artist.__init__(self) #convert sequences to numpy arrays if not iterable(xdata): raise RuntimeError('xdata must be a sequence') if not iterable(ydata): raise RuntimeError('ydata must be a sequence') if linewidth is None : linewidth=rcParams['lines.linewidth'] if linestyle is None : linestyle=rcParams['lines.linestyle'] if marker is None : marker=rcParams['lines.marker'] if color is None : color=rcParams['lines.color'] if markersize is None : markersize=rcParams['lines.markersize'] if antialiased is None : antialiased=rcParams['lines.antialiased'] if dash_capstyle is None : dash_capstyle=rcParams['lines.dash_capstyle'] if dash_joinstyle is None : dash_joinstyle=rcParams['lines.dash_joinstyle'] if solid_capstyle is None : solid_capstyle=rcParams['lines.solid_capstyle'] if solid_joinstyle is None : solid_joinstyle=rcParams['lines.solid_joinstyle'] if drawstyle is None : drawstyle='default' self.set_dash_capstyle(dash_capstyle) self.set_dash_joinstyle(dash_joinstyle) self.set_solid_capstyle(solid_capstyle) self.set_solid_joinstyle(solid_joinstyle) self.set_linestyle(linestyle) self.set_drawstyle(drawstyle) self.set_linewidth(linewidth) self.set_color(color) self.set_marker(marker) self.set_antialiased(antialiased) self.set_markersize(markersize) self._dashSeq = None self.set_markerfacecolor(markerfacecolor) self.set_markeredgecolor(markeredgecolor) self.set_markeredgewidth(markeredgewidth) self._point_size_reduction = 0.5 self.verticalOffset = None # update kwargs before updating data to give the caller a # chance to init axes (and hence unit support) self.update(kwargs) self.pickradius = pickradius if is_numlike(self._picker): self.pickradius = self._picker self._xorig = np.asarray([]) self._yorig = np.asarray([]) self._invalid = True self.set_data(xdata, ydata) def contains(self, mouseevent): """ Test whether the mouse event occurred on the line. The pick radius determines the precision of the location test (usually within five points of the value). Use :meth:`~matplotlib.lines.Line2D.get_pickradius` or :meth:`~matplotlib.lines.Line2D.set_pickradius` to view or modify it. Returns *True* if any values are within the radius along with ``{'ind': pointlist}``, where *pointlist* is the set of points within the radius. TODO: sort returned indices by distance """ if callable(self._contains): return self._contains(self,mouseevent) if not is_numlike(self.pickradius): raise ValueError,"pick radius should be a distance" # Make sure we have data to plot if self._invalid: self.recache() if len(self._xy)==0: return False,{} # Convert points to pixels path, affine = self._transformed_path.get_transformed_path_and_affine() path = affine.transform_path(path) xy = path.vertices xt = xy[:, 0] yt = xy[:, 1] # Convert pick radius from points to pixels if self.figure == None: warning.warn('no figure set when check if mouse is on line') pixels = self.pickradius else: pixels = self.figure.dpi/72. * self.pickradius # Check for collision if self._linestyle in ['None',None]: # If no line, return the nearby point(s) d = (xt-mouseevent.x)**2 + (yt-mouseevent.y)**2 ind, = np.nonzero(np.less_equal(d, pixels**2)) else: # If line, return the nearby segment(s) ind = segment_hits(mouseevent.x,mouseevent.y,xt,yt,pixels) # Debugging message if False and self._label != u'': print "Checking line",self._label,"at",mouseevent.x,mouseevent.y print 'xt', xt print 'yt', yt #print 'dx,dy', (xt-mouseevent.x)**2., (yt-mouseevent.y)**2. 
print 'ind',ind # Return the point(s) within radius return len(ind)>0,dict(ind=ind) def get_pickradius(self): 'return the pick radius used for containment tests' return self.pickradius def setpickradius(self,d): """Sets the pick radius used for containment tests ACCEPTS: float distance in points """ self.pickradius = d def set_picker(self,p): """Sets the event picker details for the line. ACCEPTS: float distance in points or callable pick function ``fn(artist, event)`` """ if callable(p): self._contains = p else: self.pickradius = p self._picker = p def get_window_extent(self, renderer): bbox = Bbox.unit() bbox.update_from_data_xy(self.get_transform().transform(self.get_xydata()), ignore=True) # correct for marker size, if any if self._marker is not None: ms = (self._markersize / 72.0 * self.figure.dpi) * 0.5 bbox = bbox.padded(ms) return bbox def set_axes(self, ax): Artist.set_axes(self, ax) if ax.xaxis is not None: self._xcid = ax.xaxis.callbacks.connect('units', self.recache) if ax.yaxis is not None: self._ycid = ax.yaxis.callbacks.connect('units', self.recache) set_axes.__doc__ = Artist.set_axes.__doc__ def set_data(self, *args): """ Set the x and y data ACCEPTS: 2D array """ if len(args)==1: x, y = args[0] else: x, y = args not_masked = 0 if not ma.isMaskedArray(x): x = np.asarray(x) not_masked += 1 if not ma.isMaskedArray(y): y = np.asarray(y) not_masked += 1 if (not_masked < 2 or (x is not self._xorig and (x.shape != self._xorig.shape or np.any(x != self._xorig))) or (y is not self._yorig and (y.shape != self._yorig.shape or np.any(y != self._yorig)))): self._xorig = x self._yorig = y self._invalid = True def recache(self): #if self.axes is None: print 'recache no axes' #else: print 'recache units', self.axes.xaxis.units, self.axes.yaxis.units if ma.isMaskedArray(self._xorig) or ma.isMaskedArray(self._yorig): x = ma.asarray(self.convert_xunits(self._xorig), float) y = ma.asarray(self.convert_yunits(self._yorig), float) x = ma.ravel(x) y = ma.ravel(y) else: x = np.asarray(self.convert_xunits(self._xorig), float) y = np.asarray(self.convert_yunits(self._yorig), float) x = np.ravel(x) y = np.ravel(y) if len(x)==1 and len(y)>1: x = x * np.ones(y.shape, float) if len(y)==1 and len(x)>1: y = y * np.ones(x.shape, float) if len(x) != len(y): raise RuntimeError('xdata and ydata must be the same length') x = x.reshape((len(x), 1)) y = y.reshape((len(y), 1)) if ma.isMaskedArray(x) or ma.isMaskedArray(y): self._xy = ma.concatenate((x, y), 1) else: self._xy = np.concatenate((x, y), 1) self._x = self._xy[:, 0] # just a view self._y = self._xy[:, 1] # just a view # Masked arrays are now handled by the Path class itself self._path = Path(self._xy) self._transformed_path = TransformedPath(self._path, self.get_transform()) self._invalid = False def set_transform(self, t): """ set the Transformation instance used by this artist ACCEPTS: a :class:`matplotlib.transforms.Transform` instance """ Artist.set_transform(self, t) self._invalid = True # self._transformed_path = TransformedPath(self._path, self.get_transform()) def _is_sorted(self, x): "return true if x is sorted" if len(x)<2: return 1 return np.alltrue(x[1:]-x[0:-1]>=0) def draw(self, renderer): if self._invalid: self.recache() renderer.open_group('line2d') if not self._visible: return gc = renderer.new_gc() self._set_gc_clip(gc) gc.set_foreground(self._color) gc.set_antialiased(self._antialiased) gc.set_linewidth(self._linewidth) gc.set_alpha(self._alpha) if self.is_dashed(): cap = self._dashcapstyle join = self._dashjoinstyle else: cap = 
self._solidcapstyle join = self._solidjoinstyle gc.set_joinstyle(join) gc.set_capstyle(cap) gc.set_snap(self.get_snap()) funcname = self._lineStyles.get(self._linestyle, '_draw_nothing') if funcname != '_draw_nothing': tpath, affine = self._transformed_path.get_transformed_path_and_affine() self._lineFunc = getattr(self, funcname) funcname = self.drawStyles.get(self._drawstyle, '_draw_lines') drawFunc = getattr(self, funcname) drawFunc(renderer, gc, tpath, affine.frozen()) if self._marker is not None: gc = renderer.new_gc() self._set_gc_clip(gc) gc.set_foreground(self.get_markeredgecolor()) gc.set_linewidth(self._markeredgewidth) gc.set_alpha(self._alpha) funcname = self._markers.get(self._marker, '_draw_nothing') if funcname != '_draw_nothing': tpath, affine = self._transformed_path.get_transformed_points_and_affine() markerFunc = getattr(self, funcname) markerFunc(renderer, gc, tpath, affine.frozen()) renderer.close_group('line2d') def get_antialiased(self): return self._antialiased def get_color(self): return self._color def get_drawstyle(self): return self._drawstyle def get_linestyle(self): return self._linestyle def get_linewidth(self): return self._linewidth def get_marker(self): return self._marker def get_markeredgecolor(self): if (is_string_like(self._markeredgecolor) and self._markeredgecolor == 'auto'): if self._marker in self.filled_markers: return 'k' else: return self._color else: return self._markeredgecolor return self._markeredgecolor def get_markeredgewidth(self): return self._markeredgewidth def get_markerfacecolor(self): if (self._markerfacecolor is None or (is_string_like(self._markerfacecolor) and self._markerfacecolor.lower()=='none') ): return self._markerfacecolor elif (is_string_like(self._markerfacecolor) and self._markerfacecolor.lower() == 'auto'): return self._color else: return self._markerfacecolor def get_markersize(self): return self._markersize def get_data(self, orig=True): """ Return the xdata, ydata. If *orig* is *True*, return the original data """ return self.get_xdata(orig=orig), self.get_ydata(orig=orig) def get_xdata(self, orig=True): """ Return the xdata. If *orig* is *True*, return the original data, else the processed data. """ if orig: return self._xorig if self._invalid: self.recache() return self._x def get_ydata(self, orig=True): """ Return the ydata. If *orig* is *True*, return the original data, else the processed data. """ if orig: return self._yorig if self._invalid: self.recache() return self._y def get_path(self): """ Return the :class:`~matplotlib.path.Path` object associated with this line. """ if self._invalid: self.recache() return self._path def get_xydata(self): """ Return the *xy* data as a Nx2 numpy array. """ if self._invalid: self.recache() return self._xy def set_antialiased(self, b): """ True if line should be drawin with antialiased rendering ACCEPTS: [True | False] """ self._antialiased = b def set_color(self, color): """ Set the color of the line ACCEPTS: any matplotlib color """ self._color = color def set_drawstyle(self, drawstyle): """ Set the drawstyle of the plot 'default' connects the points with lines. The steps variants produce step-plots. 'steps' is equivalent to 'steps-pre' and is maintained for backward-compatibility. 
ACCEPTS: [ 'default' | 'steps' | 'steps-pre' | 'steps-mid' | 'steps-post' ] """ self._drawstyle = drawstyle def set_linewidth(self, w): """ Set the line width in points ACCEPTS: float value in points """ self._linewidth = w def set_linestyle(self, linestyle): """ Set the linestyle of the line (also accepts drawstyles) ================ ================= linestyle description ================ ================= '-' solid '--' dashed '-.' dash_dot ':' dotted 'None' draw nothing ' ' draw nothing '' draw nothing ================ ================= 'steps' is equivalent to 'steps-pre' and is maintained for backward-compatibility. .. seealso:: :meth:`set_drawstyle` ACCEPTS: [ '-' | '--' | '-.' | ':' | 'None' | ' ' | '' ] and any drawstyle in combination with a linestyle, e.g. 'steps--'. """ # handle long drawstyle names before short ones ! for ds in flatten([k.keys() for k in (self._drawStyles_l, self._drawStyles_s)], is_string_like): if linestyle.startswith(ds): self.set_drawstyle(ds) if len(linestyle) > len(ds): linestyle = linestyle[len(ds):] else: linestyle = '-' if linestyle not in self._lineStyles: if linestyle in ls_mapper: linestyle = ls_mapper[linestyle] else: verbose.report('Unrecognized line style %s, %s' % (linestyle, type(linestyle))) if linestyle in [' ','']: linestyle = 'None' self._linestyle = linestyle def set_marker(self, marker): """ Set the line marker ========== ========================== marker description ========== ========================== '.' point ',' pixel 'o' circle 'v' triangle_down '^' triangle_up '<' triangle_left '>' triangle_right '1' tri_down '2' tri_up '3' tri_left '4' tri_right 's' square 'p' pentagon '*' star 'h' hexagon1 'H' hexagon2 '+' plus 'x' x 'D' diamond 'd' thin_diamond '|' vline '_' hline TICKLEFT tickleft TICKRIGHT tickright TICKUP tickup TICKDOWN tickdown CARETLEFT caretleft CARETRIGHT caretright CARETUP caretup CARETDOWN caretdown 'None' nothing ' ' nothing '' nothing ========== ========================== ACCEPTS: [ '+' | '*' | ',' | '.' | '1' | '2' | '3' | '4' | '<' | '>' | 'D' | 'H' | '^' | '_' | 'd' | 'h' | 'o' | 'p' | 's' | 'v' | 'x' | '|' | TICKUP | TICKDOWN | TICKLEFT | TICKRIGHT | 'None' | ' ' | '' ] """ if marker not in self._markers: verbose.report('Unrecognized marker style %s, %s' % (marker, type(marker))) if marker in [' ','']: marker = 'None' self._marker = marker self._markerFunc = self._markers[marker] def set_markeredgecolor(self, ec): """ Set the marker edge color ACCEPTS: any matplotlib color """ if ec is None : ec = 'auto' self._markeredgecolor = ec def set_markeredgewidth(self, ew): """ Set the marker edge width in points ACCEPTS: float value in points """ if ew is None : ew = rcParams['lines.markeredgewidth'] self._markeredgewidth = ew def set_markerfacecolor(self, fc): """ Set the marker face color ACCEPTS: any matplotlib color """ if fc is None : fc = 'auto' self._markerfacecolor = fc def set_markersize(self, sz): """ Set the marker size in points ACCEPTS: float """ self._markersize = sz def set_xdata(self, x): """ Set the data np.array for x ACCEPTS: 1D array """ x = np.asarray(x) self.set_data(x, self._yorig) def set_ydata(self, y): """ Set the data np.array for y ACCEPTS: 1D array """ y = np.asarray(y) self.set_data(self._xorig, y) def set_dashes(self, seq): """ Set the dash sequence, sequence of dashes with on off ink in points. If seq is empty or if seq = (None, None), the linestyle will be set to solid. 
ACCEPTS: sequence of on/off ink in points """ if seq == (None, None) or len(seq)==0: self.set_linestyle('-') else: self.set_linestyle('--') self._dashSeq = seq # TODO: offset ignored for now def _draw_lines(self, renderer, gc, path, trans): self._lineFunc(renderer, gc, path, trans) def _draw_steps_pre(self, renderer, gc, path, trans): vertices = self._xy steps = ma.zeros((2*len(vertices)-1, 2), np.float_) steps[0::2, 0], steps[1::2, 0] = vertices[:, 0], vertices[:-1, 0] steps[0::2, 1], steps[1:-1:2, 1] = vertices[:, 1], vertices[1:, 1] path = Path(steps) path = path.transformed(self.get_transform()) self._lineFunc(renderer, gc, path, IdentityTransform()) def _draw_steps_post(self, renderer, gc, path, trans): vertices = self._xy steps = ma.zeros((2*len(vertices)-1, 2), np.float_) steps[::2, 0], steps[1:-1:2, 0] = vertices[:, 0], vertices[1:, 0] steps[0::2, 1], steps[1::2, 1] = vertices[:, 1], vertices[:-1, 1] path = Path(steps) path = path.transformed(self.get_transform()) self._lineFunc(renderer, gc, path, IdentityTransform()) def _draw_steps_mid(self, renderer, gc, path, trans): vertices = self._xy steps = ma.zeros((2*len(vertices), 2), np.float_) steps[1:-1:2, 0] = 0.5 * (vertices[:-1, 0] + vertices[1:, 0]) steps[2::2, 0] = 0.5 * (vertices[:-1, 0] + vertices[1:, 0]) steps[0, 0] = vertices[0, 0] steps[-1, 0] = vertices[-1, 0] steps[0::2, 1], steps[1::2, 1] = vertices[:, 1], vertices[:, 1] path = Path(steps) path = path.transformed(self.get_transform()) self._lineFunc(renderer, gc, path, IdentityTransform()) def _draw_nothing(self, *args, **kwargs): pass def _draw_solid(self, renderer, gc, path, trans): gc.set_linestyle('solid') renderer.draw_path(gc, path, trans) def _draw_dashed(self, renderer, gc, path, trans): gc.set_linestyle('dashed') if self._dashSeq is not None: gc.set_dashes(0, self._dashSeq) renderer.draw_path(gc, path, trans) def _draw_dash_dot(self, renderer, gc, path, trans): gc.set_linestyle('dashdot') renderer.draw_path(gc, path, trans) def _draw_dotted(self, renderer, gc, path, trans): gc.set_linestyle('dotted') renderer.draw_path(gc, path, trans) def _draw_point(self, renderer, gc, path, path_trans): w = renderer.points_to_pixels(self._markersize) * \ self._point_size_reduction * 0.5 gc.set_snap(renderer.points_to_pixels(self._markersize) > 3.0) rgbFace = self._get_rgb_face() transform = Affine2D().scale(w) renderer.draw_markers( gc, Path.unit_circle(), transform, path, path_trans, rgbFace) _draw_pixel_transform = Affine2D().translate(-0.5, -0.5) def _draw_pixel(self, renderer, gc, path, path_trans): rgbFace = self._get_rgb_face() gc.set_snap(False) renderer.draw_markers(gc, Path.unit_rectangle(), self._draw_pixel_transform, path, path_trans, rgbFace) def _draw_circle(self, renderer, gc, path, path_trans): w = renderer.points_to_pixels(self._markersize) * 0.5 gc.set_snap(renderer.points_to_pixels(self._markersize) > 3.0) rgbFace = self._get_rgb_face() transform = Affine2D().scale(w, w) renderer.draw_markers( gc, Path.unit_circle(), transform, path, path_trans, rgbFace) _triangle_path = Path([[0.0, 1.0], [-1.0, -1.0], [1.0, -1.0], [0.0, 1.0]]) def _draw_triangle_up(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset, offset) rgbFace = self._get_rgb_face() renderer.draw_markers(gc, self._triangle_path, transform, path, path_trans, rgbFace) def _draw_triangle_down(self, renderer, gc, path, path_trans): 
gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset, -offset) rgbFace = self._get_rgb_face() renderer.draw_markers(gc, self._triangle_path, transform, path, path_trans, rgbFace) def _draw_triangle_left(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset, offset).rotate_deg(90) rgbFace = self._get_rgb_face() renderer.draw_markers(gc, self._triangle_path, transform, path, path_trans, rgbFace) def _draw_triangle_right(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset, offset).rotate_deg(-90) rgbFace = self._get_rgb_face() renderer.draw_markers(gc, self._triangle_path, transform, path, path_trans, rgbFace) def _draw_square(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 2.0) side = renderer.points_to_pixels(self._markersize) transform = Affine2D().translate(-0.5, -0.5).scale(side) rgbFace = self._get_rgb_face() renderer.draw_markers(gc, Path.unit_rectangle(), transform, path, path_trans, rgbFace) def _draw_diamond(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0) side = renderer.points_to_pixels(self._markersize) transform = Affine2D().translate(-0.5, -0.5).rotate_deg(45).scale(side) rgbFace = self._get_rgb_face() renderer.draw_markers(gc, Path.unit_rectangle(), transform, path, path_trans, rgbFace) def _draw_thin_diamond(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0) offset = renderer.points_to_pixels(self._markersize) transform = Affine2D().translate(-0.5, -0.5) \ .rotate_deg(45).scale(offset * 0.6, offset) rgbFace = self._get_rgb_face() renderer.draw_markers(gc, Path.unit_rectangle(), transform, path, path_trans, rgbFace) def _draw_pentagon(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0) offset = 0.5 * renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset) rgbFace = self._get_rgb_face() renderer.draw_markers(gc, Path.unit_regular_polygon(5), transform, path, path_trans, rgbFace) def _draw_star(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0) offset = 0.5 * renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset) rgbFace = self._get_rgb_face() _starpath = Path.unit_regular_star(5, innerCircle=0.381966) renderer.draw_markers(gc, _starpath, transform, path, path_trans, rgbFace) def _draw_hexagon1(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0) offset = 0.5 * renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset) rgbFace = self._get_rgb_face() renderer.draw_markers(gc, Path.unit_regular_polygon(6), transform, path, path_trans, rgbFace) def _draw_hexagon2(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0) offset = 0.5 * renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset).rotate_deg(30) rgbFace = self._get_rgb_face() renderer.draw_markers(gc, Path.unit_regular_polygon(6), transform, path, path_trans, rgbFace) _line_marker_path = 
Path([[0.0, -1.0], [0.0, 1.0]]) def _draw_vline(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset) renderer.draw_markers(gc, self._line_marker_path, transform, path, path_trans) def _draw_hline(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset).rotate_deg(90) renderer.draw_markers(gc, self._line_marker_path, transform, path, path_trans) _tickhoriz_path = Path([[0.0, 0.0], [1.0, 0.0]]) def _draw_tickleft(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0) offset = renderer.points_to_pixels(self._markersize) marker_transform = Affine2D().scale(-offset, 1.0) renderer.draw_markers(gc, self._tickhoriz_path, marker_transform, path, path_trans) def _draw_tickright(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0) offset = renderer.points_to_pixels(self._markersize) marker_transform = Affine2D().scale(offset, 1.0) renderer.draw_markers(gc, self._tickhoriz_path, marker_transform, path, path_trans) _tickvert_path = Path([[-0.0, 0.0], [-0.0, 1.0]]) def _draw_tickup(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0) offset = renderer.points_to_pixels(self._markersize) marker_transform = Affine2D().scale(1.0, offset) renderer.draw_markers(gc, self._tickvert_path, marker_transform, path, path_trans) def _draw_tickdown(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 1.0) offset = renderer.points_to_pixels(self._markersize) marker_transform = Affine2D().scale(1.0, -offset) renderer.draw_markers(gc, self._tickvert_path, marker_transform, path, path_trans) _plus_path = Path([[-1.0, 0.0], [1.0, 0.0], [0.0, -1.0], [0.0, 1.0]], [Path.MOVETO, Path.LINETO, Path.MOVETO, Path.LINETO]) def _draw_plus(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset) renderer.draw_markers(gc, self._plus_path, transform, path, path_trans) _tri_path = Path([[0.0, 0.0], [0.0, -1.0], [0.0, 0.0], [0.8, 0.5], [0.0, 0.0], [-0.8, 0.5]], [Path.MOVETO, Path.LINETO, Path.MOVETO, Path.LINETO, Path.MOVETO, Path.LINETO]) def _draw_tri_down(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset) renderer.draw_markers(gc, self._tri_path, transform, path, path_trans) def _draw_tri_up(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset).rotate_deg(180) renderer.draw_markers(gc, self._tri_path, transform, path, path_trans) def _draw_tri_left(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset).rotate_deg(90) renderer.draw_markers(gc, self._tri_path, transform, path, path_trans) def _draw_tri_right(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 5.0) 
offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset).rotate_deg(270) renderer.draw_markers(gc, self._tri_path, transform, path, path_trans) _caret_path = Path([[-1.0, 1.5], [0.0, 0.0], [1.0, 1.5]]) def _draw_caretdown(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset) renderer.draw_markers(gc, self._caret_path, transform, path, path_trans) def _draw_caretup(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset).rotate_deg(180) renderer.draw_markers(gc, self._caret_path, transform, path, path_trans) def _draw_caretleft(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset).rotate_deg(270) renderer.draw_markers(gc, self._caret_path, transform, path, path_trans) def _draw_caretright(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset).rotate_deg(90) renderer.draw_markers(gc, self._caret_path, transform, path, path_trans) _x_path = Path([[-1.0, -1.0], [1.0, 1.0], [-1.0, 1.0], [1.0, -1.0]], [Path.MOVETO, Path.LINETO, Path.MOVETO, Path.LINETO]) def _draw_x(self, renderer, gc, path, path_trans): gc.set_snap(renderer.points_to_pixels(self._markersize) >= 3.0) offset = 0.5*renderer.points_to_pixels(self._markersize) transform = Affine2D().scale(offset) renderer.draw_markers(gc, self._x_path, transform, path, path_trans) def update_from(self, other): 'copy properties from other to self' Artist.update_from(self, other) self._linestyle = other._linestyle self._linewidth = other._linewidth self._color = other._color self._markersize = other._markersize self._markerfacecolor = other._markerfacecolor self._markeredgecolor = other._markeredgecolor self._markeredgewidth = other._markeredgewidth self._dashSeq = other._dashSeq self._dashcapstyle = other._dashcapstyle self._dashjoinstyle = other._dashjoinstyle self._solidcapstyle = other._solidcapstyle self._solidjoinstyle = other._solidjoinstyle self._linestyle = other._linestyle self._marker = other._marker self._drawstyle = other._drawstyle def _get_rgb_face(self): facecolor = self.get_markerfacecolor() if is_string_like(facecolor) and facecolor.lower()=='none': rgbFace = None else: rgbFace = colorConverter.to_rgb(facecolor) return rgbFace # some aliases.... 
def set_aa(self, val): 'alias for set_antialiased' self.set_antialiased(val) def set_c(self, val): 'alias for set_color' self.set_color(val) def set_ls(self, val): 'alias for set_linestyle' self.set_linestyle(val) def set_lw(self, val): 'alias for set_linewidth' self.set_linewidth(val) def set_mec(self, val): 'alias for set_markeredgecolor' self.set_markeredgecolor(val) def set_mew(self, val): 'alias for set_markeredgewidth' self.set_markeredgewidth(val) def set_mfc(self, val): 'alias for set_markerfacecolor' self.set_markerfacecolor(val) def set_ms(self, val): 'alias for set_markersize' self.set_markersize(val) def get_aa(self): 'alias for get_antialiased' return self.get_antialiased() def get_c(self): 'alias for get_color' return self.get_color() def get_ls(self): 'alias for get_linestyle' return self.get_linestyle() def get_lw(self): 'alias for get_linewidth' return self.get_linewidth() def get_mec(self): 'alias for get_markeredgecolor' return self.get_markeredgecolor() def get_mew(self): 'alias for get_markeredgewidth' return self.get_markeredgewidth() def get_mfc(self): 'alias for get_markerfacecolor' return self.get_markerfacecolor() def get_ms(self): 'alias for get_markersize' return self.get_markersize() def set_dash_joinstyle(self, s): """ Set the join style for dashed linestyles ACCEPTS: ['miter' | 'round' | 'bevel'] """ s = s.lower() if s not in self.validJoin: raise ValueError('set_dash_joinstyle passed "%s";\n' % (s,) + 'valid joinstyles are %s' % (self.validJoin,)) self._dashjoinstyle = s def set_solid_joinstyle(self, s): """ Set the join style for solid linestyles ACCEPTS: ['miter' | 'round' | 'bevel'] """ s = s.lower() if s not in self.validJoin: raise ValueError('set_solid_joinstyle passed "%s";\n' % (s,) + 'valid joinstyles are %s' % (self.validJoin,)) self._solidjoinstyle = s def get_dash_joinstyle(self): """ Get the join style for dashed linestyles """ return self._dashjoinstyle def get_solid_joinstyle(self): """ Get the join style for solid linestyles """ return self._solidjoinstyle def set_dash_capstyle(self, s): """ Set the cap style for dashed linestyles ACCEPTS: ['butt' | 'round' | 'projecting'] """ s = s.lower() if s not in self.validCap: raise ValueError('set_dash_capstyle passed "%s";\n' % (s,) + 'valid capstyles are %s' % (self.validCap,)) self._dashcapstyle = s def set_solid_capstyle(self, s): """ Set the cap style for solid linestyles ACCEPTS: ['butt' | 'round' | 'projecting'] """ s = s.lower() if s not in self.validCap: raise ValueError('set_solid_capstyle passed "%s";\n' % (s,) + 'valid capstyles are %s' % (self.validCap,)) self._solidcapstyle = s def get_dash_capstyle(self): """ Get the cap style for dashed linestyles """ return self._dashcapstyle def get_solid_capstyle(self): """ Get the cap style for solid linestyles """ return self._solidcapstyle def is_dashed(self): 'return True if line is dashstyle' return self._linestyle in ('--', '-.', ':') class VertexSelector: """ Manage the callbacks to maintain a list of selected vertices for :class:`matplotlib.lines.Line2D`. Derived classes should override :meth:`~matplotlib.lines.VertexSelector.process_selected` to do something with the picks. 
Here is an example which highlights the selected verts with red circles:: import numpy as np import matplotlib.pyplot as plt import matplotlib.lines as lines class HighlightSelected(lines.VertexSelector): def __init__(self, line, fmt='ro', **kwargs): lines.VertexSelector.__init__(self, line) self.markers, = self.axes.plot([], [], fmt, **kwargs) def process_selected(self, ind, xs, ys): self.markers.set_data(xs, ys) self.canvas.draw() fig = plt.figure() ax = fig.add_subplot(111) x, y = np.random.rand(2, 30) line, = ax.plot(x, y, 'bs-', picker=5) selector = HighlightSelected(line) plt.show() """ def __init__(self, line): """ Initialize the class with a :class:`matplotlib.lines.Line2D` instance. The line should already be added to some :class:`matplotlib.axes.Axes` instance and should have the picker property set. """ if not hasattr(line, 'axes'): raise RuntimeError('You must first add the line to the Axes') if line.get_picker() is None: raise RuntimeError('You must first set the picker property of the line') self.axes = line.axes self.line = line self.canvas = self.axes.figure.canvas self.cid = self.canvas.mpl_connect('pick_event', self.onpick) self.ind = set() def process_selected(self, ind, xs, ys): """ Default "do nothing" implementation of the :meth:`process_selected` method. *ind* are the indices of the selected vertices. *xs* and *ys* are the coordinates of the selected vertices. """ pass def onpick(self, event): 'When the line is picked, update the set of selected indicies.' if event.artist is not self.line: return for i in event.ind: if i in self.ind: self.ind.remove(i) else: self.ind.add(i) ind = list(self.ind) ind.sort() xdata, ydata = self.line.get_data() self.process_selected(ind, xdata[ind], ydata[ind]) lineStyles = Line2D._lineStyles lineMarkers = Line2D._markers drawStyles = Line2D.drawStyles artist.kwdocd['Line2D'] = artist.kwdoc(Line2D) # You can not set the docstring of an instancemethod, # but you can on the underlying function. Go figure. Line2D.__init__.im_func.__doc__ = dedent(Line2D.__init__.__doc__) % artist.kwdocd
agpl-3.0
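segment_hits() in the Line2D code above decides which polyline segments lie within the pick radius of a mouse click by projecting the click onto each segment. Below is a standalone toy version of that geometric test; it simply clamps the projection onto the segment instead of handling the endpoint special cases the original does.

import numpy as np

def segments_near_point(cx, cy, x, y, radius):
    x, y = np.asarray(x, float), np.asarray(y, float)
    dx, dy = x[1:] - x[:-1], y[1:] - y[:-1]
    norm_sq = dx**2 + dy**2                     # assumes no zero-length segments
    # projection parameter of the click onto each segment, clamped into [0, 1]
    u = ((cx - x[:-1]) * dx + (cy - y[:-1]) * dy) / norm_sq
    u = np.clip(u, 0.0, 1.0)
    px, py = x[:-1] + u * dx, y[:-1] + u * dy   # nearest points on the segments
    hit = (cx - px)**2 + (cy - py)**2 <= radius**2
    return np.nonzero(hit)[0]

print(segments_near_point(0.5, 0.1, [0, 1, 2], [0, 0, 1], radius=0.2))   # -> [0]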
arrow-/simQuad
ground_station/gyro_scope.py
2
5471
''' -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= IMPORTANT!! It is suggested you run this script with mpu_level2.ino first to see and understand its operation. Basically this script EXPECTS: Arduino is providing space separated gyro readings @ ~5ms intervals (via MPU Interrupt). * Each serial packet must be ASCII and look like: [x_gyro]<space>[y_gyro]<space>[z_gyro]<newline> + You need to specify correct Serial port + You need to set the Y-limits of the plot axis. + You need to use correct value of "dt". + You need to set the correct conversion factor for Gyro readings. Mode 0 1 2 3 Range +-250 +-500 +-1000 +-2000 Conv. 131 65.5 32.75 16.375 AND it DELIVERS: * 3 axis loss-less Gyro readings plot (almost real time). * 3D visualisation of current orientation based on gyro vals If you want to just plot data in ~real time use {oscilloscope.py}. -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= ''' import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D import numpy as np import serial, time def rotate(v, axis, theta): ''' Rotates "v" vector about "axis" vector by "theta" radians, returns vector ''' c = np.cos(theta) s = np.sin(theta) t = 1-c mat = np.array([ [c+axis[0]*axis[0]*t, axis[0]*axis[1]*t-axis[2]*s, axis[0]*axis[2]*t+axis[1]*s], [axis[0]*axis[1]*t+axis[2]*s, c+axis[1]*axis[1]*t, axis[1]*axis[2]*t-axis[0]*s], [axis[0]*axis[2]*t-axis[1]*s, axis[1]*axis[2]*t+axis[0]*s, c+axis[2]*axis[2]*t] ]) return mat.dot(v.T) def calcPose(omega): ''' Helper function. Finds the "d"-theta, then calls rotate. Omega must be in ** degrees per second ** ''' theta = omega*dt*np.pi/180 #theta is "d-theta" in radians rpy[1] = rotate(rpy[1], rpy[0], theta[0]) rpy[0] = rotate(rpy[0], rpy[1], theta[1]) rpy[2] = np.cross(rpy[0], rpy[1]) rpy[1] = rotate(rpy[1], rpy[2], theta[2]) rpy[0] = rotate(rpy[0], rpy[2], theta[2]) plt.ion() # SET CORRECT PORT NUM HERE arduino = serial.Serial('/dev/ttyACM0', 57600) # dt is found experimentally. Contact Ananya for details. Basically this the time between # 2 MPU(gyro) interrupts. The np.pi/180 converts deg/sec to rad/sec. # SET CORRECT dt HERE. TIME IN SECONDS BETWEEN TWO SENSOR PACKETS AS RECVD. BY ARDUINO. dt = .005 # 5msec # rpy is original orientation. These vectors are updated by calcPose() rpy = np.eye(3) fig = plt.figure(figsize=(16,6)) axes = fig.add_subplot(121) a3d = fig.add_subplot(122, projection='3d') a3d.set_xlim(-1.2,1.2) a3d.set_ylim(-1.2,1.2) a3d.set_zlim(-1.2,1.2) a3d.scatter([0], [0], [0], s=40) r, = a3d.plot([0,1], [0,0], [0,0], lw=2, c='black') p, = a3d.plot([0,0], [0,1], [0,0], lw=2, c='red') a3d.plot([0,2], [0,0], [0,0], c='cyan') a3d.plot([0,0], [0,2], [0,0], c='brown') a3d.plot([0,0], [0,0], [0,2], c='green') a3d.plot([0,-2], [0,0], [0,0], ls='--', c='cyan') a3d.plot([0,0], [0,-2], [0,0], ls='--', c='brown') a3d.plot([0,0], [0,0], [0,-2], ls='--', c='green') num_samples = 0 buff = 0 # "buff" counts till 50. Every time it reaches fifty, plt.draw() is called, since # plt.draw() is a costly operation. Normal list append and pose calculations are fast. # So, do those diligently, for every sample, but update display # rarely (while ensuring smooth animation). gyro_x = [0] gyro_y = [0] # gyro data lists. I use them like queues. gyro_z = [0] t = [0] # scopes is a list of 3 matplotlib.Line_2D objects. 
scopes = [axes.plot(t, gyro_x, label=r'$\omega_x$')[0], axes.plot(t, gyro_y, label=r'$\omega_y$')[0], axes.plot(t, gyro_z, label=r'$\omega_z$')[0]] axes.legend(prop=dict(size=14)) plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=3, mode="expand", borderaxespad=0.) axes.set_ylim(-505, 505) # SET CORRECT Y-LIM HERE conversion = 65.5 #Gyro 500 SET CORRECT CONV FACTOR HERE # Refer datasheet. Convert ADC result into a Physical measurement. # If you don't understand this, pls. leave project. print 'Me Ready' time.sleep(2) #Handshake MAY BE REDUNDANT print arduino.inWaiting() arduino.flushInput() arduino.write('e') print 'Sent Request...' data = [0]*6 while True: try: num = arduino.read(12) num = [ord(x) for x in num] except: print 'Serial error!' raise RuntimeError _ind=0 #this var is connected to for loop below!! for i in range(0,12, 2): data[_ind] = (num[i]<<8)|num[i+1] if data[_ind] & 0x8000: data[_ind] = data[_ind] - 0x10000 _ind += 1 #print data[3:] datas = np.array([float(data[3])/conversion, float(data[4])/conversion, float(data[5])/conversion]) gyro_x.append(datas[0]) gyro_y.append(datas[1]) gyro_z.append(datas[2]) num_samples += 1 t.append(num_samples) calcPose(datas) #This function updates the global variable: "rpy" if num_samples>200: del t[0] del gyro_x[0] del gyro_y[0] del gyro_z[0] axes.set_xlim(t[0], num_samples) scopes[0].set_data(t, gyro_x) scopes[1].set_data(t, gyro_y) scopes[2].set_data(t, gyro_z) # pose matrix is just an easier way of giving input to the .set_data() # and .set_3d_properties() methods. You see, line needs 2 (end) points: # the rpy entries AND THE ORIGIN. pose matrix does just that: specifies # BOTH end points. pose = np.array([np.array([np.zeros(3), rpy[0]]).T, np.array([np.zeros(3), rpy[1]]).T, np.array([np.zeros(3), rpy[2]]).T]) r.set_data(pose[0][:2]) r.set_3d_properties(pose[0][2]) p.set_data(pose[1][:2]) p.set_3d_properties(pose[1][2]) if buff>25: buff=0 plt.draw() buff += 1 plt.ioff() plt.show()
gpl-2.0
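rotate() in gyro_scope.py above builds the axis-angle (Rodrigues) rotation matrix explicitly. A quick sanity check of that construction, independent of the serial-port code: rotating the x unit vector about the z axis by 90 degrees must give the y axis.

import numpy as np

def rotation_matrix(axis, theta):
    axis = np.asarray(axis, float)
    axis = axis / np.linalg.norm(axis)           # rotate() above assumes a unit axis
    c, s = np.cos(theta), np.sin(theta)
    t = 1.0 - c
    x, y, z = axis
    return np.array([[c + x*x*t,   x*y*t - z*s, x*z*t + y*s],
                     [x*y*t + z*s, c + y*y*t,   y*z*t - x*s],
                     [x*z*t - y*s, y*z*t + x*s, c + z*z*t]])

print(rotation_matrix([0, 0, 1], np.pi/2).dot([1.0, 0.0, 0.0]))   # ~ [0, 1, 0]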
pulinagrawal/nupic
external/linux32/lib/python2.6/site-packages/matplotlib/projections/__init__.py
69
2179
from geo import AitoffAxes, HammerAxes, LambertAxes from polar import PolarAxes from matplotlib import axes class ProjectionRegistry(object): """ Manages the set of projections available to the system. """ def __init__(self): self._all_projection_types = {} def register(self, *projections): """ Register a new set of projection(s). """ for projection in projections: name = projection.name self._all_projection_types[name] = projection def get_projection_class(self, name): """ Get a projection class from its *name*. """ return self._all_projection_types[name] def get_projection_names(self): """ Get a list of the names of all projections currently registered. """ names = self._all_projection_types.keys() names.sort() return names projection_registry = ProjectionRegistry() projection_registry.register( axes.Axes, PolarAxes, AitoffAxes, HammerAxes, LambertAxes) def register_projection(cls): projection_registry.register(cls) def get_projection_class(projection=None): """ Get a projection class from its name. If *projection* is None, a standard rectilinear projection is returned. """ if projection is None: projection = 'rectilinear' try: return projection_registry.get_projection_class(projection) except KeyError: raise ValueError("Unknown projection '%s'" % projection) def projection_factory(projection, figure, rect, **kwargs): """ Get a new projection instance. *projection* is a projection name. *figure* is a figure to add the axes to. *rect* is a :class:`~matplotlib.transforms.Bbox` object specifying the location of the axes within the figure. Any other kwargs are passed along to the specific projection constructor being used. """ return get_projection_class(projection)(figure, rect, **kwargs) def get_projection_names(): """ Get a list of acceptable projection names. """ return projection_registry.get_projection_names()
agpl-3.0
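The ProjectionRegistry above is a plain name-to-class registry: projection classes are registered under their `name` attribute and later looked up by string. A minimal standalone sketch of the same pattern follows; the stand-in class is hypothetical, whereas the real registry stores matplotlib Axes subclasses.

class Registry(object):
    def __init__(self):
        self._classes = {}

    def register(self, *classes):
        for cls in classes:                    # keyed by the class's `name` attribute
            self._classes[cls.name] = cls

    def get(self, name):
        try:
            return self._classes[name]
        except KeyError:
            raise ValueError("Unknown projection '%s'" % name)

class RectilinearAxes(object):                 # hypothetical stand-in class
    name = 'rectilinear'

registry = Registry()
registry.register(RectilinearAxes)
print(registry.get('rectilinear'))             # -> the RectilinearAxes class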
yarikoptic/pystatsmodels
statsmodels/graphics/tests/test_functional.py
3
2815
import numpy as np from numpy.testing import dec, assert_equal, assert_almost_equal from statsmodels.graphics.functional import \ banddepth, fboxplot, rainbowplot try: import matplotlib.pyplot as plt import matplotlib if matplotlib.__version__ < '1': raise have_matplotlib = True except: have_matplotlib = False def test_banddepth_BD2(): xx = np.arange(500) / 150. y1 = 1 + 0.5 * np.sin(xx) y2 = 0.3 + np.sin(xx + np.pi/6) y3 = -0.5 + np.sin(xx + np.pi/6) y4 = -1 + 0.3 * np.cos(xx + np.pi/6) data = np.asarray([y1, y2, y3, y4]) depth = banddepth(data, method='BD2') expected_depth = [0.5, 5./6, 5./6, 0.5] assert_almost_equal(depth, expected_depth) ## Plot to visualize why we expect this output #fig = plt.figure() #ax = fig.add_subplot(111) #for ii, yy in enumerate([y1, y2, y3, y4]): # ax.plot(xx, yy, label="y%s" % ii) #ax.legend() #plt.show() def test_banddepth_MBD(): xx = np.arange(5001) / 5000. y1 = np.zeros(xx.shape) y2 = 2 * xx - 1 y3 = np.ones(xx.shape) * 0.5 y4 = np.ones(xx.shape) * -0.25 data = np.asarray([y1, y2, y3, y4]) depth = banddepth(data, method='MBD') expected_depth = [5./6, (2*(0.75-3./8)+3)/6, 3.5/6, (2*3./8+3)/6] assert_almost_equal(depth, expected_depth, decimal=4) @dec.skipif(not have_matplotlib) def test_fboxplot_rainbowplot(): """Test fboxplot and rainbowplot together, is much faster.""" def harmfunc(t): """Test function, combination of a few harmonic terms.""" # Constant, 0 with p=0.9, 1 with p=1 - for creating outliers ci = int(np.random.random() > 0.9) a1i = np.random.random() * 0.05 a2i = np.random.random() * 0.05 b1i = (0.15 - 0.1) * np.random.random() + 0.1 b2i = (0.15 - 0.1) * np.random.random() + 0.1 func = (1 - ci) * (a1i * np.sin(t) + a2i * np.cos(t)) + \ ci * (b1i * np.sin(t) + b2i * np.cos(t)) return func np.random.seed(1234567) # Some basic test data, Model 6 from Sun and Genton. t = np.linspace(0, 2 * np.pi, 250) data = [] for ii in range(20): data.append(harmfunc(t)) # fboxplot test fig = plt.figure() ax = fig.add_subplot(111) _, depth, ix_depth, ix_outliers = fboxplot(data, wfactor=2, ax=ax) ix_expected = np.array([13, 4, 15, 19, 8, 6, 3, 16, 9, 7, 1, 5, 2, 12, 17, 11, 14, 10, 0, 18]) assert_equal(ix_depth, ix_expected) ix_expected2 = np.array([2, 11, 17, 18]) assert_equal(ix_outliers, ix_expected2) plt.close(fig) # rainbowplot test (re-uses depth variable) xdata = np.arange(data[0].size) fig = rainbowplot(data, xdata=xdata, depth=depth, cmap=plt.cm.rainbow) plt.close(fig)
bsd-3-clause
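test_banddepth_BD2 above checks specific depth values. BD2 band depth scores each curve by the fraction of curve pairs whose pointwise envelope contains it at every x (pairs that include the curve itself count trivially). The sketch below may differ in detail from statsmodels' implementation, but it reproduces the expected [0.5, 5/6, 5/6, 0.5] on the test data.

import numpy as np
from itertools import combinations

def band_depth_bd2(curves):
    curves = np.asarray(curves, float)
    pairs = list(combinations(range(len(curves)), 2))
    depth = []
    for yi in curves:
        inside = 0
        for j, k in pairs:
            lower = np.minimum(curves[j], curves[k])   # pointwise band envelope
            upper = np.maximum(curves[j], curves[k])
            if np.all((lower <= yi) & (yi <= upper)):
                inside += 1
        depth.append(inside / float(len(pairs)))
    return np.array(depth)

xx = np.arange(500) / 150.
data = [1 + 0.5*np.sin(xx), 0.3 + np.sin(xx + np.pi/6),
        -0.5 + np.sin(xx + np.pi/6), -1 + 0.3*np.cos(xx + np.pi/6)]
print(band_depth_bd2(data))          # ~ [0.5, 0.8333, 0.8333, 0.5]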
HyperloopTeam/FullOpenMDAO
lib/python2.7/site-packages/matplotlib/figure.py
10
58719
""" The figure module provides the top-level :class:`~matplotlib.artist.Artist`, the :class:`Figure`, which contains all the plot elements. The following classes are defined :class:`SubplotParams` control the default spacing of the subplots :class:`Figure` top level container for all plot elements """ from __future__ import (absolute_import, division, print_function, unicode_literals) import six import warnings from operator import itemgetter import numpy as np from matplotlib import rcParams from matplotlib import docstring from matplotlib import __version__ as _mpl_version import matplotlib.artist as martist from matplotlib.artist import Artist, allow_rasterization import matplotlib.cbook as cbook from matplotlib.cbook import Stack, iterable from matplotlib import _image from matplotlib.image import FigureImage import matplotlib.colorbar as cbar from matplotlib.axes import Axes, SubplotBase, subplot_class_factory from matplotlib.blocking_input import BlockingMouseInput, BlockingKeyMouseInput from matplotlib.legend import Legend from matplotlib.patches import Rectangle from matplotlib.projections import (get_projection_names, process_projection_requirements) from matplotlib.text import Text, _process_text_args from matplotlib.transforms import (Affine2D, Bbox, BboxTransformTo, TransformedBbox) from matplotlib.backend_bases import NonGuiException docstring.interpd.update(projection_names=get_projection_names()) class AxesStack(Stack): """ Specialization of the Stack to handle all tracking of Axes in a Figure. This stack stores ``key, (ind, axes)`` pairs, where: * **key** should be a hash of the args and kwargs used in generating the Axes. * **ind** is a serial number for tracking the order in which axes were added. The AxesStack is a callable, where ``ax_stack()`` returns the current axes. Alternatively the :meth:`current_key_axes` will return the current key and associated axes. """ def __init__(self): Stack.__init__(self) self._ind = 0 def as_list(self): """ Return a list of the Axes instances that have been added to the figure """ ia_list = [a for k, a in self._elements] ia_list.sort() return [a for i, a in ia_list] def get(self, key): """ Return the Axes instance that was added with *key*. If it is not present, return None. """ item = dict(self._elements).get(key) if item is None: return None return item[1] def _entry_from_axes(self, e): ind, k = dict([(a, (ind, k)) for (k, (ind, a)) in self._elements])[e] return (k, (ind, e)) def remove(self, a): """Remove the axes from the stack.""" Stack.remove(self, self._entry_from_axes(a)) def bubble(self, a): """ Move the given axes, which must already exist in the stack, to the top. """ return Stack.bubble(self, self._entry_from_axes(a)) def add(self, key, a): """ Add Axes *a*, with key *key*, to the stack, and return the stack. If *a* is already on the stack, don't add it again, but return *None*. """ # All the error checking may be unnecessary; but this method # is called so seldom that the overhead is negligible. if not isinstance(a, Axes): raise ValueError("second argument, %s, is not an Axes" % a) try: hash(key) except TypeError: raise ValueError("first argument, %s, is not a valid key" % key) a_existing = self.get(key) if a_existing is not None: Stack.remove(self, (key, a_existing)) warnings.warn( "key %s already existed; Axes is being replaced" % key) # I don't think the above should ever happen. 
if a in self: return None self._ind += 1 return Stack.push(self, (key, (self._ind, a))) def current_key_axes(self): """ Return a tuple of ``(key, axes)`` for the active axes. If no axes exists on the stack, then returns ``(None, None)``. """ if not len(self._elements): return self._default, self._default else: key, (index, axes) = self._elements[self._pos] return key, axes def __call__(self): return self.current_key_axes()[1] def __contains__(self, a): return a in self.as_list() class SubplotParams: """ A class to hold the parameters for a subplot """ def __init__(self, left=None, bottom=None, right=None, top=None, wspace=None, hspace=None): """ All dimensions are fraction of the figure width or height. All values default to their rc params The following attributes are available *left* : 0.125 The left side of the subplots of the figure *right* : 0.9 The right side of the subplots of the figure *bottom* : 0.1 The bottom of the subplots of the figure *top* : 0.9 The top of the subplots of the figure *wspace* : 0.2 The amount of width reserved for blank space between subplots *hspace* : 0.2 The amount of height reserved for white space between subplots """ self.validate = True self.update(left, bottom, right, top, wspace, hspace) def update(self, left=None, bottom=None, right=None, top=None, wspace=None, hspace=None): """ Update the current values. If any kwarg is None, default to the current value, if set, otherwise to rc """ thisleft = getattr(self, 'left', None) thisright = getattr(self, 'right', None) thistop = getattr(self, 'top', None) thisbottom = getattr(self, 'bottom', None) thiswspace = getattr(self, 'wspace', None) thishspace = getattr(self, 'hspace', None) self._update_this('left', left) self._update_this('right', right) self._update_this('bottom', bottom) self._update_this('top', top) self._update_this('wspace', wspace) self._update_this('hspace', hspace) def reset(): self.left = thisleft self.right = thisright self.top = thistop self.bottom = thisbottom self.wspace = thiswspace self.hspace = thishspace if self.validate: if self.left >= self.right: reset() raise ValueError('left cannot be >= right') if self.bottom >= self.top: reset() raise ValueError('bottom cannot be >= top') def _update_this(self, s, val): if val is None: val = getattr(self, s, None) if val is None: key = 'figure.subplot.' + s val = rcParams[key] setattr(self, s, val) class Figure(Artist): """ The Figure instance supports callbacks through a *callbacks* attribute which is a :class:`matplotlib.cbook.CallbackRegistry` instance. The events you can connect to are 'dpi_changed', and the callback will be called with ``func(fig)`` where fig is the :class:`Figure` instance. *patch* The figure patch is drawn by a :class:`matplotlib.patches.Rectangle` instance *suppressComposite* For multiple figure images, the figure will make composite images depending on the renderer option_image_nocomposite function. If suppressComposite is True|False, this will override the renderer. 
""" def __str__(self): return "Figure(%gx%g)" % tuple(self.bbox.size) def __init__(self, figsize=None, # defaults to rc figure.figsize dpi=None, # defaults to rc figure.dpi facecolor=None, # defaults to rc figure.facecolor edgecolor=None, # defaults to rc figure.edgecolor linewidth=0.0, # the default linewidth of the frame frameon=None, # whether or not to draw the figure frame subplotpars=None, # default to rc tight_layout=None, # default to rc figure.autolayout ): """ *figsize* w,h tuple in inches *dpi* Dots per inch *facecolor* The figure patch facecolor; defaults to rc ``figure.facecolor`` *edgecolor* The figure patch edge color; defaults to rc ``figure.edgecolor`` *linewidth* The figure patch edge linewidth; the default linewidth of the frame *frameon* If *False*, suppress drawing the figure frame *subplotpars* A :class:`SubplotParams` instance, defaults to rc *tight_layout* If *False* use *subplotpars*; if *True* adjust subplot parameters using :meth:`tight_layout` with default padding. When providing a dict containing the keys `pad`, `w_pad`, `h_pad` and `rect`, the default :meth:`tight_layout` paddings will be overridden. Defaults to rc ``figure.autolayout``. """ Artist.__init__(self) self.callbacks = cbook.CallbackRegistry() if figsize is None: figsize = rcParams['figure.figsize'] if dpi is None: dpi = rcParams['figure.dpi'] if facecolor is None: facecolor = rcParams['figure.facecolor'] if edgecolor is None: edgecolor = rcParams['figure.edgecolor'] if frameon is None: frameon = rcParams['figure.frameon'] self.dpi_scale_trans = Affine2D() self.dpi = dpi self.bbox_inches = Bbox.from_bounds(0, 0, *figsize) self.bbox = TransformedBbox(self.bbox_inches, self.dpi_scale_trans) self.frameon = frameon self.transFigure = BboxTransformTo(self.bbox) # the figurePatch name is deprecated self.patch = self.figurePatch = Rectangle( xy=(0, 0), width=1, height=1, facecolor=facecolor, edgecolor=edgecolor, linewidth=linewidth) self._set_artist_props(self.patch) self.patch.set_aa(False) self._hold = rcParams['axes.hold'] self.canvas = None self._suptitle = None if subplotpars is None: subplotpars = SubplotParams() self.subplotpars = subplotpars self.set_tight_layout(tight_layout) self._axstack = AxesStack() # track all figure axes and current axes self.clf() self._cachedRenderer = None # TODO: I'd like to dynamically add the _repr_html_ method # to the figure in the right context, but then IPython doesn't # use it, for some reason. def _repr_html_(self): # We can't use "isinstance" here, because then we'd end up importing # webagg unconditiionally. if (self.canvas is not None and 'WebAgg' in self.canvas.__class__.__name__): from matplotlib.backends import backend_webagg return backend_webagg.ipython_inline_display(self) def show(self, warn=True): """ If using a GUI backend with pyplot, display the figure window. If the figure was not created using :func:`~matplotlib.pyplot.figure`, it will lack a :class:`~matplotlib.backend_bases.FigureManagerBase`, and will raise an AttributeError. For non-GUI backends, this does nothing, in which case a warning will be issued if *warn* is True (default). """ try: manager = getattr(self.canvas, 'manager') except AttributeError as err: raise AttributeError("%s\n" "Figure.show works only " "for figures managed by pyplot, normally " "created by pyplot.figure()." 
% err) if manager is not None: try: manager.show() return except NonGuiException: pass if warn: import warnings warnings.warn( "matplotlib is currently using a non-GUI backend, " "so cannot show the figure") def _get_axes(self): return self._axstack.as_list() axes = property(fget=_get_axes, doc="Read-only: list of axes in Figure") def _get_dpi(self): return self._dpi def _set_dpi(self, dpi): self._dpi = dpi self.dpi_scale_trans.clear().scale(dpi, dpi) self.callbacks.process('dpi_changed', self) dpi = property(_get_dpi, _set_dpi) def get_tight_layout(self): """ Return the Boolean flag, True to use :meth`tight_layout` when drawing. """ return self._tight def set_tight_layout(self, tight): """ Set whether :meth:`tight_layout` is used upon drawing. If None, the rcParams['figure.autolayout'] value will be set. When providing a dict containing the keys `pad`, `w_pad`, `h_pad` and `rect`, the default :meth:`tight_layout` paddings will be overridden. ACCEPTS: [True | False | dict | None ] """ if tight is None: tight = rcParams['figure.autolayout'] self._tight = bool(tight) self._tight_parameters = tight if isinstance(tight, dict) else {} def autofmt_xdate(self, bottom=0.2, rotation=30, ha='right'): """ Date ticklabels often overlap, so it is useful to rotate them and right align them. Also, a common use case is a number of subplots with shared xaxes where the x-axis is date data. The ticklabels are often long, and it helps to rotate them on the bottom subplot and turn them off on other subplots, as well as turn off xlabels. *bottom* The bottom of the subplots for :meth:`subplots_adjust` *rotation* The rotation of the xtick labels *ha* The horizontal alignment of the xticklabels """ allsubplots = np.alltrue([hasattr(ax, 'is_last_row') for ax in self.axes]) if len(self.axes) == 1: for label in self.axes[0].get_xticklabels(): label.set_ha(ha) label.set_rotation(rotation) else: if allsubplots: for ax in self.get_axes(): if ax.is_last_row(): for label in ax.get_xticklabels(): label.set_ha(ha) label.set_rotation(rotation) else: for label in ax.get_xticklabels(): label.set_visible(False) ax.set_xlabel('') if allsubplots: self.subplots_adjust(bottom=bottom) def get_children(self): 'get a list of artists contained in the figure' children = [self.patch] children.extend(self.artists) children.extend(self.axes) children.extend(self.lines) children.extend(self.patches) children.extend(self.texts) children.extend(self.images) children.extend(self.legends) return children def contains(self, mouseevent): """ Test whether the mouse event occurred on the figure. Returns True,{} """ if six.callable(self._contains): return self._contains(self, mouseevent) # inside = mouseevent.x >= 0 and mouseevent.y >= 0 inside = self.bbox.contains(mouseevent.x, mouseevent.y) return inside, {} def get_window_extent(self, *args, **kwargs): 'get the figure bounding box in display space; kwargs are void' return self.bbox def suptitle(self, t, **kwargs): """ Add a centered title to the figure. kwargs are :class:`matplotlib.text.Text` properties. Using figure coordinates, the defaults are: *x* : 0.5 The x location of the text in figure coords *y* : 0.98 The y location of the text in figure coords *horizontalalignment* : 'center' The horizontal alignment of the text *verticalalignment* : 'top' The vertical alignment of the text A :class:`matplotlib.text.Text` instance is returned. 
Example:: fig.suptitle('this is the figure title', fontsize=12) """ x = kwargs.pop('x', 0.5) y = kwargs.pop('y', 0.98) if ('horizontalalignment' not in kwargs) and ('ha' not in kwargs): kwargs['horizontalalignment'] = 'center' if ('verticalalignment' not in kwargs) and ('va' not in kwargs): kwargs['verticalalignment'] = 'top' sup = self.text(x, y, t, **kwargs) if self._suptitle is not None: self._suptitle.set_text(t) self._suptitle.set_position((x, y)) self._suptitle.update_from(sup) sup.remove() else: self._suptitle = sup return self._suptitle def set_canvas(self, canvas): """ Set the canvas the contains the figure ACCEPTS: a FigureCanvas instance """ self.canvas = canvas def hold(self, b=None): """ Set the hold state. If hold is None (default), toggle the hold state. Else set the hold state to boolean value b. e.g.:: hold() # toggle hold hold(True) # hold is on hold(False) # hold is off """ if b is None: self._hold = not self._hold else: self._hold = b def figimage(self, X, xo=0, yo=0, alpha=None, norm=None, cmap=None, vmin=None, vmax=None, origin=None, **kwargs): """ Adds a non-resampled image to the figure. call signatures:: figimage(X, **kwargs) adds a non-resampled array *X* to the figure. :: figimage(X, xo, yo) with pixel offsets *xo*, *yo*, *X* must be a float array: * If *X* is MxN, assume luminance (grayscale) * If *X* is MxNx3, assume RGB * If *X* is MxNx4, assume RGBA Optional keyword arguments: ========= ========================================================= Keyword Description ========= ========================================================= xo or yo An integer, the *x* and *y* image offset in pixels cmap a :class:`matplotlib.colors.Colormap` instance, e.g., cm.jet. If *None*, default to the rc ``image.cmap`` value norm a :class:`matplotlib.colors.Normalize` instance. The default is normalization(). This scales luminance -> 0-1 vmin|vmax are used to scale a luminance image to 0-1. If either is *None*, the min and max of the luminance values will be used. Note if you pass a norm instance, the settings for *vmin* and *vmax* will be ignored. alpha the alpha blending value, default is *None* origin [ 'upper' | 'lower' ] Indicates where the [0,0] index of the array is in the upper left or lower left corner of the axes. Defaults to the rc image.origin value ========= ========================================================= figimage complements the axes image (:meth:`~matplotlib.axes.Axes.imshow`) which will be resampled to fit the current axes. If you want a resampled image to fill the entire figure, you can define an :class:`~matplotlib.axes.Axes` with size [0,1,0,1]. An :class:`matplotlib.image.FigureImage` instance is returned. .. 
plot:: mpl_examples/pylab_examples/figimage_demo.py Additional kwargs are Artist kwargs passed on to :class:`~matplotlib.image.FigureImage` """ if not self._hold: self.clf() im = FigureImage(self, cmap, norm, xo, yo, origin, **kwargs) im.set_array(X) im.set_alpha(alpha) if norm is None: im.set_clim(vmin, vmax) self.images.append(im) im._remove_method = lambda h: self.images.remove(h) return im def set_size_inches(self, *args, **kwargs): """ set_size_inches(w,h, forward=False) Set the figure size in inches (1in == 2.54cm) Usage:: fig.set_size_inches(w,h) # OR fig.set_size_inches((w,h) ) optional kwarg *forward=True* will cause the canvas size to be automatically updated; e.g., you can resize the figure window from the shell ACCEPTS: a w,h tuple with w,h in inches See Also -------- matplotlib.Figure.get_size_inches """ forward = kwargs.get('forward', False) if len(args) == 1: w, h = args[0] else: w, h = args dpival = self.dpi self.bbox_inches.p1 = w, h if forward: dpival = self.dpi canvasw = w * dpival canvash = h * dpival manager = getattr(self.canvas, 'manager', None) if manager is not None: manager.resize(int(canvasw), int(canvash)) def get_size_inches(self): """ Returns the current size of the figure in inches (1in == 2.54cm) as an numpy array. Returns ------- size : ndarray The size of the figure in inches See Also -------- matplotlib.Figure.set_size_inches """ return np.array(self.bbox_inches.p1) def get_edgecolor(self): 'Get the edge color of the Figure rectangle' return self.patch.get_edgecolor() def get_facecolor(self): 'Get the face color of the Figure rectangle' return self.patch.get_facecolor() def get_figwidth(self): 'Return the figwidth as a float' return self.bbox_inches.width def get_figheight(self): 'Return the figheight as a float' return self.bbox_inches.height def get_dpi(self): 'Return the dpi as a float' return self.dpi def get_frameon(self): 'get the boolean indicating frameon' return self.frameon def set_edgecolor(self, color): """ Set the edge color of the Figure rectangle ACCEPTS: any matplotlib color - see help(colors) """ self.patch.set_edgecolor(color) def set_facecolor(self, color): """ Set the face color of the Figure rectangle ACCEPTS: any matplotlib color - see help(colors) """ self.patch.set_facecolor(color) def set_dpi(self, val): """ Set the dots-per-inch of the figure ACCEPTS: float """ self.dpi = val def set_figwidth(self, val): """ Set the width of the figure in inches ACCEPTS: float """ self.bbox_inches.x1 = val def set_figheight(self, val): """ Set the height of the figure in inches ACCEPTS: float """ self.bbox_inches.y1 = val def set_frameon(self, b): """ Set whether the figure frame (background) is displayed or invisible ACCEPTS: boolean """ self.frameon = b def delaxes(self, a): 'remove a from the figure and update the current axes' self._axstack.remove(a) for func in self._axobservers: func(self) def _make_key(self, *args, **kwargs): 'make a hashable key out of args and kwargs' def fixitems(items): #items may have arrays and lists in them, so convert them # to tuples for the key ret = [] for k, v in items: # some objects can define __getitem__ without being # iterable and in those cases the conversion to tuples # will fail. So instead of using the iterable(v) function # we simply try and convert to a tuple, and proceed if not. 
try: v = tuple(v) except Exception: pass ret.append((k, v)) return tuple(ret) def fixlist(args): ret = [] for a in args: if iterable(a): a = tuple(a) ret.append(a) return tuple(ret) key = fixlist(args), fixitems(six.iteritems(kwargs)) return key @docstring.dedent_interpd def add_axes(self, *args, **kwargs): """ Add an axes at position *rect* [*left*, *bottom*, *width*, *height*] where all quantities are in fractions of figure width and height. kwargs are legal :class:`~matplotlib.axes.Axes` kwargs plus *projection* which sets the projection type of the axes. (For backward compatibility, ``polar=True`` may also be provided, which is equivalent to ``projection='polar'``). Valid values for *projection* are: %(projection_names)s. Some of these projections support additional kwargs, which may be provided to :meth:`add_axes`. Typical usage:: rect = l,b,w,h fig.add_axes(rect) fig.add_axes(rect, frameon=False, axisbg='g') fig.add_axes(rect, polar=True) fig.add_axes(rect, projection='polar') fig.add_axes(ax) If the figure already has an axes with the same parameters, then it will simply make that axes current and return it. If you do not want this behavior, e.g., you want to force the creation of a new Axes, you must use a unique set of args and kwargs. The axes :attr:`~matplotlib.axes.Axes.label` attribute has been exposed for this purpose. e.g., if you want two axes that are otherwise identical to be added to the figure, make sure you give them unique labels:: fig.add_axes(rect, label='axes1') fig.add_axes(rect, label='axes2') In rare circumstances, add_axes may be called with a single argument, an Axes instance already created in the present figure but not in the figure's list of axes. For example, if an axes has been removed with :meth:`delaxes`, it can be restored with:: fig.add_axes(ax) In all cases, the :class:`~matplotlib.axes.Axes` instance will be returned. In addition to *projection*, the following kwargs are supported: %(Axes)s """ if not len(args): return # shortcut the projection "key" modifications later on, if an axes # with the exact args/kwargs exists, return it immediately. key = self._make_key(*args, **kwargs) ax = self._axstack.get(key) if ax is not None: self.sca(ax) return ax if isinstance(args[0], Axes): a = args[0] assert(a.get_figure() is self) else: rect = args[0] projection_class, kwargs, key = process_projection_requirements( self, *args, **kwargs) # check that an axes of this type doesn't already exist, if it # does, set it as active and return it ax = self._axstack.get(key) if ax is not None and isinstance(ax, projection_class): self.sca(ax) return ax # create the new axes using the axes class given a = projection_class(self, rect, **kwargs) self._axstack.add(key, a) self.sca(a) return a @docstring.dedent_interpd def add_subplot(self, *args, **kwargs): """ Add a subplot. Examples:: fig.add_subplot(111) # equivalent but more general fig.add_subplot(1,1,1) # add subplot with red background fig.add_subplot(212, axisbg='r') # add a polar subplot fig.add_subplot(111, projection='polar') # add Subplot instance sub fig.add_subplot(sub) *kwargs* are legal :class:`~matplotlib.axes.Axes` kwargs plus *projection*, which chooses a projection type for the axes. (For backward compatibility, *polar=True* may also be provided, which is equivalent to *projection='polar'*). Valid values for *projection* are: %(projection_names)s. Some of these projections support additional *kwargs*, which may be provided to :meth:`add_axes`. 
The :class:`~matplotlib.axes.Axes` instance will be returned. If the figure already has a subplot with key (*args*, *kwargs*) then it will simply make that subplot current and return it. .. seealso:: :meth:`~matplotlib.pyplot.subplot` for an explanation of the args. The following kwargs are supported: %(Axes)s """ if not len(args): return if len(args) == 1 and isinstance(args[0], int): args = tuple([int(c) for c in str(args[0])]) if len(args) != 3: raise ValueError("Integer subplot specification must " + "be a three digit number. " + "Not {n:d}".format(n=len(args))) if isinstance(args[0], SubplotBase): a = args[0] assert(a.get_figure() is self) # make a key for the subplot (which includes the axes object id # in the hash) key = self._make_key(*args, **kwargs) else: projection_class, kwargs, key = process_projection_requirements( self, *args, **kwargs) # try to find the axes with this key in the stack ax = self._axstack.get(key) if ax is not None: if isinstance(ax, projection_class): # the axes already existed, so set it as active & return self.sca(ax) return ax else: # Undocumented convenience behavior: # subplot(111); subplot(111, projection='polar') # will replace the first with the second. # Without this, add_subplot would be simpler and # more similar to add_axes. self._axstack.remove(ax) a = subplot_class_factory(projection_class)(self, *args, **kwargs) self._axstack.add(key, a) self.sca(a) return a def clf(self, keep_observers=False): """ Clear the figure. Set *keep_observers* to True if, for example, a gui widget is tracking the axes in the figure. """ self.suppressComposite = None self.callbacks = cbook.CallbackRegistry() for ax in tuple(self.axes): # Iterate over the copy. ax.cla() self.delaxes(ax) # removes ax from self._axstack toolbar = getattr(self.canvas, 'toolbar', None) if toolbar is not None: toolbar.update() self._axstack.clear() self.artists = [] self.lines = [] self.patches = [] self.texts = [] self.images = [] self.legends = [] if not keep_observers: self._axobservers = [] self._suptitle = None def clear(self): """ Clear the figure -- synonym for :meth:`clf`. """ self.clf() @allow_rasterization def draw(self, renderer): """ Render the figure using :class:`matplotlib.backend_bases.RendererBase` instance *renderer*. """ # draw the figure bounding box, perhaps none for white figure if not self.get_visible(): return renderer.open_group('figure') if self.get_tight_layout() and self.axes: try: self.tight_layout(renderer, **self._tight_parameters) except ValueError: pass # ValueError can occur when resizing a window. 
if self.frameon: self.patch.draw(renderer) # a list of (zorder, func_to_call, list_of_args) dsu = [] for a in self.patches: dsu.append((a.get_zorder(), a, a.draw, [renderer])) for a in self.lines: dsu.append((a.get_zorder(), a, a.draw, [renderer])) for a in self.artists: dsu.append((a.get_zorder(), a, a.draw, [renderer])) # override the renderer default if self.suppressComposite # is not None not_composite = renderer.option_image_nocomposite() if self.suppressComposite is not None: not_composite = self.suppressComposite if (len(self.images) <= 1 or not_composite or not cbook.allequal([im.origin for im in self.images])): for a in self.images: dsu.append((a.get_zorder(), a, a.draw, [renderer])) else: # make a composite image blending alpha # list of (_image.Image, ox, oy) mag = renderer.get_image_magnification() ims = [(im.make_image(mag), im.ox, im.oy, im.get_alpha()) for im in self.images] im = _image.from_images(self.bbox.height * mag, self.bbox.width * mag, ims) im.is_grayscale = False l, b, w, h = self.bbox.bounds def draw_composite(): gc = renderer.new_gc() gc.set_clip_rectangle(self.bbox) gc.set_clip_path(self.get_clip_path()) renderer.draw_image(gc, l, b, im) gc.restore() dsu.append((self.images[0].get_zorder(), self.images[0], draw_composite, [])) # render the axes for a in self.axes: dsu.append((a.get_zorder(), a, a.draw, [renderer])) # render the figure text for a in self.texts: dsu.append((a.get_zorder(), a, a.draw, [renderer])) for a in self.legends: dsu.append((a.get_zorder(), a, a.draw, [renderer])) dsu = [row for row in dsu if not row[1].get_animated()] dsu.sort(key=itemgetter(0)) for zorder, a, func, args in dsu: func(*args) renderer.close_group('figure') self._cachedRenderer = renderer self.canvas.draw_event(renderer) def draw_artist(self, a): """ draw :class:`matplotlib.artist.Artist` instance *a* only -- this is available only after the figure is drawn """ assert self._cachedRenderer is not None a.draw(self._cachedRenderer) def get_axes(self): return self.axes def legend(self, handles, labels, *args, **kwargs): """ Place a legend in the figure. Labels are a sequence of strings, handles is a sequence of :class:`~matplotlib.lines.Line2D` or :class:`~matplotlib.patches.Patch` instances, and loc can be a string or an integer specifying the legend location USAGE:: legend( (line1, line2, line3), ('label1', 'label2', 'label3'), 'upper right') The *loc* location codes are:: 'best' : 0, (currently not supported for figure legends) 'upper right' : 1, 'upper left' : 2, 'lower left' : 3, 'lower right' : 4, 'right' : 5, 'center left' : 6, 'center right' : 7, 'lower center' : 8, 'upper center' : 9, 'center' : 10, *loc* can also be an (x,y) tuple in figure coords, which specifies the lower left of the legend box. figure coords are (0,0) is the left, bottom of the figure and 1,1 is the right, top. Keyword arguments: *prop*: [ *None* | FontProperties | dict ] A :class:`matplotlib.font_manager.FontProperties` instance. If *prop* is a dictionary, a new instance will be created with *prop*. If *None*, use rc settings. *numpoints*: integer The number of points in the legend line, default is 4 *scatterpoints*: integer The number of points in the legend line, default is 4 *scatteryoffsets*: list of floats a list of yoffsets for scatter symbols in legend *markerscale*: [ *None* | scalar ] The relative size of legend markers vs. original. If *None*, use rc settings. *fancybox*: [ *None* | *False* | *True* ] if *True*, draw a frame with a round fancybox. 
If *None*, use rc *shadow*: [ *None* | *False* | *True* ] If *True*, draw a shadow behind legend. If *None*, use rc settings. *ncol* : integer number of columns. default is 1 *mode* : [ "expand" | *None* ] if mode is "expand", the legend will be horizontally expanded to fill the axes area (or *bbox_to_anchor*) *title* : string the legend title Padding and spacing between various elements use following keywords parameters. The dimensions of these values are given as a fraction of the fontsize. Values from rcParams will be used if None. ================ ==================================================== Keyword Description ================ ==================================================== borderpad the fractional whitespace inside the legend border labelspacing the vertical space between the legend entries handlelength the length of the legend handles handletextpad the pad between the legend handle and text borderaxespad the pad between the axes and legend border columnspacing the spacing between columns ================ ==================================================== .. Note:: Not all kinds of artist are supported by the legend. See LINK (FIXME) for details. **Example:** .. plot:: mpl_examples/pylab_examples/figlegend_demo.py """ l = Legend(self, handles, labels, *args, **kwargs) self.legends.append(l) l._remove_method = lambda h: self.legends.remove(h) return l @docstring.dedent_interpd def text(self, x, y, s, *args, **kwargs): """ Add text to figure. Call signature:: text(x, y, s, fontdict=None, **kwargs) Add text to figure at location *x*, *y* (relative 0-1 coords). See :func:`~matplotlib.pyplot.text` for the meaning of the other arguments. kwargs control the :class:`~matplotlib.text.Text` properties: %(Text)s """ override = _process_text_args({}, *args, **kwargs) t = Text(x=x, y=y, text=s) t.update(override) self._set_artist_props(t) self.texts.append(t) t._remove_method = lambda h: self.texts.remove(h) return t def _set_artist_props(self, a): if a != self: a.set_figure(self) a.set_transform(self.transFigure) @docstring.dedent_interpd def gca(self, **kwargs): """ Get the current axes, creating one if necessary The following kwargs are supported for ensuring the returned axes adheres to the given projection etc., and for axes creation if the active axes does not exist: %(Axes)s """ ckey, cax = self._axstack.current_key_axes() # if there exists an axes on the stack see if it maches # the desired axes configuration if cax is not None: # if no kwargs are given just return the current axes # this is a convenience for gca() on axes such as polar etc. if not kwargs: return cax # if the user has specified particular projection detail # then build up a key which can represent this else: # we don't want to modify the original kwargs # so take a copy so that we can do what we like to it kwargs_copy = kwargs.copy() projection_class, _, key = process_projection_requirements( self, **kwargs_copy) # let the returned axes have any gridspec by removing it from # the key ckey = ckey[1:] key = key[1:] # if the cax matches this key then return the axes, otherwise # continue and a new axes will be created if key == ckey and isinstance(cax, projection_class): return cax # no axes found, so create one which spans the figure return self.add_subplot(1, 1, 1, **kwargs) def sca(self, a): 'Set the current axes to be a and return a' self._axstack.bubble(a) for func in self._axobservers: func(self) return a def _gci(self): """ helper for :func:`~matplotlib.pyplot.gci`; do not use elsewhere. 
""" # Look first for an image in the current Axes: cax = self._axstack.current_key_axes()[1] if cax is None: return None im = cax._gci() if im is not None: return im # If there is no image in the current Axes, search for # one in a previously created Axes. Whether this makes # sense is debatable, but it is the documented behavior. for ax in reversed(self.axes): im = ax._gci() if im is not None: return im return None def __getstate__(self): state = self.__dict__.copy() # the axobservers cannot currently be pickled. # Additionally, the canvas cannot currently be pickled, but this has # the benefit of meaning that a figure can be detached from one canvas, # and re-attached to another. for attr_to_pop in ('_axobservers', 'show', 'canvas', '_cachedRenderer'): state.pop(attr_to_pop, None) # add version information to the state state['__mpl_version__'] = _mpl_version # check to see if the figure has a manager and whether it is registered # with pyplot if getattr(self.canvas, 'manager', None) is not None: manager = self.canvas.manager import matplotlib._pylab_helpers if manager in list(six.itervalues( matplotlib._pylab_helpers.Gcf.figs)): state['_restore_to_pylab'] = True return state def __setstate__(self, state): version = state.pop('__mpl_version__') restore_to_pylab = state.pop('_restore_to_pylab', False) if version != _mpl_version: import warnings warnings.warn("This figure was saved with matplotlib version %s " "and is unlikely to function correctly." % (version, )) self.__dict__ = state # re-initialise some of the unstored state information self._axobservers = [] self.canvas = None if restore_to_pylab: # lazy import to avoid circularity import matplotlib.pyplot as plt import matplotlib._pylab_helpers as pylab_helpers allnums = plt.get_fignums() num = max(allnums) + 1 if allnums else 1 mgr = plt._backend_mod.new_figure_manager_given_figure(num, self) # XXX The following is a copy and paste from pyplot. Consider # factoring to pylab_helpers if self.get_label(): mgr.set_window_title(self.get_label()) # make this figure current on button press event def make_active(event): pylab_helpers.Gcf.set_active(mgr) mgr._cidgcf = mgr.canvas.mpl_connect('button_press_event', make_active) pylab_helpers.Gcf.set_active(mgr) self.number = num plt.draw_if_interactive() def add_axobserver(self, func): 'whenever the axes state change, ``func(self)`` will be called' self._axobservers.append(func) def savefig(self, *args, **kwargs): """ Save the current figure. Call signature:: savefig(fname, dpi=None, facecolor='w', edgecolor='w', orientation='portrait', papertype=None, format=None, transparent=False, bbox_inches=None, pad_inches=0.1, frameon=None) The output formats available depend on the backend being used. Arguments: *fname*: A string containing a path to a filename, or a Python file-like object, or possibly some backend-dependent object such as :class:`~matplotlib.backends.backend_pdf.PdfPages`. If *format* is *None* and *fname* is a string, the output format is deduced from the extension of the filename. If the filename has no extension, the value of the rc parameter ``savefig.format`` is used. If *fname* is not a string, remember to specify *format* to ensure that the correct backend is used. Keyword arguments: *dpi*: [ *None* | ``scalar > 0`` ] The resolution in dots per inch. If *None* it will default to the value ``savefig.dpi`` in the matplotlibrc file. 
*facecolor*, *edgecolor*: the colors of the figure rectangle *orientation*: [ 'landscape' | 'portrait' ] not supported on all backends; currently only on postscript output *papertype*: One of 'letter', 'legal', 'executive', 'ledger', 'a0' through 'a10', 'b0' through 'b10'. Only supported for postscript output. *format*: One of the file extensions supported by the active backend. Most backends support png, pdf, ps, eps and svg. *transparent*: If *True*, the axes patches will all be transparent; the figure patch will also be transparent unless facecolor and/or edgecolor are specified via kwargs. This is useful, for example, for displaying a plot on top of a colored background on a web page. The transparency of these patches will be restored to their original values upon exit of this function. *frameon*: If *True*, the figure patch will be colored, if *False*, the figure background will be transparent. If not provided, the rcParam 'savefig.frameon' will be used. *bbox_inches*: Bbox in inches. Only the given portion of the figure is saved. If 'tight', try to figure out the tight bbox of the figure. *pad_inches*: Amount of padding around the figure when bbox_inches is 'tight'. *bbox_extra_artists*: A list of extra artists that will be considered when the tight bbox is calculated. """ kwargs.setdefault('dpi', rcParams['savefig.dpi']) frameon = kwargs.pop('frameon', rcParams['savefig.frameon']) transparent = kwargs.pop('transparent', rcParams['savefig.transparent']) if transparent: kwargs.setdefault('facecolor', 'none') kwargs.setdefault('edgecolor', 'none') original_axes_colors = [] for ax in self.axes: patch = ax.patch original_axes_colors.append((patch.get_facecolor(), patch.get_edgecolor())) patch.set_facecolor('none') patch.set_edgecolor('none') else: kwargs.setdefault('facecolor', rcParams['savefig.facecolor']) kwargs.setdefault('edgecolor', rcParams['savefig.edgecolor']) if frameon: original_frameon = self.get_frameon() self.set_frameon(frameon) self.canvas.print_figure(*args, **kwargs) if frameon: self.set_frameon(original_frameon) if transparent: for ax, cc in zip(self.axes, original_axes_colors): ax.patch.set_facecolor(cc[0]) ax.patch.set_edgecolor(cc[1]) @docstring.dedent_interpd def colorbar(self, mappable, cax=None, ax=None, use_gridspec=True, **kw): """ Create a colorbar for a ScalarMappable instance, *mappable*. Documentation for the pylab thin wrapper: %(colorbar_doc)s """ if ax is None: ax = self.gca() # Store the value of gca so that we can set it back later on. 
current_ax = self.gca() if cax is None: if use_gridspec and isinstance(ax, SubplotBase): cax, kw = cbar.make_axes_gridspec(ax, **kw) else: cax, kw = cbar.make_axes(ax, **kw) cax.hold(True) cb = cbar.colorbar_factory(cax, mappable, **kw) self.sca(current_ax) return cb def subplots_adjust(self, *args, **kwargs): """ Call signature:: subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=None) Update the :class:`SubplotParams` with *kwargs* (defaulting to rc when *None*) and update the subplot locations """ self.subplotpars.update(*args, **kwargs) for ax in self.axes: if not isinstance(ax, SubplotBase): # Check if sharing a subplots axis if (ax._sharex is not None and isinstance(ax._sharex, SubplotBase)): ax._sharex.update_params() ax.set_position(ax._sharex.figbox) elif (ax._sharey is not None and isinstance(ax._sharey, SubplotBase)): ax._sharey.update_params() ax.set_position(ax._sharey.figbox) else: ax.update_params() ax.set_position(ax.figbox) def ginput(self, n=1, timeout=30, show_clicks=True, mouse_add=1, mouse_pop=3, mouse_stop=2): """ Call signature:: ginput(self, n=1, timeout=30, show_clicks=True, mouse_add=1, mouse_pop=3, mouse_stop=2) Blocking call to interact with the figure. This will wait for *n* clicks from the user and return a list of the coordinates of each click. If *timeout* is zero or negative, does not timeout. If *n* is zero or negative, accumulate clicks until a middle click (or potentially both mouse buttons at once) terminates the input. Right clicking cancels last input. The buttons used for the various actions (adding points, removing points, terminating the inputs) can be overriden via the arguments *mouse_add*, *mouse_pop* and *mouse_stop*, that give the associated mouse button: 1 for left, 2 for middle, 3 for right. The keyboard can also be used to select points in case your mouse does not have one or more of the buttons. The delete and backspace keys act like right clicking (i.e., remove last point), the enter key terminates input and any other key (not already used by the window manager) selects a point. """ blocking_mouse_input = BlockingMouseInput(self, mouse_add=mouse_add, mouse_pop=mouse_pop, mouse_stop=mouse_stop) return blocking_mouse_input(n=n, timeout=timeout, show_clicks=show_clicks) def waitforbuttonpress(self, timeout=-1): """ Call signature:: waitforbuttonpress(self, timeout=-1) Blocking call to interact with the figure. This will return True is a key was pressed, False if a mouse button was pressed and None if *timeout* was reached without either being pressed. If *timeout* is negative, does not timeout. """ blocking_input = BlockingKeyMouseInput(self) return blocking_input(timeout=timeout) def get_default_bbox_extra_artists(self): bbox_artists = [artist for artist in self.get_children() if artist.get_visible()] for ax in self.axes: if ax.get_visible(): bbox_artists.extend(ax.get_default_bbox_extra_artists()) # we don't want the figure's patch to influence the bbox calculation bbox_artists.remove(self.patch) return bbox_artists def get_tightbbox(self, renderer): """ Return a (tight) bounding box of the figure in inches. It only accounts axes title, axis labels, and axis ticklabels. Needs improvement. """ bb = [] for ax in self.axes: if ax.get_visible(): bb.append(ax.get_tightbbox(renderer)) if len(bb) == 0: return self.bbox_inches _bbox = Bbox.union([b for b in bb if b.width != 0 or b.height != 0]) bbox_inches = TransformedBbox(_bbox, Affine2D().scale(1. 
/ self.dpi)) return bbox_inches def tight_layout(self, renderer=None, pad=1.08, h_pad=None, w_pad=None, rect=None): """ Adjust subplot parameters to give specified padding. Parameters: *pad* : float padding between the figure edge and the edges of subplots, as a fraction of the font-size. *h_pad*, *w_pad* : float padding (height/width) between edges of adjacent subplots. Defaults to `pad_inches`. *rect* : if rect is given, it is interpreted as a rectangle (left, bottom, right, top) in the normalized figure coordinate that the whole subplots area (including labels) will fit into. Default is (0, 0, 1, 1). """ from .tight_layout import (get_renderer, get_tight_layout_figure, get_subplotspec_list) subplotspec_list = get_subplotspec_list(self.axes) if None in subplotspec_list: warnings.warn("This figure includes Axes that are not " "compatible with tight_layout, so its " "results might be incorrect.") if renderer is None: renderer = get_renderer(self) kwargs = get_tight_layout_figure(self, self.axes, subplotspec_list, renderer, pad=pad, h_pad=h_pad, w_pad=w_pad, rect=rect) self.subplots_adjust(**kwargs) def figaspect(arg): """ Create a figure with specified aspect ratio. If *arg* is a number, use that aspect ratio. If *arg* is an array, figaspect will determine the width and height for a figure that would fit array preserving aspect ratio. The figure width, height in inches are returned. Be sure to create an axes with equal with and height, e.g., Example usage:: # make a figure twice as tall as it is wide w, h = figaspect(2.) fig = Figure(figsize=(w,h)) ax = fig.add_axes([0.1, 0.1, 0.8, 0.8]) ax.imshow(A, **kwargs) # make a figure with the proper aspect for an array A = rand(5,3) w, h = figaspect(A) fig = Figure(figsize=(w,h)) ax = fig.add_axes([0.1, 0.1, 0.8, 0.8]) ax.imshow(A, **kwargs) Thanks to Fernando Perez for this function """ isarray = hasattr(arg, 'shape') # min/max sizes to respect when autoscaling. If John likes the idea, they # could become rc parameters, for now they're hardwired. figsize_min = np.array((4.0, 2.0)) # min length for width/height figsize_max = np.array((16.0, 16.0)) # max length for width/height #figsize_min = rcParams['figure.figsize_min'] #figsize_max = rcParams['figure.figsize_max'] # Extract the aspect ratio of the array if isarray: nr, nc = arg.shape[:2] arr_ratio = float(nr) / nc else: arr_ratio = float(arg) # Height of user figure defaults fig_height = rcParams['figure.figsize'][1] # New size for the figure, keeping the aspect ratio of the caller newsize = np.array((fig_height / arr_ratio, fig_height)) # Sanity checks, don't drop either dimension below figsize_min newsize /= min(1.0, *(newsize / figsize_min)) # Avoid humongous windows as well newsize /= max(1.0, *(newsize / figsize_max)) # Finally, if we have a really funky aspect ratio, break it but respect # the min/max dimensions (we don't want figures 10 feet tall!) newsize = np.clip(newsize, figsize_min, figsize_max) return newsize docstring.interpd.update(Figure=martist.kwdoc(Figure))
gpl-2.0
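
Since the figure.py source above is mostly docstring-level API documentation, here is a short usage sketch of the pieces it documents (figaspect, add_subplot, suptitle, legend, tight_layout, savefig). It assumes a matplotlib of roughly the same vintage; the output filename is a placeholder.

import numpy as np
import matplotlib
matplotlib.use('Agg')          # non-GUI backend; Figure.show() would only warn here
import matplotlib.pyplot as plt
from matplotlib.figure import figaspect

# figaspect(2.) returns a (width, height) pair for a figure twice as tall as wide,
# as described in the figaspect docstring above.
w, h = figaspect(2.)
fig = plt.figure(figsize=(w, h))

# add_subplot(111) is the common single-axes case; calling it again with the
# same args/kwargs returns the existing axes instead of creating a new one.
ax = fig.add_subplot(111)
t = np.linspace(0, 2 * np.pi, 200)
ax.plot(t, np.sin(t), label='sin')
ax.legend(loc='upper right')

fig.suptitle('this is the figure title', fontsize=12)   # example from the suptitle docstring
fig.tight_layout()
fig.savefig('figure_api_demo.png', dpi=100)             # placeholder filename
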
dylanGeng/BuildingMachineLearningSystemsWithPython
ch09/fft.py
24
3673
# This code is supporting material for the book # Building Machine Learning Systems with Python # by Willi Richert and Luis Pedro Coelho # published by PACKT Publishing # # It is made available under the MIT License import sys import os import glob import numpy as np import scipy import scipy.io.wavfile from utils import GENRE_DIR, CHART_DIR import matplotlib.pyplot as plt from matplotlib.ticker import EngFormatter def write_fft(fft_features, fn): """ Write the FFT features to separate files to speed up processing. """ base_fn, ext = os.path.splitext(fn) data_fn = base_fn + ".fft" np.save(data_fn, fft_features) print("Written %s" % data_fn) def create_fft(fn): sample_rate, X = scipy.io.wavfile.read(fn) fft_features = abs(scipy.fft(X)[:1000]) write_fft(fft_features, fn) def read_fft(genre_list, base_dir=GENRE_DIR): X = [] y = [] for label, genre in enumerate(genre_list): genre_dir = os.path.join(base_dir, genre, "*.fft.npy") file_list = glob.glob(genre_dir) assert(file_list), genre_dir for fn in file_list: fft_features = np.load(fn) X.append(fft_features[:2000]) y.append(label) return np.array(X), np.array(y) def plot_wav_fft(wav_filename, desc=None): plt.clf() plt.figure(num=None, figsize=(6, 4)) sample_rate, X = scipy.io.wavfile.read(wav_filename) spectrum = np.fft.fft(X) freq = np.fft.fftfreq(len(X), 1.0 / sample_rate) plt.subplot(211) num_samples = 200.0 plt.xlim(0, num_samples / sample_rate) plt.xlabel("time [s]") plt.title(desc or wav_filename) plt.plot(np.arange(num_samples) / sample_rate, X[:num_samples]) plt.grid(True) plt.subplot(212) plt.xlim(0, 5000) plt.xlabel("frequency [Hz]") plt.xticks(np.arange(5) * 1000) if desc: desc = desc.strip() fft_desc = desc[0].lower() + desc[1:] else: fft_desc = wav_filename plt.title("FFT of %s" % fft_desc) plt.plot(freq, abs(spectrum), linewidth=5) plt.grid(True) plt.tight_layout() rel_filename = os.path.split(wav_filename)[1] plt.savefig("%s_wav_fft.png" % os.path.splitext(rel_filename)[0], bbox_inches='tight') plt.show() def plot_wav_fft_demo(): plot_wav_fft("sine_a.wav", "400Hz sine wave") plot_wav_fft("sine_b.wav", "3,000Hz sine wave") plot_wav_fft("sine_mix.wav", "Mixed sine wave") def plot_specgram(ax, fn): sample_rate, X = scipy.io.wavfile.read(fn) ax.specgram(X, Fs=sample_rate, xextent=(0, 30)) def plot_specgrams(base_dir=CHART_DIR): """ Plot a bunch of spectrograms of wav files in different genres """ plt.clf() genres = ["classical", "jazz", "country", "pop", "rock", "metal"] num_files = 3 f, axes = plt.subplots(len(genres), num_files) for genre_idx, genre in enumerate(genres): for idx, fn in enumerate(glob.glob(os.path.join(GENRE_DIR, genre, "*.wav"))): if idx == num_files: break axis = axes[genre_idx, idx] axis.yaxis.set_major_formatter(EngFormatter()) axis.set_title("%s song %i" % (genre, idx + 1)) plot_specgram(axis, fn) specgram_file = os.path.join(base_dir, "Spectrogram_Genres.png") plt.savefig(specgram_file, bbox_inches="tight") plt.show() if __name__ == "__main__": # for fn in glob.glob(os.path.join(sys.argv[1], "*.wav")): # create_fft(fn) # plot_decomp() if len(sys.argv) > 1: plot_wav_fft(sys.argv[1], desc="some sample song") else: plot_wav_fft_demo() plot_specgrams()
mit
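
The fft.py module above reduces each WAV file to its first 1,000 FFT magnitudes and caches them as .fft.npy files for later reuse. The sketch below shows that write/read round-trip in isolation; the filename is a placeholder, and np.fft.fft stands in for the callable scipy.fft of the scipy versions the book targets.

import os
import numpy as np
import scipy.io.wavfile

def create_fft_features(wav_path):
    """Compute and cache the first 1000 FFT magnitudes, mirroring create_fft() above."""
    sample_rate, X = scipy.io.wavfile.read(wav_path)
    # The original calls scipy.fft(X); np.fft.fft is equivalent here.
    fft_features = np.abs(np.fft.fft(X)[:1000])
    base, _ = os.path.splitext(wav_path)
    np.save(base + ".fft", fft_features)   # np.save appends .npy -> <base>.fft.npy
    return fft_features

# "some_song.wav" is a placeholder, not a file shipped with the book's data.
features = create_fft_features("some_song.wav")
reloaded = np.load("some_song.fft.npy")    # matches the "*.fft.npy" glob in read_fft()
assert np.allclose(features, reloaded)
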