repo_name | path | copies | size | content | license
---|---|---|---|---|---|
QuLogic/vispy | examples/basics/plotting/mpl_plot.py | 14 | 1579 | # -*- coding: utf-8 -*-
# vispy: testskip
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
Example demonstrating how to use vispy.mpl_plot, which uses mplexporter
to convert matplotlib commands to vispy draw commands.
Requires matplotlib.
"""
import numpy as np
# You can use either matplotlib or vispy to render this example:
# import matplotlib.pyplot as plt
import vispy.mpl_plot as plt
from vispy.io import read_png, load_data_file
n = 200
freq = 10
fs = 100.
t = np.arange(n) / fs
tone = np.sin(2*np.pi*freq*t)
noise = np.random.RandomState(0).randn(n)
signal = tone + noise
magnitude = np.abs(np.fft.fft(signal))
freqs = np.fft.fftfreq(n, 1. / fs)
flim = n // 2
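# For a real-valued signal the FFT is conjugate-symmetric, so only the first
# n // 2 bins (the non-negative frequencies) carry unique information.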
# Signal
fig = plt.figure()
ax = plt.subplot(311)
ax.imshow(read_png(load_data_file('pyplot/logo.png')))
ax = plt.subplot(312)
ax.plot(t, signal, 'k-')
# Frequency content
ax = plt.subplot(313)
idx = np.argmax(magnitude[:flim])
ax.text(freqs[idx], magnitude[idx], 'Max: %s Hz' % freqs[idx],
verticalalignment='top')
ax.plot(freqs[:flim], magnitude[:flim], 'k-o')
plt.draw()
# NOTE: show() has currently been overwritten to convert to vispy format, so:
# 1. It must be called to show the results, and
# 2. Any plotting commands executed after this will not take effect.
# We are working to remove this limitation.
if __name__ == '__main__':
plt.show(True)
| bsd-3-clause |
aje/POT | examples/plot_optim_OTreg.py | 2 | 2940 | # -*- coding: utf-8 -*-
"""
==================================
Regularized OT with generic solver
==================================
Illustrates the use of the generic solver for regularized OT with
user-designed regularization term. It uses Conditional gradient as in [6] and
generalized Conditional Gradient as proposed in [5][7].
[5] N. Courty; R. Flamary; D. Tuia; A. Rakotomamonjy, Optimal Transport for
Domain Adaptation, in IEEE Transactions on Pattern Analysis and Machine
Intelligence, vol. PP, no. 99, pp. 1-1.
[6] Ferradans, S., Papadakis, N., Peyré, G., & Aujol, J. F. (2014).
Regularized discrete optimal transport. SIAM Journal on Imaging Sciences,
7(3), 1853-1882.
[7] Rakotomamonjy, A., Flamary, R., & Courty, N. (2015). Generalized
conditional gradient: analysis of convergence and applications.
arXiv preprint arXiv:1510.06567.
"""
import numpy as np
import matplotlib.pylab as pl
import ot
import ot.plot
##############################################################################
# Generate data
# -------------
#%% parameters
n = 100 # nb bins
# bin positions
x = np.arange(n, dtype=np.float64)
# Gaussian distributions
a = ot.datasets.get_1D_gauss(n, m=20, s=5) # m= mean, s= std
b = ot.datasets.get_1D_gauss(n, m=60, s=10)
# loss matrix
M = ot.dist(x.reshape((n, 1)), x.reshape((n, 1)))
M /= M.max()
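# Normalizing the cost matrix to [0, 1] keeps the regularization strengths
# used below on a comparable scale regardless of the bin spacing.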
##############################################################################
# Solve EMD
# ---------
#%% EMD
G0 = ot.emd(a, b, M)
pl.figure(3, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, G0, 'OT matrix G0')
##############################################################################
# Solve EMD with Frobenius norm regularization
# --------------------------------------------
#%% Example with Frobenius norm regularization
def f(G):
return 0.5 * np.sum(G**2)
def df(G):
return G
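# f is the squared Frobenius norm 0.5 * ||G||_F^2 and df is its elementwise
# gradient; ot.optim.cg minimizes <G, M> + reg * f(G) over transport plans.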
reg = 1e-1
Gl2 = ot.optim.cg(a, b, M, reg, f, df, verbose=True)
pl.figure(3)
ot.plot.plot1D_mat(a, b, Gl2, 'OT matrix Frob. reg')
##############################################################################
# Solve EMD with entropic regularization
# --------------------------------------
#%% Example with entropic regularization
def f(G):
return np.sum(G * np.log(G))
def df(G):
return np.log(G) + 1.
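# Gradient check: d/dG [sum(G * log(G))] = log(G) + 1 elementwise, which is
# what df returns (G must stay strictly positive for this to be defined).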
reg = 1e-3
Ge = ot.optim.cg(a, b, M, reg, f, df, verbose=True)
pl.figure(4, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, Ge, 'OT matrix Entrop. reg')
##############################################################################
# Solve EMD with Frobenius norm + entropic regularization
# -------------------------------------------------------
#%% Example with Frobenius norm + entropic regularization with gcg
def f(G):
return 0.5 * np.sum(G**2)
def df(G):
return G
reg1 = 1e-3
reg2 = 1e-1
Gel2 = ot.optim.gcg(a, b, M, reg1, reg2, f, df, verbose=True)
pl.figure(5, figsize=(5, 5))
ot.plot.plot1D_mat(a, b, Gel2, 'OT entropic + matrix Frob. reg')
pl.show()
| mit |
cheminfo/RDKitjs | old/src/similarityMap_basic_functions.py | 1 | 3270 | # Imports needed by the functions below (paths assume an older RDKit release):
import math
import numpy
import numpy as np
from matplotlib import cm
from rdkit.Chem import Draw
from rdkit.Chem.Draw.MolDrawing import MolDrawing, DrawingOptions
from rdkit.six import iteritems
def bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0, mux=0.0, muy=0.0, sigmaxy=0.0):
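# Evaluates the bivariate normal density with correlation
# rho = sigmaxy / (sigmax * sigmay); mirrors the (since removed)
# matplotlib.mlab.bivariate_normal helper.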
Xmu = X-mux
Ymu = Y-muy
rho = sigmaxy/(sigmax*sigmay)
z = Xmu**2/sigmax**2 + Ymu**2/sigmay**2 - 2*rho*Xmu*Ymu/(sigmax*sigmay)
denom = 2*np.pi*sigmax*sigmay*np.sqrt(1-rho**2)
return np.exp(-z/(2*(1-rho**2))) / denom
def MolToMPL(mol,size=(300,300),kekulize=True, wedgeBonds=True, imageType=None, fitImage=False, options=None, **kwargs):
if not mol:
raise ValueError('Null molecule provided')
from rdkit.Chem.Draw.mplCanvas import Canvas
canvas = Canvas(size)
if options is None:
options = DrawingOptions()
options.bgColor=None
if fitImage:
options.dotsPerAngstrom = int(min(size) / 10)
options.wedgeDashedBonds=wedgeBonds
drawer = MolDrawing(canvas=canvas, drawingOptions=options)
omol=mol
if kekulize:
from rdkit import Chem
mol = Chem.Mol(mol.ToBinary())
Chem.Kekulize(mol)
if not mol.GetNumConformers():
from rdkit.Chem import AllChem
AllChem.Compute2DCoords(mol)
drawer.AddMol(mol,**kwargs)
omol._atomPs=drawer.atomPs[mol]
for k,v in iteritems(omol._atomPs):
omol._atomPs[k]=canvas.rescalePt(v)
canvas._figure.set_size_inches(float(size[0])/100,float(size[1])/100)
return canvas._figure
def calcAtomGaussians(mol,a=0.03,step=0.02,weights=None):
import numpy
from matplotlib import mlab
x = numpy.arange(0,1,step)
y = numpy.arange(0,1,step)
X,Y = numpy.meshgrid(x,y)
if weights is None:
weights=[1.]*mol.GetNumAtoms()
# Note: sigmaxy defaults to 0, so each atom contributes an isotropic
# (uncorrelated) Gaussian; this builds a weighted sum of such Gaussians
# rather than a general bivariate mixture.
Z = mlab.bivariate_normal(X,Y,a,a,mol._atomPs[0][0], mol._atomPs[0][1])*weights[0]
for i in range(1,mol.GetNumAtoms()):
Zp = mlab.bivariate_normal(X,Y,a,a,mol._atomPs[i][0], mol._atomPs[i][1])
Z += Zp*weights[i]
return X,Y,Z
def GetSimilarityMapFromWeights(mol, weights, colorMap=cm.PiYG, scale=-1, size=(250, 250), sigma=None, #@UndefinedVariable #pylint: disable=E1101
coordScale=1.5, step=0.01, colors='k', contourLines=10, alpha=0.5, **kwargs):
if mol.GetNumAtoms() < 2: raise ValueError("too few atoms")
fig = Draw.MolToMPL(mol, coordScale=coordScale, size=size, **kwargs)
if sigma is None:
if mol.GetNumBonds() > 0:
bond = mol.GetBondWithIdx(0)
idx1 = bond.GetBeginAtomIdx()
idx2 = bond.GetEndAtomIdx()
sigma = 0.3 * math.sqrt(sum([(mol._atomPs[idx1][i]-mol._atomPs[idx2][i])**2 for i in range(2)]))
else:
sigma = 0.3 * math.sqrt(sum([(mol._atomPs[0][i]-mol._atomPs[1][i])**2 for i in range(2)]))
sigma = round(sigma, 2)
x, y, z = Draw.calcAtomGaussians(mol, sigma, weights=weights, step=step)
# scaling
if scale <= 0.0: maxScale = max(math.fabs(numpy.min(z)), math.fabs(numpy.max(z)))
else: maxScale = scale
# coloring
fig.axes[0].imshow(z, cmap=colorMap, interpolation='bilinear', origin='lower', extent=(0,1,0,1), vmin=-maxScale, vmax=maxScale)
# contour lines
# only draw them when at least one weight is not zero
if len([w for w in weights if w != 0.0]):
fig.axes[0].contour(x, y, z, contourLines, colors=colors, alpha=alpha, **kwargs)
return fig
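# Hedged usage sketch (the molecule and weights below are made up; assumes
# RDKit and matplotlib are installed and this module's functions are in scope):
#
#   from rdkit import Chem
#   mol = Chem.MolFromSmiles('c1ccccc1O')           # phenol, 7 heavy atoms
#   weights = [0.5, 0.1, 0.0, -0.2, 0.0, 0.1, 0.8]  # one weight per atom
#   fig = GetSimilarityMapFromWeights(mol, weights)
#   fig.savefig('similarity_map.png', bbox_inches='tight')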
| bsd-3-clause |
mjudsp/Tsallis | examples/cluster/plot_cluster_comparison.py | 58 | 4681 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example aims at showing characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. The last dataset is an example of a 'null'
situation for clustering: the data is homogeneous, and
there is no good clustering.
While these examples give some intuition about the algorithms,
this intuition might not apply to very high dimensional data.
The results could be improved by tweaking the parameters for
each clustering strategy, for instance setting the number of
clusters for the methods that need this parameter
specified. Note that affinity propagation has a tendency to
create many clusters. Thus in this example its two parameters
(damping and per-point preference) were set to mitigate this
behavior.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
np.random.seed(0)
# Generate datasets. We choose the size big enough to see the scalability
# of the algorithms, but not too big to avoid too long running times
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
colors = np.array([x for x in 'bgrcmykbgrcmykbgrcmykbgrcmyk'])
colors = np.hstack([colors] * 20)
clustering_names = [
'MiniBatchKMeans', 'AffinityPropagation', 'MeanShift',
'SpectralClustering', 'Ward', 'AgglomerativeClustering',
'DBSCAN', 'Birch']
plt.figure(figsize=(len(clustering_names) * 2 + 3, 9.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
datasets = [noisy_circles, noisy_moons, blobs, no_structure]
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=0.3)
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(X, n_neighbors=10, include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
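# kneighbors_graph returns a directed graph (i can be among j's neighbors
# without the converse), so averaging with the transpose symmetrizes the
# relation for the structured agglomerative methods below.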
# create clustering estimators
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=2)
ward = cluster.AgglomerativeClustering(n_clusters=2, linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(n_clusters=2,
eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=.2)
affinity_propagation = cluster.AffinityPropagation(damping=.9,
preference=-200)
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock", n_clusters=2,
connectivity=connectivity)
birch = cluster.Birch(n_clusters=2)
clustering_algorithms = [
two_means, affinity_propagation, ms, spectral, ward, average_linkage,
dbscan, birch]
for name, algorithm in zip(clustering_names, clustering_algorithms):
# predict cluster memberships
t0 = time.time()
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(4, len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| bsd-3-clause |
JonnyWong16/plexpy | lib/tqdm/_tqdm_gui.py | 4 | 13326 | """
GUI progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm_gui import tgrange[, tqdm_gui]
>>> for i in tgrange(10): #same as: for i in tqdm_gui(xrange(10))
... ...
"""
# future division is important to divide integers and get as
# a result precise floating numbers (instead of truncated int)
from __future__ import division, absolute_import
# import compatibility functions and utilities
# import sys
from time import time
from ._utils import _range
# to inherit from the tqdm class
from ._tqdm import tqdm, TqdmExperimentalWarning
from warnings import warn
__author__ = {"github.com/": ["casperdcl", "lrq3000"]}
__all__ = ['tqdm_gui', 'tgrange']
class tqdm_gui(tqdm): # pragma: no cover
"""
Experimental GUI version of tqdm!
"""
# TODO: @classmethod: write() on GUI?
def __init__(self, *args, **kwargs):
import matplotlib as mpl
import matplotlib.pyplot as plt
from collections import deque
kwargs['gui'] = True
super(tqdm_gui, self).__init__(*args, **kwargs)
# Initialize the GUI display
if self.disable or not kwargs['gui']:
return
warn('GUI is experimental/alpha', TqdmExperimentalWarning)
self.mpl = mpl
self.plt = plt
self.sp = None
# Remember if external environment uses toolbars
self.toolbar = self.mpl.rcParams['toolbar']
self.mpl.rcParams['toolbar'] = 'None'
self.mininterval = max(self.mininterval, 0.5)
self.fig, ax = plt.subplots(figsize=(9, 2.2))
# self.fig.subplots_adjust(bottom=0.2)
if self.total:
self.xdata = []
self.ydata = []
self.zdata = []
else:
self.xdata = deque([])
self.ydata = deque([])
self.zdata = deque([])
self.line1, = ax.plot(self.xdata, self.ydata, color='b')
self.line2, = ax.plot(self.xdata, self.zdata, color='k')
ax.set_ylim(0, 0.001)
if self.total:
ax.set_xlim(0, 100)
ax.set_xlabel('percent')
self.fig.legend((self.line1, self.line2), ('cur', 'est'),
loc='center right')
# progressbar
self.hspan = plt.axhspan(0, 0.001,
xmin=0, xmax=0, color='g')
else:
# ax.set_xlim(-60, 0)
ax.set_xlim(0, 60)
ax.invert_xaxis()
ax.set_xlabel('seconds')
ax.legend(('cur', 'est'), loc='lower left')
ax.grid()
# ax.set_xlabel('seconds')
ax.set_ylabel((self.unit if self.unit else 'it') + '/s')
if self.unit_scale:
plt.ticklabel_format(style='sci', axis='y',
scilimits=(0, 0))
ax.yaxis.get_offset_text().set_x(-0.15)
# Remember if external environment is interactive
self.wasion = plt.isinteractive()
plt.ion()
self.ax = ax
def __iter__(self):
# TODO: somehow allow the following:
# if not self.gui:
# return super(tqdm_gui, self).__iter__()
iterable = self.iterable
if self.disable:
for obj in iterable:
yield obj
return
# ncols = self.ncols
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
unit = self.unit
unit_scale = self.unit_scale
ascii = self.ascii
start_t = self.start_t
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
# dynamic_ncols = self.dynamic_ncols
smoothing = self.smoothing
avg_time = self.avg_time
bar_format = self.bar_format
plt = self.plt
ax = self.ax
xdata = self.xdata
ydata = self.ydata
zdata = self.zdata
line1 = self.line1
line2 = self.line2
for obj in iterable:
yield obj
# Update and print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
delta_it = n - last_print_n
# check the counter first (avoid calls to time())
if delta_it >= miniters:
cur_t = time()
delta_t = cur_t - last_print_t
if delta_t >= mininterval:
elapsed = cur_t - start_t
# EMA (not just overall average)
if smoothing and delta_t:
avg_time = delta_t / delta_it \
if avg_time is None \
else smoothing * delta_t / delta_it + \
(1 - smoothing) * avg_time
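# Standard exponential moving average: avg <- s * x + (1 - s) * avg,
# weighting recent per-iteration times more heavily than old ones.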
# Inline due to multiple calls
total = self.total
# instantaneous rate
y = delta_it / delta_t
# overall rate
z = n / elapsed
# update line data
xdata.append(n * 100.0 / total if total else cur_t)
ydata.append(y)
zdata.append(z)
# Discard old values
# xmin, xmax = ax.get_xlim()
# if (not total) and elapsed > xmin * 1.1:
if (not total) and elapsed > 66:
xdata.popleft()
ydata.popleft()
zdata.popleft()
ymin, ymax = ax.get_ylim()
if y > ymax or z > ymax:
ymax = 1.1 * y
ax.set_ylim(ymin, ymax)
ax.figure.canvas.draw()
if total:
line1.set_data(xdata, ydata)
line2.set_data(xdata, zdata)
try:
poly_lims = self.hspan.get_xy()
except AttributeError:
self.hspan = plt.axhspan(0, 0.001, xmin=0,
xmax=0, color='g')
poly_lims = self.hspan.get_xy()
poly_lims[0, 1] = ymin
poly_lims[1, 1] = ymax
poly_lims[2] = [n / total, ymax]
poly_lims[3] = [poly_lims[2, 0], ymin]
if len(poly_lims) > 4:
poly_lims[4, 1] = ymin
self.hspan.set_xy(poly_lims)
else:
t_ago = [cur_t - i for i in xdata]
line1.set_data(t_ago, ydata)
line2.set_data(t_ago, zdata)
ax.set_title(self.format_meter(
n, total, elapsed, 0,
self.desc, ascii, unit, unit_scale,
1 / avg_time if avg_time else None, bar_format),
fontname="DejaVu Sans Mono", fontsize=11)
plt.pause(1e-9)
# If no `miniters` was specified, adjust automatically
# to the maximum iteration rate seen so far.
if dynamic_miniters:
if maxinterval and delta_t > maxinterval:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif mininterval and delta_t:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
miniters = smoothing * delta_it * mininterval \
/ delta_t + (1 - smoothing) * miniters
else:
miniters = smoothing * delta_it + \
(1 - smoothing) * miniters
# Store old values for next call
last_print_n = n
last_print_t = cur_t
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.close()
def update(self, n=1):
# if not self.gui:
# return super(tqdm_gui, self).close()
if self.disable:
return
if n < 0:
n = 1
self.n += n
delta_it = self.n - self.last_print_n # should be n?
if delta_it >= self.miniters:
# We check the counter first, to reduce the overhead of time()
cur_t = time()
delta_t = cur_t - self.last_print_t
if delta_t >= self.mininterval:
elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t:
self.avg_time = delta_t / delta_it \
if self.avg_time is None \
else self.smoothing * delta_t / delta_it + \
(1 - self.smoothing) * self.avg_time
# Inline due to multiple calls
total = self.total
ax = self.ax
# instantaneous rate
y = delta_it / delta_t
# smoothed rate
z = self.n / elapsed
# update line data
self.xdata.append(self.n * 100.0 / total
if total else cur_t)
self.ydata.append(y)
self.zdata.append(z)
# Discard old values
if (not total) and elapsed > 66:
self.xdata.popleft()
self.ydata.popleft()
self.zdata.popleft()
ymin, ymax = ax.get_ylim()
if y > ymax or z > ymax:
ymax = 1.1 * y
ax.set_ylim(ymin, ymax)
ax.figure.canvas.draw()
if total:
self.line1.set_data(self.xdata, self.ydata)
self.line2.set_data(self.xdata, self.zdata)
try:
poly_lims = self.hspan.get_xy()
except AttributeError:
self.hspan = self.plt.axhspan(0, 0.001, xmin=0,
xmax=0, color='g')
poly_lims = self.hspan.get_xy()
poly_lims[0, 1] = ymin
poly_lims[1, 1] = ymax
poly_lims[2] = [self.n / total, ymax]
poly_lims[3] = [poly_lims[2, 0], ymin]
if len(poly_lims) > 4:
poly_lims[4, 1] = ymin
self.hspan.set_xy(poly_lims)
else:
t_ago = [cur_t - i for i in self.xdata]
self.line1.set_data(t_ago, self.ydata)
self.line2.set_data(t_ago, self.zdata)
ax.set_title(self.format_meter(
self.n, total, elapsed, 0,
self.desc, self.ascii, self.unit, self.unit_scale,
1 / self.avg_time if self.avg_time else None,
self.bar_format),
fontname="DejaVu Sans Mono", fontsize=11)
self.plt.pause(1e-9)
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t > self.maxinterval:
self.miniters = self.miniters * self.maxinterval \
/ delta_t
elif self.mininterval and delta_t:
self.miniters = self.smoothing * delta_it \
* self.mininterval / delta_t + \
(1 - self.smoothing) * self.miniters
else:
self.miniters = self.smoothing * delta_it + \
(1 - self.smoothing) * self.miniters
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
def close(self):
# if not self.gui:
# return super(tqdm_gui, self).close()
if self.disable:
return
self.disable = True
self._instances.remove(self)
# Restore toolbars
self.mpl.rcParams['toolbar'] = self.toolbar
# Return to non-interactive mode
if not self.wasion:
self.plt.ioff()
if not self.leave:
self.plt.close(self.fig)
def tgrange(*args, **kwargs):
"""
A shortcut for tqdm_gui(xrange(*args), **kwargs).
On Python3+ range is used instead of xrange.
"""
return tqdm_gui(_range(*args), **kwargs)
| gpl-3.0 |
samuel1208/scikit-learn | examples/ensemble/plot_bias_variance.py | 357 | 7324 | """
============================================================
Single estimator versus bagging: bias-variance decomposition
============================================================
This example illustrates and compares the bias-variance decomposition of the
expected mean squared error of a single estimator against a bagging ensemble.
In regression, the expected mean squared error of an estimator can be
decomposed in terms of bias, variance and noise. On average over datasets of
the regression problem, the bias term measures the average amount by which the
predictions of the estimator differ from the predictions of the best possible
estimator for the problem (i.e., the Bayes model). The variance term measures
the variability of the predictions of the estimator when fit over different
instances LS of the problem. Finally, the noise measures the irreducible part
of the error which is due to the variability in the data.
The upper left figure illustrates the predictions (in dark red) of a single
decision tree trained over a random dataset LS (the blue dots) of a toy 1d
regression problem. It also illustrates the predictions (in light red) of other
single decision trees trained over other (and different) randomly drawn
instances LS of the problem. Intuitively, the variance term here corresponds to
the width of the beam of predictions (in light red) of the individual
estimators. The larger the variance, the more sensitive are the predictions for
`x` to small changes in the training set. The bias term corresponds to the
difference between the average prediction of the estimator (in cyan) and the
best possible model (in dark blue). On this problem, we can thus observe that
the bias is quite low (both the cyan and the blue curves are close to each
other) while the variance is large (the red beam is rather wide).
The lower left figure plots the pointwise decomposition of the expected mean
squared error of a single decision tree. It confirms that the bias term (in
blue) is low while the variance is large (in green). It also illustrates the
noise part of the error which, as expected, appears to be constant and around
`0.01`.
The right figures correspond to the same plots but using instead a bagging
ensemble of decision trees. In both figures, we can observe that the bias term
is larger than in the previous case. In the upper right figure, the difference
between the average prediction (in cyan) and the best possible model is larger
(e.g., notice the offset around `x=2`). In the lower right figure, the bias
curve is also slightly higher than in the lower left figure. In terms of
variance however, the beam of predictions is narrower, which suggests that the
variance is lower. Indeed, as the lower right figure confirms, the variance
term (in green) is lower than for single decision trees. Overall, the bias-
variance decomposition is therefore no longer the same. The tradeoff is better
for bagging: averaging several decision trees fit on bootstrap copies of the
dataset slightly increases the bias term but allows for a larger reduction of
the variance, which results in a lower overall mean squared error (compare the
red curves in the lower figures). The script output also confirms this
intuition. The total error of the bagging ensemble is lower than the total
error of a single decision tree, and this difference indeed mainly stems from a
reduced variance.
For further details on bias-variance decomposition, see section 7.3 of [1]_.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman,
"Elements of Statistical Learning", Springer, 2009.
"""
print(__doc__)
# Author: Gilles Louppe <g.louppe@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingRegressor
from sklearn.tree import DecisionTreeRegressor
# Settings
n_repeat = 50 # Number of iterations for computing expectations
n_train = 50 # Size of the training set
n_test = 1000 # Size of the test set
noise = 0.1 # Standard deviation of the noise
np.random.seed(0)
# Change this for exploring the bias-variance decomposition of other
# estimators. This should work well for estimators with high variance (e.g.,
# decision trees or KNN), but poorly for estimators with low variance (e.g.,
# linear models).
estimators = [("Tree", DecisionTreeRegressor()),
("Bagging(Tree)", BaggingRegressor(DecisionTreeRegressor()))]
n_estimators = len(estimators)
# Generate data
def f(x):
x = x.ravel()
return np.exp(-x ** 2) + 1.5 * np.exp(-(x - 2) ** 2)
def generate(n_samples, noise, n_repeat=1):
X = np.random.rand(n_samples) * 10 - 5
X = np.sort(X)
if n_repeat == 1:
y = f(X) + np.random.normal(0.0, noise, n_samples)
else:
y = np.zeros((n_samples, n_repeat))
for i in range(n_repeat):
y[:, i] = f(X) + np.random.normal(0.0, noise, n_samples)
X = X.reshape((n_samples, 1))
return X, y
X_train = []
y_train = []
for i in range(n_repeat):
X, y = generate(n_samples=n_train, noise=noise)
X_train.append(X)
y_train.append(y)
X_test, y_test = generate(n_samples=n_test, noise=noise, n_repeat=n_repeat)
# Loop over estimators to compare
for n, (name, estimator) in enumerate(estimators):
# Compute predictions
y_predict = np.zeros((n_test, n_repeat))
for i in range(n_repeat):
estimator.fit(X_train[i], y_train[i])
y_predict[:, i] = estimator.predict(X_test)
# Bias^2 + Variance + Noise decomposition of the mean squared error
y_error = np.zeros(n_test)
for i in range(n_repeat):
for j in range(n_repeat):
y_error += (y_test[:, j] - y_predict[:, i]) ** 2
y_error /= (n_repeat * n_repeat)
y_noise = np.var(y_test, axis=1)
y_bias = (f(X_test) - np.mean(y_predict, axis=1)) ** 2
y_var = np.var(y_predict, axis=1)
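# Up to estimation error from the finite number of repetitions, these terms
# satisfy the pointwise decomposition y_error ~= y_bias + y_var + y_noise,
# which is what the summary printed below reports as averages over x.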
print("{0}: {1:.4f} (error) = {2:.4f} (bias^2) "
" + {3:.4f} (var) + {4:.4f} (noise)".format(name,
np.mean(y_error),
np.mean(y_bias),
np.mean(y_var),
np.mean(y_noise)))
# Plot figures
plt.subplot(2, n_estimators, n + 1)
plt.plot(X_test, f(X_test), "b", label="$f(x)$")
plt.plot(X_train[0], y_train[0], ".b", label="LS ~ $y = f(x)+noise$")
for i in range(n_repeat):
if i == 0:
plt.plot(X_test, y_predict[:, i], "r", label="$\^y(x)$")
else:
plt.plot(X_test, y_predict[:, i], "r", alpha=0.05)
plt.plot(X_test, np.mean(y_predict, axis=1), "c",
label="$\mathbb{E}_{LS} \^y(x)$")
plt.xlim([-5, 5])
plt.title(name)
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.subplot(2, n_estimators, n_estimators + n + 1)
plt.plot(X_test, y_error, "r", label="$error(x)$")
plt.plot(X_test, y_bias, "b", label="$bias^2(x)$"),
plt.plot(X_test, y_var, "g", label="$variance(x)$"),
plt.plot(X_test, y_noise, "c", label="$noise(x)$")
plt.xlim([-5, 5])
plt.ylim([0, 0.1])
if n == 0:
plt.legend(loc="upper left", prop={"size": 11})
plt.show()
| bsd-3-clause |
robket/BioScripts | alignment.py | 1 | 9138 | import numpy as np
from matplotlib import pyplot as plt
from scipy.misc import toimage
from collections import defaultdict, Counter
from types import SimpleNamespace
from PIL import ImageDraw
# This color table is sourced from https://github.com/trident01/BioExt-1/blob/master/AlignmentImage.java
LIGHT_GRAY = 196
FIXED_COLOR_TABLE = defaultdict(lambda: [0, 0, 0], {
"A": [255, 0, 0],
"C": [255, 255, 0],
"T": [0, 255, 0],
"G": [190, 0, 95],
"-": [LIGHT_GRAY, LIGHT_GRAY, LIGHT_GRAY]})
GRAY_GAPS_COLOR_TABLE = defaultdict(lambda: [0, 0, 0], {
"-": [LIGHT_GRAY, LIGHT_GRAY, LIGHT_GRAY]})
BLACK_COLOR_TABLE = defaultdict(lambda: [0, 0, 0])
class Alignment:
def __init__(self, query_start, query_seq, target_start, target_seq, sequence_name, target_label, expected_errors):
self.name = sequence_name
self.target_label = target_label
self.expected_errors = expected_errors
self.query_start = int(query_start) - 1
self.query_seq = query_seq
query_gap_count = query_seq.count("-")
self.query_length = len(query_seq) - query_gap_count
self.target_start = int(target_start) - 1
self.target_seq = target_seq
target_gap_count = target_seq.count("-")
self.target_length = len(target_seq) - target_gap_count
self.no_gap_length = len(target_seq) - target_gap_count - query_gap_count
if len(target_seq) != len(query_seq):
raise ValueError("Length of target sequence not equal to length of query sequence")
def alignment_iterator(alignment, ignore_case=True, include_gaps=False):
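# Walks the aligned query/target strings column by column and yields, for each
# aligned position, the index into the ungapped reference together with the
# two nucleotides; insertions in the target ("-") only advance the offset, and
# query gaps are skipped unless include_gaps is set.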
target_index = 0
target_offset = 0
query_index = 0
while target_index < len(alignment.target_seq) and query_index < len(alignment.query_seq):
if alignment.target_seq[target_index] == "-": # If it is an insertion
target_offset += 1
elif alignment.query_seq[query_index] != "-" or include_gaps:
reference_index = alignment.target_start + target_index - target_offset
query_nucleotide = alignment.query_seq[query_index].upper() if ignore_case else alignment.query_seq[query_index]
target_nucleotide = alignment.target_seq[target_index].upper() if ignore_case else alignment.target_seq[target_index]
yield SimpleNamespace(reference_index=reference_index,
target_nucleotide=target_nucleotide,
query_nucleotide=query_nucleotide)
target_index += 1
query_index += 1
def count_mismatches(alignment, ignore_case=True):
mismatch_count = 0
for position in alignment_iterator(alignment, ignore_case):
if position.target_nucleotide != position.query_nucleotide:
mismatch_count += 1
return mismatch_count
def save_expected_error_rates(alignments, output_file):
expected_error_rates = [a.expected_errors / a.query_length for a in alignments]
plt.cla()
plt.hist(expected_error_rates, 50, log=True)
plt.ylim(ymin=0.9)
plt.xlabel('Expected Error Rate')
plt.ylabel('Number of sequences')
plt.tick_params(which='both', direction='out')
plt.title('Expected Error Rates')
plt.grid(True)
plt.savefig(output_file)
def save_mismatch_rates(alignments, output_file, ignore_case=True):
mismatch_rates = [count_mismatches(a, ignore_case) / a.no_gap_length for a in alignments]
plt.cla()
plt.hist(mismatch_rates, 50, log=True)
plt.ylim(ymin=0.9)
plt.xlabel('Rate of mismatches')
plt.ylabel('Number of sequences')
plt.tick_params(which='both', direction='out')
plt.title('Mismatch Rates')
plt.grid(True)
plt.savefig(output_file)
def gap_distribution(sequence):
dist = Counter()
count_length = 0
for char in sequence:
if char == "-":
count_length += 1
elif count_length > 0:
dist[count_length] += 1
count_length = 0
if count_length > 0:
dist[count_length] += 1
return dist
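# Worked example: gap_distribution("AA--A---A") returns Counter({2: 1, 3: 1}),
# i.e. one gap run of length 2 and one of length 3.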
def save_insertion_or_deletion_dist(alignments, output_file, insertion_not_deletion=True):
size_counter = Counter()
for a in alignments:
size_counter += gap_distribution(a.target_seq if insertion_not_deletion else a.query_seq)
sizes, counts = zip(*size_counter.items())
number_of_bins = max(sizes)
number_of_bins = round(number_of_bins / np.ceil(number_of_bins/50))
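# Caps the histogram at roughly 50 bins while keeping an integer number of
# gap sizes per bin.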
plt.cla()
n, bins, patches = plt.hist(sizes, number_of_bins, weights=counts, log=True)
plt.ylim(ymin=0.9)
plt.xlim(xmin=1)
plt.xlabel('Size of insertion' if insertion_not_deletion else 'Size of deletion')
plt.ylabel('Count')
plt.tick_params(which='both', direction='out')
plt.title('Insertion size distribution' if insertion_not_deletion else 'Deletion size distribution')
plt.grid(True)
plt.savefig(output_file)
# Get nucleotide distribution
def nucleotide_distribution(alignments, ignore_case=False, include_gaps=True):
max_index = 0
distribution = defaultdict(Counter)
for a in alignments:
for position in alignment_iterator(a, ignore_case, include_gaps):
distribution[position.reference_index][position.query_nucleotide] += 1
max_index = max(max_index, a.target_start + a.target_length)
return [distribution[i] for i in range(max_index)]
def save_nucleotide_map(alignments, output, ignore_case=True, include_gaps=True):
nucleotides = nucleotide_distribution(alignments, ignore_case, include_gaps)
width = len(nucleotides)
keys = set()
for distribution_at_base in nucleotides:
keys.update(set(distribution_at_base.keys()))
keys = sorted(list(keys), key=lambda x: "ZZZ" if x == "-" else x)
nucleotide_count_array = np.zeros((len(keys), width), dtype=np.uint32)
for i, key in enumerate(keys):
for j, counts in enumerate(nucleotides):
nucleotide_count_array[i, j] = counts[key]
cum_sum = nucleotide_count_array.cumsum(axis=0)
height = cum_sum[-1,].max()
data_matrix = np.full((height, width, 3), 255, dtype=np.uint8)
for x in range(width):
for i, key in enumerate(keys):
start = 0 if i == 0 else cum_sum[i - 1, x]
end = cum_sum[i, x]
data_matrix[start:end, x, 0:3] = FIXED_COLOR_TABLE[key]
img = to_image(data_matrix[::-1,], ruler_underneath=True)
img.save(output)
# Get coverage map
def coverage_map(alignments, include_gaps=False):
max_index = 0
coverage = Counter()
for a in alignments:
for position in alignment_iterator(a, True, include_gaps):
coverage[position.reference_index] += 1
max_index = max(max_index, a.target_start + a.target_length)
return [coverage[i] for i in range(max_index)]
def save_coverage_map(alignments, output):
coverage_with_gaps = coverage_map(alignments, True)
coverage_without_gaps = coverage_map(alignments, False)
width = len(coverage_with_gaps)
height = max(coverage_with_gaps)
data_matrix = np.full((height, width, 3), 255, dtype=np.uint8)
for x in range(width):
y1 = coverage_without_gaps[x]
y2 = coverage_with_gaps[x]
data_matrix[0:y1, x, 0:3] = 0
data_matrix[y1:y2, x, 0:3] = 127
img = to_image(data_matrix[::-1], add_ruler=True, ruler_underneath=True)
img.save(output)
def save_alignment_map(coords, output_file, sort_key=sum, crop=True, no_ruler=False):
if crop:
minimum = min(coords, key=lambda x: x[0])[0]
else:
minimum = 0
maximum = max(coords, key=lambda x: x[1])[1]
dimensions = (len(coords), maximum - minimum)
data_matrix = np.full((dimensions[0], dimensions[1] + 1), 255, dtype=np.uint8)
if sort_key is not None:
coords.sort(key=sort_key)
is_multiple_alignment = len(coords[0]) > 3 and type(coords[0][3]) == list
# Greyscale over the bounds (or black if not multiple alignment)
for i, coord in enumerate(coords):
start = coord[0]
end = coord[1]
# np.put(data_matrix[i], range(start - minimum, end - minimum), 0)
data_matrix[i, (start - minimum):(end - minimum)] = LIGHT_GRAY if is_multiple_alignment else 0
# Black over the subalignments, if any
if is_multiple_alignment:
for i, coord in enumerate(coords):
for subalignment in coord[3]:
start = subalignment[0]
end = subalignment[1]
# np.put(data_matrix[i], range(start - minimum, end - minimum), 0)
data_matrix[i, (start - minimum):(end - minimum)] = 0
img = to_image(data_matrix, not no_ruler, offset=minimum)
img.save(output_file)
def to_image(data_matrix, add_ruler=True, ruler_underneath=False, offset=1):
maximum = offset + data_matrix.shape[1]
if add_ruler:
shape = list(data_matrix.shape)
shape[0] = 12 # Number of rows
ruler_matrix = np.full(shape, 255, dtype=data_matrix.dtype)
# tens ticks
ruler_matrix[0 if ruler_underneath else 11, 10-(offset%10)::10] = 0
# 50s ticks
ruler_matrix[1 if ruler_underneath else 10, 50-(offset%50)::50] = 0
if ruler_underneath:
img = toimage(np.vstack([data_matrix, ruler_matrix]))
else:
img = toimage(np.vstack([ruler_matrix, data_matrix]))
draw = ImageDraw.Draw(img)
# Hundreds words
for i in range((offset//100) + 1, maximum // 100 + 1):
centering = (6 * (int(np.log10(i)) + 3) - 1) // 2
draw.text((i * 100 - centering - offset, (data_matrix.shape[0] + 2) if ruler_underneath else 0), str(i) + "00", fill="black")
else:
img = toimage(data_matrix)
return img
| mit |
XCage15/privacyidea | privacyidea/lib/stats.py | 3 | 5464 | # -*- coding: utf-8 -*-
#
# 2015-07-16 Initial writeup
# (c) Cornelius Kölbel
# License: AGPLv3
# contact: http://www.privacyidea.org
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
__doc__ = """This module reads audit data and can create statistics from
audit data using pandas.
This module is tested in tests/test_lib_stats.py
"""
import logging
from privacyidea.lib.log import log_with
import datetime
import StringIO
log = logging.getLogger(__name__)
try:
import matplotlib
MATPLOT_READY = True
matplotlib.style.use('ggplot')
matplotlib.use('Agg')
except Exception as exx:
MATPLOT_READY = False
log.warning("If you want to see statistics you need to install python "
"matplotlib.")
customcmap = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
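# Note: the datetime.datetime.now() defaults below are evaluated once, at
# import time, so a long-running server keeps a stale 7-day window unless
# callers pass start_time/end_time explicitly.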
@log_with(log)
def get_statistics(auditobject, start_time=datetime.datetime.now()
-datetime.timedelta(days=7),
end_time=datetime.datetime.now()):
"""
Create audit statistics and return a JSON object
The auditobject is passed from the upper level, usually from the REST API
as g.auditobject.
:param auditobject: The audit object
:type auditobject: Audit Object as defined in auditmodules.base.Audit
:return: JSON
"""
result = {}
df = auditobject.get_dataframe(start_time=start_time, end_time=end_time)
# authentication successful/fail per user or serial
for key in ["user", "serial"]:
result["validate_%s_plot" % key] = _get_success_fail(df, key)
# get simple usage
for key in ["serial", "action"]:
result["%s_plot" % key] = _get_number_of(df, key)
# failed authentication requests
for key in ["user", "serial"]:
result["validate_failed_%s_plot" % key] = _get_fail(df, key)
result["admin_plot"] = _get_number_of(df, "action", nums=20)
return result
def _get_success_fail(df, key):
try:
output = StringIO.StringIO()
series = df[df.action.isin(["POST /validate/check",
"GET /validate/check"])].groupby([key,
'success']).size().unstack()
fig = series.plot(kind="bar", stacked=True,
legend=True,
title="Authentications",
grid=True,
color=customcmap).get_figure()
fig.savefig(output, format="png")
o_data = output.getvalue()
output.close()
image_data = o_data.encode("base64")
image_uri = 'data:image/png;base64,%s' % image_data
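# Embedding the PNG as a base64 data URI lets the REST API return the plot
# inline, ready for use as the src of an <img> element.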
except Exception as exx:
log.info(exx)
image_uri = "%s" % exx
return image_uri
def _get_fail(df, key):
try:
output = StringIO.StringIO()
series = df[(df.success==0)
& (df.action.isin(["POST /validate/check",
"GET /validate/check"]))][
key].value_counts()[:5]
plot_canvas = matplotlib.pyplot.figure()
ax = plot_canvas.add_subplot(1,1,1)
fig = series.plot(ax=ax, kind="bar",
colormap="Reds",
stacked=False,
legend=False,
grid=True,
title="Failed Authentications").get_figure()
fig.savefig(output, format="png")
o_data = output.getvalue()
output.close()
image_data = o_data.encode("base64")
image_uri = 'data:image/png;base64,%s' % image_data
except Exception as exx:
log.info(exx)
image_uri = "%s" % exx
return image_uri
def _get_number_of(df, key, nums=5):
"""
return a data url image with a single keyed value.
It plots the "nums" most occurrences of the "key" column in the dataframe.
:param df: The DataFrame
:type df: Pandas DataFrame
:param key: The key, which should be plotted.
:param nums: how many of the most frequent values should be plotted
:return: A data url
"""
output = StringIO.StringIO()
output.truncate(0)
try:
plot_canvas = matplotlib.pyplot.figure()
ax = plot_canvas.add_subplot(1, 1, 1)
series = df[key].value_counts()[:nums]
fig = series.plot(ax=ax, kind="bar", colormap="Blues",
legend=False,
stacked=False,
title="Numbers of %s" % key,
grid=True).get_figure()
fig.savefig(output, format="png")
o_data = output.getvalue()
output.close()
image_data = o_data.encode("base64")
image_uri = 'data:image/png;base64,%s' % image_data
except Exception as exx:
log.info(exx)
image_uri = "No data"
return image_uri
| agpl-3.0 |
behzadnouri/scipy | scipy/interpolate/_cubic.py | 8 | 29300 | """Interpolation algorithms using piecewise cubic polynomials."""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy._lib.six import string_types
from . import BPoly, PPoly
from .polyint import _isscalar
from scipy._lib._util import _asarray_validated
from scipy.linalg import solve_banded, solve
__all__ = ["PchipInterpolator", "pchip_interpolate", "pchip",
"Akima1DInterpolator", "CubicSpline"]
class PchipInterpolator(BPoly):
r"""PCHIP 1-d monotonic cubic interpolation.
`x` and `y` are arrays of values used to approximate some function f,
with ``y = f(x)``. The interpolant uses monotonic cubic splines
to find the value of new points. (PCHIP stands for Piecewise Cubic
Hermite Interpolating Polynomial).
Parameters
----------
x : ndarray
A 1-D array of monotonically increasing real values. `x` cannot
include duplicate values (otherwise f is overspecified)
y : ndarray
A 1-D array of real values. `y`'s length along the interpolation
axis must be equal to the length of `x`. If N-D array, use `axis`
parameter to select correct axis.
axis : int, optional
Axis in the y array corresponding to the x-coordinate values.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Methods
-------
__call__
derivative
antiderivative
roots
See Also
--------
Akima1DInterpolator
CubicSpline
BPoly
Notes
-----
The interpolator preserves monotonicity in the interpolation data and does
not overshoot if the data is not smooth.
The first derivatives are guaranteed to be continuous, but the second
derivatives may jump at :math:`x_k`.
Determines the derivatives at the points :math:`x_k`, :math:`f'_k`,
by using the PCHIP algorithm [1]_.
Let :math:`h_k = x_{k+1} - x_k`, and :math:`d_k = (y_{k+1} - y_k) / h_k`
are the slopes at internal points :math:`x_k`.
If the signs of :math:`d_k` and :math:`d_{k-1}` are different or either of
them equals zero, then :math:`f'_k = 0`. Otherwise, it is given by the
weighted harmonic mean
.. math::
\frac{w_1 + w_2}{f'_k} = \frac{w_1}{d_{k-1}} + \frac{w_2}{d_k}
where :math:`w_1 = 2 h_k + h_{k-1}` and :math:`w_2 = h_k + 2 h_{k-1}`.
The end slopes are set using a one-sided scheme [2]_.
References
----------
.. [1] F. N. Fritsch and R. E. Carlson, Monotone Piecewise Cubic Interpolation,
SIAM J. Numer. Anal., 17(2), 238 (1980).
DOI:10.1137/0717021
.. [2] see, e.g., C. Moler, Numerical Computing with Matlab, 2004.
DOI: http://dx.doi.org/10.1137/1.9780898717952
"""
def __init__(self, x, y, axis=0, extrapolate=None):
x = _asarray_validated(x, check_finite=False, as_inexact=True)
y = _asarray_validated(y, check_finite=False, as_inexact=True)
axis = axis % y.ndim
xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))
yp = np.rollaxis(y, axis)
dk = self._find_derivatives(xp, yp)
data = np.hstack((yp[:, None, ...], dk[:, None, ...]))
_b = BPoly.from_derivatives(x, data, orders=None)
super(PchipInterpolator, self).__init__(_b.c, _b.x,
extrapolate=extrapolate)
self.axis = axis
def roots(self):
"""
Return the roots of the interpolated function.
"""
return (PPoly.from_bernstein_basis(self._bpoly)).roots()
@staticmethod
def _edge_case(h0, h1, m0, m1):
# one-sided three-point estimate for the derivative
d = ((2*h0 + h1)*m0 - h0*m1) / (h0 + h1)
# try to preserve shape
mask = np.sign(d) != np.sign(m0)
mask2 = (np.sign(m0) != np.sign(m1)) & (np.abs(d) > 3.*np.abs(m0))
mmm = (~mask) & mask2
d[mask] = 0.
d[mmm] = 3.*m0[mmm]
return d
@staticmethod
def _find_derivatives(x, y):
# Determine the derivatives d_k at the points x_k using the PCHIP
# algorithm. The derivative at x_k is chosen as follows:
# Let m_k be the slope of the kth segment (between k and k+1)
# If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0
# else use weighted harmonic mean:
# w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
# 1/d_k = 1/(w_1 + w_2)*(w_1 / m_k + w_2 / m_{k-1})
# where h_k is the spacing between x_k and x_{k+1}
y_shape = y.shape
if y.ndim == 1:
# So that _edge_case doesn't end up assigning to scalars
x = x[:, None]
y = y[:, None]
hk = x[1:] - x[:-1]
mk = (y[1:] - y[:-1]) / hk
if y.shape[0] == 2:
# edge case: only have two points, use linear interpolation
dk = np.zeros_like(y)
dk[0] = mk
dk[1] = mk
return dk.reshape(y_shape)
smk = np.sign(mk)
condition = (smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0)
w1 = 2*hk[1:] + hk[:-1]
w2 = hk[1:] + 2*hk[:-1]
# values where division by zero occurs will be excluded
# by 'condition' afterwards
with np.errstate(divide='ignore'):
whmean = (w1/mk[:-1] + w2/mk[1:]) / (w1 + w2)
dk = np.zeros_like(y)
dk[1:-1][condition] = 0.0
dk[1:-1][~condition] = 1.0 / whmean[~condition]
# special case endpoints, as suggested in
# Cleve Moler, Numerical Computing with MATLAB, Chap 3.4
dk[0] = PchipInterpolator._edge_case(hk[0], hk[1], mk[0], mk[1])
dk[-1] = PchipInterpolator._edge_case(hk[-1], hk[-2], mk[-1], mk[-2])
return dk.reshape(y_shape)
def pchip_interpolate(xi, yi, x, der=0, axis=0):
"""
Convenience function for pchip interpolation.
xi and yi are arrays of values used to approximate some function f,
with ``yi = f(xi)``. The interpolant uses monotonic cubic splines
to find the value of new points x and the derivatives there.
See `PchipInterpolator` for details.
Parameters
----------
xi : array_like
A sorted list of x-coordinates, of length N.
yi : array_like
A 1-D array of real values. `yi`'s length along the interpolation
axis must be equal to the length of `xi`. If N-D array, use axis
parameter to select correct axis.
x : scalar or array_like
Of length M.
der : int or list, optional
Derivatives to extract. The 0-th derivative can be included to
return the function value.
axis : int, optional
Axis in the yi array corresponding to the x-coordinate values.
See Also
--------
PchipInterpolator
Returns
-------
y : scalar or array_like
The result, of length R or length M or M by R,
"""
P = PchipInterpolator(xi, yi, axis=axis)
if der == 0:
return P(x)
elif _isscalar(der):
return P.derivative(der)(x)
else:
return [P.derivative(nu)(x) for nu in der]
# Backwards compatibility
pchip = PchipInterpolator
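# Hedged usage sketch for pchip_interpolate above (made-up data):
#
#   import numpy as np
#   xi = np.linspace(0, 10, 11)
#   yi = np.sin(xi)
#   y = pchip_interpolate(xi, yi, [2.5, 3.5])          # interpolated values
#   y_dy = pchip_interpolate(xi, yi, 2.5, der=[0, 1])  # value and slope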
class Akima1DInterpolator(PPoly):
"""
Akima interpolator
Fit piecewise cubic polynomials, given vectors x and y. The interpolation
method by Akima uses a continuously differentiable sub-spline built from
piecewise cubic polynomials. The resultant curve passes through the given
data points and will appear smooth and natural.
Parameters
----------
x : ndarray, shape (m, )
1-D array of monotonically increasing real values.
y : ndarray, shape (m, ...)
N-D array of real values. The length of `y` along the first axis must
be equal to the length of `x`.
axis : int, optional
Specifies the axis of `y` along which to interpolate. Interpolation
defaults to the first axis of `y`.
Methods
-------
__call__
derivative
antiderivative
roots
See Also
--------
PchipInterpolator
CubicSpline
PPoly
Notes
-----
.. versionadded:: 0.14
Use only for precise data, as the fitted curve passes through the given
points exactly. This routine is useful for plotting a pleasingly smooth
curve through a few given points for purposes of plotting.
References
----------
[1] A new method of interpolation and smooth curve fitting based
on local procedures. Hiroshi Akima, J. ACM, October 1970, 17(4),
589-602.
"""
def __init__(self, x, y, axis=0):
# Original implementation in MATLAB by N. Shamsundar (BSD licensed), see
# http://www.mathworks.de/matlabcentral/fileexchange/1814-akima-interpolation
x, y = map(np.asarray, (x, y))
axis = axis % y.ndim
if np.any(np.diff(x) <= 0.):
raise ValueError("x must be strictly ascending")
if x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if x.size != y.shape[axis]:
raise ValueError("x.shape must equal y.shape[%s]" % axis)
# move interpolation axis to front
y = np.rollaxis(y, axis)
# determine slopes between breakpoints
m = np.empty((x.size + 3, ) + y.shape[1:])
dx = np.diff(x)
dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)]
m[2:-2] = np.diff(y, axis=0) / dx
# add two additional points on the left ...
m[1] = 2. * m[2] - m[3]
m[0] = 2. * m[1] - m[2]
# ... and on the right
m[-2] = 2. * m[-3] - m[-4]
m[-1] = 2. * m[-2] - m[-3]
# if m1 == m2 != m3 == m4, the slope at the breakpoint is not defined.
# This is the fill value:
t = .5 * (m[3:] + m[:-3])
# get the denominator of the slope t
dm = np.abs(np.diff(m, axis=0))
f1 = dm[2:]
f2 = dm[:-2]
f12 = f1 + f2
# This is the mask of where the slope at a breakpoint is defined:
ind = np.nonzero(f12 > 1e-9 * np.max(f12))
x_ind, y_ind = ind[0], ind[1:]
# Set the slope at breakpoint
t[ind] = (f1[ind] * m[(x_ind + 1,) + y_ind] +
f2[ind] * m[(x_ind + 2,) + y_ind]) / f12[ind]
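# Akima's rule: each slope is an average of the two neighboring secant slopes,
# weighted by how much the slopes vary on the opposite side; where both
# weights vanish (f12 ~ 0) the arithmetic-mean fill value t is kept.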
# calculate the higher order coefficients
c = (3. * m[2:-2] - 2. * t[:-1] - t[1:]) / dx
d = (t[:-1] + t[1:] - 2. * m[2:-2]) / dx ** 2
coeff = np.zeros((4, x.size - 1) + y.shape[1:])
coeff[3] = y[:-1]
coeff[2] = t[:-1]
coeff[1] = c
coeff[0] = d
super(Akima1DInterpolator, self).__init__(coeff, x, extrapolate=False)
self.axis = axis
def extend(self, c, x, right=True):
raise NotImplementedError("Extending a 1D Akima interpolator is not "
"yet implemented")
# These are inherited from PPoly, but they do not produce an Akima
# interpolator. Hence stub them out.
@classmethod
def from_spline(cls, tck, extrapolate=None):
raise NotImplementedError("This method does not make sense for "
"an Akima interpolator.")
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
raise NotImplementedError("This method does not make sense for "
"an Akima interpolator.")
class CubicSpline(PPoly):
"""Cubic spline data interpolator.
Interpolate data with a piecewise cubic polynomial which is twice
continuously differentiable [1]_. The result is represented as a `PPoly`
instance with breakpoints matching the given data.
Parameters
----------
x : array_like, shape (n,)
1-d array containing values of the independent variable.
Values must be real, finite and in strictly increasing order.
y : array_like
Array containing values of the dependent variable. It can have
arbitrary number of dimensions, but the length along `axis` (see below)
must match the length of `x`. Values must be finite.
axis : int, optional
Axis along which `y` is assumed to be varying. Meaning that for
``x[i]`` the corresponding values are ``np.take(y, i, axis=axis)``.
Default is 0.
bc_type : string or 2-tuple, optional
Boundary condition type. Two additional equations, given by the
boundary conditions, are required to determine all coefficients of
polynomials on each segment [2]_.
If `bc_type` is a string, then the specified condition will be applied
at both ends of a spline. Available conditions are:
* 'not-a-knot' (default): The first and second segment at a curve end
are the same polynomial. It is a good default when there is no
information on boundary conditions.
* 'periodic': The interpolated function is assumed to be periodic
of period ``x[-1] - x[0]``. The first and last value of `y` must be
identical: ``y[0] == y[-1]``. This boundary condition will result in
``y'[0] == y'[-1]`` and ``y''[0] == y''[-1]``.
* 'clamped': The first derivative at curve ends is zero. Assuming
a 1D `y`, ``bc_type=((1, 0.0), (1, 0.0))`` is the same condition.
* 'natural': The second derivative at curve ends is zero. Assuming
a 1D `y`, ``bc_type=((2, 0.0), (2, 0.0))`` is the same condition.
If `bc_type` is a 2-tuple, the first and the second value will be
applied at the curve start and end respectively. The tuple values can
be one of the previously mentioned strings (except 'periodic') or a
tuple `(order, deriv_values)` allowing to specify arbitrary
derivatives at curve ends:
* `order`: the derivative order, 1 or 2.
* `deriv_value`: array_like containing derivative values, shape must
be the same as `y`, excluding `axis` dimension. For example, if `y`
is 1D, then `deriv_value` must be a scalar. If `y` is 3D with the
shape (n0, n1, n2) and axis=2, then `deriv_value` must be 2D
and have the shape (n0, n1).
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. If None (default), `extrapolate` is
set to 'periodic' for ``bc_type='periodic'`` and to True otherwise.
Attributes
----------
x : ndarray, shape (n,)
Breakpoints. The same `x` which was passed to the constructor.
c : ndarray, shape (4, n-1, ...)
Coefficients of the polynomials on each segment. The trailing
dimensions match the dimensions of `y`, excluding `axis`. For example,
if `y` is 1-d, then ``c[k, i]`` is a coefficient for
``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``.
axis : int
Interpolation axis. The same `axis` which was passed to the
constructor.
Methods
-------
__call__
derivative
antiderivative
integrate
roots
See Also
--------
Akima1DInterpolator
PchipInterpolator
PPoly
Notes
-----
Parameters `bc_type` and `interpolate` work independently, i.e. the former
controls only construction of a spline, and the latter only evaluation.
When a boundary condition is 'not-a-knot' and n = 2, it is replaced by
a condition that the first derivative is equal to the linear interpolant
slope. When both boundary conditions are 'not-a-knot' and n = 3, the
solution is sought as a parabola passing through given points.
When 'not-a-knot' boundary conditions is applied to both ends, the
resulting spline will be the same as returned by `splrep` (with ``s=0``)
and `InterpolatedUnivariateSpline`, but these two methods use a
representation in B-spline basis.
.. versionadded:: 0.18.0
Examples
--------
In this example the cubic spline is used to interpolate a sampled sinusoid.
You can see that the spline continuity property holds for the first and
second derivatives and violates only for the third derivative.
>>> from scipy.interpolate import CubicSpline
>>> import matplotlib.pyplot as plt
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> cs = CubicSpline(x, y)
>>> xs = np.arange(-0.5, 9.6, 0.1)
>>> plt.figure(figsize=(6.5, 4))
>>> plt.plot(x, y, 'o', label='data')
>>> plt.plot(xs, np.sin(xs), label='true')
>>> plt.plot(xs, cs(xs), label="S")
>>> plt.plot(xs, cs(xs, 1), label="S'")
>>> plt.plot(xs, cs(xs, 2), label="S''")
>>> plt.plot(xs, cs(xs, 3), label="S'''")
>>> plt.xlim(-0.5, 9.5)
>>> plt.legend(loc='lower left', ncol=2)
>>> plt.show()
In the second example, the unit circle is interpolated with a spline. A
periodic boundary condition is used. You can see that the first derivative
values, ds/dx=0, ds/dy=1 at the periodic point (1, 0) are correctly
computed. Note that a circle cannot be exactly represented by a cubic
spline. To increase precision, more breakpoints would be required.
>>> theta = 2 * np.pi * np.linspace(0, 1, 5)
>>> y = np.c_[np.cos(theta), np.sin(theta)]
>>> cs = CubicSpline(theta, y, bc_type='periodic')
>>> print("ds/dx={:.1f} ds/dy={:.1f}".format(cs(0, 1)[0], cs(0, 1)[1]))
ds/dx=0.0 ds/dy=1.0
>>> xs = 2 * np.pi * np.linspace(0, 1, 100)
>>> plt.figure(figsize=(6.5, 4))
>>> plt.plot(y[:, 0], y[:, 1], 'o', label='data')
>>> plt.plot(np.cos(xs), np.sin(xs), label='true')
>>> plt.plot(cs(xs)[:, 0], cs(xs)[:, 1], label='spline')
>>> plt.axes().set_aspect('equal')
>>> plt.legend(loc='center')
>>> plt.show()
The third example is the interpolation of a polynomial y = x**3 on the
interval 0 <= x <= 1. A cubic spline can represent this function exactly.
To achieve that we need to specify values and first derivatives at
endpoints of the interval. Note that y' = 3 * x**2 and thus y'(0) = 0 and
y'(1) = 3.
>>> cs = CubicSpline([0, 1], [0, 1], bc_type=((1, 0), (1, 3)))
>>> x = np.linspace(0, 1)
>>> np.allclose(x**3, cs(x))
True
References
----------
.. [1] `Cubic Spline Interpolation
<https://en.wikiversity.org/wiki/Cubic_Spline_Interpolation>`_
on Wikiversity.
.. [2] Carl de Boor, "A Practical Guide to Splines", Springer-Verlag, 1978.
"""
def __init__(self, x, y, axis=0, bc_type='not-a-knot', extrapolate=None):
x, y = map(np.asarray, (x, y))
if np.issubdtype(x.dtype, np.complexfloating):
raise ValueError("`x` must contain real values.")
if np.issubdtype(y.dtype, np.complexfloating):
dtype = complex
else:
dtype = float
y = y.astype(dtype, copy=False)
axis = axis % y.ndim
if x.ndim != 1:
raise ValueError("`x` must be 1-dimensional.")
if x.shape[0] < 2:
raise ValueError("`x` must contain at least 2 elements.")
if x.shape[0] != y.shape[axis]:
raise ValueError("The length of `y` along `axis`={0} doesn't "
"match the length of `x`".format(axis))
if not np.all(np.isfinite(x)):
raise ValueError("`x` must contain only finite values.")
if not np.all(np.isfinite(y)):
raise ValueError("`y` must contain only finite values.")
dx = np.diff(x)
if np.any(dx <= 0):
            raise ValueError("`x` must be a strictly increasing sequence.")
n = x.shape[0]
y = np.rollaxis(y, axis)
bc, y = self._validate_bc(bc_type, y, y.shape[1:], axis)
if extrapolate is None:
if bc[0] == 'periodic':
extrapolate = 'periodic'
else:
extrapolate = True
dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1))
slope = np.diff(y, axis=0) / dxr
# If bc is 'not-a-knot' this change is just a convention.
# If bc is 'periodic' then we already checked that y[0] == y[-1],
# and the spline is just a constant, we handle this case in the same
# way by setting the first derivatives to slope, which is 0.
if n == 2:
if bc[0] in ['not-a-knot', 'periodic']:
bc[0] = (1, slope[0])
if bc[1] in ['not-a-knot', 'periodic']:
bc[1] = (1, slope[0])
        # This is a very special case, when both conditions are 'not-a-knot'
        # and n == 3. In this case 'not-a-knot' can't be handled regularly
        # as both conditions are identical. We handle this case by
        # constructing a parabola passing through the given points.
if n == 3 and bc[0] == 'not-a-knot' and bc[1] == 'not-a-knot':
            A = np.zeros((3, 3))  # A dense matrix, unlike the banded form below.
b = np.empty((3,) + y.shape[1:], dtype=y.dtype)
A[0, 0] = 1
A[0, 1] = 1
A[1, 0] = dx[1]
A[1, 1] = 2 * (dx[0] + dx[1])
A[1, 2] = dx[0]
A[2, 1] = 1
A[2, 2] = 1
b[0] = 2 * slope[0]
b[1] = 3 * (dxr[0] * slope[1] + dxr[1] * slope[0])
b[2] = 2 * slope[1]
s = solve(A, b, overwrite_a=True, overwrite_b=True,
check_finite=False)
else:
# Find derivative values at each x[i] by solving a tridiagonal
# system.
A = np.zeros((3, n)) # This is a banded matrix representation.
b = np.empty((n,) + y.shape[1:], dtype=y.dtype)
# Filling the system for i=1..n-2
# (x[i-1] - x[i]) * s[i-1] +\
# 2 * ((x[i] - x[i-1]) + (x[i+1] - x[i])) * s[i] +\
# (x[i] - x[i-1]) * s[i+1] =\
# 3 * ((x[i+1] - x[i])*(y[i] - y[i-1])/(x[i] - x[i-1]) +\
# (x[i] - x[i-1])*(y[i+1] - y[i])/(x[i+1] - x[i]))
A[1, 1:-1] = 2 * (dx[:-1] + dx[1:]) # The diagonal
A[0, 2:] = dx[:-1] # The upper diagonal
A[-1, :-2] = dx[1:] # The lower diagonal
b[1:-1] = 3 * (dxr[1:] * slope[:-1] + dxr[:-1] * slope[1:])
bc_start, bc_end = bc
if bc_start == 'periodic':
# Due to the periodicity, and because y[-1] = y[0], the linear
# system has (n-1) unknowns/equations instead of n:
A = A[:, 0:-1]
A[1, 0] = 2 * (dx[-1] + dx[0])
A[0, 1] = dx[-1]
b = b[:-1]
# Also, due to the periodicity, the system is not tri-diagonal.
# We need to compute a "condensed" matrix of shape (n-2, n-2).
# See http://www.cfm.brown.edu/people/gk/chap6/node14.html for
# more explanations.
# The condensed matrix is obtained by removing the last column
# and last row of the (n-1, n-1) system matrix. The removed
# values are saved in scalar variables with the (n-1, n-1)
# system matrix indices forming their names:
a_m1_0 = dx[-2] # lower left corner value: A[-1, 0]
a_m1_m2 = dx[-1]
a_m1_m1 = 2 * (dx[-1] + dx[-2])
a_m2_m1 = dx[-2]
a_0_m1 = dx[0]
b[0] = 3 * (dxr[0] * slope[-1] + dxr[-1] * slope[0])
b[-1] = 3 * (dxr[-1] * slope[-2] + dxr[-2] * slope[-1])
Ac = A[:, :-1]
b1 = b[:-1]
b2 = np.zeros_like(b1)
b2[0] = -a_0_m1
b2[-1] = -a_m2_m1
# s1 and s2 are the solutions of (n-2, n-2) system
s1 = solve_banded((1, 1), Ac, b1, overwrite_ab=False,
overwrite_b=False, check_finite=False)
s2 = solve_banded((1, 1), Ac, b2, overwrite_ab=False,
overwrite_b=False, check_finite=False)
# computing the s[n-2] solution:
s_m1 = ((b[-1] - a_m1_0 * s1[0] - a_m1_m2 * s1[-1]) /
(a_m1_m1 + a_m1_0 * s2[0] + a_m1_m2 * s2[-1]))
# s is the solution of the (n, n) system:
s = np.empty((n,) + y.shape[1:], dtype=y.dtype)
s[:-2] = s1 + s_m1 * s2
s[-2] = s_m1
s[-1] = s[0]
else:
if bc_start == 'not-a-knot':
A[1, 0] = dx[1]
A[0, 1] = x[2] - x[0]
d = x[2] - x[0]
b[0] = ((dxr[0] + 2*d) * dxr[1] * slope[0] +
dxr[0]**2 * slope[1]) / d
elif bc_start[0] == 1:
A[1, 0] = 1
A[0, 1] = 0
b[0] = bc_start[1]
elif bc_start[0] == 2:
A[1, 0] = 2 * dx[0]
A[0, 1] = dx[0]
b[0] = -0.5 * bc_start[1] * dx[0]**2 + 3 * (y[1] - y[0])
if bc_end == 'not-a-knot':
A[1, -1] = dx[-2]
A[-1, -2] = x[-1] - x[-3]
d = x[-1] - x[-3]
b[-1] = ((dxr[-1]**2*slope[-2] +
(2*d + dxr[-1])*dxr[-2]*slope[-1]) / d)
elif bc_end[0] == 1:
A[1, -1] = 1
A[-1, -2] = 0
b[-1] = bc_end[1]
elif bc_end[0] == 2:
A[1, -1] = 2 * dx[-1]
A[-1, -2] = dx[-1]
b[-1] = 0.5 * bc_end[1] * dx[-1]**2 + 3 * (y[-1] - y[-2])
s = solve_banded((1, 1), A, b, overwrite_ab=True,
overwrite_b=True, check_finite=False)
# Compute coefficients in PPoly form.
t = (s[:-1] + s[1:] - 2 * slope) / dxr
c = np.empty((4, n - 1) + y.shape[1:], dtype=t.dtype)
c[0] = t / dxr
c[1] = (slope - s[:-1]) / dxr - t
c[2] = s[:-1]
c[3] = y[:-1]
super(CubicSpline, self).__init__(c, x, extrapolate=extrapolate)
self.axis = axis
@staticmethod
def _validate_bc(bc_type, y, expected_deriv_shape, axis):
"""Validate and prepare boundary conditions.
Returns
-------
validated_bc : 2-tuple
Boundary conditions for a curve start and end.
y : ndarray
            `y` cast to complex dtype if one of the boundary conditions has
complex dtype.
"""
if isinstance(bc_type, string_types):
if bc_type == 'periodic':
if not np.allclose(y[0], y[-1], rtol=1e-15, atol=1e-15):
raise ValueError(
"The first and last `y` point along axis {} must "
"be identical (within machine precision) when "
"bc_type='periodic'.".format(axis))
bc_type = (bc_type, bc_type)
else:
if len(bc_type) != 2:
raise ValueError("`bc_type` must contain 2 elements to "
"specify start and end conditions.")
if 'periodic' in bc_type:
raise ValueError("'periodic' `bc_type` is defined for both "
"curve ends and cannot be used with other "
"boundary conditions.")
validated_bc = []
for bc in bc_type:
if isinstance(bc, string_types):
if bc == 'clamped':
validated_bc.append((1, np.zeros(expected_deriv_shape)))
elif bc == 'natural':
validated_bc.append((2, np.zeros(expected_deriv_shape)))
elif bc in ['not-a-knot', 'periodic']:
validated_bc.append(bc)
else:
raise ValueError("bc_type={} is not allowed.".format(bc))
else:
try:
deriv_order, deriv_value = bc
except Exception:
raise ValueError("A specified derivative value must be "
"given in the form (order, value).")
if deriv_order not in [1, 2]:
raise ValueError("The specified derivative order must "
"be 1 or 2.")
deriv_value = np.asarray(deriv_value)
if deriv_value.shape != expected_deriv_shape:
raise ValueError(
"`deriv_value` shape {} is not the expected one {}."
.format(deriv_value.shape, expected_deriv_shape))
if np.issubdtype(deriv_value.dtype, np.complexfloating):
y = y.astype(complex, copy=False)
validated_bc.append((deriv_order, deriv_value))
return validated_bc, y
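
# A minimal sketch (an illustrative addition, not part of SciPy itself)
# exercising the 'clamped' and 'natural' shortcuts validated in _validate_bc
# above. Both map to (order, 0) derivative conditions at the two curve ends.
if __name__ == "__main__":
    x_demo = np.array([0.0, 1.0, 2.0, 3.0])
    y_demo = x_demo ** 2
    clamped = CubicSpline(x_demo, y_demo, bc_type='clamped')
    natural = CubicSpline(x_demo, y_demo, bc_type='natural')
    # 'clamped' pins the first derivative to zero at the endpoints,
    # 'natural' pins the second derivative to zero at the endpoints.
    assert abs(clamped(0.0, 1)) < 1e-9
    assert abs(natural(0.0, 2)) < 1e-9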
| bsd-3-clause |
romeric/Fastor | benchmark/external/benchmark_inverse/benchmark_plot.py | 1 | 1615 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'serif','serif':['Palatino'],'size':14})
rc('text', usetex=True)
def read_results():
ms, ns, times_eigen, times_fastor = [], [], [], []
with open("benchmark_results.txt", "r") as f:
lines = f.readlines()
for line in lines:
sline = line.split(' ')
if len(sline) == 4:
times_eigen.append(float(sline[1]))
times_fastor.append(float(sline[2]))
elif len(sline) == 7 and "size" in sline[1]:
ms.append(int(sline[4]))
ns.append(int(sline[5]))
return np.array(ms), np.array(ns), np.array(times_eigen), np.array(times_fastor)
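
# The parser above assumes a benchmark_results.txt shaped roughly like the
# following (a hypothetical sample inferred from the token counts checked
# above, not taken from an actual run):
#
#   Matrix size benchmark for: 2 2 :
#   inverse 1.25e-06 1.10e-06 ratio
#
# i.e. 7-token lines whose second token contains "size" carry the matrix
# dimensions in tokens 5-6, and 4-token lines carry the Eigen and Fastor
# timings (in seconds) in columns 2 and 3.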
def main():
ms, ns, times_eigen, times_fastor = read_results()
fig, ax = plt.subplots()
index = np.arange(len(ms))
bar_width = 0.2
opacity = 0.8
rects1 = plt.bar(index, times_eigen/1e-6, bar_width,
alpha=opacity,
color='#C03B22',
label='Eigen')
rects3 = plt.bar(index + bar_width, times_fastor/1e-6, bar_width,
alpha=opacity,
color='#E98604',
label='Fastor')
xticks = [str(dim[0]) + 'x' + str(dim[1]) for dim in zip(ms,ns)]
    plt.xlabel('(M,N)')
    plt.ylabel(r'Time ($\mu$sec)')
plt.title("B = inv(A)")
plt.xticks(index, xticks, rotation=45)
plt.legend()
plt.tight_layout()
plt.grid(True)
    # plt.savefig('benchmark_inverse_single.png', format='png', dpi=300)
plt.show()
if __name__ == "__main__":
main() | mit |
CodeReclaimers/neat-python | examples/xor/visualize.py | 1 | 5915 | from __future__ import print_function
import copy
import warnings
try:
    import graphviz
except ImportError:
    graphviz = None  # keeps the `graphviz is None` check in draw_net meaningful
try:
    import matplotlib.pyplot as plt
except ImportError:
    plt = None  # keeps the `plt is None` checks in the plot functions meaningful
import numpy as np
def plot_stats(statistics, ylog=False, view=False, filename='avg_fitness.svg'):
""" Plots the population's average and best fitness. """
if plt is None:
warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
return
generation = range(len(statistics.most_fit_genomes))
best_fitness = [c.fitness for c in statistics.most_fit_genomes]
avg_fitness = np.array(statistics.get_fitness_mean())
stdev_fitness = np.array(statistics.get_fitness_stdev())
plt.plot(generation, avg_fitness, 'b-', label="average")
plt.plot(generation, avg_fitness - stdev_fitness, 'g-.', label="-1 sd")
plt.plot(generation, avg_fitness + stdev_fitness, 'g-.', label="+1 sd")
plt.plot(generation, best_fitness, 'r-', label="best")
plt.title("Population's average and best fitness")
plt.xlabel("Generations")
plt.ylabel("Fitness")
plt.grid()
plt.legend(loc="best")
if ylog:
plt.gca().set_yscale('symlog')
plt.savefig(filename)
if view:
plt.show()
plt.close()
def plot_spikes(spikes, view=False, filename=None, title=None):
""" Plots the trains for a single spiking neuron. """
t_values = [t for t, I, v, u, f in spikes]
v_values = [v for t, I, v, u, f in spikes]
u_values = [u for t, I, v, u, f in spikes]
I_values = [I for t, I, v, u, f in spikes]
f_values = [f for t, I, v, u, f in spikes]
fig = plt.figure()
plt.subplot(4, 1, 1)
plt.ylabel("Potential (mv)")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, v_values, "g-")
if title is None:
plt.title("Izhikevich's spiking neuron model")
else:
plt.title("Izhikevich's spiking neuron model ({0!s})".format(title))
plt.subplot(4, 1, 2)
plt.ylabel("Fired")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, f_values, "r-")
plt.subplot(4, 1, 3)
plt.ylabel("Recovery (u)")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, u_values, "r-")
plt.subplot(4, 1, 4)
plt.ylabel("Current (I)")
plt.xlabel("Time (in ms)")
plt.grid()
plt.plot(t_values, I_values, "r-o")
if filename is not None:
plt.savefig(filename)
if view:
plt.show()
plt.close()
fig = None
return fig
def plot_species(statistics, view=False, filename='speciation.svg'):
""" Visualizes speciation throughout evolution. """
if plt is None:
warnings.warn("This display is not available due to a missing optional dependency (matplotlib)")
return
species_sizes = statistics.get_species_sizes()
num_generations = len(species_sizes)
curves = np.array(species_sizes).T
fig, ax = plt.subplots()
ax.stackplot(range(num_generations), *curves)
plt.title("Speciation")
plt.ylabel("Size per Species")
plt.xlabel("Generations")
plt.savefig(filename)
if view:
plt.show()
plt.close()
def draw_net(config, genome, view=False, filename=None, node_names=None, show_disabled=True, prune_unused=False,
node_colors=None, fmt='svg'):
""" Receives a genome and draws a neural network with arbitrary topology. """
# Attributes for network nodes.
if graphviz is None:
warnings.warn("This display is not available due to a missing optional dependency (graphviz)")
return
if node_names is None:
node_names = {}
assert type(node_names) is dict
if node_colors is None:
node_colors = {}
assert type(node_colors) is dict
node_attrs = {
'shape': 'circle',
'fontsize': '9',
'height': '0.2',
'width': '0.2'}
dot = graphviz.Digraph(format=fmt, node_attr=node_attrs)
inputs = set()
for k in config.genome_config.input_keys:
inputs.add(k)
name = node_names.get(k, str(k))
input_attrs = {'style': 'filled', 'shape': 'box', 'fillcolor': node_colors.get(k, 'lightgray')}
dot.node(name, _attributes=input_attrs)
outputs = set()
for k in config.genome_config.output_keys:
outputs.add(k)
name = node_names.get(k, str(k))
node_attrs = {'style': 'filled', 'fillcolor': node_colors.get(k, 'lightblue')}
dot.node(name, _attributes=node_attrs)
if prune_unused:
connections = set()
for cg in genome.connections.values():
if cg.enabled or show_disabled:
                connections.add(cg.key)  # cg.key is the (input, output) node pair, as used below
used_nodes = copy.copy(outputs)
pending = copy.copy(outputs)
while pending:
new_pending = set()
for a, b in connections:
if b in pending and a not in used_nodes:
new_pending.add(a)
used_nodes.add(a)
pending = new_pending
else:
used_nodes = set(genome.nodes.keys())
for n in used_nodes:
if n in inputs or n in outputs:
continue
attrs = {'style': 'filled',
'fillcolor': node_colors.get(n, 'white')}
dot.node(str(n), _attributes=attrs)
for cg in genome.connections.values():
if cg.enabled or show_disabled:
#if cg.input not in used_nodes or cg.output not in used_nodes:
# continue
input, output = cg.key
a = node_names.get(input, str(input))
b = node_names.get(output, str(output))
style = 'solid' if cg.enabled else 'dotted'
color = 'green' if cg.weight > 0 else 'red'
width = str(0.1 + abs(cg.weight / 5.0))
dot.edge(a, b, _attributes={'style': style, 'color': color, 'penwidth': width})
dot.render(filename, view=view)
return dot
| bsd-3-clause |
imaculate/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
A regressor using 299 boosting iterations (300 decision trees in total) is
compared with a single decision tree regressor. As the number of boosts
increases, the regressor can fit more detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
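
# Optional follow-up (an addition, not part of the original example): the
# docstring's claim that more boosts fit more detail can be made measurable
# with staged_predict, which yields predictions after each boosting iteration.
errors = [np.mean((y - y_staged) ** 2)
          for y_staged in regr_2.staged_predict(X)]
plt.figure()
plt.plot(np.arange(1, len(errors) + 1), errors, c="r")
plt.xlabel("number of boosts")
plt.ylabel("training MSE")
plt.title("AdaBoost.R2 training error vs. number of boosts")
plt.show()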
| bsd-3-clause |
zorojean/scikit-learn | examples/datasets/plot_random_multilabel_dataset.py | 278 | 3402 | """
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the `datasets.make_multilabel_classification` dataset
generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of two classes.
Points are labeled as follows, where Y means the class is present:
===== ===== ===== ======
1 2 3 Color
===== ===== ===== ======
Y N N Red
N Y N Blue
N N Y Yellow
Y Y N Purple
Y N Y Orange
  N      Y      Y    Green
Y Y Y Brown
===== ===== ===== ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification as make_ml_clf
print(__doc__)
COLORS = np.array(['!',  # index 0 unused: allow_unlabeled=False, so label sums start at 1
'#FF3333', # red
'#0198E1', # blue
'#BF5FFF', # purple
'#FCD116', # yellow
'#FF7216', # orange
'#4DBD33', # green
'#87421F' # brown
])
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2 ** 10)
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2,
n_classes=n_classes, n_labels=n_labels,
length=length, allow_unlabeled=False,
return_distributions=True,
random_state=RANDOM_SEED)
ax.scatter(X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]
).sum(axis=1)),
marker='.')
ax.scatter(p_w_c[0] * length, p_w_c[1] * length,
marker='*', linewidth=.5, edgecolor='black',
s=20 + 1500 * p_c ** 2,
color=COLORS.take([1, 2, 4]))
ax.set_xlabel('Feature 0 count')
return p_c, p_w_c
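
# The scatter call above encodes each label triple as a 3-bit integer index:
# (Y * [1, 2, 4]).sum(axis=1), e.g. labels (1, 0, 1) -> 1 + 4 = 5 -> orange.
# A quick sanity check of that encoding (an illustrative addition):
assert (np.array([[1, 0, 1]]) * [1, 2, 4]).sum(axis=1)[0] == 5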
_, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4))
plt.subplots_adjust(bottom=.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title('n_labels=1, length=50')
ax1.set_ylabel('Feature 1 count')
plot_2d(ax2, n_labels=3)
ax2.set_title('n_labels=3, length=50')
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
print('The data was generated from (random_state=%d):' % RANDOM_SEED)
print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t')
for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T):
print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
| bsd-3-clause |
franciscomoura/data-science-and-bigdata | introducao-linguagens-estatisticas/mineracao-dados-python/codigo-fonte/code-06.py | 1 | 2285 | # -*- coding: utf-8 -*-
# code-06.py
"""
Dependencies: Matplotlib, NumPy
Run at the prompt: pip install matplotlib
Run at the prompt: pip install numpy
Run at the prompt: pip install scikit-learn
Run at the prompt: pip install scipy
*** Note:
This file must be run from the same directory as the iris.csv file.
"""
import numpy as np
# read the first 4 columns
data = np.genfromtxt('iris.csv', delimiter=',', usecols=(0, 1, 2, 3))
# read the fifth (last) column
target_names = np.genfromtxt('iris.csv', delimiter=',', usecols=(4), dtype=str)
# convert the vector of class-name strings into integers
target = np.zeros(len(target_names), dtype=int)
target[target_names == 'setosa'] = 0
target[target_names == 'versicolor'] = 1
target[target_names == 'virginica'] = 2
# part 1
from sklearn.cluster import KMeans
# fixed initialization so the clustering shows the same result on every run
kmeans = KMeans(n_clusters=3, init="k-means++", random_state=3425)
kmeans.fit(data)
# part 2
clusters = kmeans.predict(data)
# part 3
print("Completeness and homogeneity:")
from sklearn.metrics import completeness_score, homogeneity_score
print(completeness_score(target, clusters))
# Output: 0.764986151449
print(homogeneity_score(target, clusters))
# Output: 0.751485402199
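
# Extra (an illustrative addition, not in the original script): the adjusted
# Rand index is another common external clustering metric in scikit-learn.
from sklearn.metrics import adjusted_rand_score
print("Adjusted Rand index:")
print(adjusted_rand_score(target, clusters))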
# part 4 - revised
print("Generating the scatter plot")
import pylab as pl
pl.figure()
pl.subplot(211)  # top: plot with the true classes
pl.plot(data[target == 0, 2], data[target == 0, 3], 'bo', alpha=.7)  # 0 setosa
pl.plot(data[target == 1, 2], data[target == 1, 3], 'ro', alpha=.7)  # 1 versicolor
pl.plot(data[target == 2, 2], data[target == 2, 3], 'go', alpha=.7)  # 2 virginica
pl.xlabel('Petal length - cm')
pl.ylabel('Petal width - cm')
pl.axis([0.5, 7, 0, 3])
pl.subplot(212)  # bottom: plot with the automatically assigned clusters
pl.plot(data[clusters == 0, 2], data[clusters == 0, 3], 'go', alpha=.7)  # cluster 0 virginica
pl.plot(data[clusters == 1, 2], data[clusters == 1, 3], 'bo', alpha=.7)  # cluster 1 setosa
pl.plot(data[clusters == 2, 2], data[clusters == 2, 3], 'ro', alpha=.7)  # cluster 2 versicolor
pl.xlabel('Petal length - cm')
pl.ylabel('Petal width - cm')
pl.axis([0.5, 7, 0, 3])
pl.show()
| apache-2.0 |
softwaresaved/SSINetworkGraphics | Fellows/Python/map_fellows_network.py | 1 | 3548 | import os
import ast
import requests, gspread
import numpy as np
import matplotlib.pyplot as plt
from oauth2client.client import SignedJwtAssertionCredentials
from mpl_toolkits.basemap import Basemap
#Google Authorisation section and getting a worksheet from Google Spreadsheet
def authenticate_google_docs():
f = file(os.path.join('SSI Network Graphics-3357cb9f30de.p12'), 'rb')
SIGNED_KEY = f.read()
f.close()
scope = ['https://spreadsheets.google.com/feeds', 'https://docs.google.com/feeds']
credentials = SignedJwtAssertionCredentials('devasena.prasad@gmail.com', SIGNED_KEY, scope)
data = {
'refresh_token' : '1/NM56uCG7uFT6VVAAYX3B5TbcMk43wn1xE8Wr-7dsb7lIgOrJDtdun6zK6XiATCKT',
'client_id' : '898367260-pmm78rtfct8af7e0utis686bv78eqmqs.apps.googleusercontent.com',
'client_secret' : 'Cby-rjWDg_wWTSQw_8DDKb3v',
'grant_type' : 'refresh_token',
}
r = requests.post('https://accounts.google.com/o/oauth2/token', data = data)
credentials.access_token = ast.literal_eval(r.text)['access_token']
gc = gspread.authorize(credentials)
return gc
gc_ret = authenticate_google_docs()
sh = gc_ret.open_by_url('https://docs.google.com/spreadsheets/d/13_ZIdeF7oS0xwp_nhGRoVTv7PaXvfLMwVxvgt_hNOkg/edit#gid=383409775')
worksheet_list = sh.worksheets() # Get list of worksheets
#Print the names of first and second worksheets
print "First 2 worksheets of Fellows data Google spreadsheet are:", worksheet_list[0], worksheet_list[1]
# Get all values from the first, seventh and eight columns of Sample datset
values_list_names = worksheet_list[0].col_values(1)
destination_lat_values = worksheet_list[0].col_values(7)
destination_lon_values = worksheet_list[0].col_values(8)
print "Names of SSI fellows are:",values_list_names
print "Destination Latitude values are:",destination_lat_values
print "Destination Longitude values are:", destination_lon_values
# get all values from first, fourth and fifth columns of Home Institutions worksheet
fellows_list_names = worksheet_list[1].col_values(1)
home_lat_values = worksheet_list[1].col_values(4)
home_lon_values = worksheet_list[1].col_values(5)
print "Names of SSI fellows are:",fellows_list_names
print "Home Institution Latitude values are:",home_lat_values
print "Home Institution Longitude values are:", home_lon_values
# create new figure, axes instances.
fig=plt.figure()
ax=fig.add_axes([0.1,0.1,0.8,0.8])
# setup mercator map projection.
m = Basemap(llcrnrlon=-150.,llcrnrlat=-40.,urcrnrlon=150.,urcrnrlat=80.,\
rsphere=(6378137.00,6356752.3142),\
resolution='l',projection='merc',\
lat_0=40.,lon_0=-20.,lat_ts=20.)
#Plotting fellows routes on map
print "No. of unique fellows are:", (len(worksheet_list[1].col_values(1))-1)
colcode = ['b','r','g','y','m','c','k','w']
i = 1
print "No. of destination entries in the Sample datasheet:", (len(worksheet_list[0].col_values(7))-1)
while i < len(worksheet_list[1].col_values(1)):
    j = 1  # reset the destination index for each fellow; otherwise only the first fellow gets routes drawn
    while j < len(worksheet_list[0].col_values(7)):
        m.drawgreatcircle(float(home_lon_values[i]),float(home_lat_values[i]),float(destination_lon_values[j]),float(destination_lat_values[j]),linewidth=2,color=colcode[i-1])
        j = j + 1
    i = i + 1
#label=fellows_list_names[i]
m.drawcoastlines()
m.fillcontinents()
# draw parallels
m.drawparallels(np.arange(10,90,20),labels=[1,1,0,1])
# draw meridians
m.drawmeridians(np.arange(-180,180,30),labels=[1,1,0,1])
ax.set_title('SSI Fellows Impact')
plt.legend()
plt.show()
| bsd-3-clause |
jballanc/openmicroscopy | components/tools/OmeroPy/src/omero/install/logs_library.py | 5 | 6661 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Function for parsing OMERO log files.
The format expected is defined for Python in
omero.util.configure_logging.
Copyright 2010 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
:author: Josh Moore <josh@glencoesoftware.com>
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as lines
import matplotlib.transforms as mtransforms
import matplotlib.text as mtext
from time import mktime, strptime
import fileinput
import logging
import sys
import os
import re
def parse_time(value):
"""
parse the time format used by log4j into seconds (float)
since the epoch
"""
parts = value.split(",")
value = parts[0]
millis = float(parts[1]) / 1000.0
t = mktime(strptime(value, "%Y-%m-%d %H:%M:%S"))
t = float(t)
t += millis
return t
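
# Example (hypothetical timestamp in the log4j layout parsed above):
#   parse_time("2009-04-09 15:11:58,029")
# returns the epoch seconds for 15:11:58 plus 0.029, as a float.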
class log_line(object):
"""
2009-04-09 15:11:58,029 INFO [ ome.services.util.ServiceHandler] (l.Server-6) Meth: interface ome.api.IQuery.findByQuery
01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789
"""
def __init__(self, line):
self.line = line
line.strip()
self.date = line[0:23]
self.level = line[24:28]
self.thread = line[74:84]
self.message = line[85:].strip()
self.status = line[86:91]
self.method = line[96:].strip()
def contains(self, s):
return 0 <= self.line.find(s)
def contains_any(self, l):
for i in l:
if self.contains(i):
return True
return False
class log_watcher(object):
def __init__(self, files, entries, exits, storeonce = None, storeall = None):
if storeonce is None: storeonce = []
if storeall is None: storeall = []
self.files = files
self.entries = entries
self.exits = exits
self.storeonce = storeonce
self.storeall = storeall
def gen(self):
self.m = {}
try:
for line in fileinput.input(self.files):
ll = log_line(line)
if ll.contains_any(self.entries):
self.m[ll.thread] = ll
elif ll.contains_any(self.storeonce):
try:
value = self.m[ll.thread]
try:
value.once
except:
value.once = ll
except KeyError:
logging.debug("Not found: " + line)
elif ll.contains_any(self.storeall):
try:
value = self.m[ll.thread]
value.all.append(ll)
except AttributeError:
value.all = [ll]
except KeyError:
logging.debug("Not found: " + line)
elif ll.contains_any(self.exits):
try:
value = self.m[ll.thread]
del self.m[ll.thread] # Free memory
value.start = parse_time(value.date)
value.stop = parse_time(ll.date)
value.took = value.stop - value.start
yield value
except KeyError:
logging.debug("Not found: " + line)
finally:
fileinput.close()
class allthreads_watcher(log_watcher):
def __init__(self, files):
log_watcher.__init__(self, files, ["Meth:","Executor.doWork"],["Rslt:","Excp:"])
class saveAndReturnObject_watcher(log_watcher):
def __init__(self, files):
log_watcher.__init__(self, files, ["saveAndReturnObject"],["Rslt:","Excp:"],storeonce=["Args:"],storeall=["Adding log"])
# http://matplotlib.sourceforge.net/examples/api/line_with_text.html
class MyLine(lines.Line2D):
def __init__(self, *args, **kwargs):
# we'll update the position when the line data is set
self.text = mtext.Text(0, 0, '')
lines.Line2D.__init__(self, *args, **kwargs)
# we can't access the label attr until *after* the line is
# inited
self.text.set_text(self.get_label())
def set_figure(self, figure):
self.text.set_figure(figure)
lines.Line2D.set_figure(self, figure)
def set_axes(self, axes):
self.text.set_axes(axes)
lines.Line2D.set_axes(self, axes)
def set_transform(self, transform):
# 2 pixel offset
texttrans = transform + mtransforms.Affine2D().translate(2, 2)
self.text.set_transform(texttrans)
lines.Line2D.set_transform(self, transform)
def set_data(self, x, y):
if len(x):
self.text.set_position((x[-1], y[-1]))
lines.Line2D.set_data(self, x, y)
def draw(self, renderer):
# draw my label at the end of the line with 2 pixel offset
lines.Line2D.draw(self, renderer)
self.text.draw(renderer)
def plot_threads(watcher, all_colors = ("blue","red","yellow","green","pink","purple")):
    digit = re.compile(r".*?(\d+)\D*$")  # capture the full trailing number of the thread name
fig = plt.figure()
ax = fig.add_subplot(111)
first = None
last = None
colors = {}
for ll in watcher.gen():
last = ll.stop
if first is None:
first = ll.start
if ll.thread.strip() == "main":
t = -1
else:
try:
t = digit.match(ll.thread).group(1)
except:
print "Error parsing thread:", ll.thread
raise
y = np.array([int(t),int(t)])
x = np.array([ll.start-first, ll.stop-first])
c = colors.get(t,all_colors[0])
i = all_colors.index(c)
colors[t] = all_colors[ (i+1) % len(all_colors) ]
if True:
line = MyLine(x, y, c=c, lw=2, alpha=0.5)#, mfc='red')#, ms=12, label=str(len(ll.logs)))
#line.text.set_text('line label')
line.text.set_color('red')
#line.text.set_fontsize(16)
ax.add_line(line)
else:
# http://matplotlib.sourceforge.net/examples/pylab_examples/broken_barh.html
ax.broken_barh([ (110, 30), (150, 10) ] , (10, 9), facecolors='blue')
ax.set_ylim(-2,25)
ax.set_xlim(0, (last-first))
plt.show()
if __name__ == "__main__":
for g in allthreads_watcher(sys.argv).gen():
print "Date:%s\nElapsed:%s\nLevel:%s\nThread:%s\nMethod:%s\nStatus:%s\n\n" % (g.date, g.took, g.level, g.thread, g.message, g.status)
| gpl-2.0 |
YinongLong/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 73 | 2074 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))  # size= gives per-sample noise
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, color='lightgreen', linewidth=2,
label='Elastic net coefficients')
plt.plot(lasso.coef_, color='gold', linewidth=2,
label='Lasso coefficients')
plt.plot(coef, '--', color='navy', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
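
###############################################################################
# Optional extension (an addition, not part of the original example): choose
# alpha by cross-validation instead of fixing it at 0.1.
from sklearn.linear_model import LassoCV
lasso_cv = LassoCV(cv=3).fit(X_train, y_train)
print("alpha chosen by LassoCV: %f" % lasso_cv.alpha_)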
| bsd-3-clause |
garrettkatz/directional-fibers | dfibers/experiments/levy_opt/levy_opt.py | 1 | 6952 | """
Measure global optimization performance of Levy function
"""
import sys, time
import numpy as np
import matplotlib.pyplot as pt
import multiprocessing as mp
import dfibers.traversal as tv
import dfibers.numerical_utilities as nu
import dfibers.logging_utilities as lu
import dfibers.fixed_points as fx
import dfibers.solvers as sv
import dfibers.examples.levy as lv
from mpl_toolkits.mplot3d import Axes3D
def run_trial(args):
basename, sample, timeout = args
stop_time = time.clock() + timeout
logfile = open("%s_s%d.log"%(basename,sample),"w")
# Set up fiber arguments
np.random.seed()
v = 20*np.random.rand(2,1) - 10 # random point in domain
c = lv.f(v) # direction at that point
c = c + 0.1*np.random.randn(2,1) # perturb for more variability
fiber_kwargs = {
"f": lv.f,
"ef": lv.ef,
"Df": lv.Df,
"compute_step_amount": lambda trace: (0.0001, 0),
"v": v,
"c": c,
"stop_time": stop_time,
"terminate": lambda trace: (np.fabs(trace.x[:-1]) > 10).any(),
"max_solve_iterations": 2**5,
}
solve_start = time.clock()
# Run in one direction
solution = sv.fiber_solver(
logger=lu.Logger(logfile).plus_prefix("+: "),
**fiber_kwargs)
X1 = np.concatenate(solution["Fiber trace"].points, axis=1)
V1 = solution["Fixed points"]
z = solution["Fiber trace"].z_initial
# print("Status: %s\n"%solution["Fiber trace"].status)
# Run in other direction (negate initial tangent)
solution = sv.fiber_solver(
z= -z,
logger=lu.Logger(logfile).plus_prefix("-: "),
**fiber_kwargs)
X2 = np.concatenate(solution["Fiber trace"].points, axis=1)
V2 = solution["Fixed points"]
# print("Status: %s\n"%solution["Fiber trace"].status)
# Join fiber segments
fiber = np.concatenate((np.fliplr(X1), X2), axis=1)
# Union solutions
fxpts = fx.sanitize_points(
np.concatenate((V1, V2), axis=1),
f = lv.f,
ef = lv.ef,
Df = lv.Df,
duplicates = lambda V, v: (np.fabs(V - v) < 10**-6).all(axis=0),
)
# Save results
    with open("%s_s%d.npz"%(basename,sample), 'wb') as rf: np.savez(rf, **{
"fxpts": fxpts,
"fiber": fiber,
"runtime": time.clock() - solve_start })
logfile.close()
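
# A single trial can also be run directly, e.g. (hypothetical arguments):
#   run_trial(("levy_opt", 0, 60))  # basename, sample index, 60 s timeout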
def run_experiment(basename, num_samples, timeout, num_procs=0):
pool_args = []
for sample in range(num_samples):
pool_args.append((basename, sample, timeout))
if num_procs > 0:
num_procs = min(num_procs, mp.cpu_count())
print("using %d processes..."%num_procs)
pool = mp.Pool(processes=num_procs)
pool.map(run_trial, pool_args)
pool.close()
pool.join()
else:
for pa in pool_args: run_trial(pa)
def compile_results(basename, num_samples):
L = []
F = []
runtimes = []
for sample in range(num_samples):
        with open("%s_s%d.npz"%(basename,sample), 'rb') as rf: data = dict(np.load(rf))
fxpts = data["fxpts"]
Fs = np.fabs(lv.f(fxpts)).max(axis=0)
Ls = lv.levy(fxpts)
within = (np.fabs(fxpts) < 10).all(axis=0)
mean_within = Ls[within].mean() if within.any() else np.nan
print("sample %d: %d secs, %d solns, mean %f, mean within %f, min %f"%(
sample, data["runtime"], len(Ls), Ls.mean(), mean_within, Ls.min()))
L.append(Ls)
F.append(Fs)
runtimes.append(data["runtime"])
counts = np.array([len(Ls) for Ls in L])
bests = np.array([Ls.min() for Ls in L])
resids = np.array([Fs.max() for Fs in F])
runtimes = np.array(runtimes)
print("avg count = %d, avg best = %f, avg resid = %f, best best = %f"%(
counts.mean(), bests.mean(), resids.mean(), bests.min()))
return counts, bests, runtimes
def plot_results(basename, num_samples, counts, bests, runtimes, timeout):
### Optimization order stats
pt.figure(figsize=(5,4))
pt.subplot(2,1,1)
pt.plot(np.sort(bests), '-k.')
pt.xlabel("Ordered samples")
pt.ylabel("Best objective value")
##### Work complexity
pt.subplot(2,1,2)
terms = (runtimes < timeout)
pt.plot(runtimes[terms], bests[terms], 'k+', markerfacecolor='none')
pt.plot(runtimes[~terms], bests[~terms], 'ko', markerfacecolor='none')
pt.legend(["terminated","timed out"])
pt.xlabel("Runtime (seconds)")
pt.ylabel("Best objective value")
pt.tight_layout()
pt.show()
### Fiber visuals
pt.figure(figsize=(4,7))
# objective fun
X_surface, Y_surface = np.mgrid[-10:10:100j,-10:10:100j]
L = lv.levy(np.array([X_surface.flatten(), Y_surface.flatten()])).reshape(X_surface.shape)
ax_surface = pt.gcf().add_subplot(2,1,1,projection="3d")
ax_surface.plot_surface(X_surface, Y_surface, L, linewidth=0, antialiased=False, color='gray')
ax_surface.set_xlabel("v0")
ax_surface.set_ylabel("v1")
ax_surface.set_zlabel("levy(v)")
ax_surface.view_init(azim=-80, elev=20)
# fibers
ax = pt.gcf().add_subplot(2,1,2)
X_grid, Y_grid = np.mgrid[-10:10:60j,-10:10:60j]
XY = np.array([X_grid.flatten(), Y_grid.flatten()])
C_XY = lv.f(XY)
ax.quiver(XY[0,:],XY[1,:],C_XY[0,:],C_XY[1,:],color=0.5*np.ones((1,3)),
scale=10,units='xy',angles='xy')
num_plot_samples = 3
sort_idx = np.argsort(bests)
plot_idx = [0] + list(np.random.permutation(num_samples)[:num_plot_samples-1])
samples = sort_idx[plot_idx]
# samples = [41,73,20] # all through global
# samples = [41, 97, 11] # two through global
# samples = [41, 49, 13] # two through global, one horiz not through
# samples = [41, 46, 70] # one through global, one horiz
# samples = [41, 96, 27] # two through global, one almost horiz
samples = [41, 63, 28] # two through global, all interesting
print("samples:")
print(samples)
for i,sample in enumerate(samples[::-1]):
        with open("%s_s%d.npz"%(basename,sample), 'rb') as rf: data = dict(np.load(rf))
fxpts = data["fxpts"]
fiber = data["fiber"][:,::]
L = lv.levy(fxpts).min()
col = 0.5*float(num_plot_samples-i-1)/num_plot_samples
print(sample,col)
ax.plot(fiber[0],fiber[1],color=(col,col,col,1), linestyle='-', linewidth=1)
pt.plot(fxpts[0],fxpts[1], 'o', color=(col,col,col,1))
pt.xlabel("v0")
pt.ylabel("v1",rotation=0)
pt.yticks(np.linspace(-10,10,5))
pt.xlim([-10,10])
pt.ylim([-10,10])
pt.tight_layout()
pt.show()
if __name__ == "__main__":
basename = "levy_opt"
num_samples = 100
num_plot_samples = 3
timeout = 60*30
num_procs = 10
# run_experiment(basename, num_samples=num_samples, timeout=timeout, num_procs=num_procs)
counts, bests, runtimes = compile_results(basename, num_samples)
plot_results(basename, num_samples, counts, bests, runtimes, timeout)
| mit |
pedro-aaron/stego-chi-2 | embeddingRgb.py | 1 | 2081 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Watermarkero, Mario, Ariel
"""
from PIL import Image
import random
import matplotlib.pyplot as plt
import numpy as np
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.299, 0.587, 0.114])
def marcarPixel(color, bitporinsertar):
    # force the channel's least-significant bit to equal the bit being embedded
if (color%2)==1:
if bitporinsertar==0:
color=color-1
elif (color%2)==0:
if bitporinsertar==1:
color=color+1
return color
def plotLsbRgb(img):
fig, (ax1, ax2) = plt.subplots(2, 1)
    ax1.set_title('RGB image')
ax1.imshow(img)
ax2.set_title('LSB RGB')
img=255*(img%2)
ax2.imshow(img)
plt.subplots_adjust(top=0.92, bottom=0.08, left=0.10,
right=0.95, hspace=0.3,wspace=0.35)
# original image
path="img3.jpg"
imgOriginal = np.array(Image.open(path))
nFilas, nCols, nCanales = imgOriginal.shape
# watermark
key=41196
random.seed(key)
porcentajeDeimagenPorMarcar=50
sizeMarca = nCols*int(porcentajeDeimagenPorMarcar*(nFilas/100))
#marca = [random.randint(0,1) for i in range(sizeMarca)]
plotLsbRgb(imgOriginal)
# embedding process
imgMarcada = imgOriginal.copy()
cont = 1  # counts watermarked pixels (3 bits are embedded per pixel)
# embedding loop
for fila in range(0,nFilas):
for columna in range(0,nCols):
pixel=imgOriginal[fila,columna]
newPixel = [marcarPixel(
pixel[0],random.randint(0,1)),
marcarPixel(pixel[1],random.randint(0,1)),
marcarPixel(pixel[2],random.randint(0,1))]
imgMarcada[fila,columna] = newPixel
if cont >= sizeMarca:
break
cont = cont +1
if cont >= sizeMarca:
break
plotLsbRgb(imgMarcada)
image = Image.fromarray(imgMarcada, 'RGB')
image.save('ImagenMarcada.bmp')
print('Percentage of the image watermarked: ' + str(porcentajeDeimagenPorMarcar)+'%')
print('Embedded bits: ' + str(sizeMarca*3))
print('Embedded bytes: ' + str(sizeMarca*3/8))
print('Embedded kilobytes: ' + str(sizeMarca*3/8/1024))
print('Embedded megabytes: ' + str(sizeMarca*3/8/1024/1024))
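
# A minimal extraction sketch (an illustrative addition; it assumes the
# receiver knows the embedding order used above and reads the LSBs back in
# the same raster order):
imgLeida = np.array(Image.open('ImagenMarcada.bmp'))
bitsExtraidos = (imgLeida % 2).flatten()[:sizeMarca * 3]
print('Recovered bits: ' + str(len(bitsExtraidos)))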
| mit |
DeercoderResearch/0.5-CoCo | PythonAPI/getFoodImage.py | 2 | 3639 | from pycocotools.coco import COCO
from write_xml import write_to_file
import numpy as np
import skimage.io as io
import matplotlib.pyplot as plt
import os
import shutil
dataDir='..'
dataType='val2014'
annFile='%s/annotations/instances_%s.json'%(dataDir,dataType)
coco=COCO(annFile) # load database
cats=coco.loadCats(coco.getCatIds())
print cats
foodCategory = []
foodCategoryId = []
foodImageId = []
for cat in cats:
if cat['supercategory'] == 'food':
foodCategory.append(cat['name'])
print cat['name']
foodCategoryId = coco.getCatIds(foodCategory)
foodImageId = coco.getImgIds(catIds=foodCategoryId) #must add catIds=
#print len(foodImageId)
dstdir = './JPEGImages/'
# Get all food images and copy them to JPEGImages folders.(#JPEGImage#)
for cat in range(0, len(foodImageId)):
img = coco.loadImgs(foodImageId[cat])[0]
img_name = '%s/images/%s/%s'%(dataDir,dataType,img['file_name'])
#print img_name
shutil.copy(img_name, dstdir)
# Generate the SegmentationObject/SegmentationClass (#Segmentation#)
dstdir = './SegmentationClass'
dstdir_2 = './SegmentationObject'
# (placeholder: the segmentation masks themselves are not generated yet)
# Generate the configuration files for Annotation folders(#Annotation#)
# Move to the above the share the loop of image_names.
for cat in range(0, len(foodImageId)):
img = coco.loadImgs(foodImageId[cat])[0]
img_name = os.path.splitext(img['file_name'])[0]
img_annotation_xml_name ='./Annotations/%s.xml'%(img_name)
img_annotation_jpg_name ='./JPEGImages/%s.jpg'%(img_name)
# print img_annotation_xml_name
file = open(img_annotation_xml_name, "wb")
# def write_to_file(img_name,food_type, file_name, img_width, img_height,left_x, left_y, right_x, right_y):
img_width = img['width']
img_height = img['height']
## Now load annotation in order to get bbox, food type
ann_id = coco.getAnnIds(imgIds=img['id'])
print "ann_id"
print ann_id
## Note: for one image, there are multiple labels, find the food_label
ann = coco.loadAnns(ann_id)
for ann_food in ann:
ann_cat_id = ann_food['category_id']
ann_cat = coco.loadCats(ann_cat_id)[0]
if ann_cat['supercategory'] == 'food':
print ann_cat['name']
food_ann = ann_food
break
print "annotation"
print ann_food
bbox = ann_food['bbox']
catId = ann_food['category_id']
cat = coco.loadCats(catId)[0]
left_x = bbox[0]
left_y = bbox[1]
right_x = left_x + bbox[2]
right_y = left_y + bbox[3]
food_type = cat['name']
print img_annotation_jpg_name
if food_type == 'donut':
shutil.copy(img_annotation_jpg_name, "./donut/")
elif food_type == 'cake':
shutil.copy(img_annotation_jpg_name, "./cake/")
elif food_type == 'hot dog':
shutil.copy(img_annotation_jpg_name, "./hotdog/")
elif food_type == 'sandwich':
shutil.copy(img_annotation_jpg_name, "./sandwich/")
elif food_type == 'carrot':
shutil.copy(img_annotation_jpg_name, "./carrot/")
elif food_type == 'apple':
shutil.copy(img_annotation_jpg_name, "./apple/")
elif food_type == 'orange':
shutil.copy(img_annotation_jpg_name, "./orange/")
elif food_type == 'banana':
shutil.copy(img_annotation_jpg_name, "./banana/")
elif food_type == 'pizza':
shutil.copy(img_annotation_jpg_name, "./pizza/")
write_to_file(img_annotation_jpg_name, food_type, img_annotation_xml_name, str(img_width), str(img_height), str(left_x), str(left_y), str(right_x), str(right_y))
file.close()
### ??????
# Generat the configuration for ImageSet
img = coco.loadImgs(foodImageId[5])[0]
img_name = '%s/images/%s/%s'%(dataDir,dataType,img['file_name'])
I = io.imread(img_name)
#plt.figure()
#plt.imshow(I)
#plt.show()
# JUST FOR DEBUGGING
print foodCategory
print foodCategoryId
print foodImageId
print img_name
| bsd-2-clause |
planetarymike/IDL-Colorbars | IDL_py_test/018_Pastels.py | 1 | 5628 | from matplotlib.colors import LinearSegmentedColormap
from numpy import nan, inf
cm_data = [[1., 0., 0.282353],
[1., 0., 0.282353],
[1., 0., 0.290196],
[1., 0., 0.298039],
[1., 0., 0.305882],
[1., 0., 0.313725],
[1., 0., 0.321569],
[1., 0., 0.329412],
[1., 0., 0.337255],
[1., 0., 0.345098],
[1., 0., 0.352941],
[1., 0., 0.356863],
[1., 0., 0.364706],
[1., 0., 0.372549],
[1., 0., 0.380392],
[1., 0., 0.388235],
[1., 0., 0.396078],
[1., 0., 0.403922],
[1., 0., 0.411765],
[1., 0., 0.419608],
[1., 0., 0.427451],
[1., 0., 0.435294],
[1., 0., 0.443137],
[1., 0., 0.45098],
[1., 0., 0.458824],
[1., 0., 0.466667],
[1., 0., 0.47451],
[1., 0., 0.482353],
[1., 0., 0.490196],
[1., 0., 0.498039],
[1., 0., 0.505882],
[1., 0., 0.513725],
[1., 0., 0.521569],
[1., 0., 0.529412],
[1., 0., 0.537255],
[1., 0., 0.545098],
[1., 0., 0.552941],
[1., 0., 0.556863],
[1., 0., 0.564706],
[1., 0., 0.572549],
[1., 0., 0.580392],
[1., 0., 0.588235],
[1., 0., 0.596078],
[1., 0., 0.603922],
[1., 0., 0.611765],
[1., 0., 0.619608],
[1., 0., 0.627451],
[1., 0., 0.635294],
[1., 0., 0.643137],
[1., 0., 0.65098],
[1., 0., 0.658824],
[1., 0., 0.666667],
[1., 0., 0.67451],
[1., 0., 0.682353],
[1., 0., 0.690196],
[1., 0., 0.698039],
[1., 0., 0.705882],
[1., 0., 0.713725],
[1., 0., 0.721569],
[1., 0., 0.729412],
[1., 0., 0.737255],
[1., 0., 0.745098],
[1., 0., 0.74902],
[1., 0., 0.756863],
[1., 0., 0.764706],
[1., 0., 0.772549],
[1., 0., 0.780392],
[1., 0., 0.788235],
[1., 0., 0.796078],
[1., 0., 0.803922],
[1., 0., 0.811765],
[1., 0., 0.819608],
[1., 0., 0.827451],
[1., 0., 0.835294],
[1., 0., 0.843137],
[1., 0., 0.85098],
[1., 0., 0.858824],
[1., 0., 0.866667],
[1., 0., 0.87451],
[1., 0., 0.882353],
[1., 0., 0.890196],
[1., 0., 0.898039],
[1., 0., 0.905882],
[1., 0., 0.913725],
[1., 0., 0.921569],
[1., 0., 0.929412],
[1., 0., 0.937255],
[1., 0., 0.945098],
[1., 0., 0.94902],
[1., 0., 0.956863],
[1., 0., 0.964706],
[1., 0., 0.972549],
[1., 0., 0.980392],
[1., 0., 0.988235],
[1., 0., 0.996078],
[0.992157, 0., 1.],
[0.984314, 0., 1.],
[0.976471, 0., 1.],
[0.968627, 0., 1.],
[0.960784, 0., 1.],
[0.952941, 0., 1.],
[0.945098, 0., 1.],
[0.937255, 0., 1.],
[0.929412, 0., 1.],
[0.921569, 0., 1.],
[0.913725, 0., 1.],
[0.905882, 0., 1.],
[0.898039, 0., 1.],
[0.890196, 0., 1.],
[0.882353, 0., 1.],
[0.87451, 0., 1.],
[0.866667, 0., 1.],
[0.858824, 0., 1.],
[0.85098, 0., 1.],
[0.847059, 0., 1.],
[0.839216, 0., 1.],
[0.831373, 0., 1.],
[0.823529, 0., 1.],
[0.815686, 0., 1.],
[0.807843, 0., 1.],
[0.8, 0., 1.],
[0.792157, 0., 1.],
[0.784314, 0., 1.],
[0.776471, 0., 1.],
[0.768627, 0., 1.],
[0.760784, 0., 1.],
[0.752941, 0., 1.],
[0.745098, 0., 1.],
[0.737255, 0., 1.],
[0.729412, 0., 1.],
[0., 0.54902, 1.],
[0., 0.572549, 1.],
[0., 0.596078, 1.],
[0., 0.615686, 1.],
[0., 0.639216, 1.],
[0., 0.662745, 1.],
[0., 0.682353, 1.],
[0., 0.705882, 1.],
[0., 0.729412, 1.],
[0., 0.752941, 1.],
[0., 0.772549, 1.],
[0., 0.796078, 1.],
[0., 0.819608, 1.],
[0., 0.839216, 1.],
[0., 0.862745, 1.],
[0., 0.886275, 1.],
[0., 0.909804, 1.],
[0., 0.929412, 1.],
[0., 0.952941, 1.],
[0., 0.976471, 1.],
[0., 1., 1.],
[0., 1., 0.976471],
[0., 1., 0.952941],
[0., 1., 0.929412],
[0., 1., 0.909804],
[0., 1., 0.886275],
[0., 1., 0.862745],
[0., 1., 0.839216],
[0., 1., 0.819608],
[0., 1., 0.796078],
[0., 1., 0.772549],
[0., 1., 0.752941],
[0., 1., 0.729412],
[0., 1., 0.705882],
[0., 1., 0.682353],
[0., 1., 0.662745],
[0., 1., 0.639216],
[0., 1., 0.615686],
[0., 1., 0.596078],
[0., 1., 0.572549],
[0., 1., 0.54902],
[0., 1., 0.52549],
[0., 1., 0.505882],
[0., 1., 0.482353],
[0., 1., 0.458824],
[0., 1., 0.439216],
[0., 1., 0.415686],
[0., 1., 0.392157],
[0., 1., 0.368627],
[0., 1., 0.34902],
[0., 1., 0.32549],
[0., 1., 0.301961],
[0., 1., 0.278431],
[0., 1., 0.258824],
[0., 1., 0.235294],
[0., 1., 0.211765],
[0., 1., 0.192157],
[0., 1., 0.168627],
[0., 1., 0.145098],
[0., 1., 0.121569],
[0., 1., 0.101961],
[0., 1., 0.0784314],
[0., 1., 0.054902],
[0., 1., 0.0352941],
[0., 1., 0.0117647],
[0.00784314, 1., 0.],
[0.0313725, 1., 0.],
[0.0509804, 1., 0.],
[0.0745098, 1., 0.],
[0.0980392, 1., 0.],
[0.117647, 1., 0.],
[0.141176, 1., 0.],
[0.164706, 1., 0.],
[0.188235, 1., 0.],
[0.207843, 1., 0.],
[0.231373, 1., 0.],
[0.254902, 1., 0.],
[0.278431, 1., 0.],
[0.298039, 1., 0.],
[0.321569, 1., 0.],
[0.345098, 1., 0.],
[0.364706, 1., 0.],
[0.388235, 1., 0.],
[0.411765, 1., 0.],
[0.435294, 1., 0.],
[0.454902, 1., 0.],
[0.478431, 1., 0.],
[0.501961, 1., 0.],
[0.521569, 1., 0.],
[0.545098, 1., 0.],
[0.568627, 1., 0.],
[0.592157, 1., 0.],
[0.611765, 1., 0.],
[0.635294, 1., 0.],
[0.658824, 1., 0.],
[0.678431, 1., 0.],
[0.701961, 1., 0.],
[0.72549, 1., 0.],
[0.74902, 1., 0.],
[0.768627, 1., 0.],
[0.792157, 1., 0.],
[0.815686, 1., 0.],
[0.839216, 1., 0.],
[0.858824, 1., 0.],
[0.882353, 1., 0.],
[0.905882, 1., 0.],
[0.92549, 1., 0.],
[0.94902, 1., 0.],
[0.972549, 1., 0.],
[0.996078, 1., 0.],
[1., 0.980392, 0.],
[1., 0.956863, 0.],
[1., 0.933333, 0.],
[1., 0.913725, 0.],
[1., 0.890196, 0.],
[1., 0.866667, 0.],
[1., 0.843137, 0.],
[1., 0.823529, 0.],
[1., 0.8, 0.],
[1., 0.776471, 0.],
[1., 0.756863, 0.],
[1., 0.733333, 0.],
[1., 0.709804, 0.],
[1., 0.686275, 0.],
[1., 0.666667, 0.],
[1., 0.666667, 0.]]
test_cm = LinearSegmentedColormap.from_list(__file__, cm_data)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from pycam02ucs.cm.viscm import viscm
viscm(test_cm)
except ImportError:
print("pycam02ucs not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
| gpl-2.0 |
tequa/ammisoft | ammimain/WinPython-64bit-2.7.13.1Zero/python-2.7.13.amd64/Lib/site-packages/matplotlib/backends/backend_wx.py | 4 | 65344 | """
A wxPython backend for matplotlib, based (very heavily) on
backend_template.py and backend_gtk.py
Author: Jeremy O'Donoghue (jeremy@o-donoghue.com)
Derived from original copyright work by John Hunter
(jdhunter@ace.bsd.uchicago.edu)
Copyright (C) Jeremy O'Donoghue & John Hunter, 2003-4
License: This work is licensed under a PSF compatible license. A copy
should be included with this source code.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from six.moves import xrange
import sys
import os
import os.path
import math
import weakref
import warnings
import numpy as np
import matplotlib
from matplotlib.backend_bases import (RendererBase, GraphicsContextBase,
FigureCanvasBase, FigureManagerBase, NavigationToolbar2,
cursors, TimerBase)
from matplotlib.backend_bases import ShowBase
from matplotlib.backend_bases import _has_pil
from matplotlib._pylab_helpers import Gcf
from matplotlib.cbook import (is_string_like, is_writable_file_like,
warn_deprecated)
from matplotlib.figure import Figure
from matplotlib.path import Path
from matplotlib.transforms import Affine2D
from matplotlib.widgets import SubplotTool
from matplotlib import rcParams
from . import wx_compat as wxc
import wx
# Debugging settings here...
# Debug level set here. If the debug level is less than 5, information
# messages (progressively more info for lower value) are printed. In addition,
# traceback is performed, and pdb activated, for all uncaught exceptions in
# this case
_DEBUG = 5
if _DEBUG < 5:
import traceback
import pdb
_DEBUG_lvls = {1: 'Low ', 2: 'Med ', 3: 'High', 4: 'Error'}
def DEBUG_MSG(string, lvl=3, o=None):
if lvl >= _DEBUG:
cls = o.__class__
# Jeremy, often times the commented line won't print but the
# one below does. I think WX is redefining stderr, damned
# beast
#print >>sys.stderr, "%s- %s in %s" % (_DEBUG_lvls[lvl], string, cls)
print("%s- %s in %s" % (_DEBUG_lvls[lvl], string, cls))
def debug_on_error(type, value, tb):
"""Code due to Thomas Heller - published in Python Cookbook (O'Reilley)"""
traceback.print_exc(type, value, tb)
print()
pdb.pm() # jdh uncomment
class fake_stderr(object):
"""
Wx does strange things with stderr, as it makes the assumption that
there is probably no console. This redirects stderr to the console, since
we know that there is one!
"""
def write(self, msg):
print("Stderr: %s\n\r" % msg)
#if _DEBUG < 5:
#sys.excepthook = debug_on_error
#WxLogger =wx.LogStderr()
#sys.stderr = fake_stderr
# the true dots per inch on the screen; should be display dependent
# see
# http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5
# for some info about screen dpi
PIXELS_PER_INCH = 75
# Delay time for idle checks
IDLE_DELAY = 5
def error_msg_wx(msg, parent=None):
"""
Signal an error condition -- in a GUI, popup a error dialog
"""
dialog = wx.MessageDialog(parent=parent,
message=msg,
caption='Matplotlib backend_wx error',
style=wx.OK | wx.CENTRE)
dialog.ShowModal()
dialog.Destroy()
return None
def raise_msg_to_str(msg):
"""msg is a return arg from a raise. Join with new lines"""
if not is_string_like(msg):
msg = '\n'.join(map(str, msg))
return msg
class TimerWx(TimerBase):
'''
Subclass of :class:`backend_bases.TimerBase` that uses WxTimer events.
Attributes:
* interval: The time between timer events in milliseconds. Default
is 1000 ms.
* single_shot: Boolean flag indicating whether this timer should
operate as single shot (run once and then stop). Defaults to False.
* callbacks: Stores list of (func, args) tuples that will be called
upon timer events. This list can be manipulated directly, or the
functions add_callback and remove_callback can be used.
'''
def __init__(self, parent, *args, **kwargs):
TimerBase.__init__(self, *args, **kwargs)
# Create a new timer and connect the timer event to our handler.
# For WX, the events have to use a widget for binding.
self.parent = parent
self._timer = wx.Timer(self.parent, wx.NewId())
self.parent.Bind(wx.EVT_TIMER, self._on_timer, self._timer)
# Unbinding causes Wx to stop for some reason. Disabling for now.
# def __del__(self):
# TimerBase.__del__(self)
# self.parent.Bind(wx.EVT_TIMER, None, self._timer)
def _timer_start(self):
self._timer.Start(self._interval, self._single)
def _timer_stop(self):
self._timer.Stop()
def _timer_set_interval(self):
self._timer_start()
def _timer_set_single_shot(self):
self._timer.Start()
def _on_timer(self, *args):
TimerBase._on_timer(self)
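
# Typical use (a hedged sketch; `canvas` stands for any wx figure canvas, and
# `my_callback` is a hypothetical function): timers are created through the
# canvas so the wx event binding gets a real widget as its parent, e.g.
#
#   timer = canvas.new_timer(interval=1000)
#   timer.add_callback(my_callback)
#   timer.start()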
class RendererWx(RendererBase):
"""
The renderer handles all the drawing primitives using a graphics
context instance that controls the colors/styles. It acts as the
'renderer' instance used by many classes in the hierarchy.
"""
# In wxPython, drawing is performed on a wxDC instance, which will
    # generally be mapped to the client area of the window displaying
# the plot. Under wxPython, the wxDC instance has a wx.Pen which
# describes the colour and weight of any lines drawn, and a wxBrush
# which describes the fill colour of any closed polygon.
fontweights = wxc.fontweights
fontangles = wxc.fontangles
# wxPython allows for portable font styles, choosing them appropriately
# for the target platform. Map some standard font names to the portable
# styles
    # QUESTION: Is it wise to agree on standard font names across all backends?
fontnames = wxc.fontnames
def __init__(self, bitmap, dpi):
"""
Initialise a wxWindows renderer instance.
"""
warn_deprecated('2.0', message="The WX backend is "
"deprecated. It's untested "
"and will be removed in Matplotlib 2.2. "
"Use the WXAgg backend instead. "
"See Matplotlib usage FAQ for more info on backends.",
alternative='WXAgg')
RendererBase.__init__(self)
DEBUG_MSG("__init__()", 1, self)
self.width = bitmap.GetWidth()
self.height = bitmap.GetHeight()
self.bitmap = bitmap
self.fontd = {}
self.dpi = dpi
self.gc = None
def flipy(self):
return True
def offset_text_height(self):
return True
def get_text_width_height_descent(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
with FontPropertry prop
"""
# return 1, 1
if ismath:
s = self.strip_math(s)
if self.gc is None:
gc = self.new_gc()
else:
gc = self.gc
gfx_ctx = gc.gfx_ctx
font = self.get_wx_font(s, prop)
gfx_ctx.SetFont(font, wx.BLACK)
w, h, descent, leading = gfx_ctx.GetFullTextExtent(s)
return w, h, descent
def get_canvas_width_height(self):
'return the canvas width and height in display coords'
return self.width, self.height
def handle_clip_rectangle(self, gc):
new_bounds = gc.get_clip_rectangle()
if new_bounds is not None:
new_bounds = new_bounds.bounds
gfx_ctx = gc.gfx_ctx
if gfx_ctx._lastcliprect != new_bounds:
gfx_ctx._lastcliprect = new_bounds
if new_bounds is None:
gfx_ctx.ResetClip()
else:
gfx_ctx.Clip(new_bounds[0],
self.height - new_bounds[1] - new_bounds[3],
new_bounds[2], new_bounds[3])
@staticmethod
def convert_path(gfx_ctx, path, transform):
wxpath = gfx_ctx.CreatePath()
for points, code in path.iter_segments(transform):
if code == Path.MOVETO:
wxpath.MoveToPoint(*points)
elif code == Path.LINETO:
wxpath.AddLineToPoint(*points)
elif code == Path.CURVE3:
wxpath.AddQuadCurveToPoint(*points)
elif code == Path.CURVE4:
wxpath.AddCurveToPoint(*points)
elif code == Path.CLOSEPOLY:
wxpath.CloseSubpath()
return wxpath
def draw_path(self, gc, path, transform, rgbFace=None):
gc.select()
self.handle_clip_rectangle(gc)
gfx_ctx = gc.gfx_ctx
transform = transform + \
Affine2D().scale(1.0, -1.0).translate(0.0, self.height)
wxpath = self.convert_path(gfx_ctx, path, transform)
if rgbFace is not None:
gfx_ctx.SetBrush(wx.Brush(gc.get_wxcolour(rgbFace)))
gfx_ctx.DrawPath(wxpath)
else:
gfx_ctx.StrokePath(wxpath)
gc.unselect()
def draw_image(self, gc, x, y, im):
bbox = gc.get_clip_rectangle()
if bbox is not None:
l, b, w, h = bbox.bounds
else:
l = 0
b = 0
w = self.width
h = self.height
rows, cols = im.shape[:2]
bitmap = wxc.BitmapFromBuffer(cols, rows, im.tostring())
gc = self.get_gc()
gc.select()
gc.gfx_ctx.DrawBitmap(bitmap, int(l), int(self.height - b),
int(w), int(-h))
gc.unselect()
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
if ismath:
s = self.strip_math(s)
DEBUG_MSG("draw_text()", 1, self)
gc.select()
self.handle_clip_rectangle(gc)
gfx_ctx = gc.gfx_ctx
font = self.get_wx_font(s, prop)
color = gc.get_wxcolour(gc.get_rgb())
gfx_ctx.SetFont(font, color)
w, h, d = self.get_text_width_height_descent(s, prop, ismath)
x = int(x)
y = int(y - h)
if angle == 0.0:
gfx_ctx.DrawText(s, x, y)
else:
rads = angle / 180.0 * math.pi
xo = h * math.sin(rads)
yo = h * math.cos(rads)
# wx.GraphicsContext.DrawText takes an optional angle in radians
gfx_ctx.DrawText(s, x - xo, y - yo, rads)
gc.unselect()
def new_gc(self):
"""
Return an instance of GraphicsContextWx, and set it as the current gc
"""
DEBUG_MSG('new_gc()', 2, self)
self.gc = GraphicsContextWx(self.bitmap, self)
self.gc.select()
self.gc.unselect()
return self.gc
def get_gc(self):
"""
Fetch the locally cached gc.
"""
# This is a dirty hack to allow anything with access to a renderer to
# access the current graphics context
assert self.gc is not None, "gc must be defined"
return self.gc
def get_wx_font(self, s, prop):
"""
Return a wx font. Cache instances in a font dictionary for
efficiency
"""
DEBUG_MSG("get_wx_font()", 1, self)
key = hash(prop)
fontprop = prop
fontname = fontprop.get_name()
font = self.fontd.get(key)
if font is not None:
return font
# Allow use of platform independent and dependent font names
wxFontname = self.fontnames.get(fontname, wx.ROMAN)
wxFacename = '' # Empty => wxPython chooses based on wx_fontname
# Font colour is determined by the active wx.Pen
# TODO: It may be wise to cache font information
size = self.points_to_pixels(fontprop.get_size_in_points())
font = wx.Font(int(size + 0.5), # Size
wxFontname, # 'Generic' name
self.fontangles[fontprop.get_style()], # Angle
self.fontweights[fontprop.get_weight()], # Weight
False, # Underline
wxFacename) # Platform font name
# cache the font and return it
self.fontd[key] = font
return font
def points_to_pixels(self, points):
"""
Convert point measures to pixels using dpi and the pixels per
inch of the display.
"""
return points * (PIXELS_PER_INCH / 72.0 * self.dpi / 72.0)
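    # Worked example (illustrative numbers): with PIXELS_PER_INCH = 75 and
    # self.dpi = 100, a 12-point font comes out as
    # 12 * (75 / 72.0 * 100 / 72.0) ~= 17.4 pixels.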
class GraphicsContextWx(GraphicsContextBase):
"""
The graphics context provides the color, line styles, etc...
This class stores a reference to a wxMemoryDC, and a
wxGraphicsContext that draws to it. Creating a wxGraphicsContext
seems to be fairly heavy, so these objects are cached based on the
bitmap object that is passed in.
The base GraphicsContext stores colors as an RGB tuple on the unit
interval, e.g., (0.5, 0.0, 1.0). wxPython uses integers in 0-255, but
since wxPython colour management is rather simple, I have not chosen
to implement a separate colour manager class.
"""
_capd = {'butt': wx.CAP_BUTT,
'projecting': wx.CAP_PROJECTING,
'round': wx.CAP_ROUND}
_joind = {'bevel': wx.JOIN_BEVEL,
'miter': wx.JOIN_MITER,
'round': wx.JOIN_ROUND}
_dashd_wx = wxc.dashd_wx
_cache = weakref.WeakKeyDictionary()
def __init__(self, bitmap, renderer):
GraphicsContextBase.__init__(self)
#assert self.Ok(), "wxMemoryDC not OK to use"
DEBUG_MSG("__init__()", 1, self)
DEBUG_MSG("__init__() 2: %s" % bitmap, 1, self)
dc, gfx_ctx = self._cache.get(bitmap, (None, None))
if dc is None:
dc = wx.MemoryDC()
dc.SelectObject(bitmap)
gfx_ctx = wx.GraphicsContext.Create(dc)
gfx_ctx._lastcliprect = None
self._cache[bitmap] = dc, gfx_ctx
self.bitmap = bitmap
self.dc = dc
self.gfx_ctx = gfx_ctx
self._pen = wx.Pen('BLACK', 1, wx.SOLID)
gfx_ctx.SetPen(self._pen)
self._style = wx.SOLID
self.renderer = renderer
def select(self):
"""
Select the current bitmap into this wxDC instance
"""
if sys.platform == 'win32':
self.dc.SelectObject(self.bitmap)
self.IsSelected = True
def unselect(self):
"""
Select a null bitmap into this wxDC instance
"""
if sys.platform == 'win32':
self.dc.SelectObject(wx.NullBitmap)
self.IsSelected = False
def set_foreground(self, fg, isRGBA=None):
"""
Set the foreground color. fg can be a matlab format string, a
html hex color string, an rgb unit tuple, or a float between 0
and 1. In the latter case, grayscale is used.
"""
# Implementation note: wxPython has a separate concept of pen and
# brush - the brush fills any outline trace left by the pen.
# Here we set both to the same colour - if a figure is not to be
# filled, the renderer will set the brush to be transparent
# Same goes for text foreground...
DEBUG_MSG("set_foreground()", 1, self)
self.select()
GraphicsContextBase.set_foreground(self, fg, isRGBA)
self._pen.SetColour(self.get_wxcolour(self.get_rgb()))
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_graylevel(self, frac):
"""
Set the foreground color to a grayscale value. frac must be a
float between 0 and 1.
"""
DEBUG_MSG("set_graylevel()", 1, self)
self.select()
GraphicsContextBase.set_graylevel(self, frac)
self._pen.SetColour(self.get_wxcolour(self.get_rgb()))
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_linewidth(self, w):
"""
Set the line width.
"""
w = float(w)
DEBUG_MSG("set_linewidth()", 1, self)
self.select()
if 0 < w < 1:
w = 1
GraphicsContextBase.set_linewidth(self, w)
lw = int(self.renderer.points_to_pixels(self._linewidth))
if lw == 0:
lw = 1
self._pen.SetWidth(lw)
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_capstyle(self, cs):
"""
Set the capstyle as a string in ('butt', 'round', 'projecting')
"""
DEBUG_MSG("set_capstyle()", 1, self)
self.select()
GraphicsContextBase.set_capstyle(self, cs)
self._pen.SetCap(GraphicsContextWx._capd[self._capstyle])
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_joinstyle(self, js):
"""
Set the join style to be one of ('miter', 'round', 'bevel')
"""
DEBUG_MSG("set_joinstyle()", 1, self)
self.select()
GraphicsContextBase.set_joinstyle(self, js)
self._pen.SetJoin(GraphicsContextWx._joind[self._joinstyle])
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def set_linestyle(self, ls):
"""
Set the line style to one of the styles in GraphicsContextWx._dashd_wx
(e.g. 'solid', 'dashed', 'dashdot', 'dotted')
"""
DEBUG_MSG("set_linestyle()", 1, self)
self.select()
GraphicsContextBase.set_linestyle(self, ls)
try:
self._style = GraphicsContextWx._dashd_wx[ls]
except KeyError:
self._style = wx.LONG_DASH # Style not used elsewhere...
# On MS Windows platform, only line width of 1 allowed for dash lines
if wx.Platform == '__WXMSW__':
self.set_linewidth(1)
self._pen.SetStyle(self._style)
self.gfx_ctx.SetPen(self._pen)
self.unselect()
def get_wxcolour(self, color):
"""return a wx.Colour from RGB format"""
DEBUG_MSG("get_wx_color()", 1, self)
if len(color) == 3:
r, g, b = color
r *= 255
g *= 255
b *= 255
return wx.Colour(red=int(r), green=int(g), blue=int(b))
else:
r, g, b, a = color
r *= 255
g *= 255
b *= 255
a *= 255
return wx.Colour(
red=int(r),
green=int(g),
blue=int(b),
alpha=int(a))
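    # Examples of the mapping above (int() truncates):
    #     (0.5, 0.0, 1.0)      -> wx.Colour(127, 0, 255)
    #     (1.0, 1.0, 1.0, 0.5) -> wx.Colour(255, 255, 255, alpha=127)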
class FigureCanvasWx(FigureCanvasBase, wx.Panel):
"""
The FigureCanvas contains the figure and does event handling.
In the wxPython backend, it is derived from wxPanel, and (usually) lives
inside a frame instantiated by a FigureManagerWx. The parent window
probably implements a wx.Sizer to control the displayed control size - but
we give a hint as to our preferred minimum size.
"""
keyvald = {
wx.WXK_CONTROL: 'control',
wx.WXK_SHIFT: 'shift',
wx.WXK_ALT: 'alt',
wx.WXK_LEFT: 'left',
wx.WXK_UP: 'up',
wx.WXK_RIGHT: 'right',
wx.WXK_DOWN: 'down',
wx.WXK_ESCAPE: 'escape',
wx.WXK_F1: 'f1',
wx.WXK_F2: 'f2',
wx.WXK_F3: 'f3',
wx.WXK_F4: 'f4',
wx.WXK_F5: 'f5',
wx.WXK_F6: 'f6',
wx.WXK_F7: 'f7',
wx.WXK_F8: 'f8',
wx.WXK_F9: 'f9',
wx.WXK_F10: 'f10',
wx.WXK_F11: 'f11',
wx.WXK_F12: 'f12',
wx.WXK_SCROLL: 'scroll_lock',
wx.WXK_PAUSE: 'break',
wx.WXK_BACK: 'backspace',
wx.WXK_RETURN: 'enter',
wx.WXK_INSERT: 'insert',
wx.WXK_DELETE: 'delete',
wx.WXK_HOME: 'home',
wx.WXK_END: 'end',
wx.WXK_PAGEUP: 'pageup',
wx.WXK_PAGEDOWN: 'pagedown',
wx.WXK_NUMPAD0: '0',
wx.WXK_NUMPAD1: '1',
wx.WXK_NUMPAD2: '2',
wx.WXK_NUMPAD3: '3',
wx.WXK_NUMPAD4: '4',
wx.WXK_NUMPAD5: '5',
wx.WXK_NUMPAD6: '6',
wx.WXK_NUMPAD7: '7',
wx.WXK_NUMPAD8: '8',
wx.WXK_NUMPAD9: '9',
wx.WXK_NUMPAD_ADD: '+',
wx.WXK_NUMPAD_SUBTRACT: '-',
wx.WXK_NUMPAD_MULTIPLY: '*',
wx.WXK_NUMPAD_DIVIDE: '/',
wx.WXK_NUMPAD_DECIMAL: 'dec',
wx.WXK_NUMPAD_ENTER: 'enter',
wx.WXK_NUMPAD_UP: 'up',
wx.WXK_NUMPAD_RIGHT: 'right',
wx.WXK_NUMPAD_DOWN: 'down',
wx.WXK_NUMPAD_LEFT: 'left',
wx.WXK_NUMPAD_PAGEUP: 'pageup',
wx.WXK_NUMPAD_PAGEDOWN: 'pagedown',
wx.WXK_NUMPAD_HOME: 'home',
wx.WXK_NUMPAD_END: 'end',
wx.WXK_NUMPAD_INSERT: 'insert',
wx.WXK_NUMPAD_DELETE: 'delete',
}
def __init__(self, parent, id, figure):
"""
Initialise a FigureWx instance.
- Initialise the FigureCanvasBase and wxPanel parents.
- Set event handlers for:
EVT_SIZE (Resize event)
EVT_PAINT (Paint event)
"""
FigureCanvasBase.__init__(self, figure)
# Set preferred window size hint - helps the sizer (if one is
# connected)
l, b, w, h = figure.bbox.bounds
w = int(math.ceil(w))
h = int(math.ceil(h))
wx.Panel.__init__(self, parent, id, size=wx.Size(w, h))
def do_nothing(*args, **kwargs):
warnings.warn(
"could not find a setinitialsize function for backend_wx; "
"please report your wxpython version=%s "
"to the matplotlib developers list" %
wxc.backend_version)
pass
# try to find the set size func across wx versions
try:
getattr(self, 'SetInitialSize')
except AttributeError:
self.SetInitialSize = getattr(self, 'SetBestFittingSize',
do_nothing)
if not hasattr(self, 'IsShownOnScreen'):
self.IsShownOnScreen = getattr(self, 'IsVisible',
lambda *args: True)
# Create the drawing bitmap
self.bitmap = wxc.EmptyBitmap(w, h)
DEBUG_MSG("__init__() - bitmap w:%d h:%d" % (w, h), 2, self)
# TODO: Add support for 'point' inspection and plot navigation.
self._isDrawn = False
self.Bind(wx.EVT_SIZE, self._onSize)
self.Bind(wx.EVT_PAINT, self._onPaint)
self.Bind(wx.EVT_KEY_DOWN, self._onKeyDown)
self.Bind(wx.EVT_KEY_UP, self._onKeyUp)
self.Bind(wx.EVT_RIGHT_DOWN, self._onRightButtonDown)
self.Bind(wx.EVT_RIGHT_DCLICK, self._onRightButtonDClick)
self.Bind(wx.EVT_RIGHT_UP, self._onRightButtonUp)
self.Bind(wx.EVT_MOUSEWHEEL, self._onMouseWheel)
self.Bind(wx.EVT_LEFT_DOWN, self._onLeftButtonDown)
self.Bind(wx.EVT_LEFT_DCLICK, self._onLeftButtonDClick)
self.Bind(wx.EVT_LEFT_UP, self._onLeftButtonUp)
self.Bind(wx.EVT_MOTION, self._onMotion)
self.Bind(wx.EVT_LEAVE_WINDOW, self._onLeave)
self.Bind(wx.EVT_ENTER_WINDOW, self._onEnter)
self.Bind(wx.EVT_IDLE, self._onIdle)
# Add middle button events
self.Bind(wx.EVT_MIDDLE_DOWN, self._onMiddleButtonDown)
self.Bind(wx.EVT_MIDDLE_DCLICK, self._onMiddleButtonDClick)
self.Bind(wx.EVT_MIDDLE_UP, self._onMiddleButtonUp)
self.Bind(wx.EVT_MOUSE_CAPTURE_CHANGED, self._onCaptureLost)
self.Bind(wx.EVT_MOUSE_CAPTURE_LOST, self._onCaptureLost)
if wx.VERSION_STRING < "2.9":
# only needed in 2.8 to reduce flicker
self.SetBackgroundStyle(wx.BG_STYLE_CUSTOM)
self.Bind(wx.EVT_ERASE_BACKGROUND, self._onEraseBackground)
else:
# this does the same in 2.9+
self.SetBackgroundStyle(wx.BG_STYLE_PAINT)
self.macros = {} # dict from wx id to seq of macros
def Destroy(self, *args, **kwargs):
wx.Panel.Destroy(self, *args, **kwargs)
def Copy_to_Clipboard(self, event=None):
"copy bitmap of canvas to system clipboard"
bmp_obj = wx.BitmapDataObject()
bmp_obj.SetBitmap(self.bitmap)
if not wx.TheClipboard.IsOpened():
open_success = wx.TheClipboard.Open()
if open_success:
wx.TheClipboard.SetData(bmp_obj)
wx.TheClipboard.Close()
wx.TheClipboard.Flush()
def draw_idle(self):
"""
Delay rendering until the GUI is idle.
"""
DEBUG_MSG("draw_idle()", 1, self)
self._isDrawn = False # Force redraw
# Triggering a paint event is all that is needed to defer drawing
# until later. The platform will send the event when it thinks it is
# a good time (usually as soon as there are no other events pending).
self.Refresh(eraseBackground=False)
def draw(self, drawDC=None):
"""
Render the figure using RendererWx instance renderer, or using a
previously defined renderer if none is specified.
"""
DEBUG_MSG("draw()", 1, self)
self.renderer = RendererWx(self.bitmap, self.figure.dpi)
self.figure.draw(self.renderer)
self._isDrawn = True
self.gui_repaint(drawDC=drawDC)
def new_timer(self, *args, **kwargs):
"""
Creates a new backend-specific subclass of
:class:`backend_bases.Timer`. This is useful for getting periodic
events through the backend's native event loop. Implemented only
for backends with GUIs.
optional arguments:
*interval*
Timer interval in milliseconds
*callbacks*
Sequence of (func, args, kwargs) where func(*args, **kwargs) will
be executed by the timer every *interval*.
"""
return TimerWx(self, *args, **kwargs)
def flush_events(self):
wx.Yield()
def start_event_loop(self, timeout=0):
"""
Start an event loop. This is used to start a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events. This should not be
confused with the main GUI event loop, which is always running
and has nothing to do with this.
This call blocks until a callback function triggers
stop_event_loop() or *timeout* is reached. If *timeout* is
<=0, never timeout.
Raises RuntimeError if event loop is already running.
"""
if hasattr(self, '_event_loop'):
raise RuntimeError("Event loop already running")
id = wx.NewId()
timer = wx.Timer(self, id=id)
if timeout > 0:
timer.Start(timeout * 1000, oneShot=True)
self.Bind(wx.EVT_TIMER, self.stop_event_loop, id=id)
# Event loop handler for start/stop event loop
self._event_loop = wxc.EventLoop()
self._event_loop.Run()
timer.Stop()
def stop_event_loop(self, event=None):
"""
Stop an event loop. This is used to stop a blocking event
loop so that interactive functions, such as ginput and
waitforbuttonpress, can wait for events.
"""
if hasattr(self, '_event_loop'):
if self._event_loop.IsRunning():
self._event_loop.Exit()
del self._event_loop
def _get_imagesave_wildcards(self):
'return the wildcard string for the filesave dialog'
default_filetype = self.get_default_filetype()
filetypes = self.get_supported_filetypes_grouped()
sorted_filetypes = sorted(filetypes.items())
wildcards = []
extensions = []
filter_index = 0
for i, (name, exts) in enumerate(sorted_filetypes):
ext_list = ';'.join(['*.%s' % ext for ext in exts])
extensions.append(exts[0])
wildcard = '%s (%s)|%s' % (name, ext_list, ext_list)
if default_filetype in exts:
filter_index = i
wildcards.append(wildcard)
wildcards = '|'.join(wildcards)
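        # e.g. a single entry in the joined wildcard string looks like
        #     'Portable Network Graphics (*.png)|*.png'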
return wildcards, extensions, filter_index
def gui_repaint(self, drawDC=None, origin='WX'):
"""
Performs update of the displayed image on the GUI canvas, using the
supplied wx.PaintDC device context.
The 'WXAgg' backend sets origin accordingly.
"""
DEBUG_MSG("gui_repaint()", 1, self)
if self.IsShownOnScreen():
if not drawDC:
# not called from OnPaint; use a ClientDC
drawDC = wx.ClientDC(self)
# following is for 'WX' backend on Windows
# the bitmap can not be in use by another DC,
# see GraphicsContextWx._cache
if wx.Platform == '__WXMSW__' and origin == 'WX':
img = self.bitmap.ConvertToImage()
bmp = img.ConvertToBitmap()
drawDC.DrawBitmap(bmp, 0, 0)
else:
drawDC.DrawBitmap(self.bitmap, 0, 0)
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['bmp'] = 'Windows bitmap'
filetypes['jpeg'] = 'JPEG'
filetypes['jpg'] = 'JPEG'
filetypes['pcx'] = 'PCX'
filetypes['png'] = 'Portable Network Graphics'
filetypes['tif'] = 'Tagged Image Format File'
filetypes['tiff'] = 'Tagged Image Format File'
filetypes['xpm'] = 'X pixmap'
def print_figure(self, filename, *args, **kwargs):
# Use pure Agg renderer to draw
FigureCanvasBase.print_figure(self, filename, *args, **kwargs)
# Restore the current view; this is needed because the
# artist contains() methods rely on particular attributes
# of the rendered figure for determining things like
# bounding boxes.
if self._isDrawn:
self.draw()
def print_bmp(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_BMP, *args, **kwargs)
if not _has_pil:
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_JPEG,
*args, **kwargs)
print_jpg = print_jpeg
def print_pcx(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_PCX, *args, **kwargs)
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_PNG, *args, **kwargs)
if not _has_pil:
def print_tiff(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_TIF,
*args, **kwargs)
print_tif = print_tiff
def print_xpm(self, filename, *args, **kwargs):
return self._print_image(filename, wx.BITMAP_TYPE_XPM, *args, **kwargs)
def _print_image(self, filename, filetype, *args, **kwargs):
origBitmap = self.bitmap
l, b, width, height = self.figure.bbox.bounds
width = int(math.ceil(width))
height = int(math.ceil(height))
self.bitmap = wxc.EmptyBitmap(width, height)
renderer = RendererWx(self.bitmap, self.figure.dpi)
gc = renderer.new_gc()
self.figure.draw(renderer)
# image is the object that we call SaveFile on.
image = self.bitmap
# set the JPEG quality appropriately. Unfortunately, it is only
# possible to set the quality on a wx.Image object. So if we
# are saving a JPEG, convert the wx.Bitmap to a wx.Image,
# and set the quality.
if filetype == wx.BITMAP_TYPE_JPEG:
jpeg_quality = kwargs.get('quality',
rcParams['savefig.jpeg_quality'])
image = self.bitmap.ConvertToImage()
image.SetOption(wx.IMAGE_OPTION_QUALITY, str(jpeg_quality))
# Now that we have rendered into the bitmap, save it
# to the appropriate file type and clean up
if is_string_like(filename):
if not image.SaveFile(filename, filetype):
DEBUG_MSG('print_figure() file save error', 4, self)
raise RuntimeError(
'Could not save figure to %s\n' %
(filename))
elif is_writable_file_like(filename):
if not isinstance(image, wx.Image):
image = image.ConvertToImage()
if not image.SaveStream(filename, filetype):
DEBUG_MSG('print_figure() file save error', 4, self)
raise RuntimeError(
'Could not save figure to %s\n' %
(filename))
# Restore everything to normal
self.bitmap = origBitmap
# Note: draw is required here since bits of state about the
# last renderer are strewn about the artist draw methods. Do
# not remove the draw without first verifying that these have
# been cleaned up. The artist contains() methods will fail
# otherwise.
if self._isDrawn:
self.draw()
self.Refresh()
def _onPaint(self, evt):
"""
Called when wxPaintEvt is generated
"""
DEBUG_MSG("_onPaint()", 1, self)
drawDC = wx.PaintDC(self)
if not self._isDrawn:
self.draw(drawDC=drawDC)
else:
self.gui_repaint(drawDC=drawDC)
evt.Skip()
def _onEraseBackground(self, evt):
"""
Called when window is redrawn; since we are blitting the entire
image, we can leave this blank to suppress flicker.
"""
pass
def _onSize(self, evt):
"""
Called when wxEventSize is generated.
In this application we attempt to resize to fit the window, so it
is better to take the performance hit and redraw the whole window.
"""
DEBUG_MSG("_onSize()", 2, self)
# Create a new, correctly sized bitmap
self._width, self._height = self.GetClientSize()
self.bitmap = wxc.EmptyBitmap(self._width, self._height)
self._isDrawn = False
if self._width <= 1 or self._height <= 1:
return # Empty figure
dpival = self.figure.dpi
winch = self._width / dpival
hinch = self._height / dpival
self.figure.set_size_inches(winch, hinch, forward=False)
# Rendering will happen on the associated paint event
# so no need to do anything here except to make sure
# the whole background is repainted.
self.Refresh(eraseBackground=False)
FigureCanvasBase.resize_event(self)
def _get_key(self, evt):
keyval = evt.KeyCode
if keyval in self.keyvald:
key = self.keyvald[keyval]
elif keyval < 256:
key = chr(keyval)
# wx always returns an uppercase, so make it lowercase if the shift
# key is not depressed (NOTE: this will not handle Caps Lock)
if not evt.ShiftDown():
key = key.lower()
else:
key = None
for meth, prefix in (
[evt.AltDown, 'alt'],
[evt.ControlDown, 'ctrl'], ):
if meth():
key = '{0}+{1}'.format(prefix, key)
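        # e.g. Ctrl+Shift+S yields 'ctrl+S'; S without Shift yields 's'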
return key
def _onIdle(self, evt):
'a GUI idle event'
evt.Skip()
FigureCanvasBase.idle_event(self, guiEvent=evt)
def _onKeyDown(self, evt):
"""Capture key press."""
key = self._get_key(evt)
evt.Skip()
FigureCanvasBase.key_press_event(self, key, guiEvent=evt)
def _onKeyUp(self, evt):
"""Release key."""
key = self._get_key(evt)
# print 'release key', key
evt.Skip()
FigureCanvasBase.key_release_event(self, key, guiEvent=evt)
def _set_capture(self, capture=True):
"""control wx mouse capture """
if self.HasCapture():
self.ReleaseMouse()
if capture:
self.CaptureMouse()
def _onCaptureLost(self, evt):
"""Capture changed or lost"""
self._set_capture(False)
def _onRightButtonDown(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self._set_capture(True)
FigureCanvasBase.button_press_event(self, x, y, 3, guiEvent=evt)
def _onRightButtonDClick(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self._set_capture(True)
FigureCanvasBase.button_press_event(self, x, y, 3,
dblclick=True, guiEvent=evt)
def _onRightButtonUp(self, evt):
"""End measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self._set_capture(False)
FigureCanvasBase.button_release_event(self, x, y, 3, guiEvent=evt)
def _onLeftButtonDown(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self._set_capture(True)
FigureCanvasBase.button_press_event(self, x, y, 1, guiEvent=evt)
def _onLeftButtonDClick(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self._set_capture(True)
FigureCanvasBase.button_press_event(self, x, y, 1,
dblclick=True, guiEvent=evt)
def _onLeftButtonUp(self, evt):
"""End measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
# print 'release button', 1
evt.Skip()
self._set_capture(False)
FigureCanvasBase.button_release_event(self, x, y, 1, guiEvent=evt)
# Add middle button events
def _onMiddleButtonDown(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self._set_capture(True)
FigureCanvasBase.button_press_event(self, x, y, 2, guiEvent=evt)
def _onMiddleButtonDClick(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
self._set_capture(True)
FigureCanvasBase.button_press_event(self, x, y, 2,
dblclick=True, guiEvent=evt)
def _onMiddleButtonUp(self, evt):
"""End measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
# print 'release button', 1
evt.Skip()
self._set_capture(False)
FigureCanvasBase.button_release_event(self, x, y, 2, guiEvent=evt)
def _onMouseWheel(self, evt):
"""Translate mouse wheel events into matplotlib events"""
# Determine mouse location
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
# Convert delta/rotation/rate into a floating point step size
delta = evt.GetWheelDelta()
rotation = evt.GetWheelRotation()
rate = evt.GetLinesPerAction()
# print "delta,rotation,rate",delta,rotation,rate
step = rate * float(rotation) / delta
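        # e.g. one notch on a typical wheel: delta=120, rotation=120,
        # rate=3  ->  step = 3.0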
# Done handling event
evt.Skip()
# Mac is giving two events for every wheel event
# Need to skip every second one
if wx.Platform == '__WXMAC__':
if not hasattr(self, '_skipwheelevent'):
self._skipwheelevent = True
elif self._skipwheelevent:
self._skipwheelevent = False
return # Return without processing event
else:
self._skipwheelevent = True
# Convert to mpl event
FigureCanvasBase.scroll_event(self, x, y, step, guiEvent=evt)
def _onMotion(self, evt):
"""Start measuring on an axis."""
x = evt.GetX()
y = self.figure.bbox.height - evt.GetY()
evt.Skip()
FigureCanvasBase.motion_notify_event(self, x, y, guiEvent=evt)
def _onLeave(self, evt):
"""Mouse has left the window."""
evt.Skip()
FigureCanvasBase.leave_notify_event(self, guiEvent=evt)
def _onEnter(self, evt):
"""Mouse has entered the window."""
FigureCanvasBase.enter_notify_event(self, guiEvent=evt)
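# Minimal embedding sketch (not part of the backend API; the helper name and
# plot data are illustrative). It assumes a wx.App already exists and simply
# parents the canvas to a user-created frame:
def _example_embed_canvas():
    fig = Figure((4, 3), dpi=100)
    ax = fig.add_subplot(111)
    ax.plot([0, 1, 2], [0, 1, 4])
    frame = wx.Frame(None, -1, "embedded matplotlib")
    canvas = FigureCanvasWx(frame, -1, fig)  # repaints itself on EVT_PAINT
    frame.Fit()
    frame.Show()
    return frame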
########################################################################
#
# The following functions and classes are for pylab compatibility
# mode (matplotlib.pylab) and implement figure managers, etc...
#
########################################################################
def _create_wx_app():
"""
Creates a wx.App instance if it has not been created so far.
"""
wxapp = wx.GetApp()
if wxapp is None:
wxapp = wx.App(False)
wxapp.SetExitOnFrameDelete(True)
# retain a reference to the app object so it does not get garbage
# collected and cause segmentation faults
_create_wx_app.theWxApp = wxapp
def draw_if_interactive():
"""
This should be overridden in a windowing environment if drawing
should be done in interactive python mode
"""
DEBUG_MSG("draw_if_interactive()", 1, None)
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw_idle()
class Show(ShowBase):
def mainloop(self):
needmain = not wx.App.IsMainLoopRunning()
if needmain:
wxapp = wx.GetApp()
if wxapp is not None:
wxapp.MainLoop()
show = Show()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# in order to expose the Figure constructor to the pylab
# interface we need to create the figure here
DEBUG_MSG("new_figure_manager()", 3, None)
_create_wx_app()
FigureClass = kwargs.pop('FigureClass', Figure)
fig = FigureClass(*args, **kwargs)
return new_figure_manager_given_figure(num, fig)
def new_figure_manager_given_figure(num, figure):
"""
Create a new figure manager instance for the given figure.
"""
fig = figure
frame = FigureFrameWx(num, fig)
figmgr = frame.get_figure_manager()
if matplotlib.is_interactive():
figmgr.frame.Show()
figure.canvas.draw_idle()
return figmgr
class FigureFrameWx(wx.Frame):
def __init__(self, num, fig):
# On non-Windows platform, explicitly set the position - fix
# positioning bug on some Linux platforms
if wx.Platform == '__WXMSW__':
pos = wx.DefaultPosition
else:
pos = wx.Point(20, 20)
l, b, w, h = fig.bbox.bounds
wx.Frame.__init__(self, parent=None, id=-1, pos=pos,
title="Figure %d" % num)
# Frame will be sized later by the Fit method
DEBUG_MSG("__init__()", 1, self)
self.num = num
statbar = StatusBarWx(self)
self.SetStatusBar(statbar)
self.canvas = self.get_canvas(fig)
self.canvas.SetInitialSize(wx.Size(fig.bbox.width, fig.bbox.height))
self.canvas.SetFocus()
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.Add(self.canvas, 1, wx.TOP | wx.LEFT | wx.EXPAND)
# By adding toolbar in sizer, we are able to put it at the bottom
# of the frame - so appearance is closer to GTK version
self.toolbar = self._get_toolbar(statbar)
if self.toolbar is not None:
self.toolbar.Realize()
# On Windows platform, default window size is incorrect, so set
# toolbar width to figure width.
if wxc.is_phoenix:
tw, th = self.toolbar.GetSize()
fw, fh = self.canvas.GetSize()
else:
tw, th = self.toolbar.GetSizeTuple()
fw, fh = self.canvas.GetSizeTuple()
self.toolbar.SetSize(wx.Size(fw, th))
self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
self.SetSizer(self.sizer)
self.Fit()
self.canvas.SetMinSize((2, 2))
# give the window a matplotlib icon rather than the stock one.
# This is not currently working on Linux and is untested elsewhere.
# icon_path = os.path.join(matplotlib.rcParams['datapath'],
# 'images', 'matplotlib.png')
#icon = wx.IconFromBitmap(wx.Bitmap(icon_path))
# for xpm type icons try:
#icon = wx.Icon(icon_path, wx.BITMAP_TYPE_XPM)
# self.SetIcon(icon)
self.figmgr = FigureManagerWx(self.canvas, num, self)
self.Bind(wx.EVT_CLOSE, self._onClose)
def _get_toolbar(self, statbar):
if rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2Wx(self.canvas)
toolbar.set_status_bar(statbar)
else:
toolbar = None
return toolbar
def get_canvas(self, fig):
return FigureCanvasWx(self, -1, fig)
def get_figure_manager(self):
DEBUG_MSG("get_figure_manager()", 1, self)
return self.figmgr
def _onClose(self, evt):
DEBUG_MSG("onClose()", 1, self)
self.canvas.close_event()
self.canvas.stop_event_loop()
Gcf.destroy(self.num)
# self.Destroy()
def GetToolBar(self):
"""Override wxFrame::GetToolBar as we don't have managed toolbar"""
return self.toolbar
def Destroy(self, *args, **kwargs):
try:
self.canvas.mpl_disconnect(self.toolbar._idDrag)
# Rationale for line above: see issue 2941338.
except AttributeError:
pass # classic toolbar lacks the attribute
if not self.IsBeingDeleted():
wx.Frame.Destroy(self, *args, **kwargs)
if self.toolbar is not None:
self.toolbar.Destroy()
wxapp = wx.GetApp()
if wxapp:
wxapp.Yield()
return True
class FigureManagerWx(FigureManagerBase):
"""
This class contains the FigureCanvas and GUI frame
It is instantiated by GcfWx whenever a new figure is created. GcfWx is
responsible for managing multiple instances of FigureManagerWx.
public attrs
canvas - a FigureCanvasWx(wx.Panel) instance
window - a wxFrame instance - wxpython.org/Phoenix/docs/html/Frame.html
"""
def __init__(self, canvas, num, frame):
DEBUG_MSG("__init__()", 1, self)
FigureManagerBase.__init__(self, canvas, num)
self.frame = frame
self.window = frame
self.tb = frame.GetToolBar()
self.toolbar = self.tb # consistent with other backends
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.tb is not None:
self.tb.update()
self.canvas.figure.add_axobserver(notify_axes_change)
def show(self):
self.frame.Show()
self.canvas.draw()
def destroy(self, *args):
DEBUG_MSG("destroy()", 1, self)
self.frame.Destroy()
wxapp = wx.GetApp()
if wxapp:
wxapp.Yield()
def get_window_title(self):
return self.window.GetTitle()
def set_window_title(self, title):
self.window.SetTitle(title)
def resize(self, width, height):
'Set the canvas size in pixels'
self.canvas.SetInitialSize(wx.Size(width, height))
self.window.GetSizer().Fit(self.window)
# Identifiers for toolbar controls - images_wx contains bitmaps for the images
# used in the controls. wxWindows does not provide any stock images, so I've
# 'stolen' those from GTK2, and transformed them into the appropriate format.
#import images_wx
_NTB_AXISMENU = wx.NewId()
_NTB_AXISMENU_BUTTON = wx.NewId()
_NTB_X_PAN_LEFT = wx.NewId()
_NTB_X_PAN_RIGHT = wx.NewId()
_NTB_X_ZOOMIN = wx.NewId()
_NTB_X_ZOOMOUT = wx.NewId()
_NTB_Y_PAN_UP = wx.NewId()
_NTB_Y_PAN_DOWN = wx.NewId()
_NTB_Y_ZOOMIN = wx.NewId()
_NTB_Y_ZOOMOUT = wx.NewId()
#_NTB_SUBPLOT =wx.NewId()
_NTB_SAVE = wx.NewId()
_NTB_CLOSE = wx.NewId()
def _load_bitmap(filename):
"""
Load a bitmap file from the backends/images subdirectory in which the
matplotlib library is installed. The filename parameter should not
contain any path information as this is determined automatically.
Returns a wx.Bitmap object
"""
basedir = os.path.join(rcParams['datapath'], 'images')
bmpFilename = os.path.normpath(os.path.join(basedir, filename))
if not os.path.exists(bmpFilename):
raise IOError('Could not find bitmap file "%s"; dying' % bmpFilename)
bmp = wx.Bitmap(bmpFilename)
return bmp
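# e.g. _load_bitmap('home.png') returns the toolbar's "home" icon as a
# wx.Bitmap, assuming the stock matplotlib image set is installed.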
class MenuButtonWx(wx.Button):
"""
wxPython does not permit a menu to be incorporated directly into a toolbar.
This class simulates the effect by associating a pop-up menu with a button
in the toolbar, and managing this as though it were a menu.
"""
def __init__(self, parent):
wx.Button.__init__(self, parent, _NTB_AXISMENU_BUTTON, "Axes: ",
style=wx.BU_EXACTFIT)
self._toolbar = parent
self._menu = wx.Menu()
self._axisId = []
# First two menu items never change...
self._allId = wx.NewId()
self._invertId = wx.NewId()
self._menu.Append(self._allId, "All", "Select all axes", False)
self._menu.Append(self._invertId, "Invert", "Invert axes selected",
False)
self._menu.AppendSeparator()
self.Bind(wx.EVT_BUTTON, self._onMenuButton, id=_NTB_AXISMENU_BUTTON)
self.Bind(wx.EVT_MENU, self._handleSelectAllAxes, id=self._allId)
self.Bind(wx.EVT_MENU, self._handleInvertAxesSelected,
id=self._invertId)
def Destroy(self):
self._menu.Destroy()
wx.Button.Destroy(self)  # call the base class, not this override, to avoid recursion
def _onMenuButton(self, evt):
"""Handle menu button pressed."""
if wxc.is_phoenix:
x, y = self.GetPosition()
w, h = self.GetSize()
else:
x, y = self.GetPositionTuple()
w, h = self.GetSizeTuple()
self.PopupMenuXY(self._menu, x, y + h - 4)
# When menu returned, indicate selection in button
evt.Skip()
def _handleSelectAllAxes(self, evt):
"""Called when the 'select all axes' menu item is selected."""
if len(self._axisId) == 0:
return
for i in range(len(self._axisId)):
self._menu.Check(self._axisId[i], True)
self._toolbar.set_active(self.getActiveAxes())
evt.Skip()
def _handleInvertAxesSelected(self, evt):
"""Called when the invert all menu item is selected"""
if len(self._axisId) == 0:
return
for i in range(len(self._axisId)):
if self._menu.IsChecked(self._axisId[i]):
self._menu.Check(self._axisId[i], False)
else:
self._menu.Check(self._axisId[i], True)
self._toolbar.set_active(self.getActiveAxes())
evt.Skip()
def _onMenuItemSelected(self, evt):
"""Called whenever one of the specific axis menu items is selected"""
current = self._menu.IsChecked(evt.GetId())
if current:
new = False
else:
new = True
self._menu.Check(evt.GetId(), new)
# Lines above would be deleted based on svn tracker ID 2841525;
# not clear whether this matters or not.
self._toolbar.set_active(self.getActiveAxes())
evt.Skip()
def updateAxes(self, maxAxis):
"""Ensures that there are entries for max_axis axes in the menu
(selected by default)."""
if maxAxis > len(self._axisId):
for i in range(len(self._axisId) + 1, maxAxis + 1, 1):
menuId = wx.NewId()
self._axisId.append(menuId)
self._menu.Append(menuId, "Axis %d" % i,
"Select axis %d" % i,
True)
self._menu.Check(menuId, True)
self.Bind(wx.EVT_MENU, self._onMenuItemSelected, id=menuId)
elif maxAxis < len(self._axisId):
for menuId in self._axisId[maxAxis:]:
self._menu.Delete(menuId)
self._axisId = self._axisId[:maxAxis]
self._toolbar.set_active(list(xrange(maxAxis)))
def getActiveAxes(self):
"""Return a list of the selected axes."""
active = []
for i in range(len(self._axisId)):
if self._menu.IsChecked(self._axisId[i]):
active.append(i)
return active
def updateButtonText(self, lst):
"""Update the list of selected axes in the menu button"""
axis_txt = ''
for e in lst:
axis_txt += '%d,' % (e + 1)
# remove trailing ',' and add to button string
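        # e.g. updateButtonText([0, 2]) sets the label to "Axes: 1,3"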
self.SetLabel("Axes: %s" % axis_txt[:-1])
cursord = {
cursors.MOVE: wx.CURSOR_HAND,
cursors.HAND: wx.CURSOR_HAND,
cursors.POINTER: wx.CURSOR_ARROW,
cursors.SELECT_REGION: wx.CURSOR_CROSS,
}
class SubplotToolWX(wx.Frame):
def __init__(self, targetfig):
wx.Frame.__init__(self, None, -1, "Configure subplots")
toolfig = Figure((6, 3))
canvas = FigureCanvasWx(self, -1, toolfig)
# Create a figure manager to manage things
figmgr = FigureManager(canvas, 1, self)
# Now put all into a sizer
sizer = wx.BoxSizer(wx.VERTICAL)
# This way of adding to sizer allows resizing
sizer.Add(canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
self.SetSizer(sizer)
self.Fit()
tool = SubplotTool(targetfig, toolfig)
class NavigationToolbar2Wx(NavigationToolbar2, wx.ToolBar):
def __init__(self, canvas):
wx.ToolBar.__init__(self, canvas.GetParent(), -1)
NavigationToolbar2.__init__(self, canvas)
self.canvas = canvas
self._idle = True
self.statbar = None
self.prevZoomRect = None
# for now, use alternate zoom-rectangle drawing on all
# Macs. N.B. In future versions of wx it may be possible to
# detect Retina displays with window.GetContentScaleFactor()
# and/or dc.GetContentScaleFactor()
self.retinaFix = 'wxMac' in wx.PlatformInfo
def get_canvas(self, frame, fig):
return FigureCanvasWx(frame, -1, fig)
def _init_toolbar(self):
DEBUG_MSG("_init_toolbar", 1, self)
self._parent = self.canvas.GetParent()
self.wx_ids = {}
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.AddSeparator()
continue
self.wx_ids[text] = wx.NewId()
wxc._AddTool(self, self.wx_ids, text,
_load_bitmap(image_file + '.png'),
tooltip_text)
self.Bind(wx.EVT_TOOL, getattr(self, callback),
id=self.wx_ids[text])
self.Realize()
def zoom(self, *args):
self.ToggleTool(self.wx_ids['Pan'], False)
NavigationToolbar2.zoom(self, *args)
def pan(self, *args):
self.ToggleTool(self.wx_ids['Zoom'], False)
NavigationToolbar2.pan(self, *args)
def configure_subplots(self, evt):
frame = wx.Frame(None, -1, "Configure subplots")
toolfig = Figure((6, 3))
canvas = self.get_canvas(frame, toolfig)
# Create a figure manager to manage things
figmgr = FigureManager(canvas, 1, frame)
# Now put all into a sizer
sizer = wx.BoxSizer(wx.VERTICAL)
# This way of adding to sizer allows resizing
sizer.Add(canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
frame.SetSizer(sizer)
frame.Fit()
tool = SubplotTool(self.canvas.figure, toolfig)
frame.Show()
def save_figure(self, *args):
# Fetch the required filename and file type.
filetypes, exts, filter_index = self.canvas._get_imagesave_wildcards()
default_file = self.canvas.get_default_filename()
dlg = wx.FileDialog(self._parent, "Save to file", "", default_file,
filetypes,
wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
dlg.SetFilterIndex(filter_index)
if dlg.ShowModal() == wx.ID_OK:
dirname = dlg.GetDirectory()
filename = dlg.GetFilename()
DEBUG_MSG(
'Save file dir:%s name:%s' %
(dirname, filename), 3, self)
format = exts[dlg.GetFilterIndex()]
basename, ext = os.path.splitext(filename)
if ext.startswith('.'):
ext = ext[1:]
if ext in ('svg', 'pdf', 'ps', 'eps', 'png') and format != ext:
# looks like they forgot to set the image type drop
# down, going with the extension.
warnings.warn(
'extension %s did not match the selected '
'image type %s; going with %s' %
(ext, format, ext), stacklevel=0)
format = ext
try:
self.canvas.print_figure(
os.path.join(dirname, filename), format=format)
except Exception as e:
error_msg_wx(str(e))
def set_cursor(self, cursor):
cursor = wxc.Cursor(cursord[cursor])
self.canvas.SetCursor(cursor)
def dynamic_update(self):
d = self._idle
self._idle = False
if d:
self.canvas.draw()
self._idle = True
def press(self, event):
if self._active == 'ZOOM':
if not self.retinaFix:
self.wxoverlay = wx.Overlay()
else:
self.savedRetinaImage = self.canvas.copy_from_bbox(
self.canvas.figure.gca().bbox)
self.zoomStartX = event.xdata
self.zoomStartY = event.ydata
def release(self, event):
    # drop any rubber-band rectangle cached by a previous drag
    try:
        del self.lastrect
    except AttributeError:
        pass
if self._active == 'ZOOM':
# When the mouse is released we reset the overlay and it
# restores the former content to the window.
if not self.retinaFix:
self.wxoverlay.Reset()
del self.wxoverlay
else:
del self.savedRetinaImage
if self.prevZoomRect:
self.prevZoomRect.pop(0).remove()
self.prevZoomRect = None
def draw_rubberband(self, event, x0, y0, x1, y1):
if self.retinaFix: # On Macs, use the following code
# wx.DCOverlay does not work properly on Retina displays.
rubberBandColor = '#C0C0FF'
if self.prevZoomRect:
self.prevZoomRect.pop(0).remove()
self.canvas.restore_region(self.savedRetinaImage)
X0, X1 = self.zoomStartX, event.xdata
Y0, Y1 = self.zoomStartY, event.ydata
lineX = (X0, X0, X1, X1, X0)
lineY = (Y0, Y1, Y1, Y0, Y0)
self.prevZoomRect = self.canvas.figure.gca().plot(
lineX, lineY, '-', color=rubberBandColor)
self.canvas.figure.gca().draw_artist(self.prevZoomRect[0])
self.canvas.blit(self.canvas.figure.gca().bbox)
return
# Use an Overlay to draw a rubberband-like bounding box.
dc = wx.ClientDC(self.canvas)
odc = wx.DCOverlay(self.wxoverlay, dc)
odc.Clear()
# Mac's DC is already the same as a GCDC, and it causes
# problems with the overlay if we try to use an actual
# wx.GCDC so don't try it.
if 'wxMac' not in wx.PlatformInfo:
dc = wx.GCDC(dc)
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
if y1 < y0:
y0, y1 = y1, y0
if x1 < x0:
x0, x1 = x1, x0
w = x1 - x0
h = y1 - y0
rect = wx.Rect(x0, y0, w, h)
rubberBandColor = '#C0C0FF' # or load from config?
# Set a pen for the border
color = wxc.NamedColour(rubberBandColor)
dc.SetPen(wx.Pen(color, 1))
# use the same color, plus alpha for the brush
r, g, b, a = color.Get(True)
color.Set(r, g, b, 0x60)
dc.SetBrush(wx.Brush(color))
if wxc.is_phoenix:
dc.DrawRectangle(rect)
else:
dc.DrawRectangleRect(rect)
def set_status_bar(self, statbar):
self.statbar = statbar
def set_message(self, s):
if self.statbar is not None:
self.statbar.set_function(s)
def set_history_buttons(self):
can_backward = (self._views._pos > 0)
can_forward = (self._views._pos < len(self._views._elements) - 1)
self.EnableTool(self.wx_ids['Back'], can_backward)
self.EnableTool(self.wx_ids['Forward'], can_forward)
class StatusBarWx(wx.StatusBar):
"""
A status bar is added to _FigureFrame to allow measurements and the
previously selected scroll function to be displayed as a user
convenience.
"""
def __init__(self, parent):
wx.StatusBar.__init__(self, parent, -1)
self.SetFieldsCount(2)
self.SetStatusText("None", 1)
#self.SetStatusText("Measurement: None", 2)
# self.Reposition()
def set_function(self, string):
self.SetStatusText("%s" % string, 1)
# def set_measurement(self, string):
# self.SetStatusText("Measurement: %s" % string, 2)
#< Additions for printing support: Matt Newville
class PrintoutWx(wx.Printout):
"""
Simple wrapper around wx Printout class -- all the real work
here is scaling the matplotlib canvas bitmap to the current
printer's definition.
"""
def __init__(self, canvas, width=5.5, margin=0.5, title='matplotlib'):
wx.Printout.__init__(self, title=title)
self.canvas = canvas
# width, in inches of output figure (approximate)
self.width = width
self.margin = margin
def HasPage(self, page):
# currently only supports a single-page print
return page == 1
def GetPageInfo(self):
return (1, 1, 1, 1)
def OnPrintPage(self, page):
self.canvas.draw()
dc = self.GetDC()
(ppw, pph) = self.GetPPIPrinter()  # printer's pixels per inch
(pgw, pgh) = self.GetPageSizePixels() # page size in pixels
(dcw, dch) = dc.GetSize()
if wxc.is_phoenix:
(grw, grh) = self.canvas.GetSize()
else:
(grw, grh) = self.canvas.GetSizeTuple()
# save current figure dpi resolution and bg color,
# so that we can temporarily set them to the dpi of
# the printer, and the bg color to white
bgcolor = self.canvas.figure.get_facecolor()
fig_dpi = self.canvas.figure.dpi
# draw the bitmap, scaled appropriately
vscale = float(ppw) / fig_dpi
# set figure resolution,bg color for printer
self.canvas.figure.dpi = ppw
self.canvas.figure.set_facecolor('#FFFFFF')
renderer = RendererWx(self.canvas.bitmap, self.canvas.figure.dpi)
self.canvas.figure.draw(renderer)
self.canvas.bitmap.SetWidth(
int(self.canvas.bitmap.GetWidth() * vscale))
self.canvas.bitmap.SetHeight(
int(self.canvas.bitmap.GetHeight() * vscale))
self.canvas.draw()
# page may need additional scaling on preview
page_scale = 1.0
if self.IsPreview():
page_scale = float(dcw) / pgw
# get margin in pixels = (margin in inches) * (pixels/inch)
top_margin = int(self.margin * pph * page_scale)
left_margin = int(self.margin * ppw * page_scale)
# set scale so that width of output is self.width inches
# (grw is the width of the canvas in pixels)
user_scale = (self.width * fig_dpi * page_scale) / float(grw)
dc.SetDeviceOrigin(left_margin, top_margin)
dc.SetUserScale(user_scale, user_scale)
# try both call signatures to paper over API inconsistencies in wx
try:
    dc.DrawBitmap(self.canvas.bitmap, 0, 0)
except Exception:
    try:
        dc.DrawBitmap(self.canvas.bitmap, (0, 0))
    except Exception:
        pass
# restore original figure resolution
self.canvas.figure.set_facecolor(bgcolor)
self.canvas.figure.dpi = fig_dpi
self.canvas.draw()
return True
#>
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureCanvas = FigureCanvasWx
FigureManager = FigureManagerWx
Toolbar = NavigationToolbar2Wx
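# Typical use goes through the pyplot machinery rather than these classes
# directly; a sketch (run in a session where the WX backend is selected):
#
#     import matplotlib
#     matplotlib.use('WX')
#     import matplotlib.pyplot as plt
#     plt.plot([1, 2, 3])
#     plt.show()   # drives FigureFrameWx / FigureManagerWx defined above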
| bsd-3-clause |
aflaxman/mpld3 | mpld3/plugins.py | 7 | 25402 | """
Plugins to add behavior to mpld3 charts
=======================================
Plugins are means of adding additional javascript features to D3-rendered
matplotlib plots. A number of plugins are defined here; it is also possible
to create nearly any imaginable behavior by defining your own custom plugin.
"""
__all__ = ['connect', 'clear', 'get_plugins', 'PluginBase',
'Reset', 'Zoom', 'BoxZoom',
'PointLabelTooltip', 'PointHTMLTooltip', 'LineLabelTooltip',
'MousePosition']
import collections
import json
import uuid
import matplotlib
from .utils import get_id
def get_plugins(fig):
"""Get the list of plugins in the figure"""
connect(fig)
return fig.mpld3_plugins
def connect(fig, *plugins):
"""Connect one or more plugins to a figure
Parameters
----------
fig : matplotlib Figure instance
The figure to which the plugins will be connected
*plugins :
Additional arguments should be plugins which will be connected
to the figure.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import plugins
>>> fig, ax = plt.subplots()
>>> lines = ax.plot(range(10), '-k')
>>> plugins.connect(fig, plugins.LineLabelTooltip(lines[0]))
"""
if not isinstance(fig, matplotlib.figure.Figure):
raise ValueError("plugins.connect: first argument must be a figure")
if not hasattr(fig, 'mpld3_plugins'):
fig.mpld3_plugins = DEFAULT_PLUGINS[:]
for plugin in plugins:
fig.mpld3_plugins.append(plugin)
def clear(fig):
"""Clear all plugins from the figure, including defaults"""
fig.mpld3_plugins = []
class PluginBase(object):
def get_dict(self):
return self.dict_
def javascript(self):
if hasattr(self, "JAVASCRIPT"):
if hasattr(self, "js_args_"):
return self.JAVASCRIPT.render(self.js_args_)
else:
return self.JAVASCRIPT
else:
return ""
def css(self):
if hasattr(self, "css_"):
return self.css_
else:
return ""
class Reset(PluginBase):
"""A Plugin to add a reset button"""
dict_ = {"type": "reset"}
class MousePosition(PluginBase):
"""A Plugin to display coordinates for the current mouse position
Example
-------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> points = ax.plot(range(10), 'o')
>>> plugins.connect(fig, plugins.MousePosition())
>>> fig_to_html(fig)
"""
def __init__(self, fontsize=12, fmt=".3g"):
self.dict_ = {"type": "mouseposition",
"fontsize": fontsize,
"fmt": fmt}
class Zoom(PluginBase):
"""A Plugin to add zoom behavior to the plot
Parameters
----------
button : boolean, optional
if True (default), then add a button to enable/disable zoom behavior
enabled : boolean, optional
specify whether the zoom should be enabled by default. By default,
zoom is enabled if button == False, and disabled if button == True.
Notes
-----
Even if ``enabled`` is specified, other plugins may modify this state.
"""
def __init__(self, button=True, enabled=None):
if enabled is None:
enabled = not button
self.dict_ = {"type": "zoom",
"button": button,
"enabled": enabled}
class BoxZoom(PluginBase):
"""A Plugin to add box-zoom behavior to the plot
Parameters
----------
button : boolean, optional
if True (default), then add a button to enable/disable zoom behavior
enabled : boolean, optional
specify whether the zoom should be enabled by default. By default,
zoom is enabled if button == False, and disabled if button == True.
Notes
-----
Even if ``enabled`` is specified, other plugins may modify this state.
"""
def __init__(self, button=True, enabled=None):
if enabled is None:
enabled = not button
self.dict_ = {"type": "boxzoom",
"button": button,
"enabled": enabled}
class PointLabelTooltip(PluginBase):
"""A Plugin to enable a tooltip: text which hovers over points.
Parameters
----------
points : matplotlib Collection or Line2D object
The figure element to apply the tooltip to
labels : array or None
If supplied, specify the labels for each point in points. If not
supplied, the (x, y) values will be used.
hoffset, voffset : integer
The number of pixels to offset the tooltip text. Default is
hoffset = 0, voffset = 10
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> points = ax.plot(range(10), 'o')
>>> plugins.connect(fig, PointLabelTooltip(points[0]))
>>> fig_to_html(fig)
"""
def __init__(self, points, labels=None,
hoffset=0, voffset=10, location="mouse"):
if location not in ["bottom left", "top left", "bottom right",
"top right", "mouse"]:
raise ValueError("invalid location: {0}".format(location))
if isinstance(points, matplotlib.lines.Line2D):
suffix = "pts"
else:
suffix = None
self.dict_ = {"type": "tooltip",
"id": get_id(points, suffix),
"labels": labels,
"hoffset": hoffset,
"voffset": voffset,
"location": location}
class LineLabelTooltip(PluginBase):
"""A Plugin to enable a tooltip: text which hovers over a line.
Parameters
----------
line : matplotlib Line2D object
The figure element to apply the tooltip to
label : string
If supplied, specify the label for the line. If not
supplied, the (x, y) values will be used.
hoffset, voffset : integer
The number of pixels to offset the tooltip text. Default is
hoffset = 0, voffset = 10
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> lines = ax.plot(range(10), 'o')
>>> plugins.connect(fig, LineLabelTooltip(lines[0]))
>>> fig_to_html(fig)
"""
def __init__(self, points, label=None,
hoffset=0, voffset=10, location="mouse"):
if location not in ["bottom left", "top left", "bottom right",
"top right", "mouse"]:
raise ValueError("invalid location: {0}".format(location))
self.dict_ = {"type": "tooltip",
"id": get_id(points),
"labels": label if label is None else [label],
"hoffset": hoffset,
"voffset": voffset,
"location": location}
class LinkedBrush(PluginBase):
"""A Plugin to enable linked brushing between plots
Parameters
----------
points : matplotlib Collection or Line2D object
A representative of the scatter plot elements to brush.
button : boolean, optional
if True (default), then add a button to enable/disable zoom behavior
enabled : boolean, optional
specify whether the zoom should be enabled by default. default=True.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from mpld3 import fig_to_html, plugins
>>> X = np.random.random((3, 100))
>>> fig, ax = plt.subplots(3, 3)
>>> for i in range(2):
... for j in range(2):
... points = ax[i, j].scatter(X[i], X[j])
>>> plugins.connect(fig, LinkedBrush(points))
>>> fig_to_html(fig)
Notes
-----
Notice that in the above example, only one of the four sets of points is
passed to the plugin. This is all that is needed: for the sake of efficient
data storage, mpld3 keeps track of which plot objects draw from the same
data.
Also note that for the linked brushing to work correctly, the data must
not contain any NaNs. The presence of NaNs makes the different data views
have different sizes, so that mpld3 is unable to link the related points.
"""
def __init__(self, points, button=True, enabled=True):
if isinstance(points, matplotlib.lines.Line2D):
suffix = "pts"
else:
suffix = None
self.dict_ = {"type": "linkedbrush",
"button": button,
"enabled": enabled,
"id": get_id(points, suffix)}
class PointHTMLTooltip(PluginBase):
"""A Plugin to enable an HTML tooltip:
formatted text which hovers over points.
Parameters
----------
points : matplotlib Collection or Line2D object
The figure element to apply the tooltip to
labels : list
The labels for each point in points, as strings of unescaped HTML.
hoffset, voffset : integer, optional
The number of pixels to offset the tooltip text. Default is
hoffset = 0, voffset = 10
css : str, optional
css to be included, for styling the label html if desired
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> points = ax.plot(range(10), 'o')
>>> labels = ['<h1>{title}</h1>'.format(title=i) for i in range(10)]
>>> plugins.connect(fig, PointHTMLTooltip(points[0], labels))
>>> fig_to_html(fig)
"""
JAVASCRIPT = """
mpld3.register_plugin("htmltooltip", HtmlTooltipPlugin);
HtmlTooltipPlugin.prototype = Object.create(mpld3.Plugin.prototype);
HtmlTooltipPlugin.prototype.constructor = HtmlTooltipPlugin;
HtmlTooltipPlugin.prototype.requiredProps = ["id"];
HtmlTooltipPlugin.prototype.defaultProps = {labels:null,
hoffset:0,
voffset:10};
function HtmlTooltipPlugin(fig, props){
mpld3.Plugin.call(this, fig, props);
};
HtmlTooltipPlugin.prototype.draw = function(){
var obj = mpld3.get_element(this.props.id);
var labels = this.props.labels;
var tooltip = d3.select("body").append("div")
.attr("class", "mpld3-tooltip")
.style("position", "absolute")
.style("z-index", "10")
.style("visibility", "hidden");
obj.elements()
.on("mouseover", function(d, i){
tooltip.html(labels[i])
.style("visibility", "visible");})
.on("mousemove", function(d, i){
tooltip
.style("top", d3.event.pageY + this.props.voffset + "px")
.style("left",d3.event.pageX + this.props.hoffset + "px");
}.bind(this))
.on("mouseout", function(d, i){
tooltip.style("visibility", "hidden");});
};
"""
def __init__(self, points, labels=None,
hoffset=0, voffset=10, css=None):
self.points = points
self.labels = labels
self.voffset = voffset
self.hoffset = hoffset
self.css_ = css or ""
if isinstance(points, matplotlib.lines.Line2D):
suffix = "pts"
else:
suffix = None
self.dict_ = {"type": "htmltooltip",
"id": get_id(points, suffix),
"labels": labels,
"hoffset": hoffset,
"voffset": voffset}
class LineHTMLTooltip(PluginBase):
"""A Plugin to enable an HTML tooltip:
formatted text which hovers over a line.
Parameters
----------
line : matplotlib Line2D object
The figure element to apply the tooltip to
label : string
The label for the line, as strings of unescaped HTML.
hoffset, voffset : integer, optional
The number of pixels to offset the tooltip text. Default is
hoffset = 0, voffset = 10
css : str, optional
css to be included, for styling the label html if desired
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> fig, ax = plt.subplots()
>>> lines = ax.plot(range(10))
>>> label = '<h1>line {title}</h1>'.format(title='A')
>>> plugins.connect(fig, LineHTMLTooltip(lines[0], label))
>>> fig_to_html(fig)
"""
JAVASCRIPT = """
mpld3.register_plugin("linehtmltooltip", LineHTMLTooltip);
LineHTMLTooltip.prototype = Object.create(mpld3.Plugin.prototype);
LineHTMLTooltip.prototype.constructor = LineHTMLTooltip;
LineHTMLTooltip.prototype.requiredProps = ["id"];
LineHTMLTooltip.prototype.defaultProps = {label:null,
hoffset:0,
voffset:10};
function LineHTMLTooltip(fig, props){
mpld3.Plugin.call(this, fig, props);
};
LineHTMLTooltip.prototype.draw = function(){
var obj = mpld3.get_element(this.props.id, this.fig);
var label = this.props.label
var tooltip = d3.select("body").append("div")
.attr("class", "mpld3-tooltip")
.style("position", "absolute")
.style("z-index", "10")
.style("visibility", "hidden");
obj.elements()
.on("mouseover", function(d, i){
tooltip.html(label)
.style("visibility", "visible");
})
.on("mousemove", function(d, i){
tooltip
.style("top", d3.event.pageY + this.props.voffset + "px")
.style("left",d3.event.pageX + this.props.hoffset + "px");
}.bind(this))
.on("mouseout", function(d, i){
tooltip.style("visibility", "hidden");})
};
"""
def __init__(self, line, label=None,
hoffset=0, voffset=10,
css=None):
self.line = line
self.label = label
self.voffset = voffset
self.hoffset = hoffset
self.css_ = css or ""
self.dict_ = {"type": "linehtmltooltip",
"id": get_id(line),
"label": label,
"hoffset": hoffset,
"voffset": voffset}
class InteractiveLegendPlugin(PluginBase):
"""A plugin for an interactive legends.
Inspired by http://bl.ocks.org/simzou/6439398
Parameters
----------
plot_elements : iterable of matplotlib elements
the elements to associate with a given legend items
labels : iterable of strings
The labels for each legend element
ax : matplotlib axes instance, optional
the ax to which the legend belongs. Default is the first
axes. The legend will be plotted to the right of the specified
axes
alpha_unsel : float, optional
the alpha value to multiply the plot_element(s) associated alpha
with the legend item when the legend item is unselected.
Default is 0.2
alpha_over : float, optional
the alpha value to multiply the plot_element(s) associated alpha
with the legend item when the legend item is overlaid.
Default is 1 (no effect); 1.5 works nicely!
start_visible : boolean, optional (could be a list of booleans)
defines whether objects should start visible or not.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from mpld3 import fig_to_html, plugins
>>> N_paths = 5
>>> N_steps = 100
>>> x = np.linspace(0, 10, 100)
>>> y = 0.1 * (np.random.random((N_paths, N_steps)) - 0.5)
>>> y = y.cumsum(1)
>>> fig, ax = plt.subplots()
>>> labels = ["a", "b", "c", "d", "e"]
>>> line_collections = ax.plot(x, y.T, lw=4, alpha=0.6)
>>> interactive_legend = plugins.InteractiveLegendPlugin(line_collections,
... labels,
... alpha_unsel=0.2,
... alpha_over=1.5,
... start_visible=True)
>>> plugins.connect(fig, interactive_legend)
>>> fig_to_html(fig)
"""
JAVASCRIPT = """
mpld3.register_plugin("interactive_legend", InteractiveLegend);
InteractiveLegend.prototype = Object.create(mpld3.Plugin.prototype);
InteractiveLegend.prototype.constructor = InteractiveLegend;
InteractiveLegend.prototype.requiredProps = ["element_ids", "labels"];
InteractiveLegend.prototype.defaultProps = {"ax":null,
"alpha_unsel":0.2,
"alpha_over":1.0,
"start_visible":true}
function InteractiveLegend(fig, props){
mpld3.Plugin.call(this, fig, props);
};
InteractiveLegend.prototype.draw = function(){
var alpha_unsel = this.props.alpha_unsel;
var alpha_over = this.props.alpha_over;
var legendItems = new Array();
for(var i=0; i<this.props.labels.length; i++){
var obj = {};
obj.label = this.props.labels[i];
var element_id = this.props.element_ids[i];
mpld3_elements = [];
for(var j=0; j<element_id.length; j++){
var mpld3_element = mpld3.get_element(element_id[j], this.fig);
// mpld3_element might be null in case of Line2D instances
                // because we pass the id for both the line and the markers. Either
// one might not exist on the D3 side
if(mpld3_element){
mpld3_elements.push(mpld3_element);
}
}
obj.mpld3_elements = mpld3_elements;
            obj.visible = this.props.start_visible[i]; // settable from the Python side
legendItems.push(obj);
set_alphas(obj, false);
}
// determine the axes with which this legend is associated
var ax = this.props.ax
if(!ax){
ax = this.fig.axes[0];
} else{
ax = mpld3.get_element(ax, this.fig);
}
// add a legend group to the canvas of the figure
var legend = this.fig.canvas.append("svg:g")
.attr("class", "legend");
// add the rectangles
legend.selectAll("rect")
.data(legendItems)
.enter().append("rect")
.attr("height", 10)
.attr("width", 25)
.attr("x", ax.width + ax.position[0] + 25)
.attr("y",function(d,i) {
return ax.position[1] + i * 25 + 10;})
.attr("stroke", get_color)
.attr("class", "legend-box")
.style("fill", function(d, i) {
return d.visible ? get_color(d) : "white";})
.on("click", click).on('mouseover', over).on('mouseout', out);
// add the labels
legend.selectAll("text")
.data(legendItems)
.enter().append("text")
.attr("x", function (d) {
return ax.width + ax.position[0] + 25 + 40;})
.attr("y", function(d,i) {
return ax.position[1] + i * 25 + 10 + 10 - 1;})
.text(function(d) { return d.label });
// specify the action on click
function click(d,i){
d.visible = !d.visible;
d3.select(this)
.style("fill",function(d, i) {
return d.visible ? get_color(d) : "white";
})
set_alphas(d, false);
};
// specify the action on legend overlay
function over(d,i){
set_alphas(d, true);
};
        // specify the action on legend mouseout
function out(d,i){
set_alphas(d, false);
};
// helper function for setting alphas
function set_alphas(d, is_over){
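            // Lines scale their stroke opacity and width; path collections
            // and markers scale stroke and fill opacity. Any other element
            // type is logged to the console and left untouched.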
for(var i=0; i<d.mpld3_elements.length; i++){
var type = d.mpld3_elements[i].constructor.name;
if(type =="mpld3_Line"){
var current_alpha = d.mpld3_elements[i].props.alpha;
var current_alpha_unsel = current_alpha * alpha_unsel;
var current_alpha_over = current_alpha * alpha_over;
d3.select(d.mpld3_elements[i].path[0][0])
.style("stroke-opacity", is_over ? current_alpha_over :
(d.visible ? current_alpha : current_alpha_unsel))
.style("stroke-width", is_over ?
alpha_over * d.mpld3_elements[i].props.edgewidth : d.mpld3_elements[i].props.edgewidth);
} else if((type=="mpld3_PathCollection")||
(type=="mpld3_Markers")){
var current_alpha = d.mpld3_elements[i].props.alphas[0];
var current_alpha_unsel = current_alpha * alpha_unsel;
var current_alpha_over = current_alpha * alpha_over;
d3.selectAll(d.mpld3_elements[i].pathsobj[0])
.style("stroke-opacity", is_over ? current_alpha_over :
(d.visible ? current_alpha : current_alpha_unsel))
.style("fill-opacity", is_over ? current_alpha_over :
(d.visible ? current_alpha : current_alpha_unsel));
} else{
console.log(type + " not yet supported");
}
}
};
// helper function for determining the color of the rectangles
function get_color(d){
var type = d.mpld3_elements[0].constructor.name;
var color = "black";
if(type =="mpld3_Line"){
color = d.mpld3_elements[0].props.edgecolor;
} else if((type=="mpld3_PathCollection")||
(type=="mpld3_Markers")){
color = d.mpld3_elements[0].props.facecolors[0];
} else{
console.log(type + " not yet supported");
}
return color;
};
};
"""
css_ = """
.legend-box {
cursor: pointer;
}
"""
def __init__(self, plot_elements, labels, ax=None,
alpha_unsel=0.2, alpha_over=1., start_visible=True):
self.ax = ax
if ax:
ax = get_id(ax)
# start_visible could be a list
if isinstance(start_visible, bool):
start_visible = [start_visible] * len(labels)
elif not len(start_visible) == len(labels):
            raise ValueError("start_visible has {} entries but there are {} "
                             "labels".format(len(start_visible), len(labels)))
mpld3_element_ids = self._determine_mpld3ids(plot_elements)
self.mpld3_element_ids = mpld3_element_ids
self.dict_ = {"type": "interactive_legend",
"element_ids": mpld3_element_ids,
"labels": labels,
"ax": ax,
"alpha_unsel": alpha_unsel,
"alpha_over": alpha_over,
"start_visible": start_visible}
def _determine_mpld3ids(self, plot_elements):
"""
Helper function to get the mpld3_id for each
of the specified elements.
"""
mpld3_element_ids = []
# There are two things being done here. First,
# we make sure that we have a list of lists, where
# each inner list is associated with a single legend
# item. Second, in case of Line2D object we pass
# the id for both the marker and the line.
# on the javascript side we filter out the nulls in
# case either the line or the marker has no equivalent
# D3 representation.
for entry in plot_elements:
ids = []
if isinstance(entry, collections.Iterable):
for element in entry:
mpld3_id = get_id(element)
ids.append(mpld3_id)
if isinstance(element, matplotlib.lines.Line2D):
mpld3_id = get_id(element, 'pts')
ids.append(mpld3_id)
else:
ids.append(get_id(entry))
if isinstance(entry, matplotlib.lines.Line2D):
mpld3_id = get_id(entry, 'pts')
ids.append(mpld3_id)
mpld3_element_ids.append(ids)
return mpld3_element_ids
DEFAULT_PLUGINS = [Reset(), Zoom(), BoxZoom()]
| bsd-3-clause |
YihaoLu/statsmodels | statsmodels/graphics/tests/test_mosaicplot.py | 17 | 18878 | from __future__ import division
from statsmodels.compat.python import iterkeys, zip, lrange, iteritems, range
from numpy.testing import assert_, assert_raises, dec
from numpy.testing import run_module_suite
# utilities for the tests
from statsmodels.compat.collections import OrderedDict
from statsmodels.api import datasets
import numpy as np
from itertools import product
try:
import matplotlib.pyplot as pylab
have_matplotlib = True
except Exception:
have_matplotlib = False
import pandas
pandas_old = int(pandas.__version__.split('.')[1]) < 9
# the main drawing function
from statsmodels.graphics.mosaicplot import mosaic
# other functions to be tested for accuracy
from statsmodels.graphics.mosaicplot import _hierarchical_split
from statsmodels.graphics.mosaicplot import _reduce_dict
from statsmodels.graphics.mosaicplot import _key_splitting
from statsmodels.graphics.mosaicplot import _normalize_split
from statsmodels.graphics.mosaicplot import _split_rect
@dec.skipif(not have_matplotlib or pandas_old)
def test_data_conversion():
# It will not reorder the elements
# so the dictionary will look odd
    # as its key order has the c and b
# keys swapped
import pandas
fig, ax = pylab.subplots(4, 4)
data = {'ax': 1, 'bx': 2, 'cx': 3}
mosaic(data, ax=ax[0, 0], title='basic dict', axes_label=False)
data = pandas.Series(data)
mosaic(data, ax=ax[0, 1], title='basic series', axes_label=False)
data = [1, 2, 3]
mosaic(data, ax=ax[0, 2], title='basic list', axes_label=False)
data = np.asarray(data)
mosaic(data, ax=ax[0, 3], title='basic array', axes_label=False)
data = {('ax', 'cx'): 1, ('bx', 'cx'): 2, ('ax', 'dx'): 3, ('bx', 'dx'): 4}
mosaic(data, ax=ax[1, 0], title='compound dict', axes_label=False)
mosaic(data, ax=ax[2, 0], title='inverted keys dict', index=[1, 0], axes_label=False)
data = pandas.Series(data)
mosaic(data, ax=ax[1, 1], title='compound series', axes_label=False)
mosaic(data, ax=ax[2, 1], title='inverted keys series', index=[1, 0])
data = [[1, 2], [3, 4]]
mosaic(data, ax=ax[1, 2], title='compound list', axes_label=False)
mosaic(data, ax=ax[2, 2], title='inverted keys list', index=[1, 0])
data = np.array([[1, 2], [3, 4]])
mosaic(data, ax=ax[1, 3], title='compound array', axes_label=False)
mosaic(data, ax=ax[2, 3], title='inverted keys array', index=[1, 0], axes_label=False)
gender = ['male', 'male', 'male', 'female', 'female', 'female']
pet = ['cat', 'dog', 'dog', 'cat', 'dog', 'cat']
data = pandas.DataFrame({'gender': gender, 'pet': pet})
mosaic(data, ['gender'], ax=ax[3, 0], title='dataframe by key 1', axes_label=False)
mosaic(data, ['pet'], ax=ax[3, 1], title='dataframe by key 2', axes_label=False)
mosaic(data, ['gender', 'pet'], ax=ax[3, 2], title='both keys', axes_label=False)
mosaic(data, ['pet', 'gender'], ax=ax[3, 3], title='keys inverted', axes_label=False)
pylab.suptitle('testing data conversion (plot 1 of 4)')
#pylab.show()
@dec.skipif(not have_matplotlib)
def test_mosaic_simple():
    # display a simple plot of 4 categories of data, split into four
# levels with increasing size for each group
# creation of the levels
key_set = (['male', 'female'], ['old', 'adult', 'young'],
               ['worker', 'unemployed'], ['healthy', 'ill'])
# the cartesian product of all the categories is
# the complete set of categories
keys = list(product(*key_set))
data = OrderedDict(zip(keys, range(1, 1 + len(keys))))
# which colours should I use for the various categories?
# put it into a dict
props = {}
#males and females in blue and red
props[('male',)] = {'color': 'b'}
props[('female',)] = {'color': 'r'}
# all the groups corresponding to ill groups have a different color
for key in keys:
if 'ill' in key:
if 'male' in key:
                props[key] = {'color': 'BlueViolet', 'hatch': '+'}
            else:
                props[key] = {'color': 'Crimson', 'hatch': '+'}
# mosaic of the data, with given gaps and colors
mosaic(data, gap=0.05, properties=props, axes_label=False)
    pylab.suptitle('synthetic data, 4 categories (plot 2 of 4)')
#pylab.show()
@dec.skipif(not have_matplotlib or pandas_old)
def test_mosaic():
# make the same analysis on a known dataset
# load the data and clean it a bit
affairs = datasets.fair.load_pandas()
datas = affairs.exog
# any time greater than 0 is cheating
datas['cheated'] = affairs.endog > 0
# sort by the marriage quality and give meaningful name
# [rate_marriage, age, yrs_married, children,
# religious, educ, occupation, occupation_husb]
datas = datas.sort(['rate_marriage', 'religious'])
num_to_desc = {1: 'awful', 2: 'bad', 3: 'intermediate',
4: 'good', 5: 'wonderful'}
datas['rate_marriage'] = datas['rate_marriage'].map(num_to_desc)
num_to_faith = {1: 'non religious', 2: 'poorly religious', 3: 'religious',
4: 'very religious'}
datas['religious'] = datas['religious'].map(num_to_faith)
num_to_cheat = {False: 'faithful', True: 'cheated'}
datas['cheated'] = datas['cheated'].map(num_to_cheat)
# finished cleaning
fig, ax = pylab.subplots(2, 2)
mosaic(datas, ['rate_marriage', 'cheated'], ax=ax[0, 0],
title='by marriage happiness')
mosaic(datas, ['religious', 'cheated'], ax=ax[0, 1],
title='by religiosity')
mosaic(datas, ['rate_marriage', 'religious', 'cheated'], ax=ax[1, 0],
title='by both', labelizer=lambda k:'')
ax[1, 0].set_xlabel('marriage rating')
ax[1, 0].set_ylabel('religion status')
mosaic(datas, ['religious', 'rate_marriage'], ax=ax[1, 1],
title='inter-dependence', axes_label=False)
pylab.suptitle("extramarital affairs (plot 3 of 4)")
#pylab.show()
@dec.skipif(not have_matplotlib)
def test_mosaic_very_complex():
# make a scattermatrix of mosaic plots to show the correlations between
# each pair of variable in a dataset. Could be easily converted into a
# new function that does this automatically based on the type of data
key_name = ['gender', 'age', 'health', 'work']
key_base = (['male', 'female'], ['old', 'young'],
                ['healthy', 'ill'], ['work', 'unemployed'])
keys = list(product(*key_base))
data = OrderedDict(zip(keys, range(1, 1 + len(keys))))
props = {}
props[('male', 'old')] = {'color': 'r'}
props[('female',)] = {'color': 'pink'}
L = len(key_base)
fig, axes = pylab.subplots(L, L)
for i in range(L):
for j in range(L):
m = set(range(L)).difference(set((i, j)))
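            # m holds the indices of the variables marginalized out for
            # this (i, j) panel of the scatter-matrix of mosaics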
if i == j:
axes[i, i].text(0.5, 0.5, key_name[i],
ha='center', va='center')
axes[i, i].set_xticks([])
axes[i, i].set_xticklabels([])
axes[i, i].set_yticks([])
axes[i, i].set_yticklabels([])
else:
ji = max(i, j)
ij = min(i, j)
temp_data = OrderedDict([((k[ij], k[ji]) + tuple(k[r] for r in m), v)
for k, v in iteritems(data)])
keys = list(iterkeys(temp_data))
for k in keys:
value = _reduce_dict(temp_data, k[:2])
temp_data[k[:2]] = value
del temp_data[k]
mosaic(temp_data, ax=axes[i, j], axes_label=False,
properties=props, gap=0.05, horizontal=i > j)
pylab.suptitle('old males should look bright red, (plot 4 of 4)')
#pylab.show()
@dec.skipif(not have_matplotlib)
def test_axes_labeling():
from numpy.random import rand
key_set = (['male', 'female'], ['old', 'adult', 'young'],
['worker', 'unemployed'], ['yes', 'no'])
# the cartesian product of all the categories is
# the complete set of categories
keys = list(product(*key_set))
data = OrderedDict(zip(keys, rand(len(keys))))
lab = lambda k: ''.join(s[0] for s in k)
fig, (ax1, ax2) = pylab.subplots(1, 2, figsize=(16, 8))
mosaic(data, ax=ax1, labelizer=lab, horizontal=True, label_rotation=45)
mosaic(data, ax=ax2, labelizer=lab, horizontal=False,
label_rotation=[0, 45, 90, 0])
#fig.tight_layout()
fig.suptitle("correct alignment of the axes labels")
#pylab.show()
@dec.skipif(not have_matplotlib or pandas_old)
def test_mosaic_empty_cells():
# SMOKE test see #2286
import pandas as pd
mydata = pd.DataFrame({'id2': {64: 'Angelica',
65: 'DXW_UID', 66: 'casuid01',
67: 'casuid01', 68: 'EC93_uid',
69: 'EC93_uid', 70: 'EC93_uid',
60: 'DXW_UID', 61: 'AtmosFox',
62: 'DXW_UID', 63: 'DXW_UID'},
'id1': {64: 'TGP',
65: 'Retention01', 66: 'default',
67: 'default', 68: 'Musa_EC_9_3',
69: 'Musa_EC_9_3', 70: 'Musa_EC_9_3',
60: 'default', 61: 'default',
62: 'default', 63: 'default'}})
ct = pd.crosstab(mydata.id1, mydata.id2)
fig, vals = mosaic(ct.T.unstack())
fig, vals = mosaic(mydata, ['id1','id2'])
eq = lambda x, y: assert_(np.allclose(x, y))
def test_recursive_split():
keys = list(product('mf'))
data = OrderedDict(zip(keys, [1] * len(keys)))
res = _hierarchical_split(data, gap=0)
assert_(list(iterkeys(res)) == keys)
    eq(res[('m',)], (0.0, 0.0, 0.5, 1.0))
    eq(res[('f',)], (0.5, 0.0, 0.5, 1.0))
keys = list(product('mf', 'yao'))
data = OrderedDict(zip(keys, [1] * len(keys)))
res = _hierarchical_split(data, gap=0)
assert_(list(iterkeys(res)) == keys)
    eq(res[('m', 'y')], (0.0, 0.0, 0.5, 1 / 3))
    eq(res[('m', 'a')], (0.0, 1 / 3, 0.5, 1 / 3))
    eq(res[('m', 'o')], (0.0, 2 / 3, 0.5, 1 / 3))
    eq(res[('f', 'y')], (0.5, 0.0, 0.5, 1 / 3))
    eq(res[('f', 'a')], (0.5, 1 / 3, 0.5, 1 / 3))
    eq(res[('f', 'o')], (0.5, 2 / 3, 0.5, 1 / 3))
def test__reduce_dict():
data = OrderedDict(zip(list(product('mf', 'oy', 'wn')), [1] * 8))
eq(_reduce_dict(data, ('m',)), 4)
eq(_reduce_dict(data, ('m', 'o')), 2)
eq(_reduce_dict(data, ('m', 'o', 'w')), 1)
data = OrderedDict(zip(list(product('mf', 'oy', 'wn')), lrange(8)))
eq(_reduce_dict(data, ('m',)), 6)
eq(_reduce_dict(data, ('m', 'o')), 1)
eq(_reduce_dict(data, ('m', 'o', 'w')), 0)
def test__key_splitting():
# subdivide starting with an empty tuple
base_rect = {tuple(): (0, 0, 1, 1)}
res = _key_splitting(base_rect, ['a', 'b'], [1, 1], tuple(), True, 0)
assert_(list(iterkeys(res)) == [('a',), ('b',)])
eq(res[('a',)], (0, 0, 0.5, 1))
eq(res[('b',)], (0.5, 0, 0.5, 1))
# subdivide a in two sublevel
res_bis = _key_splitting(res, ['c', 'd'], [1, 1], ('a',), False, 0)
assert_(list(iterkeys(res_bis)) == [('a', 'c'), ('a', 'd'), ('b',)])
eq(res_bis[('a', 'c')], (0.0, 0.0, 0.5, 0.5))
eq(res_bis[('a', 'd')], (0.0, 0.5, 0.5, 0.5))
eq(res_bis[('b',)], (0.5, 0, 0.5, 1))
# starting with a non empty tuple and uneven distribution
base_rect = {('total',): (0, 0, 1, 1)}
res = _key_splitting(base_rect, ['a', 'b'], [1, 2], ('total',), True, 0)
assert_(list(iterkeys(res)) == [('total',) + (e,) for e in ['a', 'b']])
eq(res[('total', 'a')], (0, 0, 1 / 3, 1))
eq(res[('total', 'b')], (1 / 3, 0, 2 / 3, 1))
def test_proportion_normalization():
# extremes should give the whole set, as well
# as if 0 is inserted
eq(_normalize_split(0.), [0.0, 0.0, 1.0])
eq(_normalize_split(1.), [0.0, 1.0, 1.0])
eq(_normalize_split(2.), [0.0, 1.0, 1.0])
# negative values should raise ValueError
assert_raises(ValueError, _normalize_split, -1)
assert_raises(ValueError, _normalize_split, [1., -1])
assert_raises(ValueError, _normalize_split, [1., -1, 0.])
# if everything is zero it will complain
assert_raises(ValueError, _normalize_split, [0.])
assert_raises(ValueError, _normalize_split, [0., 0.])
# one-element array should return the whole interval
eq(_normalize_split([0.5]), [0.0, 1.0])
eq(_normalize_split([1.]), [0.0, 1.0])
eq(_normalize_split([2.]), [0.0, 1.0])
# simple division should give two pieces
for x in [0.3, 0.5, 0.9]:
eq(_normalize_split(x), [0., x, 1.0])
# multiple division should split as the sum of the components
for x, y in [(0.25, 0.5), (0.1, 0.8), (10., 30.)]:
eq(_normalize_split([x, y]), [0., x / (x + y), 1.0])
for x, y, z in [(1., 1., 1.), (0.1, 0.5, 0.7), (10., 30., 40)]:
eq(_normalize_split(
[x, y, z]), [0., x / (x + y + z), (x + y) / (x + y + z), 1.0])
def test_false_split():
# if you ask it to be divided in only one piece, just return the original
# one
pure_square = [0., 0., 1., 1.]
conf_h = dict(proportion=[1], gap=0.0, horizontal=True)
conf_v = dict(proportion=[1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_h), pure_square)
eq(_split_rect(*pure_square, **conf_v), pure_square)
conf_h = dict(proportion=[1], gap=0.5, horizontal=True)
conf_v = dict(proportion=[1], gap=0.5, horizontal=False)
eq(_split_rect(*pure_square, **conf_h), pure_square)
eq(_split_rect(*pure_square, **conf_v), pure_square)
# identity on a void rectangle should not give anything strange
null_square = [0., 0., 0., 0.]
conf = dict(proportion=[1], gap=0.0, horizontal=True)
eq(_split_rect(*null_square, **conf), null_square)
conf = dict(proportion=[1], gap=1.0, horizontal=True)
eq(_split_rect(*null_square, **conf), null_square)
# splitting a negative rectangle should raise error
neg_square = [0., 0., -1., 0.]
conf = dict(proportion=[1], gap=0.0, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
conf = dict(proportion=[1, 1], gap=0.0, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
conf = dict(proportion=[1], gap=0.5, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
conf = dict(proportion=[1, 1], gap=0.5, horizontal=True)
assert_raises(ValueError, _split_rect, *neg_square, **conf)
def test_rect_pure_split():
pure_square = [0., 0., 1., 1.]
# division in two equal pieces from the perfect square
h_2split = [(0.0, 0.0, 0.5, 1.0), (0.5, 0.0, 0.5, 1.0)]
conf_h = dict(proportion=[1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 0.5), (0.0, 0.5, 1.0, 0.5)]
conf_v = dict(proportion=[1, 1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# division in two non-equal pieces from the perfect square
h_2split = [(0.0, 0.0, 1 / 3, 1.0), (1 / 3, 0.0, 2 / 3, 1.0)]
conf_h = dict(proportion=[1, 2], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 1 / 3), (0.0, 1 / 3, 1.0, 2 / 3)]
conf_v = dict(proportion=[1, 2], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# division in three equal pieces from the perfect square
h_2split = [(0.0, 0.0, 1 / 3, 1.0), (1 / 3, 0.0, 1 / 3, 1.0), (2 / 3, 0.0,
1 / 3, 1.0)]
conf_h = dict(proportion=[1, 1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 1 / 3), (0.0, 1 / 3, 1.0, 1 / 3), (0.0, 2 / 3,
1.0, 1 / 3)]
conf_v = dict(proportion=[1, 1, 1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# division in three non-equal pieces from the perfect square
h_2split = [(0.0, 0.0, 1 / 4, 1.0), (1 / 4, 0.0, 1 / 2, 1.0), (3 / 4, 0.0,
1 / 4, 1.0)]
conf_h = dict(proportion=[1, 2, 1], gap=0.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
v_2split = [(0.0, 0.0, 1.0, 1 / 4), (0.0, 1 / 4, 1.0, 1 / 2), (0.0, 3 / 4,
1.0, 1 / 4)]
conf_v = dict(proportion=[1, 2, 1], gap=0.0, horizontal=False)
eq(_split_rect(*pure_square, **conf_v), v_2split)
# splitting on a void rectangle should give multiple void
null_square = [0., 0., 0., 0.]
conf = dict(proportion=[1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*null_square, **conf), [null_square, null_square])
conf = dict(proportion=[1, 2], gap=1.0, horizontal=True)
eq(_split_rect(*null_square, **conf), [null_square, null_square])
def test_rect_deformed_split():
non_pure_square = [1., -1., 1., 0.5]
    # division in two equal pieces from the deformed rectangle
h_2split = [(1.0, -1.0, 0.5, 0.5), (1.5, -1.0, 0.5, 0.5)]
conf_h = dict(proportion=[1, 1], gap=0.0, horizontal=True)
eq(_split_rect(*non_pure_square, **conf_h), h_2split)
v_2split = [(1.0, -1.0, 1.0, 0.25), (1.0, -0.75, 1.0, 0.25)]
conf_v = dict(proportion=[1, 1], gap=0.0, horizontal=False)
eq(_split_rect(*non_pure_square, **conf_v), v_2split)
    # division in two non-equal pieces from the deformed rectangle
h_2split = [(1.0, -1.0, 1 / 3, 0.5), (1 + 1 / 3, -1.0, 2 / 3, 0.5)]
conf_h = dict(proportion=[1, 2], gap=0.0, horizontal=True)
eq(_split_rect(*non_pure_square, **conf_h), h_2split)
v_2split = [(1.0, -1.0, 1.0, 1 / 6), (1.0, 1 / 6 - 1, 1.0, 2 / 6)]
conf_v = dict(proportion=[1, 2], gap=0.0, horizontal=False)
eq(_split_rect(*non_pure_square, **conf_v), v_2split)
def test_gap_split():
pure_square = [0., 0., 1., 1.]
# null split
conf_h = dict(proportion=[1], gap=1.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), pure_square)
# equal split
h_2split = [(0.0, 0.0, 0.25, 1.0), (0.75, 0.0, 0.25, 1.0)]
conf_h = dict(proportion=[1, 1], gap=1.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
    # unequal split
h_2split = [(0.0, 0.0, 1 / 6, 1.0), (0.5 + 1 / 6, 0.0, 1 / 3, 1.0)]
conf_h = dict(proportion=[1, 2], gap=1.0, horizontal=True)
eq(_split_rect(*pure_square, **conf_h), h_2split)
def test_default_arg_index():
# 2116
import pandas as pd
df = pd.DataFrame({'size' : ['small', 'large', 'large', 'small', 'large',
'small'],
'length' : ['long', 'short', 'short', 'long', 'long',
'short']})
assert_raises(ValueError, mosaic, data=df, title='foobar')
if __name__ == '__main__':
run_module_suite()
| bsd-3-clause |
zhuangjun1981/retinotopic_mapping | retinotopic_mapping/examples/analysis_retinotopicmapping/batch_MarkPatches.py | 1 | 1417 | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 30 14:46:38 2014
@author: junz
"""
import os
import matplotlib.pyplot as plt
import corticalmapping.core.FileTools as ft
import corticalmapping.RetinotopicMapping as rm
trialName = '160208_M193206_Trial1.pkl'
names = [
['patch01', 'V1'],
['patch02', 'RL'],
['patch03', 'LM'],
['patch04', 'AL'],
['patch05', 'AM'],
['patch06', 'PM'],
['patch07', 'MMA'],
['patch08', 'MMP'],
['patch09', 'LLA'],
# ['patch10', 'AM'],
# ['patch11', 'LLA'],
# ['patch12', 'MMP'],
# ['patch13', 'MMP']
# ['patch14', 'MMP']
]
currFolder = os.path.dirname(os.path.realpath(__file__))
os.chdir(currFolder)
trialPath = os.path.join(currFolder,trialName)
trialDict = ft.loadFile(trialPath)
finalPatches = dict(trialDict['finalPatches'])
for i, namePair in enumerate(names):
currPatch = finalPatches.pop(namePair[0])
newPatchDict = {namePair[1]:currPatch}
finalPatches.update(newPatchDict)
trialDict.update({'finalPatchesMarked':finalPatches})
ft.saveFile(trialPath,trialDict)
trial, _ = rm.loadTrial(trialPath)
f = plt.figure(figsize=(10,10))
ax = f.add_subplot(111)
trial.plotFinalPatchBorders2(plotAxis = ax,borderWidth=2)
plt.show()
f.savefig(trialName[0:-4]+'_borders.pdf',dpi=600)
f.savefig(trialName[0:-4]+'_borders.png',dpi=300) | gpl-3.0 |
ghwatson/SpanishAcquisitionIQC | spacq/gui/display/plot/surface.py | 2 | 2280 | from matplotlib import pyplot
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from mpl_toolkits.mplot3d import axes3d
import numpy
import wx
"""
An embeddable three-dimensional surface plot.
"""
class SurfacePlot(object):
"""
A surface plot.
"""
alpha = 0.8
def __init__(self, parent, style='surface'):
self.style = style
self.figure = pyplot.figure()
self.canvas = FigureCanvas(parent, wx.ID_ANY, self.figure)
self.axes = axes3d.Axes3D(self.figure)
self.surface = None
def __del__(self):
try:
self.close()
except Exception:
pass
@property
def control(self):
"""
A drawable control.
"""
return self.canvas
def close(self):
"""
Inform pyplot that this figure is no longer required.
"""
pyplot.close(self.figure.number)
def set_surface_data(self, data):
"""
Set the surface data based on the data tuple.
"""
if self.surface is not None:
self.axes.collections.remove(self.surface)
self.surface = None
if data is None:
return
surface_data, x_bounds, y_bounds = data
# Number of values along each axis.
y_num, x_num = surface_data.shape
# The equally-spaced values along each axis.
x_values = numpy.linspace(*x_bounds, num=x_num)
y_values = numpy.linspace(*y_bounds, num=y_num)
# The meshgrid of values.
x, y = numpy.meshgrid(x_values, y_values)
if self.style == 'surface':
# Just a regular surface.
self.surface = self.axes.plot_surface(x, y, surface_data, alpha=self.alpha)
elif self.style == 'waveform':
# Waveform style shows individual waveforms nicely.
self.surface = self.axes.plot_wireframe(x, y, surface_data, cstride=100000)
surface_data = property(fset=set_surface_data)
@property
def x_label(self):
"""
The x axis label.
"""
return self.axes.get_xlabel()
@x_label.setter
def x_label(self, value):
self.axes.set_xlabel(value)
@property
def y_label(self):
"""
The y axis label.
"""
return self.axes.get_ylabel()
@y_label.setter
def y_label(self, value):
self.axes.set_ylabel(value)
@property
def z_label(self):
"""
The z axis label.
"""
return self.axes.get_zlabel()
@z_label.setter
def z_label(self, value):
self.axes.set_zlabel(value)
def redraw(self):
self.canvas.draw()
| bsd-2-clause |
yunfeilu/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 286 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
A comparison of the decision boundaries generated on the iris dataset
by Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
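# randomly mark ~30% and ~50% of the labels as unlabeled (-1)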
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
| bsd-3-clause |
cybernet14/scikit-learn | examples/linear_model/plot_theilsen.py | 232 | 3615 | """
====================
Theil-Sen Regression
====================
Computes a Theil-Sen Regression on a synthetic dataset.
See :ref:`theil_sen_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the Theil-Sen
estimator is robust against outliers. It has a breakdown point of about 29.3%
in the case of simple linear regression, which means that it can tolerate
arbitrarily corrupted data (outliers) of up to 29.3% in the two-dimensional
case.
The estimation of the model is done by calculating the slopes and intercepts
of a subpopulation of all possible combinations of p subsample points. If an
intercept is fitted, p must be greater than or equal to n_features + 1. The
final slope and intercept is then defined as the spatial median of these
slopes and intercepts.
In certain cases Theil-Sen performs better than :ref:`RANSAC
<ransac_regression>` which is also a robust method. This is illustrated in the
second example below where outliers with respect to the x-axis perturb RANSAC.
Tuning the ``residual_threshold`` parameter of RANSAC remedies this but in
general a priori knowledge about the data and the nature of the outliers is
needed.
Due to the computational complexity of Theil-Sen it is recommended to use it
only for small problems in terms of number of samples and features. For larger
problems the ``max_subpopulation`` parameter restricts the magnitude of all
possible combinations of p subsample points to a randomly chosen subset and
therefore also limits the runtime. Therefore, Theil-Sen is applicable to larger
problems with the drawback of losing some of its mathematical properties since
it then works on a random subset.
"""
# Author: Florian Wilhelm -- <florian.wilhelm@gmail.com>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression, TheilSenRegressor
from sklearn.linear_model import RANSACRegressor
print(__doc__)
estimators = [('OLS', LinearRegression()),
('Theil-Sen', TheilSenRegressor(random_state=42)),
('RANSAC', RANSACRegressor(random_state=42)), ]
##############################################################################
# Outliers only in the y direction
np.random.seed(0)
n_samples = 200
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
w = 3.
c = 2.
noise = 0.1 * np.random.randn(n_samples)
y = w * x + c + noise
# 10% outliers
y[-20:] += -20 * x[-20:]
X = x[:, np.newaxis]
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 3])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
##############################################################################
# Outliers in the X direction
np.random.seed(0)
# Linear model y = 3*x + N(2, 0.1**2)
x = np.random.randn(n_samples)
noise = 0.1 * np.random.randn(n_samples)
y = 3 * x + 2 + noise
# 10% outliers
x[-20:] = 9.9
y[-20:] += 22
X = x[:, np.newaxis]
plt.figure()
plt.plot(x, y, 'k+', mew=2, ms=8)
line_x = np.array([-3, 10])
for name, estimator in estimators:
t0 = time.time()
estimator.fit(X, y)
elapsed_time = time.time() - t0
y_pred = estimator.predict(line_x.reshape(2, 1))
plt.plot(line_x, y_pred,
label='%s (fit time: %.2fs)' % (name, elapsed_time))
plt.axis('tight')
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
carefree0910/MachineLearning | f_NN/Networks.py | 1 | 12872 | import os
import sys
root_path = os.path.abspath("../")
if root_path not in sys.path:
sys.path.append(root_path)
import matplotlib.pyplot as plt
from f_NN.Layers import *
from f_NN.Optimizers import *
from Util.Bases import ClassifierBase
from Util.ProgressBar import ProgressBar
class NNVerbose:
NONE = 0
EPOCH = 1
METRICS = 2
METRICS_DETAIL = 3
DETAIL = 4
DEBUG = 5
class NaiveNN(ClassifierBase):
NaiveNNTiming = Timing()
def __init__(self, **kwargs):
super(NaiveNN, self).__init__(**kwargs)
self._layers, self._weights, self._bias = [], [], []
self._w_optimizer = self._b_optimizer = None
self._current_dimension = 0
self._params["lr"] = kwargs.get("lr", 0.001)
self._params["epoch"] = kwargs.get("epoch", 10)
self._params["optimizer"] = kwargs.get("optimizer", "Adam")
# Utils
@NaiveNNTiming.timeit(level=4)
def _add_params(self, shape):
self._weights.append(np.random.randn(*shape))
self._bias.append(np.zeros((1, shape[1])))
@NaiveNNTiming.timeit(level=4)
def _add_layer(self, layer, *args):
current, nxt = args
self._add_params((current, nxt))
self._current_dimension = nxt
self._layers.append(layer)
@NaiveNNTiming.timeit(level=1)
def _get_activations(self, x):
activations = [self._layers[0].activate(x, self._weights[0], self._bias[0])]
for i, layer in enumerate(self._layers[1:]):
activations.append(layer.activate(
activations[-1], self._weights[i + 1], self._bias[i + 1]))
return activations
@NaiveNNTiming.timeit(level=1)
def _get_prediction(self, x):
return self._get_activations(x)[-1]
# Optimizing Process
@NaiveNNTiming.timeit(level=4)
def _init_optimizers(self, optimizer, lr, epoch):
opt_fac = OptFactory()
self._w_optimizer = opt_fac.get_optimizer_by_name(
optimizer, self._weights, lr, epoch)
self._b_optimizer = opt_fac.get_optimizer_by_name(
optimizer, self._bias, lr, epoch)
@NaiveNNTiming.timeit(level=1)
def _opt(self, i, _activation, _delta):
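        # The weight gradient of layer i is activation^T . delta and the bias
        # gradient is the column-wise sum of delta; the optimizer returns the
        # signed update step, hence the `+=` below.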
self._weights[i] += self._w_optimizer.run(
i, _activation.T.dot(_delta)
)
self._bias[i] += self._b_optimizer.run(
i, np.sum(_delta, axis=0, keepdims=True)
)
# API
@NaiveNNTiming.timeit(level=4, prefix="[API] ")
def add(self, layer):
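        # The first layer fixes the input dimension; each later layer is
        # reshaped so its input width matches the previous layer's output.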
if not self._layers:
self._layers, self._current_dimension = [layer], layer.shape[1]
self._add_params(layer.shape)
else:
nxt = layer.shape[0]
layer.shape = (self._current_dimension, nxt)
self._add_layer(layer, self._current_dimension, nxt)
@NaiveNNTiming.timeit(level=1, prefix="[API] ")
def fit(self, x, y, lr=None, epoch=None, optimizer=None):
if lr is None:
lr = self._params["lr"]
if epoch is None:
epoch = self._params["epoch"]
if optimizer is None:
optimizer = self._params["optimizer"]
self._init_optimizers(optimizer, lr, epoch)
layer_width = len(self._layers)
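        # Full-batch gradient descent: run a forward pass, backpropagate the
        # deltas from the cost layer, then update the weights of each layer
        # from the output side back to the input.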
for counter in range(epoch):
self._w_optimizer.update()
self._b_optimizer.update()
activations = self._get_activations(x)
deltas = [self._layers[-1].bp_first(y, activations[-1])]
for i in range(-1, -len(activations), -1):
deltas.append(self._layers[i - 1].bp(
activations[i - 1], self._weights[i], deltas[-1]
))
for i in range(layer_width - 1, 0, -1):
self._opt(i, activations[i - 1], deltas[layer_width - i - 1])
self._opt(0, x, deltas[-1])
@NaiveNNTiming.timeit(level=4, prefix="[API] ")
def predict(self, x, get_raw_results=False, **kwargs):
y_pred = self._get_prediction(np.atleast_2d(x))
if get_raw_results:
return y_pred
return np.argmax(y_pred, axis=1)
class NN(NaiveNN):
NNTiming = Timing()
def __init__(self, **kwargs):
super(NN, self).__init__(**kwargs)
self._available_metrics = {
key: value for key, value in zip(["acc", "f1-score"], [NN.acc, NN.f1_score])
}
self._metrics, self._metric_names, self._logs = [], [], {}
self.verbose = None
self._params["batch_size"] = kwargs.get("batch_size", 256)
self._params["train_rate"] = kwargs.get("train_rate", None)
self._params["metrics"] = kwargs.get("metrics", None)
self._params["record_period"] = kwargs.get("record_period", 100)
self._params["verbose"] = kwargs.get("verbose", 1)
# Utils
@NNTiming.timeit(level=1)
def _get_prediction(self, x, name=None, batch_size=1e6, verbose=None):
if verbose is None:
verbose = self.verbose
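        # Predict in chunks so large inputs do not exhaust memory:
        # `batch_size` caps the number of scalar elements per forward pass,
        # so the samples per chunk is batch_size / per-sample feature count.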
single_batch = batch_size / np.prod(x.shape[1:]) # type: float
single_batch = int(single_batch)
if not single_batch:
single_batch = 1
if single_batch >= len(x):
return self._get_activations(x).pop()
epoch = int(len(x) / single_batch)
        if len(x) % single_batch:
epoch += 1
name = "Prediction" if name is None else "Prediction ({})".format(name)
sub_bar = ProgressBar(max_value=epoch, name=name, start=False)
if verbose >= NNVerbose.METRICS:
sub_bar.start()
rs, count = [self._get_activations(x[:single_batch]).pop()], single_batch
if verbose >= NNVerbose.METRICS:
sub_bar.update()
while count < len(x):
count += single_batch
if count >= len(x):
rs.append(self._get_activations(x[count - single_batch:]).pop())
else:
rs.append(self._get_activations(x[count - single_batch:count]).pop())
if verbose >= NNVerbose.METRICS:
sub_bar.update()
return np.vstack(rs)
@NNTiming.timeit(level=4, prefix="[API] ")
def _preview(self):
if not self._layers:
rs = "None"
else:
rs = (
"Input : {:<10s} - {}\n".format("Dimension", self._layers[0].shape[0]) +
"\n".join(
["Layer : {:<10s} - {}".format(
_layer.name, _layer.shape[1]
) for _layer in self._layers[:-1]]
) + "\nCost : {:<10s}".format(self._layers[-1].name)
)
print("=" * 30 + "\n" + "Structure\n" + "-" * 30 + "\n" + rs + "\n" + "=" * 30)
print("Optimizer")
print("-" * 30)
print(self._w_optimizer)
print("=" * 30)
@NNTiming.timeit(level=2)
def _append_log(self, x, y, y_classes, name):
y_pred = self._get_prediction(x, name)
y_pred_classes = np.argmax(y_pred, axis=1)
for i, metric in enumerate(self._metrics):
self._logs[name][i].append(metric(y_classes, y_pred_classes))
self._logs[name][-1].append(self._layers[-1].calculate(y, y_pred) / len(y))
@NNTiming.timeit(level=3)
def _print_metric_logs(self, data_type):
print()
print("=" * 47)
for i, name in enumerate(self._metric_names):
print("{:<16s} {:<16s}: {:12.8}".format(
data_type, name, self._logs[data_type][i][-1]))
print("{:<16s} {:<16s}: {:12.8}".format(
data_type, "loss", self._logs[data_type][-1][-1]))
print("=" * 47)
@NNTiming.timeit(level=1, prefix="[API] ")
def fit(self, x, y, lr=None, epoch=None, batch_size=None, train_rate=None,
optimizer=None, metrics=None, record_period=None, verbose=None):
if lr is None:
lr = self._params["lr"]
if epoch is None:
epoch = self._params["epoch"]
if optimizer is None:
optimizer = self._params["optimizer"]
if batch_size is None:
batch_size = self._params["batch_size"]
if train_rate is None:
train_rate = self._params["train_rate"]
if metrics is None:
metrics = self._params["metrics"]
if record_period is None:
record_period = self._params["record_period"]
if verbose is None:
verbose = self._params["verbose"]
self.verbose = verbose
self._init_optimizers(optimizer, lr, epoch)
layer_width = len(self._layers)
self._preview()
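        # Optionally shuffle and hold out (1 - train_rate) of the data as a
        # test split; otherwise train and test metrics are both computed on
        # the full dataset.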
if train_rate is not None:
train_rate = float(train_rate)
train_len = int(len(x) * train_rate)
shuffle_suffix = np.random.permutation(len(x))
x, y = x[shuffle_suffix], y[shuffle_suffix]
x_train, y_train = x[:train_len], y[:train_len]
x_test, y_test = x[train_len:], y[train_len:]
else:
x_train = x_test = x
y_train = y_test = y
y_train_classes = np.argmax(y_train, axis=1)
y_test_classes = np.argmax(y_test, axis=1)
train_len = len(x_train)
batch_size = min(batch_size, train_len)
do_random_batch = train_len > batch_size
train_repeat = 1 if not do_random_batch else int(train_len / batch_size) + 1
if metrics is None:
metrics = []
self._metrics = self.get_metrics(metrics)
self._metric_names = [_m.__name__ for _m in metrics]
self._logs = {
name: [[] for _ in range(len(metrics) + 1)] for name in ("train", "test")
}
bar = ProgressBar(max_value=max(1, epoch // record_period), name="Epoch", start=False)
if self.verbose >= NNVerbose.EPOCH:
bar.start()
sub_bar = ProgressBar(max_value=train_repeat * record_period - 1, name="Iteration", start=False)
for counter in range(epoch):
if self.verbose >= NNVerbose.EPOCH and counter % record_period == 0:
sub_bar.start()
for _ in range(train_repeat):
if do_random_batch:
batch = np.random.choice(train_len, batch_size)
x_batch, y_batch = x_train[batch], y_train[batch]
else:
x_batch, y_batch = x_train, y_train
self._w_optimizer.update()
self._b_optimizer.update()
activations = self._get_activations(x_batch)
deltas = [self._layers[-1].bp_first(y_batch, activations[-1])]
for i in range(-1, -len(activations), -1):
deltas.append(
self._layers[i - 1].bp(activations[i - 1], self._weights[i], deltas[-1])
)
for i in range(layer_width - 1, 0, -1):
self._opt(i, activations[i - 1], deltas[layer_width - i - 1])
self._opt(0, x_batch, deltas[-1])
if self.verbose >= NNVerbose.EPOCH:
if sub_bar.update() and self.verbose >= NNVerbose.METRICS_DETAIL:
self._append_log(x_train, y_train, y_train_classes, "train")
self._append_log(x_test, y_test, y_test_classes, "test")
self._print_metric_logs("train")
self._print_metric_logs("test")
if self.verbose >= NNVerbose.EPOCH:
sub_bar.update()
if (counter + 1) % record_period == 0:
self._append_log(x_train, y_train, y_train_classes, "train")
self._append_log(x_test, y_test, y_test_classes, "test")
if self.verbose >= NNVerbose.METRICS:
self._print_metric_logs("train")
self._print_metric_logs("test")
if self.verbose >= NNVerbose.EPOCH:
bar.update(counter // record_period + 1)
sub_bar = ProgressBar(max_value=train_repeat * record_period - 1, name="Iteration", start=False)
def draw_logs(self):
metrics_log, loss_log = {}, {}
for key, value in sorted(self._logs.items()):
metrics_log[key], loss_log[key] = value[:-1], value[-1]
for i, name in enumerate(sorted(self._metric_names)):
plt.figure()
plt.title("Metric Type: {}".format(name))
for key, log in sorted(metrics_log.items()):
xs = np.arange(len(log[i])) + 1
plt.plot(xs, log[i], label="Data Type: {}".format(key))
plt.legend(loc=4)
plt.show()
plt.close()
plt.figure()
plt.title("Cost")
for key, loss in sorted(loss_log.items()):
xs = np.arange(len(loss)) + 1
plt.plot(xs, loss, label="Data Type: {}".format(key))
plt.legend()
plt.show()
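# A minimal usage sketch (not part of the original file). The layer and cost
# class names below are hypothetical -- substitute whatever f_NN.Layers
# actually exports. Shapes follow the `add` contract above: the first layer
# takes (n_in, n_out), later layers just (n_out,), and `y` is one-hot.
#
# if __name__ == '__main__':
#     x = np.random.randn(256, 2)
#     labels = (x[:, 0] * x[:, 1] > 0).astype(int)
#     y = np.zeros((256, 2))
#     y[np.arange(256), labels] = 1
#     nn = NN()
#     nn.add(ReLU((x.shape[1], 24)))       # hypothetical layer class
#     nn.add(ReLU((24,)))
#     nn.add(CrossEntropy((y.shape[1],)))  # hypothetical cost layer
#     nn.fit(x, y, epoch=200, verbose=1)
#     predictions = nn.predict(x)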
| mit |
aabadie/scikit-learn | examples/mixture/plot_gmm.py | 122 | 3265 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians
obtained with Expectation Maximisation (``GaussianMixture`` class) and
Variational Inference (``BayesianGaussianMixture`` class models with
a Dirichlet process prior).
Both models have access to five components with which to fit the data. Note
that the Expectation Maximisation model will necessarily use all five
components while the Variational Inference model will effectively only use as
many as are needed for a good fit. Here we can see that the Expectation
Maximisation model splits some components arbitrarily, because it is trying to
fit too many components, while the Dirichlet process model adapts its number
of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
color_iter = itertools.cycle(['navy', 'c', 'cornflowerblue', 'gold',
'darkorange'])
def plot_results(X, Y_, means, covariances, index, title):
splot = plt.subplot(2, 1, 1 + index)
for i, (mean, covar, color) in enumerate(zip(
means, covariances, color_iter)):
v, w = linalg.eigh(covar)
v = 2. * np.sqrt(2.) * np.sqrt(v)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180. * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-9., 5.)
plt.ylim(-3., 6.)
plt.xticks(())
plt.yticks(())
plt.title(title)
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a Gaussian mixture with EM using five components
gmm = mixture.GaussianMixture(n_components=5, covariance_type='full').fit(X)
plot_results(X, gmm.predict(X), gmm.means_, gmm.covariances_, 0,
'Gaussian Mixture')
# Fit a Dirichlet process Gaussian mixture using five components
dpgmm = mixture.BayesianGaussianMixture(n_components=5,
covariance_type='full').fit(X)
plot_results(X, dpgmm.predict(X), dpgmm.means_, dpgmm.covariances_, 1,
'Bayesian Gaussian Mixture with a Dirichlet process prior')
plt.show()
| bsd-3-clause |
balazssimon/ml-playground | udemy/lazyprogrammer/reinforcement-learning-python/approx_mc_prediction.py | 1 | 2661 | import numpy as np
import matplotlib.pyplot as plt
from grid_world import standard_grid, negative_grid
from iterative_policy_evaluation import print_values, print_policy
# NOTE: this is only policy evaluation, not optimization
# we'll try to obtain the same result as our other MC script
from monte_carlo_random import random_action, play_game, SMALL_ENOUGH, GAMMA, ALL_POSSIBLE_ACTIONS
LEARNING_RATE = 0.001
if __name__ == '__main__':
# use the standard grid again (0 for every step) so that we can compare
# to iterative policy evaluation
grid = standard_grid()
# print rewards
print("rewards:")
print_values(grid.rewards, grid)
# state -> action
# found by policy_iteration_random on standard_grid
# MC method won't get exactly this, but should be close
# values:
# ---------------------------
# 0.43| 0.56| 0.72| 0.00|
# ---------------------------
# 0.33| 0.00| 0.21| 0.00|
# ---------------------------
# 0.25| 0.18| 0.11| -0.17|
# policy:
# ---------------------------
# R | R | R | |
# ---------------------------
# U | | U | |
# ---------------------------
# U | L | U | L |
policy = {
(2, 0): 'U',
(1, 0): 'U',
(0, 0): 'R',
(0, 1): 'R',
(0, 2): 'R',
(1, 2): 'U',
(2, 1): 'L',
(2, 2): 'U',
(2, 3): 'L',
}
# initialize theta
# our model is V_hat = theta.dot(x)
# where x = [row, col, row*col, 1] - 1 for bias term
theta = np.random.randn(4) / 2
def s2x(s):
return np.array([s[0] - 1, s[1] - 1.5, s[0]*s[1] - 3, 1])
# repeat until convergence
deltas = []
t = 1.0
for it in range(20000):
if it % 100 == 0:
t += 0.01
alpha = LEARNING_RATE/t
# generate an episode using pi
biggest_change = 0
states_and_returns = play_game(grid, policy)
seen_states = set()
for s, G in states_and_returns:
# check if we have already seen s
# called "first-visit" MC policy evaluation
if s not in seen_states:
old_theta = theta.copy()
x = s2x(s)
V_hat = theta.dot(x)
# grad(V_hat) wrt theta = x
theta += alpha*(G - V_hat)*x
biggest_change = max(biggest_change, np.abs(old_theta - theta).sum())
seen_states.add(s)
deltas.append(biggest_change)
plt.plot(deltas)
plt.show()
# obtain predicted values
V = {}
states = grid.all_states()
for s in states:
if s in grid.actions:
V[s] = theta.dot(s2x(s))
else:
# terminal state or state we can't otherwise get to
V[s] = 0
print("values:")
print_values(V, grid)
print("policy:")
print_policy(policy, grid)
| apache-2.0 |
plissonf/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
        info, time_))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
fxia22/pointGAN | show_gan_rnn.py | 1 | 2043 | from __future__ import print_function
from show3d_balls import *
import argparse
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from datasets import PartDataset
from pointnet import PointGen, PointGenR
import torch.nn.functional as F
import matplotlib.pyplot as plt
#showpoints(np.random.randn(2500,3), c1 = np.random.uniform(0,1,size = (2500)))
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default = '', help='model path')
opt = parser.parse_args()
print (opt)
gen = PointGenR()
gen.load_state_dict(torch.load(opt.model))
#sim_noise = Variable(torch.randn(5, 2, 20))
#
#sim_noises = Variable(torch.zeros(5, 15, 20))
#
#for i in range(15):
# x = i/15.0
# sim_noises[:,i,:] = sim_noise[:,0,:] * x + sim_noise[:,1,:] * (1-x)
#
#points = gen(sim_noises)
#point_np = points.transpose(2,1).data.numpy()
sim_noise = Variable(torch.randn(5, 6, 20))
sim_noises = Variable(torch.zeros(5, 30 * 5, 20))
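# walk the latent space: interpolate linearly from each of the 5 anchor noise
# codes to the next (wrapping around), 30 frames per segment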
for j in range(5):
for i in range(30):
x = (1-i/30.0)
sim_noises[:,i + 30 * j,:] = sim_noise[:,j,:] * x + sim_noise[:,(j+1) % 5,:] * (1-x)
points = gen(sim_noises)
point_np = points.transpose(2,1).data.numpy()
print(point_np.shape)
for i in range(150):
print(i)
frame = showpoints_frame(point_np[i])
plt.imshow(frame)
plt.axis('off')
plt.savefig('%s/%04d.png' %('out_rgan', i), bbox_inches='tight')
plt.clf()
#showpoints(point_np)
#sim_noise = Variable(torch.randn(5, 1000, 20))
#points = gen(sim_noise)
#point_np = points.transpose(2,1).data.numpy()
#print(point_np.shape)
#choice = np.random.choice(2500, 2048, replace=False)
#print(point_np[:, choice, :].shape)
#showpoints(point_np)
#np.savez('rgan.npz', points = point_np[:, choice, :])
| mit |
shikhardb/scikit-learn | examples/linear_model/plot_iris_logistic.py | 283 | 1678 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logistic Regression 3-class Classifier
=========================================================
Shown below are the decision boundaries of a logistic-regression classifier on
the `iris <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The
datapoints are colored according to their labels.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
h = .02 # step size in the mesh
logreg = linear_model.LogisticRegression(C=1e5)
# we create an instance of the logistic regression classifier and fit the data.
logreg.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = logreg.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
claesenm/HPOlib | HPOlib/Plotting/plotTrace_perExp.py | 5 | 6055 | #!/usr/bin/env python
##
# wrapping: A program making it easy to use hyperparameter
# optimization software.
# Copyright (C) 2013 Katharina Eggensperger and Matthias Feurer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from argparse import ArgumentParser
import cPickle
import itertools
import sys
from matplotlib.pyplot import tight_layout, figure, subplots_adjust, subplot, savefig, show
import matplotlib.gridspec
import numpy as np
from HPOlib.Plotting import plot_util
__authors__ = ["Katharina Eggensperger", "Matthias Feurer"]
__contact__ = "automl.org"
def plot_optimization_trace_cv(trial_list, name_list, optimum=0, title="",
log=True, save="", y_max=0, y_min=0):
markers =plot_util.get_plot_markers()
colors = plot_util.get_plot_colors()
linestyles = itertools.cycle(['-'])
size = 1
ratio = 5
gs = matplotlib.gridspec.GridSpec(ratio, 1)
fig = figure(1, dpi=100)
fig.suptitle(title, fontsize=16)
ax1 = subplot(gs[0:ratio, :])
ax1.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
min_val = sys.maxint
max_val = -sys.maxint
max_trials = 0
fig.suptitle(title, fontsize=16)
# Plot the average error and std
for i in range(len(name_list)):
m = markers.next()
c = colors.next()
l = linestyles.next()
leg = False
for tr in trial_list[i]:
if log:
tr = np.log10(tr)
x = range(1, len(tr)+1)
y = tr
            if not leg:
                ax1.plot(x, y, color=c, linewidth=size, linestyle=l, label=name_list[i][0])
                leg = True
            else:
                ax1.plot(x, y, color=c, linewidth=size, linestyle=l)
min_val = min(min_val, min(tr))
max_val = max(max_val, max(tr))
max_trials = max(max_trials, len(tr))
# Maybe plot on logscale
ylabel = ""
if log:
ax1.set_ylabel("log10(Minfunction value)" + ylabel)
else:
ax1.set_ylabel("Minfunction value" + ylabel)
    # Describe and label the plot
leg = ax1.legend(loc='best', fancybox=True)
leg.get_frame().set_alpha(0.5)
ax1.set_xlabel("#Function evaluations")
if y_max == y_min:
# Set axes limits
ax1.set_ylim([min_val-0.1*abs((max_val-min_val)), max_val+0.1*abs((max_val-min_val))])
else:
ax1.set_ylim([y_min, y_max])
ax1.set_xlim([0, max_trials + 1])
tight_layout()
subplots_adjust(top=0.85)
if save != "":
savefig(save, dpi=100, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches="tight", pad_inches=0.1)
else:
show()
def main(pkl_list, name_list, autofill, optimum=0, save="", title="", log=False,
y_min=0, y_max=0):
trial_list = list()
for i in range(len(pkl_list)):
tmp_trial_list = list()
max_len = -sys.maxint
for pkl in pkl_list[i]:
fh = open(pkl, "r")
trials = cPickle.load(fh)
fh.close()
trace = plot_util.get_Trace_cv(trials)
tmp_trial_list.append(trace)
max_len = max(max_len, len(trace))
trial_list.append(list())
for tr in tmp_trial_list:
# if len(tr) < max_len:
# tr.extend([tr[-1] for idx in range(abs(max_len - len(tr)))])
trial_list[-1].append(np.array(tr))
plot_optimization_trace_cv(trial_list, name_list, optimum, title=title, log=log,
save=save, y_min=y_min, y_max=y_max)
if save != "":
sys.stdout.write("Saved plot to " + save + "\n")
else:
sys.stdout.write("..Done\n")
if __name__ == "__main__":
prog = "python plotTraceWithStd.py WhatIsThis <oneOrMorePickles> [WhatIsThis <oneOrMorePickles>]"
description = "Plot a Trace with std for multiple experiments"
parser = ArgumentParser(description=description, prog=prog)
# Options for specific benchmarks
parser.add_argument("-o", "--optimum", type=float, dest="optimum",
default=0, help="If not set, the optimum is supposed to be zero")
# Options which are available only for this plot
parser.add_argument("-a", "--autofill", action="store_true", dest="autofill",
default=False, help="Fill trace automatically")
# General Options
parser.add_argument("-l", "--log", action="store_true", dest="log",
default=False, help="Plot on log scale")
parser.add_argument("--max", dest="max", type=float,
default=0, help="Maximum of the plot")
parser.add_argument("--min", dest="min", type=float,
default=0, help="Minimum of the plot")
parser.add_argument("-s", "--save", dest="save",
default="", help="Where to save plot instead of showing it?")
parser.add_argument("-t", "--title", dest="title",
default="", help="Optional supertitle for plot")
args, unknown = parser.parse_known_args()
sys.stdout.write("\nFound " + str(len(unknown)) + " arguments\n")
pkl_list_main, name_list_main = plot_util.get_pkl_and_name_list(unknown)
main(pkl_list=pkl_list_main, name_list=name_list_main, autofill=args.autofill, optimum=args.optimum,
save=args.save, title=args.title, log=args.log, y_min=args.min, y_max=args.max)
| gpl-3.0 |
GarrettSmith/Nearness | graphchi/conf/adminhtml/plots/plotter.py | 3 | 1235 | #!/usr/bin/python
import sys
import os
import numpy
import matplotlib
matplotlib.use('AGG')
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from matplotlib.ticker import FormatStrFormatter
def getArg(param, default=""):
if (sys.argv.count(param) == 0): return default
i = sys.argv.index(param)
return sys.argv[i + 1]
lastsecs = int(getArg("lastsecs", 240))
fname = sys.argv[1]
try:
tdata = numpy.loadtxt(fname, delimiter=" ")
except:
exit(0)
if len(tdata.shape) < 2 or tdata.shape[0] < 2 or tdata.shape[1] < 2:
print "Too small data - do not try to plot yet."
exit(0)
times = tdata[:, 0]
values = tdata[:, 1]
lastt = max(times)
#majorFormatter = FormatStrFormatter('%.2f')
fig = plt.figure(figsize=(3.5, 2.0))
plt.plot(times[times > lastt - lastsecs], values[times > lastt - lastsecs])
plt.gca().xaxis.set_major_locator( MaxNLocator(nbins = 7, prune = 'lower') )
plt.xlim([max(0, lastt - lastsecs), lastt])
#plt.ylim([lastt - lastsecs, lastt])
plt.gca().yaxis.set_major_locator( MaxNLocator(nbins = 7, prune = 'lower') )
#plt.gca().yaxis.set_major_formatter(majorFormatter)
plt.savefig(fname.replace(".dat", ".png"), format="png", bbox_inches='tight')
| gpl-3.0 |
sukritranjan/ranjansasselov2016b | compute_UV_doses.py | 1 | 29816 | # -*- coding: iso-8859-1 -*-
"""
This code is used to weigh the UV radiances we compute by biological action spectra.
"""
########################
###Import useful libraries
########################
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import pdb
from matplotlib.pyplot import cm
from scipy import interpolate as interp
import scipy.integrate
########################
###Set physical constants
########################
hc=1.98645e-9 #value of h*c in erg*nm
def cm2inch(cm): #function to convert cm to inches; useful for complying with Astrobiology size guidelines
return cm/2.54
########################
###Decide which bits of the calculation will be run
########################
plotactionspec=False #if true, plots the action spectra we are using.
plotactionspec_talk=False #if true, plots the action spectra we are using...but, optimized for a talk instead of a paper
calculatealbaz=False #if true, generates the table for the albedo and zenith angle study
calculateco2=False #if true, generates the table for the co2 study
calculatealtgas=True #if true, generates the table for the alternate gas study
########################
###Helper functions: I/O
########################
def get_UV(filename):
"""
Input: filename (including path)
Output: (wave_leftedges, wav_rightedges, surface radiance) in units of (nm, nm, photons/cm2/sec/nm)
"""
wav_leftedges, wav_rightedges, wav, toa_intensity, surface_flux, surface_intensity, surface_intensity_diffuse, surface_intensity_direct=np.genfromtxt(filename, skip_header=1, skip_footer=0, usecols=(0, 1, 2,3,4,6,7,8), unpack=True)
surface_intensity_photons=surface_intensity*(wav/(hc))
return wav_leftedges, wav_rightedges, surface_intensity_photons
########################
###Helper functions: UV Dosimeters
########################
def integrated_radiance(wav_left, wav_right, surf_int, leftlim, rightlim):
"""
    Computes the surface radiance integrated from leftlim to rightlim. Does this by summing bin radiance times bin width. NOTE: The method I have chosen works only so long as the limits line up with the bin edges!
    wav_left: left edge of wavelength bin, in nm
    wav_right: right edge of wavelength bin, in nm
    surf_int: total surface intensity (radiance, hemispherically-integrated) in photons/cm2/s/nm, in bin defined by wav_left and wav_right
    leftlim: short-wavelength limit of integration, in nm (must coincide with a bin edge)
    rightlim: long-wavelength limit of integration, in nm (must coincide with a bin edge)
"""
allowed_inds=np.where((wav_left>=leftlim) & (wav_right<=rightlim))
delta_wav=wav_right[allowed_inds]-wav_left[allowed_inds]
surf_int_integrated=np.sum(surf_int[allowed_inds]*delta_wav) #integration converts from photons/cm2/s/nm to photons/cm2/s
return surf_int_integrated
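# Minimal usage sketch of integrated_radiance (illustrative, not in the
# original source): for a flat spectrum of 1 photon/cm2/s/nm on 1-nm bins,
# integrating over 200-210 nm returns 10 photons/cm2/s, since the limits
# line up with bin edges as required above.
#   wl=np.arange(200., 210.); wr=wl+1.
#   integrated_radiance(wl, wr, np.ones(10), 200., 210.) # -> 10.0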
def tricyano_aqe_prodrate(wav_left, wav_right, surf_int, lambda0, produceplots, returnxy):
"""
Weights the input surface intensities by the action spectrum for the photoproduction of aquated electrons from Ritson+2012 and Patel+2015, i.e. irradiation of tricyano cuprate. The action spectrum is composed of the absorption spectrum multiplied by an assumed quantum yield function. We assume the QY function to be a step function, stepping from 0 at wavelengths longer than lambda0 to 0.06 at wavelengths shorter than lambda0. We choose 0.06 for the step function to match the estimate found by Horvath+1984; we note this value may be pH sensitive. Empirically, we know that lambda0>254 nm, but that's about it.
This process is an eustressor for abiogenesis.
wav_left: left edge of wavelength bin, in nm
wav_right: right edge of wavelength bin, in nm
surf_int: total surface intensity (radiance, hemispherically-integrated) in photons/cm2/s/nm, in bin defined by wav_left and wav_right
    lambda0: value assumed for lambda0.
produceplots: if True, shows plots of what it is computing
returnxy: if True, returns x,y for action spectrum.
"""
####Step 1: reduce input spectrum to match bounds of available dataset.
int_min=190.0 #This lower limit of integration is set by the limits of the cucn3 absorption dataset (left edge of bin)
int_max=351.0 #This upper limit of integration is set by the limits of the cucn3 absorption dataset (right edge of bin)
allowed_inds=np.where((wav_left>=int_min) & (wav_right<=int_max)) #indices that correspond to included data
wav_left=wav_left[allowed_inds]
wav_right=wav_right[allowed_inds]
surf_int=surf_int[allowed_inds]
delta_wav=wav_right-wav_left #size of wavelength bins in nm
####Step 2: form the action spectrum from the absorption spectrum and QY curve.
#Import the tricyanocuprate absorption spectrum
importeddata=np.genfromtxt('./Raw_Data/Magnani_Data/CuCN3_XC.dat', skip_header=2)
cucn3_wav=importeddata[:,0] #wav in nm
cucn3_molabs=importeddata[:,1] #molar absorptivities in L/(mol*cm), decadic
cucn3_molabs_func=interp.interp1d(cucn3_wav, cucn3_molabs, kind='linear') #functionalized form of cucn3 molar absorption
#does not matter if you use decadic or natural logarithmic as constant factors normalize out anyway
#Formulate the step-function quantum yield curve
def qy_stepfunc(wav, lambda0): #step function, for the photoionization model
"""Returns 1 for wav<=lambda0 and 0 for wav>lambda0"""
qy=np.zeros(np.size(wav))# initialize all to zero
inds=np.where(wav<=lambda0) #indices where the wavelength is below the threshold
        qy[inds]=qy[inds]+0.06 #set the QY to 0.06 at the indices where the wavelength is below the threshold
return qy
#Integrate these quantities to match the input spectral resolution
qy_dist=np.zeros(np.shape(wav_left))#initialize variable to hold the QY integrated over the surface intensity wavelength bins
cucn3_molabs_dist=np.zeros(np.shape(wav_left))#initialize variable to hold the QY integrated over the surface intensity wavelength bins
for ind in range(0, len(wav_left)):
leftedge=wav_left[ind]
rightedge=wav_right[ind]
cucn3_molabs_dist[ind]=scipy.integrate.quad(cucn3_molabs_func, leftedge, rightedge, epsabs=0, epsrel=1e-5)[0]/(rightedge-leftedge)
qy_dist[ind]=scipy.integrate.quad(qy_stepfunc, leftedge, rightedge, args=(lambda0), epsabs=0, epsrel=1e-5)[0]/(rightedge-leftedge)
action_spectrum=cucn3_molabs_dist*qy_dist
    #Normalize action spectrum to 1 at 190 nm (arbitrary)
action_spectrum=action_spectrum*(1./(np.interp(190., 0.5*(wav_left+wav_right), action_spectrum)))
####Step 3: Compute action-spectrum weighted total intensity
weighted_surface_intensity=surf_int*action_spectrum
total_weighted_radiance=np.sum(weighted_surface_intensity*delta_wav) #units: photons/cm2/s
####Step 4 (Optional): Plot various components of action spectrum to show the multiplication
if produceplots:
legendfontsize=12
axisfontsize=12
##Plot ribonucleotide absorption and interpolation
fig1, axarr=plt.subplots(3,2,sharex=True, figsize=(8., 10.5)) #specify figure size (width, height) in inches
axarr[0,0].bar(wav_left, surf_int,width=delta_wav, color='black', alpha=0.5, log=True)
axarr[0,0].set_ylim([1e10,1e16])
axarr[0,0].legend(loc=2, prop={'size':legendfontsize})
axarr[0,0].yaxis.grid(True)
axarr[0,0].xaxis.grid(True)
axarr[0,0].set_ylabel('Surface Radiance \n(photons cm$^{-2}$s$^{-1}$nm$^{-1}$)', fontsize=axisfontsize)
#axarr[0,0].title.set_position([0.5, 1.11])
#axarr[0,0].text(0.5, 1.1, r'a(i)', transform=axarr[0].transAxes, va='top')
axarr[1,0].bar(wav_left, cucn3_molabs_dist,width=delta_wav, color='black', alpha=0.5, log=True)
#axarr[1,0].set_ylim([-0.1, 1.1])
axarr[1,0].legend(loc=6, prop={'size':legendfontsize})
axarr[1,0].yaxis.grid(True)
axarr[1,0].xaxis.grid(True)
axarr[1,0].set_ylabel('CuCN3 Molar Absorptivity\n(M$^{-1}$cm$^{-1}$)', fontsize=axisfontsize)
#axarr[1,0].text(0.5, 1.10, r'b(i)', fontsize=12, transform=axarr[1].transAxes, va='top')
axarr[2,0].bar(wav_left, qy_dist,width=delta_wav, color='black', alpha=0.5)
axarr[2,0].set_ylim([-0.01, 0.06])
axarr[2,0].legend(loc=6, prop={'size':legendfontsize})
axarr[2,0].yaxis.grid(True)
axarr[2,0].xaxis.grid(True)
axarr[2,0].set_ylabel('Quantum Efficiency \n(reductions absorption$^{-1}$)', fontsize=axisfontsize)
#axarr[2,0].text(0.5, 1.10, r'c(i)', fontsize=12,transform=axarr[2].transAxes, va='top')
axarr[0,1].bar(wav_left, action_spectrum,width=delta_wav, color='black', alpha=0.5)
#axarr[0,1].set_ylim([-0.1, 1.1])
axarr[0,1].legend(loc=6, prop={'size':legendfontsize})
axarr[0,1].yaxis.grid(True)
axarr[0,1].xaxis.grid(True)
axarr[0,1].set_ylabel('Action Spectrum', fontsize=axisfontsize)
#axarr[0,1].text(0.5, 1.10, r'b(i)', fontsize=12, transform=axarr[1].transAxes, va='top')
axarr[1,1].bar(wav_left, weighted_surface_intensity,width=delta_wav, color='black', alpha=0.5)
#axarr[1,1].set_ylim([-0.1, 1.1])
axarr[1,1].legend(loc=6, prop={'size':legendfontsize})
axarr[1,1].yaxis.grid(True)
axarr[1,1].xaxis.grid(True)
axarr[1,1].set_ylabel('Weighted Surface Radiance', fontsize=axisfontsize)
#axarr[1,1].text(0.5, 1.10, r'b(i)', fontsize=12, transform=axarr[1].transAxes, va='top')
#plt.savefig('/home/sranjan/Python/UV/Plots/ritson_assumed_qe_v3.pdf', orientation='portrait',papertype='letter', format='pdf')
plt.show()
if returnxy:
return 0.5*(wav_left+wav_right), action_spectrum
else:
return total_weighted_radiance
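# Note added for clarity (the same pattern is used in ump_glycosidic_photol
# below): the dose computed above is the discrete weighted integral
#   D = sum_i I_i * A_i * dlambda_i ~ integral of I(lambda)*A(lambda) dlambda
# with I the binned surface radiance, A the normalized action spectrum, and
# dlambda the bin widths.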
def ump_glycosidic_photol(wav_left, wav_right, surf_int, lambda0, produceplots, returnxy):
"""
    Weights the input surface intensities by the action spectrum for cleavage of the glycosidic bond in UMP (the U-RNA monomer), aka base release. We form this spectrum by convolving the pH=7.6 absorption spectrum for Uridine-3'-(2')-phosphoric acid (i.e. uridylic acid, UMP) from Voet et al (1963) with an assumed QY curve. The QY curve is based on the work of Gurzadyan and Gorner (1994); they measure (wavelength, QY) for N-glycosidic bond cleavage in UMP in anoxic aqueous solution (Ar-suffused) to be (193 nm, 4.3e-3) and (254 nm, (2-3)e-5). Specifically, we assume that QY=4.3e-3 for lambda<=lambda_0 and QY=2.5e-5 for lambda>lambda_0. Natural choices of lambda_0 are 193, 254, and 230 (first two: empirical limits; last: end of the pi-pi* absorption band, which Sinsheimer+1949 suggest is the onset of irreversible photolytic damage).
This process is a stressor for abiogenesis.
wav_left: left edge of wavelength bin, in nm
wav_right: right edge of wavelength bin, in nm
surf_int: total surface intensity (radiance, hemispherically-integrated) in photons/cm2/s/nm, in bin defined by wav_left and wav_right
    lambda0: value assumed for lambda0.
produceplots: if True, shows plots of what it is computing
returnxy: if True, returns x,y for action spectrum.
"""
    ####Step 1: reduce input spectrum to match bounds of available dataset (absorption).
    int_min=184.0 #This lower limit of integration is set by the limits of the UMP absorption dataset (left edge of bin)
    int_max=299.0 #This upper limit of integration is set by the limits of the UMP absorption dataset (right edge of bin)
allowed_inds=np.where((wav_left>=int_min) & (wav_right<=int_max)) #indices that correspond to included data
wav_left=wav_left[allowed_inds]
wav_right=wav_right[allowed_inds]
surf_int=surf_int[allowed_inds]
delta_wav=wav_right-wav_left #size of wavelength bins in nm
####Step 2: form the action spectrum from the absorption spectrum and QY curve.
#Import the UMP absorption spectrum from Voet et al 1963
importeddata=np.genfromtxt('./Raw_Data/Voet_Data/ribouridine_pH_7.3_v2.txt', skip_header=0, delimiter=',')
ump_wav=importeddata[:,0] #wav in nm
ump_molabs=importeddata[:,1] #molar absorptivities\times 10^{3}, i.e. in units of 10^{-3} L/(mol*cm), decadic (I think -- unit scheme unclear in paper. Not important since normalized out)
ump_molabs_func=interp.interp1d(ump_wav, ump_molabs, kind='linear') #functionalized form of molar absorption
#does not matter if you use decadic or natural logarithmic as constant factors normalize out anyway
#Formulate the step-function quantum yield curve
def qy_stepfunc(wav, lambda0): #step function, for the photoionization model
"""QY based on work of Gurzadyan and Gorner 1994"""
qy=np.zeros(np.size(wav))# initialize all to zero
inds1=np.where(wav<=lambda0) #indices where the wavelength is below the threshold
        inds2=np.where(wav>lambda0) #indices where the wavelength is above the threshold
qy[inds1]=qy[inds1]+4.3e-3 #High QY for lambda<=lambda0
qy[inds2]=qy[inds2]+2.5e-5 #Low QY for lambda>lambda0
return qy
#Integrate these quantities to match the input spectral resolution
qy_dist=np.zeros(np.shape(wav_left))#initialize variable to hold the QY integrated over the surface intensity wavelength bins
ump_molabs_dist=np.zeros(np.shape(wav_left))#initialize variable to hold the UMP absorption integrated over the surface intensity wavelength bins
for ind in range(0, len(wav_left)):
leftedge=wav_left[ind]
rightedge=wav_right[ind]
ump_molabs_dist[ind]=scipy.integrate.quad(ump_molabs_func, leftedge, rightedge, epsabs=0, epsrel=1e-5)[0]/(rightedge-leftedge)
qy_dist[ind]=scipy.integrate.quad(qy_stepfunc, leftedge, rightedge, args=(lambda0),epsabs=0, epsrel=1e-5)[0]/(rightedge-leftedge)
action_spectrum=ump_molabs_dist*qy_dist
    #Normalize action spectrum to 1 at 190 nm (arbitrary)
action_spectrum=action_spectrum*(1./(np.interp(190., 0.5*(wav_left+wav_right), action_spectrum)))
####Step 3: Compute action-spectrum weighted total intensity
weighted_surface_intensity=surf_int*action_spectrum
total_weighted_radiance=np.sum(weighted_surface_intensity*delta_wav) #units: photons/cm2/s
####Step 4 (Optional): Plot various components of action spectrum to show the multiplication
if produceplots:
legendfontsize=12
axisfontsize=12
##Plot ribonucleotide absorption and interpolation
fig1, axarr=plt.subplots(3,2,sharex=True, figsize=(8., 10.5)) #specify figure size (width, height) in inches
axarr[0,0].bar(wav_left, surf_int,width=delta_wav, color='black', alpha=0.5, log=True)
axarr[0,0].set_ylim([1e10,1e16])
axarr[0,0].legend(loc=2, prop={'size':legendfontsize})
axarr[0,0].yaxis.grid(True)
axarr[0,0].xaxis.grid(True)
axarr[0,0].set_ylabel('Surface Radiance \n(photons cm$^{-2}$s$^{-1}$nm$^{-1}$)', fontsize=axisfontsize)
#axarr[0,0].title.set_position([0.5, 1.11])
#axarr[0,0].text(0.5, 1.1, r'a(i)', transform=axarr[0].transAxes, va='top')
axarr[1,0].bar(wav_left, ump_molabs_dist,width=delta_wav, color='black', alpha=0.5, log=False)
#axarr[1,0].set_ylim([-0.1, 1.1])
axarr[1,0].legend(loc=6, prop={'size':legendfontsize})
axarr[1,0].yaxis.grid(True)
axarr[1,0].xaxis.grid(True)
axarr[1,0].set_ylabel('UMP Molar Absorptivity\n(M$^{-1}$cm$^{-1}$)', fontsize=axisfontsize)
#axarr[1,0].text(0.5, 1.10, r'b(i)', fontsize=12, transform=axarr[1].transAxes, va='top')
axarr[2,0].bar(wav_left, qy_dist,width=delta_wav, color='black', alpha=0.5, log=True)
axarr[2,0].set_ylim([1e-5, 1e-2])
axarr[2,0].legend(loc=6, prop={'size':legendfontsize})
axarr[2,0].yaxis.grid(True)
axarr[2,0].xaxis.grid(True)
        axarr[2,0].set_ylabel('Quantum Efficiency \n(cleavages absorption$^{-1}$)', fontsize=axisfontsize)
#axarr[2,0].text(0.5, 1.10, r'c(i)', fontsize=12,transform=axarr[2].transAxes, va='top')
axarr[0,1].bar(wav_left, action_spectrum,width=delta_wav, color='black', alpha=0.5)
#axarr[0,1].set_ylim([-0.1, 1.1])
axarr[0,1].legend(loc=6, prop={'size':legendfontsize})
axarr[0,1].yaxis.grid(True)
axarr[0,1].xaxis.grid(True)
axarr[0,1].set_ylabel('Action Spectrum', fontsize=axisfontsize)
#axarr[0,1].text(0.5, 1.10, r'b(i)', fontsize=12, transform=axarr[1].transAxes, va='top')
axarr[1,1].bar(wav_left, weighted_surface_intensity,width=delta_wav, color='black', alpha=0.5)
#axarr[1,1].set_ylim([-0.1, 1.1])
axarr[1,1].legend(loc=6, prop={'size':legendfontsize})
axarr[1,1].yaxis.grid(True)
axarr[1,1].xaxis.grid(True)
axarr[1,1].set_ylabel('Weighted Surface Radiance', fontsize=axisfontsize)
#axarr[1,1].text(0.5, 1.10, r'b(i)', fontsize=12, transform=axarr[1].transAxes, va='top')
#plt.savefig('/home/sranjan/Python/UV/Plots/ritson_assumed_qe_v3.pdf', orientation='portrait',papertype='letter', format='pdf')
plt.show()
if returnxy:
return 0.5*(wav_left+wav_right), action_spectrum
else:
return total_weighted_radiance
########################
###Plot UV Dosimeters
########################
if plotactionspec:
#Set up wavelength scale
wave_left=np.arange(100., 500.)
wave_right=np.arange(101., 501.)
wave_centers=0.5*(wave_left+wave_right)
surf_int=np.ones(np.shape(wave_centers)) #for our purposes here, this is a thunk.
#Extract action spectra
wav_gly_193, actspec_gly_193=ump_glycosidic_photol(wave_left, wave_right, surf_int, 193., False, True)
wav_gly_230, actspec_gly_230=ump_glycosidic_photol(wave_left, wave_right, surf_int, 230., False, True)
wav_gly_254, actspec_gly_254=ump_glycosidic_photol(wave_left, wave_right, surf_int, 254., False, True)
wav_aqe_254, actspec_aqe_254=tricyano_aqe_prodrate(wave_left, wave_right, surf_int, 254., False, True)
wav_aqe_300, actspec_aqe_300=tricyano_aqe_prodrate(wave_left, wave_right, surf_int, 300., False, True)
#####Plot action spectra
#Initialize Figure
fig, (ax1)=plt.subplots(1, figsize=(cm2inch(16.5),6), sharex=True)
colorseq=iter(cm.rainbow(np.linspace(0,1,5)))
#Plot Data
ax1.plot(wav_gly_193,actspec_gly_193, linestyle='-',linewidth=2, color=next(colorseq), label=r'UMP Gly Bond Cleavage ($\lambda_0=193$)')
ax1.plot(wav_gly_230,actspec_gly_230, linestyle='-',linewidth=2, color=next(colorseq), label=r'UMP Gly Bond Cleavage ($\lambda_0=230$)')
ax1.plot(wav_gly_254,actspec_gly_254, linestyle='-',linewidth=2, color=next(colorseq), label=r'UMP Gly Bond Cleavage ($\lambda_0=254$)')
ax1.plot(wav_aqe_254,actspec_aqe_254, linestyle='-',linewidth=2, color=next(colorseq), label=r'CuCN$_{3}$$^{2-}$ Photoionization ($\lambda_0=254$)')
ax1.plot(wav_aqe_300,actspec_aqe_300, linestyle='--',linewidth=2, color=next(colorseq), label=r'CuCN$_{3}$$^{2-}$ Photoionization ($\lambda_0=300$)')
#####Finalize and save figure
ax1.set_title(r'Action Spectra')
ax1.set_xlim([180.,360.])
ax1.set_xlabel('nm')
ax1.set_ylabel(r'Relative Sensitivity')
ax1.set_yscale('log')
ax1.set_ylim([1e-6, 1e2])
#ax1.legend(bbox_to_anchor=[0, 1.1, 1,1], loc=3, ncol=2, mode='expand', borderaxespad=0., fontsize=10)
ax1.legend(loc='upper right', ncol=1, fontsize=10)
plt.tight_layout(rect=(0,0,1,1))
plt.savefig('./Plots/actionspectra.eps', orientation='portrait',papertype='letter', format='eps')
if plotactionspec_talk:
#Set up wavelength scale
wave_left=np.arange(100., 500.)
wave_right=np.arange(101., 501.)
wave_centers=0.5*(wave_left+wave_right)
surf_int=np.ones(np.shape(wave_centers)) #for our purposes here, this is a thunk.
#Extract action spectra
wav_gly_193, actspec_gly_193=ump_glycosidic_photol(wave_left, wave_right, surf_int, 193., False, True)
wav_gly_230, actspec_gly_230=ump_glycosidic_photol(wave_left, wave_right, surf_int, 230., False, True)
wav_gly_254, actspec_gly_254=ump_glycosidic_photol(wave_left, wave_right, surf_int, 254., False, True)
wav_aqe_254, actspec_aqe_254=tricyano_aqe_prodrate(wave_left, wave_right, surf_int, 254., False, True)
wav_aqe_300, actspec_aqe_300=tricyano_aqe_prodrate(wave_left, wave_right, surf_int, 300., False, True)
#####Plot action spectra
#Initialize Figure
fig, (ax1)=plt.subplots(1, figsize=(10,9), sharex=True)
colorseq=iter(cm.rainbow(np.linspace(0,1,5)))
#Plot Data
ax1.plot(wav_gly_193,actspec_gly_193, linestyle='-',linewidth=3, color=next(colorseq), label=r'UMP-193')
ax1.plot(wav_gly_230,actspec_gly_230, linestyle='-',linewidth=3, color=next(colorseq), label=r'UMP-230')
ax1.plot(wav_gly_254,actspec_gly_254, linestyle='-',linewidth=3, color=next(colorseq), label=r'UMP-254')
ax1.plot(wav_aqe_254,actspec_aqe_254, linestyle='-',linewidth=3, color=next(colorseq), label=r'CuCN3-254')
ax1.plot(wav_aqe_300,actspec_aqe_300, linestyle='--',linewidth=3, color=next(colorseq), label=r'CuCN3-300')
#####Finalize and save figure
ax1.set_title(r'Action Spectra', fontsize=24)
ax1.set_xlim([180.,360.])
ax1.set_xlabel('nm',fontsize=24)
ax1.set_ylabel(r'Relative Sensitivity', fontsize=24)
ax1.set_yscale('log')
ax1.set_ylim([1e-6, 1e2])
ax1.legend(bbox_to_anchor=[0, 1.1, 1,0.5], loc=3, ncol=2, mode='expand', borderaxespad=0., fontsize=24)
#ax1.legend(loc='upper right', ncol=1, fontsize=16)
ax1.xaxis.set_tick_params(labelsize=24)
ax1.yaxis.set_tick_params(labelsize=24)
plt.tight_layout(rect=(0,0,1,0.75))
plt.savefig('./TalkFigs/actionspectra.pdf', orientation='portrait',papertype='letter', format='pdf')
########################
###Set "base" values to normalize the alb-zen, co2, and alt-gas dosimeters by
########################
#Use the TOA flux in order to get a good, physically understandable denominator.
wav_leftedges, wav_rightedges, wav, toa_intensity=np.genfromtxt('./TwoStreamOutput/AlbZen/rugheimer_earth_epoch0_a=0.2_z=60.dat', skip_header=1, skip_footer=0, usecols=(0, 1,2, 3), unpack=True)
toa_intensity_photons=toa_intensity*(wav/(hc))
#Compute base doses
intrad100_165_base=integrated_radiance(wav_leftedges, wav_rightedges, toa_intensity_photons, 100, 165.) #This measures the flux vulnerable to activity
intrad200_300_base=integrated_radiance(wav_leftedges, wav_rightedges, toa_intensity_photons, 200., 300.) #This is just an empirical gauge.
umpgly_193_base=ump_glycosidic_photol(wav_leftedges, wav_rightedges, toa_intensity_photons, 193., False, False)
umpgly_230_base=ump_glycosidic_photol(wav_leftedges, wav_rightedges, toa_intensity_photons,230., False, False)
umpgly_254_base=ump_glycosidic_photol(wav_leftedges, wav_rightedges, toa_intensity_photons, 254., False, False)
tricyano254_base=tricyano_aqe_prodrate(wav_leftedges, wav_rightedges, toa_intensity_photons, 254., False, False)
tricyano300_base=tricyano_aqe_prodrate(wav_leftedges, wav_rightedges, toa_intensity_photons, 300., False, False)
########################
###Run code for albedo, zenith angle
########################
if calculatealbaz:
#Evaluate only two zenith angles (to show range of variation)
zenithangles=['66.5', '0']
albedos=['tundra', 'ocean', 'desert', 'oldsnow', 'newsnow']
for zenind in range(0, len(zenithangles)):
zenithangle=zenithangles[zenind]
for albind in range(0, len(albedos)):
albedo=albedos[albind]
datafile='./TwoStreamOutput/AlbZen/rugheimer_earth_epoch0_a='+albedo+'_z='+zenithangle+'.dat'
left, right, surface_int=get_UV(datafile)
intrad100_165=integrated_radiance(left, right, surface_int, 100, 165.) #This measures the flux vulnerable to activity
intrad200_300=integrated_radiance(left, right, surface_int, 200., 300.) #This is just an empirical gauge.
umpgly_193=ump_glycosidic_photol(left, right, surface_int, 193., False, False)
umpgly_230=ump_glycosidic_photol(left, right, surface_int,230., False, False)
umpgly_254=ump_glycosidic_photol(left, right, surface_int, 254., False, False)
tricyano254=tricyano_aqe_prodrate(left, right, surface_int, 254., False, False)
tricyano300=tricyano_aqe_prodrate(left, right, surface_int, 300., False, False)
line=np.array([zenithangle, albedo, intrad100_165/intrad100_165_base,intrad200_300/intrad200_300_base, umpgly_193/umpgly_193_base, umpgly_230/umpgly_230_base, umpgly_254/umpgly_254_base, tricyano254/tricyano254_base, tricyano300/tricyano300_base])
if (albind==0 and zenind==0):
albzentable=line #need to initialize in this case
else:
albzentable=np.vstack((albzentable, line))
#Save output
f=open('./Doses/albzen_uv_doses.dat','w')
f.write('All Dosimeters Normalized to Space Radiation Case\n')
np.savetxt(f, albzentable, delimiter=' ', fmt='%s', newline='\n', header='Zenith Angle & Albedo & Radiance (100-165 nm) & Radiance (200-300 nm) & UMP Gly Cleavage (lambda0=193nm) & UMP Gly Cleavage (lambda0=230nm) & UMP Gly Cleavage (lambda0=254nm) & CuCN3 Photoionization (lambda0=254 nm) & CuCN3 Photoionization (lambda0=300 nm)\n')
f.close()
########################
###Run code for varying CO2 levels
########################
if calculateco2:
N_co2_rugh=2.09e24 #column density of CO2 in Rugheimer base model (cm**-2)
co2multiples=np.array([0., 1.e-6,1.e-5, 1.e-4, 1.e-3, 0.00893, 1.e-2, 1.e-1, 0.6, 1., 1.33, 1.e1, 46.6, 1.e2, 470., 1.e3])
zenithangles=['0', '66.5']
albedos=['newsnow', 'tundra']
for surfind in range(0, len(zenithangles)):
albedo=albedos[surfind]
zenithangle=zenithangles[surfind]
for multind in range(0, len(co2multiples)):
multiple=co2multiples[multind]
colden_co2=N_co2_rugh*multiple
datafile='./TwoStreamOutput/CO2lim/surface_intensities_co2limits_co2multiple='+str(multiple)+'_a='+albedo+'_z='+zenithangle+'.dat'
left, right, surface_int=get_UV(datafile)
intrad100_165=integrated_radiance(left, right, surface_int, 100, 165.) #This measures the flux vulnerable to activity
intrad200_300=integrated_radiance(left, right, surface_int, 200., 300.) #This is just an empirical gauge.
umpgly_193=ump_glycosidic_photol(left, right, surface_int, 193., False, False)
umpgly_230=ump_glycosidic_photol(left, right, surface_int,230., False, False)
umpgly_254=ump_glycosidic_photol(left, right, surface_int, 254., False, False)
tricyano254=tricyano_aqe_prodrate(left, right, surface_int, 254., False, False)
tricyano300=tricyano_aqe_prodrate(left, right, surface_int, 300., False, False)
#print intrad200_300
#pdb.set_trace()
line=np.array([zenithangle, albedo, colden_co2, intrad100_165/intrad100_165_base,intrad200_300/intrad200_300_base, umpgly_193/umpgly_193_base, umpgly_230/umpgly_230_base, umpgly_254/umpgly_254_base, tricyano254/tricyano254_base, tricyano300/tricyano300_base])
if (multind==0 and surfind==0):
co2table=line #need to initialize in this case
else:
co2table=np.vstack((co2table, line))
#Save Output
f=open('./Doses/co2_uv_doses.dat','w')
f.write('All Dosimeters Normalized to Space Radiation Case\n')
    np.savetxt(f, co2table, delimiter=' ', fmt='%s', newline='\n', header='Zenith Angle & Albedo & CO2 Column Density (cm-2) & Radiance (100-165 nm) & Radiance (200-300 nm) & UMP Gly Cleavage (lambda0=193nm) & UMP Gly Cleavage (lambda0=230nm) & UMP Gly Cleavage (lambda0=254nm) & CuCN3 Photoionization (lambda0=254 nm) & CuCN3 Photoionization (lambda0=300 nm)\n')
f.close()
########################
###Run code for alternate gas absorption.
########################
if calculatealtgas:
#####Set up info about the files to extract # All are the maximum possible natural surface radiance case (z=0, albedo=fresh snow) aka "max"
N_tot=2.0925e25#total column density of Rugheimer+2015 model in cm**-2
gaslist=['h2o', 'ch4', 'so2', 'o2', 'o3', 'h2s'] #list of gases we are doing this for
base_abundances=np.array([4.657e-3, 1.647e-6, 3.548e-11, 2.241e-6, 8.846e-11, 7.097e-11]) #molar concentration of each of these gases in the Rugheimer model.
gasmultiples={}#dict holding the multiples of the molar concentration we are using
gasmultiples['h2o']=np.array([1.e-5, 1.e-4, 1.e-3, 1.e-2, 1.e-1, 1., 1.e1, 1.e2, 1.e3, 1.e4, 1.e5])
gasmultiples['ch4']=np.array([1.e-5, 1.e-4, 1.e-3, 1.e-2, 1.e-1, 1., 1.e1, 1.e2, 1.e3, 1.e4, 1.e5])
gasmultiples['so2']=np.array([1.e-5, 1.e-4, 1.e-3, 1.e-2, 1.e-1, 1., 1.e1, 1.e2, 1.e3, 1.e4, 1.e5, 1.e6, 1.e7])
gasmultiples['o2']=np.array([1.e-5, 1.e-4, 1.e-3, 1.e-2, 1.e-1, 1., 1.e1, 1.e2, 1.e3, 1.e4, 1.e5])
gasmultiples['o3']=np.array([1.e-5, 1.e-4, 1.e-3, 1.e-2, 1.e-1, 1., 1.e1, 1.e2, 1.e3, 1.e4, 1.e5])
gasmultiples['h2s']=np.array([1.e-5, 1.e-4, 1.e-3, 1.e-2, 1.e-1, 1., 1.e1, 1.e2, 1.e3, 1.e4, 1.e5, 1.e6, 1.e7])
#####In a loop, extract the files and compute the statistics
for gasind in range(0, len(gaslist)):
gas=gaslist[gasind]
base_abundance=base_abundances[gasind]
multiples=gasmultiples[gas]
for multind in range(0, len(multiples)):
multiple=multiples[multind]
colden_X=base_abundance*multiple*N_tot #total column density of gas X
datafile='./TwoStreamOutput/gaslim/surface_intensities_'+gas+'limits_'+gas+'multiple='+str(multiple)+'_a=newsnow_z=0.dat'
left, right, surface_int=get_UV(datafile)
intrad100_165=integrated_radiance(left, right, surface_int, 100, 165.) #This measures the flux vulnerable to activity
intrad200_300=integrated_radiance(left, right, surface_int, 200., 300.) #This is just an empirical gauge.
umpgly_193=ump_glycosidic_photol(left, right, surface_int, 193., False, False)
umpgly_230=ump_glycosidic_photol(left, right, surface_int,230., False, False)
umpgly_254=ump_glycosidic_photol(left, right, surface_int, 254., False, False)
tricyano254=tricyano_aqe_prodrate(left, right, surface_int, 254., False, False)
tricyano300=tricyano_aqe_prodrate(left, right, surface_int, 300., False, False)
line=np.array([gas, colden_X, intrad100_165/intrad100_165_base,intrad200_300/intrad200_300_base, umpgly_193/umpgly_193_base, umpgly_230/umpgly_230_base, umpgly_254/umpgly_254_base, tricyano254/tricyano254_base, tricyano300/tricyano300_base])
if (multind==0):
altgastable=line #need to initialize in this case
else:
altgastable=np.vstack((altgastable, line))
f=open('./Doses/'+gas+'_uv_doses.dat','w')
f.write('All Dosimeters Normalized to Space Radiation Case\n')
np.savetxt(f, altgastable, delimiter=' & ', fmt='%s', newline='\n', header='Gas & Column Density (cm-2) & Radiance (100-165 nm) & Radiance (200-300 nm) & UMP Gly Cleavage (lambda0=193nm) & UMP Gly Cleavage (lambda0=230nm) & UMP Gly Cleavage (lambda0=254nm) & CuCN3 Photoionization (lambda0=254 nm) & CuCN3 Photoionization (lambda0=300 nm)\n')
f.close()
########################
###Wrap Up
########################
plt.show()
| mit |
dtkav/naclports | ports/ipython-ppapi/kernel.py | 7 | 12026 | # Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A simple shell that uses the IPython messaging system."""
# Override platform information.
import platform
platform.system = lambda: "pnacl"
platform.release = lambda: "chrome"
import time
import json
import logging
import sys
import Queue
import thread
stdin_input = Queue.Queue()
shell_input = Queue.Queue()
stdin_output = Queue.Queue()
shell_output = Queue.Queue()
iopub_output = Queue.Queue()
sys_stdout = sys.stdout
sys_stderr = sys.stderr
def emit(s):
print >> sys_stderr, "EMITTING: %s" % (s)
time.sleep(1)
import IPython
from IPython.core.interactiveshell import InteractiveShell, InteractiveShellABC
from IPython.utils.traitlets import Type, Dict, Instance
from IPython.core.displayhook import DisplayHook
from IPython.utils import py3compat
from IPython.utils.py3compat import builtin_mod
from IPython.utils.jsonutil import json_clean, encode_images
from IPython.core.displaypub import DisplayPublisher
from IPython.core.error import StdinNotImplementedError  # used by _no_raw_input below
from IPython.config.configurable import Configurable
# module defined in shell.cc for communicating via pepper API
from pyppapi import nacl_instance
def CreateMessage(msg_type, parent_header=None, content=None):
if parent_header is None:
parent_header = {}
if content is None:
content = {}
return {
'header': {'msg_type': msg_type},
'parent_header': parent_header,
'content': content,
'msg_type': msg_type,
}
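# Illustrative only (not part of the original file): a status message built
# with CreateMessage('status', content={'execution_state': 'idle'}) is
#   {'header': {'msg_type': 'status'}, 'parent_header': {},
#    'content': {'execution_state': 'idle'}, 'msg_type': 'status'}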
class MsgOutStream(object):
"""Class to overrides stderr and stdout."""
def __init__(self, stream_name):
self._stream_name = stream_name
self._parent_header = {}
def SetParentHeader(self, parent_header):
self._parent_header = parent_header
def close(self):
pass
def flush(self):
pass
def write(self, string):
iopub_output.put(CreateMessage('stream', parent_header=self._parent_header,
content={'name': self._stream_name, 'data': string}))
def writelines(self, sequence):
for string in sequence:
self.write(string)
# override sys.stdout and sys.stderr to broadcast on iopub
stdout_stream = MsgOutStream('stdout')
stderr_stream = MsgOutStream('stderr')
sys.stdout = stdout_stream
sys.stderr = stderr_stream
class PepperShellDisplayHook(DisplayHook):
parent_header = Dict({})
def set_parent_header(self, parent_header):
"""Set the parent for outbound messages."""
self.parent_header = parent_header
def start_displayhook(self):
self.content = {}
def write_output_prompt(self):
self.content['execution_count'] = self.prompt_count
def write_format_data(self, format_dict, md_dict=None):
self.content['data'] = encode_images(format_dict)
self.content['metadata'] = md_dict
def finish_displayhook(self):
sys.stdout.flush()
sys.stderr.flush()
iopub_output.put(CreateMessage('pyout', parent_header=self.parent_header,
content=self.content))
self.content = None
class PepperDisplayPublisher(DisplayPublisher):
parent_header = Dict({})
def set_parent_header(self, parent_header):
self.parent_header = parent_header
def _flush_streams(self):
"""flush IO Streams prior to display"""
sys.stdout.flush()
sys.stderr.flush()
def publish(self, source, data, metadata=None):
self._flush_streams()
if metadata is None:
metadata = {}
self._validate_data(source, data, metadata)
content = {}
content['source'] = source
content['data'] = encode_images(data)
content['metadata'] = metadata
iopub_output.put(CreateMessage('display_data', content=json_clean(content),
parent_header=self.parent_header))
def clear_output(self, stdout=True, stderr=True, other=True):
content = dict(stdout=stdout, stderr=stderr, other=other)
if stdout:
sys.stdout.write('\r')
if stderr:
sys.stderr.write('\r')
self._flush_streams()
iopub_output.put(CreateMessage('clear_output', content=content,
parent_header=self.parent_header))
class PepperInteractiveShell(InteractiveShell):
"""A subclass of InteractiveShell for the Pepper Messagin API."""
displayhook_class = Type(PepperShellDisplayHook)
display_pub_class = Type(PepperDisplayPublisher)
@staticmethod
def enable_gui(gui):
pass
InteractiveShellABC.register(PepperInteractiveShell)
class PepperKernel(Configurable):
shell = Instance('IPython.core.interactiveshell.InteractiveShellABC')
shell_class = Type(PepperInteractiveShell)
def __init__(self):
self.shell = self.shell_class.instance(parent=self)
self.shell.run_cell("""
import os
matplotlib_config_dir = '/mplconfigdir'
os.environ['XDG_CONFIG_HOME'] = matplotlib_config_dir
os.environ['TMP'] = ''
import matplotlib
import matplotlib.cbook
""")
shell = PepperKernel().shell
# Taken from IPython 2.x branch, IPython/kernel/zmq/ipykernel.py
def _complete(msg):
c = msg['content']
try:
cpos = int(c['cursor_pos'])
except:
# If we don't get something that we can convert to an integer, at
# least attempt the completion guessing the cursor is at the end of
# the text, if there's any, and otherwise of the line
cpos = len(c['text'])
if cpos==0:
cpos = len(c['line'])
return shell.complete(c['text'], c['line'], cpos)
# Special message to indicate the NaCl kernel is ready.
iopub_output.put(CreateMessage('status', content={'execution_state': 'nacl_ready'}))
def _no_raw_input():
    """Raise StdinNotImplementedError if the active frontend doesn't support
    stdin."""
    raise StdinNotImplementedError("raw_input was called, but this "
                                   "frontend does not support stdin.")
def _raw_input(prompt, parent_header):
# Flush output before making the request.
sys.stderr.flush()
sys.stdout.flush()
# flush the stdin socket, to purge stale replies
while True:
try:
stdin_input.get_nowait()
except Queue.Empty:
break
# Send the input request.
content = json_clean(dict(prompt=prompt))
stdin_output.put(CreateMessage('input_request', content=content,
parent_header=parent_header))
# Await a response.
while True:
try:
reply = stdin_input.get()
except Exception:
print "Invalid Message"
except KeyboardInterrupt:
# re-raise KeyboardInterrupt, to truncate traceback
raise KeyboardInterrupt
else:
break
try:
value = py3compat.unicode_to_str(reply['content']['value'])
except:
print "Got bad raw_input reply: "
print reply
value = ''
if value == '\x04':
# EOF
raise EOFError
return value
def main_loop():
execution_count = 1
while 1:
iopub_output.put(CreateMessage('status', content={'execution_state': 'idle'}))
msg = shell_input.get()
iopub_output.put(CreateMessage('status', content={'execution_state': 'busy'}))
        if 'header' not in msg:
continue
request_header = msg['header']
        if 'msg_type' not in request_header:
continue
msg_type = request_header['msg_type']
if msg_type == 'execute_request':
try:
content = msg[u'content']
code = content[u'code']
silent = content[u'silent']
store_history = content.get(u'store_history', not silent)
except:
self.log.error("Got bad msg: ")
self.log.error("%s", msg)
continue
# Replace raw_input. Note that is not sufficient to replace
# raw_input in the user namespace.
if content.get('allow_stdin', False):
raw_input = lambda prompt='': _raw_input(prompt, request_header)
input = lambda prompt='': eval(raw_input(prompt))
else:
raw_input = input = lambda prompt='' : _no_raw_input()
if py3compat.PY3:
_sys_raw_input = builtin_mod.input
builtin_mod.input = raw_input
else:
_sys_raw_input = builtin_mod.raw_input
_sys_eval_input = builtin_mod.input
builtin_mod.raw_input = raw_input
builtin_mod.input = input
# Let output streams know which message the output is for
stdout_stream.SetParentHeader(request_header)
stderr_stream.SetParentHeader(request_header)
shell.displayhook.set_parent_header(request_header)
shell.display_pub.set_parent_header(request_header)
status = 'ok'
content = {}
try:
shell.run_cell(msg['content']['code'],
store_history=store_history,
silent=silent)
except Exception, ex:
status = 'error'
                logging.exception('Exception occurred while running cell')
finally:
# Restore raw_input.
if py3compat.PY3:
builtin_mod.input = _sys_raw_input
else:
builtin_mod.raw_input = _sys_raw_input
builtin_mod.input = _sys_eval_input
content = {'status': status,
'execution_count': execution_count}
if status == 'ok':
content['payload'] = []
content['user_variables'] = {}
content['user_expressions'] = {}
elif status == 'error':
content['ename'] = type(ex).__name__
content['evalue'] = str(ex)
content['traceback'] = []
execution_count += 1
if status == 'error':
iopub_output.put(CreateMessage('pyerr', parent_header=request_header,
content={
'execution_count': execution_count,
'ename': type(ex).__name__,
'evalue': str(ex),
'traceback': []
}
))
shell_output.put(CreateMessage('execute_reply', parent_header=request_header,
content=content))
elif msg_type == 'complete_request':
# Taken from IPython 2.x branch, IPython/kernel/zmq/ipykernel.py
txt, matches = _complete(msg)
matches = {'matches' : matches,
'matched_text' : txt,
'status' : 'ok'}
matches = json_clean(matches)
shell_output.put(CreateMessage('complete_reply',
parent_header = request_header,
content = matches))
elif msg_type == 'object_info_request':
# Taken from IPython 2.x branch, IPython/kernel/zmq/ipykernel.py
content = msg['content']
object_info = shell.object_inspect(content['oname'],
detail_level = content.get('detail_level', 0))
# Before we send this object over, we scrub it for JSON usage
oinfo = json_clean(object_info)
shell_output.put(CreateMessage('object_info_reply',
parent_header = request_header,
content = oinfo))
elif msg_type == 'restart':
# break out of this loop, ending this program.
# The main event loop in shell.cc will then
# run this program again.
break
elif msg_type == 'kill':
# Raise an exception so that the function
# running this script will return -1, resulting
# in no restart of this script.
raise RuntimeError
thread.start_new_thread(main_loop, ())
def deal_message(msg):
channel = msg['stream']
content = json.loads(msg['json'])
queues = {'shell': shell_input, 'stdin': stdin_input}
queue = queues[channel]
queue.put(content)
def send_message(stream, msg):
nacl_instance.send_raw_object({
'stream': stream,
'json': json.dumps(msg)
})
while 1:
msg = nacl_instance.wait_for_message(timeout=1, sleeptime=10000)
try:
deal_message(msg)
except:
pass
output_streams = [
(stdin_output, 'stdin'),
(shell_output, 'shell'),
(iopub_output, 'iopub')
]
for msg_queue, stream in output_streams:
msg = None
try:
msg = msg_queue.get_nowait()
send_message(stream, msg)
except Queue.Empty:
pass
| bsd-3-clause |
aquar25/losslessh264 | plot_prior_misses.py | 40 | 1124 | # Run h264dec on a single file compiled with PRIOR_STATS and then run this script
# Outputs timeseries plot at /tmp/misses.pdf
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import os
def temporal_misses(key):
values = data[key]
numbins = 100
binsize = len(values) // numbins
bins = [[]]
for v in values:
if len(bins[-1]) >= binsize:
bins.append([])
bins[-1].append(v)
x = range(len(bins))
total_misses = float(sum(values))
y = [100 * float(sum(b)) / total_misses for b in bins]
return plt.plot(x, y, label=key)[0]
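# Sketch of the intent (an interpretation of the code above, not from the
# original repo): with data = {'demo': [True] * 50 + [False] * 50} and 100
# bins, the first 50 bins each hold ~2% of the total misses, the rest 0%.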
paths = filter(lambda s: 'misses.log' in s, os.listdir('/tmp/'))
data = {p.split('_misses.')[0]: map(lambda c: c == '0', open('/tmp/' + p).read()) for p in paths}
handles = []
plt.figure(figsize=(20,10))
keys = data.keys()
for k in keys:
handles.append(temporal_misses(k))
plt.axis((0, 100, 0, 2))
plt.xlabel('temporal %')
plt.ylabel('% total misses')
plt.legend(handles, keys, bbox_to_anchor=(1, 1), bbox_transform=plt.gcf().transFigure)
out = PdfPages('/tmp/misses.pdf')
out.savefig()
out.close()
| bsd-2-clause |
lamotriz/sistemas-de-aterramento | src/agilent_u2531a.py | 1 | 14700 | # -*- coding: utf-8 -*-
# Communication with the Agilent U2531A acquisition board
#
# UFC - Universidade Federal do Ceará
#
# Authors:
#     Felipe Bandeira da Silva
#     Francisco Alexander
#
from __future__ import division
import platform
#if platform.system() == 'Windows':
# import visa
#else:
# import visa_linux_emulation as visa
try:
import visa
except:
    # During a normal installation using NSIS, the Windows PATH was not yet
    # updated with Python, so pip could not be run during the installation to
    # install "pyvisa", which by nature pulls in several dependencies that pip
    # handles transparently. Therefore the first run of the program requires
    # an internet connection.
    #
    # PyVISA 1.4 is required for everything to work correctly
    #import pip
    #pip.main(['install', 'pyvisa'])
    import subprocess
    print u"warning: installing PyVISA 1.4"
    subprocess.call(['pip', 'install', 'PyVISA==1.4'])
    print u"warning: installation finished"
import visa
import matplotlib.pyplot as plt
from time import sleep, time, asctime, localtime
import numpy as np
###############################################################################
# Correction constants. The same ones used by the LabVIEW program
###############################################################################
FATOR_CORRECAO_TENSAO = 100
FATOR_CORRECAO_CORRENTE = 2.71
# 0 - do not show messages
# 1 - show debug messages
DEBUG = 0
# A small spurious pulse, pure noise, shows up at the start of the
# acquisition. To display the signal correctly the number of samples
# had to be increased, which makes the acquisition take longer.
#QUANTIDADE_PONTOS = 50000
QUANTIDADE_PONTOS = 800000
###############################################################################
# testBit() returns a nonzero result, 2**offset, if the bit at 'offset' is one.
def testBit(int_type, offset):
mask = 1 << offset
return(int_type & mask)
# setBit() returns an integer with the bit at 'offset' set to 1.
def setBit(int_type, offset):
mask = 1 << offset
return(int_type | mask)
# clearBit() returns an integer with the bit at 'offset' cleared.
def clearBit(int_type, offset):
mask = ~(1 << offset)
return(int_type & mask)
# toggleBit() returns an integer with the bit at 'offset' inverted, 0 -> 1 and 1 -> 0.
def toggleBit(int_type, offset):
mask = 1 << offset
return(int_type ^ mask)
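# Minimal, hypothetical sanity check of the helpers above (not part of the
# original acquisition flow); bit 13 is the flag that the A/D conversion
# routines below test and clear.
if DEBUG:
    raw = setBit(0, 13)            # 0x2000: only bit 13 set
    assert testBit(raw, 13) != 0   # nonzero -> the bit is set
    assert clearBit(raw, 13) == 0  # clearing it recovers zero
    assert toggleBit(0, 0) == 1    # toggling bit 0 of 0 gives 1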
def lerEndian(data):
"""
    Converts a byte sequence into 2-byte values.
    The input sequence is little-endian, with bit 13 acting as the
    carry/flag bit.
    Input:
        data = raw string holding the byte block
    Output:
        t = number of decoded values
        v = vector of decoded values
"""
raw = data[10:]
valores = []
passo = 0
for i in raw:
if passo == 0:
lsb = i
passo = 1
elif passo == 1:
msb = i
passo = 0
num = ((ord(msb)<<8)+(ord(lsb)))>>2
#print hex(num)
valores.append(num)
return [len(valores), valores]
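# Worked example of the decoding above (a sketch, not from the original
# source): the byte pair lsb=0x04, msb=0x9F decodes as
# ((0x9F << 8) + 0x04) >> 2 == 0x27C1, i.e. the 14-bit sample 10177;
# lerEndian applies this to every byte pair after the 10-byte block header.
if DEBUG:
    assert ((0x9F << 8) + 0x04) >> 2 == 10177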
def ler2Endian(data):
"""
    Reads a byte block containing two simultaneous channel readings.
"""
raw = data[10:]
A = []
B = []
passo = 0
for i in raw:
if passo == 0:
lsb = i
passo = 1
elif passo == 1:
msb = i
passo = 2
A.append(((ord(msb)<<8)+(ord(lsb)))>>2)
elif passo == 2:
lsb = i
passo = 3
elif passo == 3:
msb = i
passo = 0
B.append(((ord(msb)<<8)+(ord(lsb)))>>2)
return [len(A), A, B]
def convBIP(raw, range_ad=10, resolution=14):
v = []
for i in raw:
v.append( (2*i)/(2**resolution) * range_ad )
return v
def convUNI(raw, range_ad=10, resolution=14):
v = []
for i in raw:
        # if bit 13 is 1, the number is "negative";
        # the unipolar mapping is:
        # MAX   = 1FFF
        # MAX/2 = 0000
        # 0     = 2000
if testBit(i, 13) > 0:
valor = clearBit(i, 13) - (2**14)/2
v.append( (valor/(2**resolution) + 0.5)*range_ad )
else:
v.append( (i/(2**resolution) + 0.5)*range_ad )
return v
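# Hedged sanity check of the unipolar mapping assumed above: code 0x0000
# maps to mid-scale (+5 V on a 10 V range) and code 0x2000 (bit 13 set)
# maps to the bottom of the range (0 V).
if DEBUG:
    assert convUNI([0x0000], 10) == [5.0]
    assert convUNI([0x2000], 10) == [0.0]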
def lerTensaoCorrente(ag):
"""
    Reads two channels simultaneously:
    channel 101 (current) and 102 (voltage).
"""
    # reset the acquisition board
    ag.write("*CLS")
    ag.write("*RST")
    ag.write("ROUT:ENAB 0,(@103, 104)") # disable channels 103 and 104
    ag.write("ROUT:ENAB 1,(@101, 102)") # enable channels 101 and 102
    ag.write("ROUT:CHAN:RANG 10,(@101, 102)") # same range as the National Instruments program
    ag.write("ROUT:CHAN:POL UNIP,(@101, 102)") # unipolar
    ag.write("ACQ:SRAT 2000000") # sampling frequency
    #ag.write("ACQ:POIN 2000000")
    #ag.write("ACQ:POIN 50000") # number of points per acquisition
    ag.write("ACQ:POIN %d" % QUANTIDADE_PONTOS)
    ####################
    # start acquisition
    ####################
ag.write("DIG")
disparaTensao(ag)
#ag.write("DIG")
while True:
ag.write("WAV:COMP?")
if ag.read() == 'YES':
break
        sleep(0.2) # wait a bit until the sample is ready
    # A small change in the first 555's capacitor means the set and
    # reset need more time to complete.
sleep(.2)
retiraTensao(ag)
ag.write("WAV:DATA?")
dados = ag.read()
t, I, V = ler2Endian(dados)
V = convUNI(V, 10)
I = convUNI(I, 10)
return [dados, V, I]
def lerTensao(ag):
"""
    Reads only the source voltage channel (102),
    with the full set/reset trigger sequence.
"""
# reset
ag.write("*CLS")
ag.write("*RST")
    # start reading channel 102 (voltage)
ag.write("ROUT:ENAB 0,(@103, 101, 104)")
ag.write("ROUT:ENAB 1,(@102)")
ag.write("ROUT:CHAN:RANG 10,(@102)") # coloca no mesmo nivel que o programa da National
ag.write("ROUT:CHAN:POL UNIP,(@102)")
ag.write("ACQ:SRAT 2000000")
#ag.write("ACQ:POIN 2000000")
#ag.write("ACQ:POIN 50000")
    # A small spurious pulse, pure noise, shows up at the start of the
    # acquisition. To display the signal correctly the number of samples
    # had to be increased, which makes the acquisition take longer.
ag.write("ACQ:POIN %d" % (QUANTIDADE_PONTOS))
    # start acquisition
ag.write("DIG")
disparaTensao(ag)
while True:
ag.write("WAV:COMP?")
if ag.read() == 'YES':
break
sleep(0.5)
ag.write("WAV:DATA?")
dados = ag.read()
sleep(.2)
retiraTensao(ag)
#print dados
t, R = lerEndian(dados)
V = convUNI(R, 10)
plt.grid()
plt.plot(range(0, t), V)
plt.show()
return t, V
def lerCorrente(ag):
"""
    Reads only the source current channel (101),
    with the full set/reset trigger sequence.
"""
# reset
ag.write("*CLS")
ag.write("*RST")
    # start reading channel 101 (current)
ag.write("ROUT:ENAB 0,(@103, 102, 104)")
ag.write("ROUT:ENAB 1,(@101)")
ag.write("ROUT:CHAN:RANG 10,(@101)")
ag.write("ROUT:CHAN:POL UNIP,(@101)")
ag.write("ACQ:SRAT 2000000")
ag.write("ACQ:POIN 2000000")
    # start acquisition
ag.write("DIG")
disparaTensao(ag)
while True:
ag.write("WAV:COMP?")
if ag.read() == 'YES':
break
sleep(0.5)
ag.write("WAV:DATA?")
dados = ag.read()
sleep(.2)
retiraTensao(ag)
#print dados
t, R = lerEndian(dados)
V = convUNI(R, 10)
plt.grid()
plt.plot(range(0, t), V)
plt.show()
return t, V
def lerCanal103(ag):
"""
    This channel was used for the initial tests of the analog-to-digital
    conversion and is no longer needed.
    The voltage and current reading functions are identical to this one;
    only the channel changes.
"""
# reset
ag.write("*CLS")
ag.write("*RST")
    # start reading channel 103
ag.write("ROUT:ENAB 0,(@101, 102, 104)")
ag.write("ROUT:ENAB 1,(@103)")
ag.write("ROUT:CHAN:RANG 10,(@103)")
#ag.write("ROUT:CHAN:POL BIP,(@103)")
ag.write("ROUT:CHAN:POL UNIP,(@103)")
ag.write("ACQ:SRAT 2000000")
ag.write("ACQ:POIN 2000000")
    # start acquisition
ag.write("DIG")
    # wait for completion
disparaTensao(ag)
while True:
ag.write("WAV:COMP?")
if ag.read() == 'YES':
break
sleep(0.1)
ag.write("WAV:DATA?")
dados = ag.read()
sleep(.2)
retiraTensao(ag)
#print dados
t, R = lerEndian(dados)
V = convUNI(R)
plt.grid()
plt.plot(range(0, t), V)
return t, V
def disparaTensao(ag):
"""
    Sends a high-voltage pulse to the grounding system,
    triggering the first 555 for that purpose.
    Pulses must not be sent within a short time interval,
    since the source was not designed for that,
    so take care with sequential triggering.
    SET   - pin 68 on board U2901-60602
    RESET - pin 34 on board U2901-60602
"""
ag.write("CONF:DIG:DIR OUTP,(@501)")
ag.write("SOUR:DIG:DATA 1,(@501)")
return 0
def retiraTensao(ag):
"""
    Resets the source, enabling it to send a new
    high-voltage pulse.
"""
ag.write("CONF:DIG:DIR OUTP,(@501)")
ag.write("SOUR:DIG:DATA 0,(@501)") # desabilita o set
sleep(0.1) # espera um tempo para resetar
ag.write("SOUR:DIG:DATA 2,(@501)") # reseta a fonte
sleep(0.1) # espera um tempo para entrar em repouso
ag.write("SOUR:DIG:DATA 0,(@501)") # entra em repouso
return 0
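# Minimal pulse-sequence sketch (assumes a configured `ag` instrument, as
# returned by buscaAgilent below); this mirrors what the read functions do:
#   disparaTensao(ag) # fire the high-voltage pulse (SET)
#   sleep(0.2)        # give the 555 timers time to finish
#   retiraTensao(ag)  # RESET, so a new pulse can be fired later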
def pltTensaoCorrente(V, I):
t1 = np.arange(0, len(V))
plt.figure(1)
plt.title("Leitura do U2531A")
plt.subplot(211)
plt.plot(t1, V)
plt.subplot(212)
plt.plot(t1, I)
plt.show()
def aplicaCorrecoes(V, I):
V = np.array(V)
V = FATOR_CORRECAO_TENSAO * V
I = np.array(I)
I = FATOR_CORRECAO_CORRENTE * I
return [V, I]
def sequenciaAquisicoes(ag, quantidade, local="C:\\Temp", rotulo = '0'):
"""
    Performs sequential acquisitions of the voltage and current channels.
    ag = object used to control the board
    """
    print "Starting sequential acquisition"
    print "Instrument = ", ag
    print "count = ", quantidade
    print "Start time = ", asctime()
tempoInicio = time()
contagem = quantidade
plt.figure(1)
while quantidade > 0:
print "Atual = ", quantidade
tempoIndividual = time()
        # start acquisition
raw, V, I = lerTensaoCorrente(ag)
V, I = aplicaCorrecoes(V, I)
        # plotting this way is not a good idea
#pltTensaoCorrente(V, I)
plt.subplot(211)
plt.plot(np.arange(0, len(V)), V)
plt.subplot(212)
plt.plot(np.arange(0, len(I)), I)
salvaTensaoTXT(local, rotulo, contagem-quantidade+1, V)
salvaCorrenteTXT(local, rotulo, contagem-quantidade+1, I)
print "Individual = ", time()-tempoIndividual
quantidade -=1
total = time()-tempoInicio
    print 'Done in [sec]: ', total
plt.show()
return 0
def salvaTensaoTXT(local, rotulo, posicao, V):
"""
    Saves the voltage vector to a file with a name formatted for it.
"""
nomeCompleto = local+"\\"+rotulo+"V"+str(posicao)+".txt"
return salvaTXT(nomeCompleto, V)
def salvaCorrenteTXT(local, rotulo, posicao, I):
"""
    Saves the current vector to a file with a name formatted for it.
"""
nomeCompleto = local+"\\"+rotulo+"I"+str(posicao)+".txt"
return salvaTXT(nomeCompleto, I)
def salvaTXT(caminhoCompleto, vetor):
"""
    Saves a vector's values to a txt file, where the first
    column gives the index and the second column gives the
    value at that index.
"""
try:
arquivo = open(caminhoCompleto, 'w')
except:
        print 'error: could not write to the file'
        print '     : ', caminhoCompleto
return -1
    # write() needs a string; format each sample as "index value",
    # matching the two-column layout the docstring describes
    for i in range(len(vetor)):
        arquivo.write("%d %f\n" % (i, float(vetor[i])))
arquivo.close()
    # write finished successfully
return 0
def buscaAgilent():
"""
    Finds the instrument connected to the computer's USB port,
    returning the object used by the control functions of the
    Agilent acquisition board.
"""
    listaInstrumentos = visa.get_instruments_list() # get the list of instruments connected to the computer
    listaAgilent = listaInstrumentos[0] # take the first instrument
    print 'Instrument list:'
    print listaAgilent # the instrument is expected to be an Agilent one
    ag = visa.instrument(listaAgilent) # create an object to be handled and passed to the other functions
identificacao = ag.ask("*IDN?")
print identificacao
return ag
###############################################################################
# MAIN #
###############################################################################
if __name__ == '__main__':
    print 'Agilent U2531A'
ag = buscaAgilent()
    ##############################
    # single-channel readout     #
    ##############################
    #lerCanal103(ag)
    #lerTensao(ag)
    #lerCorrente(ag)
    ##########################
    # two-channel readout    #
    ##########################
raw, V, I = lerTensaoCorrente(ag)
V, I = aplicaCorrecoes(V, I)
pltTensaoCorrente(V, I)
    ###########################
    # sequential acquisitions #
    ###########################
    # 60 acquisitions
    # files are saved under "C:\Temp"
#sequenciaAquisicoes(ag, 10)
| apache-2.0 |
xyguo/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 53 | 2668 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate **overfitting** / **underfitting** quantitatively by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set; the higher it is, the less likely the model is to generalize correctly
from the training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_val_score(pipeline, X[:, np.newaxis], y,
scoring="mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
cgarrard/osgeopy-code | Chapter13/listing13_4.py | 1 | 1939 | # Script to draw world countries as patches.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
from osgeo import ogr
def order_coords(coords, clockwise):
"""Orders coordinates."""
total = 0
x1, y1 = coords[0]
for x, y in coords[1:]:
total += (x - x1) * (y + y1)
x1, y1 = x, y
x, y = coords[0]
total += (x - x1) * (y + y1)
is_clockwise = total > 0
if clockwise != is_clockwise:
coords.reverse()
return coords
def make_codes(n):
"""Makes a list of path codes."""
codes = [Path.LINETO] * n
codes[0] = Path.MOVETO
return codes
def plot_polygon_patch(poly, color):
"""Plots a polygon as a patch."""
# Outer clockwise path.
coords = poly.GetGeometryRef(0).GetPoints()
coords = order_coords(coords, True)
codes = make_codes(len(coords))
for i in range(1, poly.GetGeometryCount()):
# Inner counter-clockwise paths.
coords2 = poly.GetGeometryRef(i).GetPoints()
coords2 = order_coords(coords2, False)
codes2 = make_codes(len(coords2))
# Concatenate the paths.
coords = np.concatenate((coords, coords2))
codes = np.concatenate((codes, codes2))
# Add the patch to the plot
path = Path(coords, codes)
patch = patches.PathPatch(path, facecolor=color)
plt.axes().add_patch(patch)
# Loop through all of the features in the countries layer and create
# patches for the polygons.
ds = ogr.Open(r'D:\osgeopy-data\global\ne_110m_admin_0_countries.shp')
lyr = ds.GetLayer(0)
for row in lyr:
geom = row.geometry()
if geom.GetGeometryType() == ogr.wkbPolygon:
plot_polygon_patch(geom, 'yellow')
elif geom.GetGeometryType() == ogr.wkbMultiPolygon:
for i in range(geom.GetGeometryCount()):
plot_polygon_patch(geom.GetGeometryRef(i), 'yellow')
plt.axis('equal')
plt.show()
| mit |
phev8/ward-metrics | wardmetrics/visualisations.py | 1 | 16641 | import matplotlib.pyplot as plt
def plot_events_with_segment_scores(segment_results, ground_truth_events, detected_events, use_datetime_x=False, show=True):
"""
    Plots ground-truth events, detected events, and per-segment scores on a
    shared timeline.
    :param segment_results: list of segment tuples
        ``(start, end, gt_label, det_label, category, score)``, where
        ``category`` is one of ``"TP"``, ``"FP"``, ``"FN"`` or ``"TN"``
    :param ground_truth_events: list of ``(start, end)`` event tuples
    :param detected_events: list of ``(start, end)`` event tuples
    :param use_datetime_x: if True, convert x-axis values to datetime
        (the conversion is still a TODO in the body below)
    :param show: call the blocking ``plt.show()`` if True, else ``plt.draw()``
    :return: None
"""
fig = plt.figure(figsize=(10, 3))
# TODO: convert times to datetime if flag is set
# write y axis labels for ground truth and detections
plt.yticks([0.2, 0.5, 0.8], ["detections", "segment score", "actual events"])
plt.ylim([0, 1])
for d in detected_events:
plt.axvspan(d[0], d[1], 0, 0.5)
for gt in ground_truth_events:
plt.axvspan(gt[0], gt[1], 0.5, 1)
for s in segment_results:
color = "black"
index_of_cat = 4
if s[index_of_cat] == "TP":
color = "green"
elif s[index_of_cat] == "FP":
color = "red"
elif s[index_of_cat] == "FN":
color = "yellow"
elif s[index_of_cat] == "TN":
color = "blue"
# TODO: format text nicely
plt.text((s[1]+s[0])/2, 0.8, s[2], horizontalalignment='center', verticalalignment='center')
plt.text((s[1]+s[0])/2, 0.2, s[3], horizontalalignment='center', verticalalignment='center')
plt.text((s[1]+s[0])/2, 0.5, s[5], horizontalalignment='center', verticalalignment='center')
plt.axvspan(s[0], s[1], 0.4, 0.6, color=color)
plt.axvline(s[0], color="black")
plt.axvline(s[1], color="black")
plt.tight_layout()
if show:
plt.show()
else:
plt.draw()
def plot_events_with_event_scores(gt_event_scores, detected_event_scores, ground_truth_events, detected_events, show=True):
fig = plt.figure(figsize=(10, 3))
for i in range(len(detected_events)):
d = detected_events[i]
plt.axvspan(d[0], d[1], 0, 0.5)
plt.text((d[1] + d[0]) / 2, 0.2, detected_event_scores[i], horizontalalignment='center', verticalalignment='center')
for i in range(len(ground_truth_events)):
gt = ground_truth_events[i]
plt.axvspan(gt[0], gt[1], 0.5, 1)
plt.text((gt[1] + gt[0]) / 2, 0.8, gt_event_scores[i], horizontalalignment='center', verticalalignment='center')
plt.tight_layout()
if show:
plt.show()
else:
plt.draw()
def plot_twoset_metrics(results, startangle=120):
fig1, axarr = plt.subplots(1, 2)
# plot positive rates:
labels_1 = ["tpr", "us", "ue", "fr", "dr"]
values_1 = [
results["tpr"],
results["us"],
results["ue"],
results["fr"],
results["dr"]
]
axarr[0].pie(values_1, labels=labels_1, autopct='%1.0f%%', startangle=startangle)
axarr[0].axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
# TODO: add title
# plot negative rates:
labels_2 = ["1-fpr", "os", "oe", "mr", "ir"]
values_2 = [
1-results["fpr"],
results["os"],
results["oe"],
results["mr"],
results["ir"]
]
axarr[1].pie(values_2, labels=labels_2, autopct='%1.0f%%', startangle=startangle)
axarr[1].axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
# TODO: add title
plt.show()
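
# A minimal usage sketch for plot_twoset_metrics (hypothetical rates, not
# from a real evaluation; matplotlib normalizes each pie, so the values
# need not sum to exactly 1):
# plot_twoset_metrics({"tpr": 0.7, "us": 0.1, "ue": 0.1, "fr": 0.05, "dr": 0.05,
#                      "fpr": 0.2, "os": 0.1, "oe": 0.1, "mr": 0.05, "ir": 0.05})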
def plot_segment_counts(results):
# TODO: add title
labels = results.keys()
values = []
for label in labels:
values.append(results[label])
#explode = (0, 0.1, 0, 0) # only "explode" the 2nd slice (i.e. 'Hogs')
total = sum(values)
fig1, ax1 = plt.subplots()
ax1.pie(values, labels=labels, autopct=lambda p: '{:.0f}'.format(p * total / 100), startangle=90)
ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
plt.show()
def plot_event_analysis_diagram(event_results, **kwargs):
""" Plot the event analysis diagram (EAD) for the given results
Visualisation of the distribution of specific error types either with the actual event count or
showing the percentage of the total events. Elements of the plot can be adjusted (like color, fontsize etc.)
Args:
event_results (dictionary): Dictionary containing event counts for "total_gt", "total_det", "D", "F", "FM", "M",
"C", "M'", "FM'", "F'", "I'" as returned by core_methods.event_metrics' third value
Keyword Arguments:
fontsize (int): Size of the text inside the bar plot (Reduce the value if some event types are too short)
use_percentage (bool): whether percentage values or to show actual event counts on the chart (default: False)
show (bool): whether to call plt.show (blocking) or plt.draw() for later displaying (default: True)
color_deletion: any matplotlib color for deletion events
color_fragmented: any matplotlib color for fragmented ground truth events
color_fragmented_merged: any matplotlib color for merged and fragmented ground truth events
color_merged: any matplotlib color for merged ground truth events
color_correct: any matplotlib color for correct events
color_merging: any matplotlib color for merging detection events
color_merging_fragmenting: any matplotlib color for merging and fragmenting detection events
color_fragmenting: any matplotlib color for merging detection events
color_insertion: any matplotlib color for insertion events
Returns:
matplotlib Figure: matplotlib figure reference
"""
fig = plt.figure(figsize=(10, 2))
total = event_results["total_gt"] + event_results["total_det"] - event_results["C"]
# Layout settings:
y_min = 0.3
y_max = 0.7
width = 0.02
text_x_offset = 0
text_y_pos_1 = 0.55
text_y_pos_2 = 0.4
fontsize = kwargs.pop('fontsize', 10)
fontsize_extern = 12
use_percentage = kwargs.pop('use_percentage', False)
# Color settings:
cmap = plt.get_cmap("Paired")
color_deletion = kwargs.pop('color_deletion', cmap(4))
color_fragmented = kwargs.pop('color_fragmented', cmap(6))
color_fragmented_merged = kwargs.pop('color_fragmented_merged', cmap(0))
color_merged = kwargs.pop('color_merged', cmap(8))
color_correct = kwargs.pop('color_correct', cmap(3))
color_merging = kwargs.pop('color_merging', cmap(9))
color_merging_fragmenting = kwargs.pop('color_merging_fragmenting', cmap(1))
color_fragmenting = kwargs.pop('color_fragmenting', cmap(7))
color_insertion = kwargs.pop('color_insertion', cmap(5))
# Show deletions:
current_score = "D"
current_x_start = 0
current_x_end = event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_deletion)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, "{:.0f}".format(event_results[current_score]*100/event_results["total_gt"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show fragmented events:
current_score = "F"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_fragmented)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_gt"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show fragmented and merged events:
current_score = "FM"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_fragmented_merged)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_gt"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show merged events:
current_score = "M"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_merged)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_gt"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show correct events:
current_score = "C"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_correct)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_gt"]) + "%/" + "{:.0f}".format(event_results[current_score] * 100 / event_results["total_det"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show merging detections:
current_score = "M'"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_merging)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_det"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show fragmenting and merging detections:
current_score = "FM'"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_merging_fragmenting)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_det"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show fragmenting detections:
current_score = "F'"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_fragmenting)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_det"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Show insertions:
current_score = "I'"
current_x_start = current_x_end
current_x_end += event_results[current_score]
plt.axvspan(current_x_start, current_x_end, y_min, y_max, color=color_insertion)
if event_results[current_score] > 0:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_1, current_score, fontsize=fontsize,
horizontalalignment='center', verticalalignment='center')
if use_percentage:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2,
"{:.0f}".format(event_results[current_score] * 100 / event_results["total_det"]) + "%",
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
else:
plt.text((current_x_start + current_x_end) / 2 - text_x_offset, text_y_pos_2, str(event_results[current_score]),
fontsize=fontsize, horizontalalignment='center', verticalalignment='center')
# Draw line for total events:
plt.axvspan(0, event_results["total_gt"], y_max, y_max + width, color="black")
plt.axvspan( total - event_results["total_det"], total, y_min, y_min - width, color="black")
plt.text((0 + event_results["total_gt"]) / 2, 0.8, "Actual events (total=" + str(event_results["total_gt"]) + ")",
fontsize=fontsize_extern, horizontalalignment='center', verticalalignment='center')
plt.text((2*total - event_results["total_det"]) / 2, 0.18, "Detected events (total=" + str(event_results["total_det"]) + ")",
horizontalalignment='center', fontsize=fontsize_extern, verticalalignment='center')
plt.tight_layout()
if kwargs.pop('show', True):
plt.show()
else:
plt.draw()
return fig
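
# A minimal, self-contained usage sketch for plot_event_analysis_diagram.
# The event counts below are made-up illustration values (kept consistent:
# D + F + FM + M + C equals total_gt, and C + M' + FM' + F' + I' equals
# total_det); they do not come from a real evaluation.
if __name__ == "__main__":
    example_counts = {
        "total_gt": 10, "total_det": 9,
        "D": 1, "F": 2, "FM": 1, "M": 1, "C": 5,
        "M'": 1, "FM'": 1, "F'": 1, "I'": 1,
    }
    plot_event_analysis_diagram(example_counts)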
| mit |
cloud9ers/gurumate | environment/share/doc/ipython/examples/parallel/options/mcpricer.py | 2 | 3552 | # <nbformat>2</nbformat>
# <markdowncell>
# # Parallel Monto-Carlo options pricing
# <markdowncell>
# ## Problem setup
# <codecell>
from __future__ import print_function
import sys
import time
from IPython.parallel import Client
import numpy as np
from mckernel import price_options
from matplotlib import pyplot as plt
# <codecell>
cluster_profile = "default"
price = 100.0 # Initial price
rate = 0.05 # Interest rate
days = 260 # Days to expiration
paths = 10000 # Number of MC paths
n_strikes = 6 # Number of strike values
min_strike = 90.0 # Min strike price
max_strike = 110.0 # Max strike price
n_sigmas = 5 # Number of volatility values
min_sigma = 0.1 # Min volatility
max_sigma = 0.4 # Max volatility
# <codecell>
strike_vals = np.linspace(min_strike, max_strike, n_strikes)
sigma_vals = np.linspace(min_sigma, max_sigma, n_sigmas)
# <markdowncell>
# ## Parallel computation across strike prices and volatilities
# <markdowncell>
# The Client is used to setup the calculation and works with all engines.
# <codecell>
c = Client(profile=cluster_profile)
# <markdowncell>
# A LoadBalancedView is an interface to the engines that provides dynamic load
# balancing at the expense of not knowing which engine will execute the code.
# <codecell>
view = c.load_balanced_view()
# <codecell>
print("Strike prices: ", strike_vals)
print("Volatilities: ", sigma_vals)
# <markdowncell>
# Submit tasks for each (strike, sigma) pair.
# <codecell>
t1 = time.time()
async_results = []
for strike in strike_vals:
for sigma in sigma_vals:
ar = view.apply_async(price_options, price, strike, sigma, rate, days, paths)
async_results.append(ar)
# <codecell>
print("Submitted tasks: ", len(async_results))
# <markdowncell>
# Block until all tasks are completed.
# <codecell>
c.wait(async_results)
t2 = time.time()
t = t2-t1
print("Parallel calculation completed, time = %s s" % t)
# <markdowncell>
# ## Process and visualize results
# <markdowncell>
# Get the results using the `get` method:
# <codecell>
results = [ar.get() for ar in async_results]
# <markdowncell>
# Assemble the result into a structured NumPy array.
# <codecell>
prices = np.empty(n_strikes*n_sigmas,
dtype=[('ecall',float),('eput',float),('acall',float),('aput',float)]
)
for i, res in enumerate(results):
    prices[i] = tuple(res)
prices.shape = (n_strikes, n_sigmas)
# <markdowncell>
# Plot the value of the European call in (volatility, strike) space.
# <codecell>
plt.figure()
plt.contourf(sigma_vals, strike_vals, prices['ecall'])
plt.axis('tight')
plt.colorbar()
plt.title('European Call')
plt.xlabel("Volatility")
plt.ylabel("Strike Price")
# <markdowncell>
# Plot the value of the Asian call in (volatility, strike) space.
# <codecell>
plt.figure()
plt.contourf(sigma_vals, strike_vals, prices['acall'])
plt.axis('tight')
plt.colorbar()
plt.title("Asian Call")
plt.xlabel("Volatility")
plt.ylabel("Strike Price")
# <markdowncell>
# Plot the value of the European put in (volatility, strike) space.
# <codecell>
plt.figure()
plt.contourf(sigma_vals, strike_vals, prices['eput'])
plt.axis('tight')
plt.colorbar()
plt.title("European Put")
plt.xlabel("Volatility")
plt.ylabel("Strike Price")
# <markdowncell>
# Plot the value of the Asian put in (volatility, strike) space.
# <codecell>
plt.figure()
plt.contourf(sigma_vals, strike_vals, prices['aput'])
plt.axis('tight')
plt.colorbar()
plt.title("Asian Put")
plt.xlabel("Volatility")
plt.ylabel("Strike Price")
# <codecell>
plt.show()
| lgpl-3.0 |
mattilyra/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or class two, using the logistic curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
cactusbin/nyt | matplotlib/lib/matplotlib/tests/test_text.py | 2 | 6893 | from __future__ import print_function
import numpy as np
import matplotlib
from matplotlib.testing.decorators import image_comparison, knownfailureif, cleanup
import matplotlib.pyplot as plt
import warnings
from nose.tools import with_setup
@image_comparison(baseline_images=['font_styles'])
def test_font_styles():
from matplotlib import _get_data_path
data_path = _get_data_path()
def find_matplotlib_font(**kw):
prop = FontProperties(**kw)
path = findfont(prop, directory=data_path)
return FontProperties(fname=path)
from matplotlib.font_manager import FontProperties, findfont
warnings.filterwarnings('ignore','findfont: Font family \[\'Foo\'\] '+ \
'not found. Falling back to .',
UserWarning,
module='matplotlib.font_manager')
fig = plt.figure()
ax = plt.subplot( 1, 1, 1 )
normalFont = find_matplotlib_font( family = "sans-serif",
style = "normal",
variant = "normal",
size = 14,
)
ax.annotate( "Normal Font", (0.1, 0.1), xycoords='axes fraction',
fontproperties = normalFont )
boldFont = find_matplotlib_font( family = "Foo",
style = "normal",
variant = "normal",
weight = "bold",
stretch = 500,
size = 14,
)
ax.annotate( "Bold Font", (0.1, 0.2), xycoords='axes fraction',
fontproperties = boldFont )
boldItemFont = find_matplotlib_font( family = "sans serif",
style = "italic",
variant = "normal",
weight = 750,
stretch = 500,
size = 14,
)
ax.annotate( "Bold Italic Font", (0.1, 0.3), xycoords='axes fraction',
fontproperties = boldItemFont )
lightFont = find_matplotlib_font( family = "sans-serif",
style = "normal",
variant = "normal",
weight = 200,
stretch = 500,
size = 14,
)
ax.annotate( "Light Font", (0.1, 0.4), xycoords='axes fraction',
fontproperties = lightFont )
condensedFont = find_matplotlib_font( family = "sans-serif",
style = "normal",
variant = "normal",
weight = 500,
stretch = 100,
size = 14,
)
ax.annotate( "Condensed Font", (0.1, 0.5), xycoords='axes fraction',
fontproperties = condensedFont )
ax.set_xticks([])
ax.set_yticks([])
@image_comparison(baseline_images=['multiline'])
def test_multiline():
fig = plt.figure()
ax = plt.subplot(1, 1, 1)
ax.set_title("multiline\ntext alignment")
plt.text(0.2, 0.5, "TpTpTp\n$M$\nTpTpTp", size=20,
ha="center", va="top")
plt.text(0.5, 0.5, "TpTpTp\n$M^{M^{M^{M}}}$\nTpTpTp", size=20,
ha="center", va="top")
plt.text(0.8, 0.5, "TpTpTp\n$M_{q_{q_{q}}}$\nTpTpTp", size=20,
ha="center", va="top")
plt.xlim(0, 1)
plt.ylim(0, 0.8)
ax.set_xticks([])
ax.set_yticks([])
@image_comparison(baseline_images=['antialiased'], extensions=['png'])
def test_antialiasing():
matplotlib.rcParams['text.antialiased'] = True
fig = plt.figure(figsize=(5.25, 0.75))
fig.text(0.5, 0.75, "antialiased", horizontalalignment='center',
verticalalignment='center')
fig.text(0.5, 0.25, "$\sqrt{x}$", horizontalalignment='center',
verticalalignment='center')
# NOTE: We don't need to restore the rcParams here, because the
# test cleanup will do it for us. In fact, if we do it here, it
# will turn antialiasing back off before the images are actually
# rendered.
def test_afm_kerning():
from matplotlib.afm import AFM
from matplotlib.font_manager import findfont
fn = findfont("Helvetica", fontext="afm")
with open(fn, 'rb') as fh:
afm = AFM(fh)
assert afm.string_width_height('VAVAVAVAVAVA') == (7174.0, 718)
@image_comparison(baseline_images=['text_contains'], extensions=['png'])
def test_contains():
import matplotlib.backend_bases as mbackend
fig = plt.figure()
ax = plt.axes()
mevent = mbackend.MouseEvent('button_press_event', fig.canvas, 0.5,
0.5, 1, None)
xs = np.linspace(0.25, 0.75, 30)
ys = np.linspace(0.25, 0.75, 30)
xs, ys = np.meshgrid(xs, ys)
txt = plt.text(0.48, 0.52, 'hello world', ha='center', fontsize=30,
rotation=30)
# uncomment to draw the text's bounding box
# txt.set_bbox(dict(edgecolor='black', facecolor='none'))
# draw the text. This is important, as the contains method can only work
# when a renderer exists.
plt.draw()
for x, y in zip(xs.flat, ys.flat):
mevent.x, mevent.y = plt.gca().transAxes.transform_point([x, y])
contains, _ = txt.contains(mevent)
color = 'yellow' if contains else 'red'
# capture the viewLim, plot a point, and reset the viewLim
vl = ax.viewLim.frozen()
ax.plot(x, y, 'o', color=color)
ax.viewLim.set(vl)
@image_comparison(baseline_images=['titles'])
def test_titles():
# left and right side titles
fig = plt.figure()
ax = plt.subplot(1, 1, 1)
ax.set_title("left title", loc="left")
ax.set_title("right title", loc="right")
ax.set_xticks([])
ax.set_yticks([])
@image_comparison(baseline_images=['text_alignment'])
def test_alignment():
fig = plt.figure()
ax = plt.subplot(1, 1, 1)
x = 0.1
for rotation in (0, 30):
for alignment in ('top', 'bottom', 'baseline', 'center'):
ax.text(x, 0.5, alignment + " Tj", va=alignment, rotation=rotation,
bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.5))
ax.text(x, 1.0, r'$\sum_{i=0}^{j}$', va=alignment, rotation=rotation)
x += 0.1
ax.plot([0, 1], [0.5, 0.5])
ax.plot([0, 1], [1.0, 1.0])
ax.set_xlim([0, 1])
ax.set_ylim([0, 1.5])
ax.set_xticks([])
ax.set_yticks([])
| unlicense |
spectralDNS/shenfun | docs/paper/CG/CGpaper_dirichlet.py | 1 | 8842 | """
This script has been used to compute the Dirichlet results of the paper
Efficient spectral-Galerkin methods for second-order equations using different Chebyshev bases
The results have been computed using Python 3.9 and Shenfun 3.1.1.
The generalized Chebyshev-Tau results are computed with dedalus,
and are as such not part of this script.
"""
import sympy as sp
import numpy as np
import scipy.sparse.linalg as lin
import array_to_latex as a2l
from time import time
x = sp.Symbol('x', real=True)
fe = {}
rnd = {}
func = {}
def matvec(u_hat, f_hat, A, B, alpha, method):
"""Compute matrix vector product
Parameters
----------
u_hat : Function
The solution array
f_hat : Function
The right hand side array
A : SparseMatrix
The stiffness matrix
B : SparseMatrix
The mass matrix
alpha : number
The weight of the mass matrix
method : int
The chosen method
"""
from shenfun import chebyshev, la
if method == 1:
if alpha == 0:
A.scale *= -1
f_hat = A.matvec(u_hat, f_hat)
A.scale *= -1
else:
sol = chebyshev.la.Helmholtz(A, B, -1, alpha)
f_hat = sol.matvec(u_hat, f_hat)
else:
if alpha == 0:
A.scale *= -1
f_hat = A.matvec(u_hat, f_hat)
A.scale *= -1
else:
M = alpha*B - A
f_hat = M.matvec(u_hat, f_hat)
return f_hat
def get_solver(A, B, alpha, method):
"""Return optimal solver for given method
Parameters
----------
A : SparseMatrix
The stiffness matrix
B : SparseMatrix
The mass matrix
alpha : number
The weight of the mass matrix
method : int
The chosen method
"""
from shenfun import chebyshev, la
if method == 2:
if alpha == 0:
sol = la.TDMA(A*(-1))
else:
sol = la.PDMA(alpha*B - A)
elif method == 1:
if alpha == 0:
A.scale = -1
sol = chebyshev.la.ADD_Solve(A)
else:
sol = chebyshev.la.Helmholtz(A, B, -1, alpha)
elif method in (0, 3, 4):
if alpha == 0:
sol = chebyshev.la.TwoDMA(A*(-1))
else:
sol = chebyshev.la.FDMA(alpha*B-A)
elif method == 5:
if alpha == 0:
AA = A*(-1)
sol = AA.solve
else:
sol = la.TDMA(alpha*B-A)
else:
raise NotImplementedError
return sol
def solve(f_hat, u_hat, A, B, alpha, method):
"""Solve (alpha*B-A)u_hat = f_hat
Parameters
----------
f_hat : Function
The right hand side array
u_hat : Function
The solution array
A : SparseMatrix
The stiffness matrix
B : SparseMatrix
The mass matrix
alpha : number
The weight of the mass matrix
method : int
The chosen method
"""
from shenfun import extract_bc_matrices, Function
if isinstance(B, list):
u_hat.set_boundary_dofs()
bc_mat = extract_bc_matrices([B])
B = B[0]
w0 = Function(u_hat.function_space())
f_hat -= alpha*bc_mat[0].matvec(u_hat, w0)
sol = get_solver(A, B, alpha, method)
if method == 1 and alpha != 0:
u_hat = sol(u_hat, f_hat)
else:
u_hat = sol(f_hat, u_hat)
return u_hat
def main(N, method=0, alpha=0, returntype=0):
from shenfun import FunctionSpace, TrialFunction, TestFunction, \
inner, div, grad, chebyshev, SparseMatrix, Function, Array
global fe
basis = {0: ('ShenDirichlet', 'Heinrichs'),
1: ('ShenDirichlet', 'ShenDirichlet'),
2: ('Heinrichs', 'Heinrichs'),
3: ('DirichletU', 'ShenDirichlet'),
4: ('Orthogonal', 'ShenDirichlet'), # Quasi-Galerkin
5: ('ShenDirichlet', 'ShenDirichlet'), # Legendre
}
test, trial = basis[method]
if returntype == 2:
ue = sp.sin(100*sp.pi*x)
family = 'C' if method < 5 else 'L'
kw = {}
scaled = True if method in (0, 5) else False
if scaled:
kw['scaled'] = True
ST = FunctionSpace(N, family, basis=test, **kw)
TS = FunctionSpace(N, family, basis=trial, **kw)
wt = {0: 1, 1: 1, 2: 1, 3: 1-x**2, 4: 1, 5: 1}[method]
u = TrialFunction(TS)
v = TestFunction(ST)
A = inner(v*wt, div(grad(u)))
B = inner(v*wt, u)
if method == 4:
# Quasi
Q2 = chebyshev.quasi.QIGmat(N)
A = Q2*A
B = Q2*B
if method == 3:
k = np.arange(N-2)
K = SparseMatrix({0: 1/((k+1)*(k+2)*2)}, (N-2, N-2))
A[0] *= K[0]
A[2] *= K[0][:-2]
B[-2] *= K[0][2:]
B[0] *= K[0]
B[2] *= K[0][:-2]
B[4] *= K[0][:-4]
if returntype == 0:
M = alpha*B.diags()-A.diags()
con = np.linalg.cond(M.toarray())
elif returntype == 1:
# Use rnd to get the same random numbers for all methods
buf = rnd.get(N, np.random.random(N))
if not N in rnd:
rnd[N] = buf
v = Function(TS, buffer=buf)
v[-2:] = 0
u_hat = Function(TS)
f_hat = Function(TS)
f_hat = matvec(v, f_hat, A, B, alpha, method)
u_hat = solve(f_hat, u_hat, A, B, alpha, method)
con = np.abs(u_hat-v).max()
elif returntype == 2:
fe = alpha*ue - ue.diff(x, 2)
f_hat = Function(ST)
fj = Array(ST, buffer=fe)
if wt != 1:
fj *= np.sin((np.arange(N)+0.5)*np.pi/N)**2
f_hat = ST.scalar_product(fj, f_hat, fast_transform=True)
if method == 4:
f_hat[:-2] = Q2.diags('csc')*f_hat
if method == 3:
f_hat[:-2] *= K[0]
sol = get_solver(A, B, alpha, method)
u_hat = Function(TS)
u_hat = solve(f_hat, u_hat, A, B, alpha, method)
uj = Array(TS)
uj = TS.backward(u_hat, uj, fast_transform=True)
ua = Array(TS, buffer=ue)
con = np.sqrt(inner(1, (uj-ua)**2))
return con
if __name__ == '__main__':
import matplotlib.pyplot as plt
import argparse
import os
import sys
parser = argparse.ArgumentParser(description='Solve the Helmholtz problem with Dirichlet boundary conditions')
parser.add_argument('--return_type', action='store', type=int, required=True)
parser.add_argument('--include_legendre', action='store_true')
parser.add_argument('--verbose', '-v', action='count', default=0)
parser.add_argument('--plot', action='store_true')
parser.add_argument('--numba', action='store_true')
args = parser.parse_args()
if args.numba:
try:
import numba
os.environ['SHENFUN_OPTIMIZATION'] = 'NUMBA'
except ModuleNotFoundError:
            sys.stderr.write('Numba not found - using Cython\n')
cond = []
if args.return_type == 2:
N = (2**4,2**6, 2**8, 2**12, 2**16, 2**20)
elif args.return_type == 1:
N = (2**4, 2**12, 2**20)
else:
N = (32, 64, 128, 256, 512, 1024, 2048)
M = 6 if args.include_legendre else 5
alphas = (0, 1000)
if args.return_type in (0, 2):
for alpha in alphas:
cond.append([])
if args.verbose > 0:
print('alpha =', alpha)
for basis in range(M): # To include Legendre use --include_legendre (takes hours for N=2**20)
if args.verbose > 1:
print('Method =', basis)
cond[-1].append([])
for n in N:
if args.verbose > 2:
print('N =', n)
cond[-1][-1].append(main(n, basis, alpha, args.return_type))
linestyle = {0: 'solid', 1: 'dashed', 2: 'dotted'}
for i in range(len(cond)):
plt.loglog(N, cond[i][0], 'b',
N, cond[i][1], 'r',
N, cond[i][2], 'k',
N, cond[i][3], 'm',
N, cond[i][4], 'y',
linestyle=linestyle[i])
if args.include_legendre:
plt.loglog(N, cond[i][5], 'y', linestyle=linestyle[i])
a2l.to_ltx(np.array(cond)[i], frmt='{:6.2e}', print_out=True, mathform=False)
else:
for basis in range(M):
cond.append([])
if args.verbose > 1:
print('Method =', basis)
for alpha in alphas:
if args.verbose > 0:
print('alpha =', alpha)
for n in N:
if args.verbose > 2:
print('N =', n)
cond[-1].append(main(n, basis, alpha, args.return_type))
a2l.to_ltx(np.array(cond), frmt='{:6.2e}', print_out=True, mathform=False)
if args.plot:
plt.show()
| bsd-2-clause |
tracierenea/gnuradio | gr-filter/examples/channelize.py | 58 | 7003 | #!/usr/bin/env python
#
# Copyright 2009,2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
import sys, time
try:
from gnuradio import analog
except ImportError:
sys.stderr.write("Error: Program requires gr-analog.\n")
sys.exit(1)
try:
import scipy
from scipy import fftpack
except ImportError:
sys.stderr.write("Error: Program requires scipy (see: www.scipy.org).\n")
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
sys.stderr.write("Error: Program requires matplotlib (see: matplotlib.sourceforge.net).\n")
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 2000000 # number of samples to use
self._fs = 1000 # initial sampling rate
self._M = M = 9 # Number of channels to channelize
self._ifs = M*self._fs # initial sampling rate
# Create a set of taps for the PFB channelizer
self._taps = filter.firdes.low_pass_2(1, self._ifs, 475.50, 50,
attenuation_dB=100,
window=filter.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._M))
print "Number of taps: ", len(self._taps)
print "Number of channels: ", self._M
print "Taps per channel: ", tpc
# Create a set of signals at different frequencies
# freqs lists the frequencies of the signals that get stored
# in the list "signals", which then get summed together
self.signals = list()
self.add = blocks.add_cc()
freqs = [-70, -50, -30, -10, 10, 20, 40, 60, 80]
for i in xrange(len(freqs)):
f = freqs[i] + (M/2-M+i+1)*self._fs
self.signals.append(analog.sig_source_c(self._ifs, analog.GR_SIN_WAVE, f, 1))
self.connect(self.signals[i], (self.add,i))
self.head = blocks.head(gr.sizeof_gr_complex, self._N)
# Construct the channelizer filter
self.pfb = filter.pfb.channelizer_ccf(self._M, self._taps, 1)
# Construct a vector sink for the input signal to the channelizer
self.snk_i = blocks.vector_sink_c()
# Connect the blocks
self.connect(self.add, self.head, self.pfb)
self.connect(self.add, self.snk_i)
# Use this to play with the channel mapping
#self.pfb.set_channel_map([5,6,7,8,0,1,2,3,4])
# Create a vector sink for each of M output channels of the filter and connect it
self.snks = list()
for i in xrange(self._M):
self.snks.append(blocks.vector_sink_c())
self.connect((self.pfb, i), self.snks[i])
def main():
tstart = time.time()
tb = pfb_top_block()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig_in = pylab.figure(1, figsize=(16,9), facecolor="w")
fig1 = pylab.figure(2, figsize=(16,9), facecolor="w")
fig2 = pylab.figure(3, figsize=(16,9), facecolor="w")
Ns = 1000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
fs = tb._ifs
# Plot the input signal on its own figure
d = tb.snk_i.data()[Ns:Ne]
spin_f = fig_in.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(X))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
pin_f = spin_f.plot(f_in, X_in, "b")
spin_f.set_xlim([min(f_in), max(f_in)+1])
spin_f.set_ylim([-200.0, 50.0])
spin_f.set_title("Input Signal", weight="bold")
spin_f.set_xlabel("Frequency (Hz)")
spin_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
spin_t = fig_in.add_subplot(2, 1, 2)
pin_t = spin_t.plot(t_in, x_in.real, "b")
pin_t = spin_t.plot(t_in, x_in.imag, "r")
spin_t.set_xlabel("Time (s)")
spin_t.set_ylabel("Amplitude")
Ncols = int(scipy.floor(scipy.sqrt(tb._M)))
Nrows = int(scipy.floor(tb._M / Ncols))
if(tb._M % Ncols != 0):
Nrows += 1
# Plot each of the channels outputs. Frequencies on Figure 2 and
# time signals on Figure 3
fs_o = tb._fs
Ts_o = 1.0/fs_o
Tmax_o = len(d)*Ts_o
for i in xrange(len(tb.snks)):
# remove issues with the transients at the beginning
# also remove some corruption at the end of the stream
# this is a bug, probably due to the corner cases
d = tb.snks[i].data()[Ns:Ne]
sp1_f = fig1.add_subplot(Nrows, Ncols, 1+i)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs_o,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(X))
f_o = scipy.arange(-fs_o/2.0, fs_o/2.0, fs_o/float(X_o.size))
p2_f = sp1_f.plot(f_o, X_o, "b")
sp1_f.set_xlim([min(f_o), max(f_o)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title(("Channel %d" % i), weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
x_o = scipy.array(d)
t_o = scipy.arange(0, Tmax_o, Ts_o)
sp2_o = fig2.add_subplot(Nrows, Ncols, 1+i)
p2_o = sp2_o.plot(t_o, x_o.real, "b")
p2_o = sp2_o.plot(t_o, x_o.imag, "r")
sp2_o.set_xlim([min(t_o), max(t_o)+1])
sp2_o.set_ylim([-2, 2])
sp2_o.set_title(("Channel %d" % i), weight="bold")
sp2_o.set_xlabel("Time (s)")
sp2_o.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
robwarm/gpaw-symm | tools/niflheim-agts.py | 1 | 5426 | import os
import sys
import glob
import shutil
import subprocess
def cmd(c):
x = os.system(c)
assert x == 0, c
def fail(subject, email=None, filename='/dev/null', mailer='mail'):
assert mailer in ['mailx', 'mail', 'mutt']
import os
if email is not None:
if filename == '/dev/null':
assert os.system('mail -s "%s" %s < %s' %
(subject, email, filename)) == 0
else: # attachments
filenames = filename.split()
if mailer == 'mailx': # new mailx (12?)
attach = ''
for f in filenames:
attach += ' -a %s ' % f
# send with empty body
assert os.system('echo | mail %s -s "%s" %s' %
(attach, subject, email)) == 0
elif mailer == 'mail': # old mailx (8?)
attach = '('
for f in filenames:
ext = os.path.splitext(f)[-1]
if ext:
flog = os.path.basename(f).replace(ext, '.log')
else:
flog = f
attach += 'uuencode %s %s&&' % (f, flog)
# remove final &&
attach = attach[:-2]
attach += ')'
assert os.system('%s | mail -s "%s" %s' %
(attach, subject, email)) == 0
else: # mutt
attach = ''
for f in filenames:
attach += ' -a %s ' % f
# send with empty body
assert os.system('mutt %s -s "%s" %s < /dev/null' %
(attach, subject, email)) == 0
raise SystemExit
if '--dir' in sys.argv:
i = sys.argv.index('--dir')
dir = os.path.abspath(sys.argv[i+1])
else:
dir = 'agts'
if '--email' in sys.argv:
i = sys.argv.index('--email')
email = sys.argv[i+1]
else:
email = None
assert os.path.isdir(dir)
gpawdir = os.path.join(dir, 'gpaw')
# remove the old run directory
if os.path.isdir(dir):
shutil.rmtree(dir)
os.mkdir(dir)
os.chdir(dir)
cmd('svn checkout https://svn.fysik.dtu.dk/projects/gpaw/trunk gpaw')
# a version of gpaw is needed for imports from within this script!
cmd("\
cd " + gpawdir + "&& \
source /home/camp/modulefiles.sh&& \
module load NUMPY&& \
python setup.py build_ext 2>&1 > build_ext.log")
# import gpaw from where it was installed
sys.path.insert(0, gpawdir)
cmd("echo '\
cd '" + gpawdir + "'&& \
source /home/camp/modulefiles.sh&& \
module load NUMPY&& \
module load open64/4.2.3-0 && \
module load openmpi/1.3.3-1.el5.fys.open64.4.2.3 && \
module load hdf5/1.8.6-5.el5.fys.open64.4.2.3.openmpi.1.3.3 && \
python setup.py --remove-default-flags --customize=\
doc/install/Linux/Niflheim/el5-xeon-open64-acml-4.4.0-acml-4.4.0-hdf-SL-2.0.1.py \
build_ext 2>&1 > thul.log' | ssh thul bash")
cmd("echo '\
cd '" + gpawdir + "'&& \
source /home/camp/modulefiles.sh&& \
module load NUMPY&& \
module load open64/4.2.3-0 && \
python setup.py --remove-default-flags --customize=\
doc/install/Linux/Niflheim/el5-opteron-open64-acml-4.4.0-acml-4.4.0-hdf-SL-2.0.1.py \
build_ext 2>&1 > fjorm.log' | ssh fjorm bash")
cmd("""wget --no-check-certificate --quiet \
http://wiki.fysik.dtu.dk/gpaw-files/gpaw-setups-latest.tar.gz && \
tar xzf gpaw-setups-latest.tar.gz && \
rm gpaw-setups-latest.tar.gz && \
mv gpaw-setups-[0-9]* gpaw/gpaw-setups""")
cmd('svn export https://svn.fysik.dtu.dk/projects/ase/trunk ase')
# ase needed
sys.path.insert(0, '%s/ase' % dir)
from gpaw.test.big.agts import AGTSQueue
from gpaw.test.big.niflheim import NiflheimCluster
queue = AGTSQueue()
queue.collect()
cluster = NiflheimCluster(asepath=os.path.join(dir, 'ase'),
setuppath=os.path.join(gpawdir, 'gpaw-setups'))
# Example below is confusing: job.script must NOT be the *.agts.py script,
# but the actual python script to be run!
# testsuite.agts.py does both: see gpaw/test/big/miscellaneous/testsuite.agts.py
#queue.jobs = [job for job in queue.jobs if job.script == 'testsuite.agts.py']
nfailed = queue.run(cluster)
gfiles = os.path.join(dir, 'gpaw-files')
if not os.path.isdir(gfiles):
os.mkdir(gfiles)
queue.copy_created_files(gfiles)
# make files readable by go
files = glob.glob(gfiles + '/*')
for f in files:
os.chmod(f, 0644)
from gpaw.version import version
subject = 'AGTS GPAW %s: ' % str(version)
# Send mail:
sfile = os.path.join(dir, 'status.log')
attach = sfile
if not nfailed:
subject += ' succeeded'
fail(subject, email, attach, mailer='mutt')
else:
subject += ' failed'
# attach failed tests error files
ft = [l.split()[0] for l in open(sfile).readlines() if 'FAILED' in l]
for t in ft:
ef = glob.glob(os.path.join(dir, t) + '.e*')
for f in ef:
attach += ' ' + f
fail(subject, email, attach, mailer='mutt')
if 0:
# Analysis:
import matplotlib
matplotlib.use('Agg')
from gpaw.test.big.analysis import analyse
user = os.environ['USER']
analyse(queue,
'../analysis/analyse.pickle', # file keeping history
'../analysis', # Where to dump figures
rev=niflheim.revision,
#mailto='gpaw-developers@listserv.fysik.dtu.dk',
mailserver='servfys.fysik.dtu.dk',
attachment='status.log')
| gpl-3.0 |
duncanmmacleod/gwpy | gwpy/plot/axes.py | 1 | 21895 | # -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2018-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Extension of `~matplotlib.axes.Axes` for gwpy
"""
import warnings
from functools import wraps
from math import log
from numbers import Number
import numpy
from astropy.time import Time
from matplotlib import rcParams
from matplotlib.artist import allow_rasterization
from matplotlib.axes import Axes as _Axes
from matplotlib.axes._base import _process_plot_var_args
from matplotlib.collections import PolyCollection
from matplotlib.lines import Line2D
from matplotlib.projections import register_projection
from . import (Plot, colorbar as gcbar)
from .colors import format_norm
from .gps import GPS_SCALES
from .legend import HandlerLine2D
from ..time import to_gps
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
def log_norm(func):
"""Wrap ``func`` to handle custom gwpy keywords for a LogNorm colouring
"""
@wraps(func)
def decorated_func(*args, **kwargs):
norm, kwargs = format_norm(kwargs)
kwargs['norm'] = norm
return func(*args, **kwargs)
return decorated_func
def xlim_as_gps(func):
"""Wrap ``func`` to handle pass limit inputs through `gwpy.time.to_gps`
"""
@wraps(func)
def wrapped_func(self, left=None, right=None, **kw):
if right is None and numpy.iterable(left):
left, right = left
kw['left'] = left
kw['right'] = right
gpsscale = self.get_xscale() in GPS_SCALES
for key in ('left', 'right'):
if gpsscale:
try:
kw[key] = numpy.longdouble(str(to_gps(kw[key])))
except TypeError:
pass
return func(self, **kw)
return wrapped_func
def restore_grid(func):
"""Wrap ``func`` to preserve the Axes current grid settings.
"""
@wraps(func)
def wrapped_func(self, *args, **kwargs):
try:
grid = (
self.xaxis._minor_tick_kw["gridOn"],
self.xaxis._major_tick_kw["gridOn"],
self.yaxis._minor_tick_kw["gridOn"],
self.yaxis._major_tick_kw["gridOn"],
)
except KeyError: # matplotlib < 3.3.3
grid = (self.xaxis._gridOnMinor, self.xaxis._gridOnMajor,
self.yaxis._gridOnMinor, self.yaxis._gridOnMajor)
try:
return func(self, *args, **kwargs)
finally:
# reset grid
self.xaxis.grid(grid[0], which="minor")
self.xaxis.grid(grid[1], which="major")
self.yaxis.grid(grid[2], which="minor")
self.yaxis.grid(grid[3], which="major")
return wrapped_func
# -- new Axes -----------------------------------------------------------------
class Axes(_Axes):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# handle Series in `ax.plot()`
self._get_lines = PlotArgsProcessor(self)
# reset data formatters (for interactive plots) to support
# GPS time display
self.fmt_xdata = self._fmt_xdata
self.fmt_ydata = self._fmt_ydata
@allow_rasterization
def draw(self, *args, **kwargs):
labels = {}
for ax in (self.xaxis, self.yaxis):
if ax.get_scale() in GPS_SCALES and ax.isDefault_label:
labels[ax] = ax.get_label_text()
trans = ax.get_transform()
epoch = float(trans.get_epoch())
unit = trans.get_unit_name()
iso = Time(epoch, format='gps', scale='utc').iso
utc = iso.rstrip('0').rstrip('.')
ax.set_label_text('Time [{0!s}] from {1!s} UTC ({2!r})'.format(
unit, utc, epoch))
try:
super().draw(*args, **kwargs)
finally:
for ax in labels: # reset labels
ax.isDefault_label = True
# -- auto-gps helpers -----------------------
def _fmt_xdata(self, x):
if self.get_xscale() in GPS_SCALES:
return str(to_gps(x))
return self.xaxis.get_major_formatter().format_data_short(x)
def _fmt_ydata(self, y):
if self.get_yscale() in GPS_SCALES:
return str(to_gps(y))
return self.yaxis.get_major_formatter().format_data_short(y)
set_xlim = xlim_as_gps(_Axes.set_xlim)
def set_epoch(self, epoch):
"""Set the epoch for the current GPS scale.
This method will fail if the current X-axis scale isn't one of
the GPS scales. See :ref:`gwpy-plot-gps` for more details.
Parameters
----------
epoch : `float`, `str`
GPS-compatible time or date object, anything parseable by
:func:`~gwpy.time.to_gps` is fine.
"""
scale = self.get_xscale()
return self.set_xscale(scale, epoch=epoch)
def get_epoch(self):
"""Return the epoch for the current GPS scale/
This method will fail if the current X-axis scale isn't one of
the GPS scales. See :ref:`gwpy-plot-gps` for more details.
"""
return self.get_xaxis().get_transform().get_epoch()
# -- overloaded plotting methods ------------
def scatter(self, x, y, c=None, **kwargs):
# scatter with auto-sorting by colour
try:
if c is None:
raise ValueError
c_array = numpy.asanyarray(c, dtype=float)
except ValueError: # no colour array
pass
else:
c_sort = kwargs.pop('c_sort', True)
if c_sort:
sortidx = c_array.argsort()
x = numpy.asarray(x)[sortidx]
y = numpy.asarray(y)[sortidx]
c = numpy.asarray(c)[sortidx]
return super().scatter(x, y, c=c, **kwargs)
scatter.__doc__ = _Axes.scatter.__doc__.replace(
'marker :',
'c_sort : `bool`, optional, default: True\n'
' Sort scatter points by `c` array value, if given.\n\n'
'marker :',
)
@log_norm
def imshow(self, array, *args, **kwargs):
"""Display an image, i.e. data on a 2D regular raster.
If ``array`` is a :class:`~gwpy.types.Array2D` (e.g. a
:class:`~gwpy.spectrogram.Spectrogram`), then the defaults are
_different_ to those in the upstream
:meth:`~matplotlib.axes.Axes.imshow` method. Namely, the defaults are
- ``origin='lower'`` (coordinates start in lower-left corner)
- ``aspect='auto'`` (pixels are not forced to be square)
- ``interpolation='none'`` (no image interpolation is used)
In all other usage, the defaults from the upstream matplotlib method
are unchanged.
Parameters
----------
array : array-like or PIL image
The image data.
*args, **kwargs
All arguments and keywords are passed to the inherited
:meth:`~matplotlib.axes.Axes.imshow` method.
See also
--------
matplotlib.axes.Axes.imshow
for details of the image rendering
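
        Examples
        --------
        A minimal sketch using a synthetic `~gwpy.spectrogram.Spectrogram`
        (random illustration data only):

        >>> import numpy
        >>> from gwpy.spectrogram import Spectrogram
        >>> from matplotlib import pyplot
        >>> import gwpy.plot  # to get gwpy's Axes
        >>> spec = Spectrogram(numpy.random.rand(10, 20), dt=1, df=1)
        >>> fig = pyplot.figure()
        >>> ax = fig.gca()
        >>> ax.imshow(spec)
        >>> pyplot.show()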
"""
if hasattr(array, "yspan"): # Array2D
return self._imshow_array2d(array, *args, **kwargs)
image = super().imshow(array, *args, **kwargs)
self.autoscale(enable=None, axis='both', tight=None)
return image
def _imshow_array2d(self, array, origin='lower', interpolation='none',
aspect='auto', **kwargs):
"""Render an `~gwpy.types.Array2D` using `Axes.imshow`
"""
# NOTE: If you change the defaults for this method, please update
# the docstring for `imshow` above.
# calculate extent
extent = tuple(array.xspan) + tuple(array.yspan)
if self.get_xscale() == 'log' and extent[0] == 0.:
extent = (1e-300,) + extent[1:]
if self.get_yscale() == 'log' and extent[2] == 0.:
extent = extent[:2] + (1e-300,) + extent[3:]
kwargs.setdefault('extent', extent)
return self.imshow(array.value.T, origin=origin, aspect=aspect,
interpolation=interpolation, **kwargs)
@restore_grid
@log_norm
def pcolormesh(self, *args, **kwargs):
"""Create a pseudocolor plot with a non-regular rectangular grid.
When using GWpy, this method can be called with a single argument
that is an :class:`~gwpy.types.Array2D`, for which the ``X`` and ``Y``
coordinate arrays will be determined from the indexing.
In all other usage, all ``args`` and ``kwargs`` are passed directly
to :meth:`~matplotlib.axes.Axes.pcolormesh`.
Notes
-----
Unlike the upstream :meth:`matplotlib.axes.Axes.pcolormesh`,
this method respects the current grid settings.
See also
--------
matplotlib.axes.Axes.pcolormesh
"""
if len(args) == 1 and hasattr(args[0], "yindex"): # Array2D
return self._pcolormesh_array2d(*args, **kwargs)
return super().pcolormesh(*args, **kwargs)
def _pcolormesh_array2d(self, array, *args, **kwargs):
"""Render an `~gwpy.types.Array2D` using `Axes.pcolormesh`
"""
x = numpy.concatenate((array.xindex.value, array.xspan[-1:]))
y = numpy.concatenate((array.yindex.value, array.yspan[-1:]))
xcoord, ycoord = numpy.meshgrid(x, y, copy=False, sparse=True)
return self.pcolormesh(xcoord, ycoord, array.value.T, *args, **kwargs)
def hist(self, x, *args, **kwargs):
x = numpy.asarray(x)
# re-format weights as array if given as float
weights = kwargs.get('weights', None)
if isinstance(weights, Number):
kwargs['weights'] = numpy.ones_like(x) * weights
# calculate log-spaced bins on-the-fly
if (kwargs.pop('logbins', False) and
not numpy.iterable(kwargs.get('bins', None))):
nbins = kwargs.get('bins', None) or rcParams.get('hist.bins', 30)
# get range
hrange = kwargs.pop('range', None)
if hrange is None:
try:
hrange = numpy.min(x), numpy.max(x)
except ValueError as exc:
if str(exc).startswith('zero-size array'): # no data
exc.args = ('cannot generate log-spaced histogram '
'bins for zero-size array, '
'please pass `bins` or `range` manually',)
raise
# log-scale the axis and extract the base
if kwargs.get('orientation') == 'horizontal':
self.set_yscale('log', nonposy='clip')
logbase = self.yaxis._scale.base
else:
self.set_xscale('log', nonposx='clip')
logbase = self.xaxis._scale.base
# generate the bins
kwargs['bins'] = numpy.logspace(
log(hrange[0], logbase), log(hrange[1], logbase),
nbins+1, endpoint=True)
return super().hist(x, *args, **kwargs)
hist.__doc__ = _Axes.hist.__doc__.replace(
'color :',
'logbins : boolean, optional\n'
' If ``True``, use logarithmically-spaced histogram bins.\n\n'
' Default is ``False``\n\n'
'color :')
# -- new plotting methods -------------------
def plot_mmm(self, data, lower=None, upper=None, **kwargs):
"""Plot a `Series` as a line, with a shaded region around it.
The ``data`` `Series` is drawn, while the ``lower`` and ``upper``
`Series` are plotted lightly below and above, with a fill
between them and the ``data``.
All three `Series` should have the same `~Series.index` array.
Parameters
----------
data : `~gwpy.types.Series`
Data to plot normally.
lower : `~gwpy.types.Series`
Lower boundary (on Y-axis) for shade.
upper : `~gwpy.types.Series`
Upper boundary (on Y-axis) for shade.
**kwargs
Any other keyword arguments acceptable for
:meth:`~matplotlib.Axes.plot`.
Returns
-------
artists : `tuple`
All of the drawn artists:
            - `~matplotlib.lines.Line2D` for ``data``,
- `~matplotlib.lines.Line2D` for ``lower``, if given
- `~matplotlib.lines.Line2D` for ``upper``, if given
            - `~matplotlib.collections.PolyCollection` for shading
See also
--------
matplotlib.axes.Axes.plot
for a full description of acceptable ``*args`` and ``**kwargs``
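        Examples
        --------
        Assuming three pre-computed `Series` (names here are illustrative):

        >>> ax.plot_mmm(median_ts, lower=min_ts, upper=max_ts)  # doctest: +SKIP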
"""
alpha = kwargs.pop('alpha', .1)
# plot mean
line, = self.plot(data, **kwargs)
out = [line]
# modify keywords for shading
kwargs.update({
'label': '',
'linewidth': line.get_linewidth() / 2,
'color': line.get_color(),
'alpha': alpha * 2,
})
# plot lower and upper Series
fill = [data.xindex.value, data.value, data.value]
for i, bound in enumerate((lower, upper)):
if bound is not None:
out.extend(self.plot(bound, **kwargs))
fill[i+1] = bound.value
# fill between
out.append(self.fill_between(
*fill, alpha=alpha, color=kwargs['color'],
rasterized=kwargs.get('rasterized', True)))
return out
def tile(self, x, y, w, h, color=None,
anchor='center', edgecolors='face', linewidth=0.8,
**kwargs):
"""Plot rectanguler tiles based onto these `Axes`.
``x`` and ``y`` give the anchor point for each tile, with
``w`` and ``h`` giving the extent in the X and Y axis respectively.
Parameters
----------
x, y, w, h : `array_like`, shape (n, )
Input data
color : `array_like`, shape (n, )
Array of amplitudes for tile color
anchor : `str`, optional
Anchor point for tiles relative to ``(x, y)`` coordinates, one of
- ``'center'`` - center tile on ``(x, y)``
- ``'ll'`` - ``(x, y)`` defines lower-left corner of tile
- ``'lr'`` - ``(x, y)`` defines lower-right corner of tile
- ``'ul'`` - ``(x, y)`` defines upper-left corner of tile
- ``'ur'`` - ``(x, y)`` defines upper-right corner of tile
**kwargs
Other keywords are passed to
:meth:`~matplotlib.collections.PolyCollection`
Returns
-------
collection : `~matplotlib.collections.PolyCollection`
the collection of tiles drawn
Examples
--------
>>> import numpy
>>> from matplotlib import pyplot
>>> import gwpy.plot # to get gwpy's Axes
>>> x = numpy.arange(10)
>>> y = numpy.arange(x.size)
>>> w = numpy.ones_like(x) * .8
>>> h = numpy.ones_like(x) * .8
>>> fig = pyplot.figure()
>>> ax = fig.gca()
>>> ax.tile(x, y, w, h, anchor='ll')
>>> pyplot.show()
"""
# get color and sort
if color is not None and kwargs.get('c_sort', True):
sortidx = color.argsort()
x = x[sortidx]
y = y[sortidx]
w = w[sortidx]
h = h[sortidx]
color = color[sortidx]
# define how to make a polygon for each tile
if anchor == 'll':
def _poly(x, y, w, h):
return ((x, y), (x, y+h), (x+w, y+h), (x+w, y))
elif anchor == 'lr':
def _poly(x, y, w, h):
return ((x-w, y), (x-w, y+h), (x, y+h), (x, y))
elif anchor == 'ul':
def _poly(x, y, w, h):
return ((x, y-h), (x, y), (x+w, y), (x+w, y-h))
elif anchor == 'ur':
def _poly(x, y, w, h):
return ((x-w, y-h), (x-w, y), (x, y), (x, y-h))
elif anchor == 'center':
def _poly(x, y, w, h):
return ((x-w/2., y-h/2.), (x-w/2., y+h/2.),
(x+w/2., y+h/2.), (x+w/2., y-h/2.))
else:
raise ValueError("Unrecognised tile anchor {!r}".format(anchor))
# build collection
cmap = kwargs.pop('cmap', rcParams['image.cmap'])
coll = PolyCollection((_poly(*tile) for tile in zip(x, y, w, h)),
edgecolors=edgecolors, linewidth=linewidth,
**kwargs)
if color is not None:
coll.set_array(color)
coll.set_cmap(cmap)
out = self.add_collection(coll)
self.autoscale_view()
return out
# -- overloaded auxiliary methods -----------
def legend(self, *args, **kwargs):
# handle deprecated keywords
linewidth = kwargs.pop("linewidth", None)
if linewidth:
warnings.warn(
"the linewidth keyword to gwpy.plot.Axes.legend has been "
"deprecated and will be removed in a future release; "
"please update your code to use a custom legend handler, "
"e.g. gwpy.plot.legend.HandlerLine2D.",
DeprecationWarning,
)
alpha = kwargs.pop("alpha", None)
if alpha:
kwargs.setdefault("framealpha", alpha)
warnings.warn(
"the alpha keyword to gwpy.plot.Axes.legend has been "
"deprecated and will be removed in a future release; "
"use framealpha instead.",
DeprecationWarning,
)
# build custom handler
handler_map = kwargs.setdefault("handler_map", dict())
if isinstance(handler_map, dict):
handler_map.setdefault(Line2D, HandlerLine2D(linewidth or 6))
# create legend
return super().legend(*args, **kwargs)
legend.__doc__ = _Axes.legend.__doc__.replace(
"Call signatures",
""".. note::
This method uses a custom default legend handler for
`~matplotlib.lines.Line2D` objects, with increased linewidth relative
to the upstream :meth:`~matplotlib.axes.Axes.legend` method.
To disable this, pass ``handler_map=None``, or create and pass your
own handler class. See :ref:`gwpy-plot-legend` for more details.
Call signatures""",
)
def colorbar(self, mappable=None, **kwargs):
"""Add a `~matplotlib.colorbar.Colorbar` to these `Axes`
Parameters
----------
mappable : matplotlib data collection, optional
collection against which to map the colouring, default will
be the last added mappable artist (collection or image)
fraction : `float`, optional
fraction of space to steal from these `Axes` to make space
for the new axes, default is ``0.`` if ``use_axesgrid=True``
is given (default), otherwise default is ``.15`` to match
the upstream matplotlib default.
**kwargs
other keyword arguments to be passed to the
:meth:`Plot.colorbar` generator
Returns
-------
cbar : `~matplotlib.colorbar.Colorbar`
the newly added `Colorbar`
See also
--------
Plot.colorbar
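        Examples
        --------
        Assuming an image or collection was just drawn on these axes
        (keyword arguments illustrative):

        >>> ax.colorbar(label='Amplitude')  # doctest: +SKIP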
"""
fig = self.get_figure()
if kwargs.get('use_axesgrid', True):
kwargs.setdefault('fraction', 0.)
if kwargs.get('fraction', 0.) == 0.:
kwargs.setdefault('use_axesgrid', True)
mappable, kwargs = gcbar.process_colorbar_kwargs(
fig, mappable=mappable, ax=self, **kwargs)
if isinstance(fig, Plot):
# either we have created colorbar Axes using axesgrid1, or
# the user already gave use_axesgrid=False, so we forcefully
# disable axesgrid here in case fraction == 0., which causes
# gridspec colorbars to fail.
kwargs['use_axesgrid'] = False
return fig.colorbar(mappable, **kwargs)
# override default Axes with this one by registering a projection with the
# same name
register_projection(Axes)
# -- overload Axes.plot() to handle Series ------------------------------------
class PlotArgsProcessor(_process_plot_var_args):
"""This class controls how ax.plot() works
"""
def __call__(self, *args, **kwargs):
"""Find `Series` data in `plot()` args and unwrap
"""
newargs = []
while args:
# strip first argument
this, args = args[:1], args[1:]
            # if it's a 1-D Series, then parse it as (xindex, value)
if hasattr(this[0], "xindex") and this[0].ndim == 1:
this = (this[0].xindex.value, this[0].value)
            # otherwise treat as normal (the next argument must be the y-data)
else:
this += args[:1]
args = args[1:]
# allow colour specs
if args and isinstance(args[0], str):
this += args[0],
args = args[1:]
newargs.extend(this)
return super().__call__(*newargs, **kwargs)
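# -- usage sketch (illustrative only, not part of the library) -----------------
# With this module imported, gwpy axes accept Series objects directly:
#     ax.plot(series)                  # unwrapped to (series.xindex, series.value)
#     ax.plot_mmm(med, lower, upper)   # line plus shaded lower/upper bounds
#     ax.tile(x, y, w, h, color=amp)   # amplitude-coloured tiles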
| gpl-3.0 |
phronesis-mnemosyne/census-schema-alignment | wit/wit/dev/authorship-embedding.py | 1 | 4685 | import pandas as pd
import urllib2
from pprint import pprint
from matplotlib import pyplot as plt
from bs4 import BeautifulSoup
from hashlib import md5
import sys
sys.path.append('/Users/BenJohnson/projects/what-is-this/wit/')
from wit import *
pd.set_option('display.max_rows', 50)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 120)
np.set_printoptions(linewidth=250)
# May need to add things here to make this run the same way each time
np.random.seed(123)
# --
num_features = 10000 # Words
max_len = 100 # Words
formatter = KerasFormatter(num_features, max_len)
# --
# Load data
orig = pd.read_csv('/Users/BenJohnson/projects/laundering/sec/edward/analysis/crowdsar/crowdsar_user.csv', sep = '|', header = None)
orig.columns = ('hash', 'obj')
orig['id'] = 0
# Get the most frequent posters
frequent_posters = orig.hash.value_counts().head(100).index
nfrequent_posters = orig.hash.value_counts().head(100).tail(25).index
sub = orig[orig.hash.isin(frequent_posters)]
sel = np.random.uniform(0, 1, sub.shape[0]) > .9
sub = sub[sel].drop_duplicates()
sel2 = np.random.uniform(0, 1, sub.shape[0]) > .5
df = sub[sel2]
tdf = sub[~sel2]
tdf2 = orig[orig.hash.isin(nfrequent_posters)].drop_duplicates()
sel3 = np.random.uniform(0, 1, tdf2.shape[0]) > .9
tdf2 = tdf2[sel3]
# --
train = make_triplet_train(df, N = 500)
trn, trn_levs = formatter.format(train, ['obj'], 'hash')
awl, awl_levs = formatter.format(train.drop_duplicates(), ['obj'], 'hash')
tst, tst_levs = formatter.format(tdf, ['obj'], 'hash')
out, out_levs = formatter.format(tdf2, ['obj'], 'hash')
# --
# Define model
recurrent_size = 64
dense_size = 16
model = Sequential()
model.add(Embedding(num_features, recurrent_size))
model.add(LSTM(recurrent_size, return_sequences = True))
model.add(LSTM(recurrent_size))
model.add(Dense(dense_size))
model.add(Activation('unit_norm'))
model.compile(loss = 'triplet_cosine', optimizer = 'adam')
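# The model maps each (up to) 100-token document to a 16-d unit vector;
# the triplet-cosine loss pulls same-author texts together on the hypersphere.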
# --
# Train model
for i in range(60):
ms = modsel(train.shape[0], N = 3)
fitting = model.fit(
trn['x'][0][ms], trn['x'][0][ms],
nb_epoch = 3,
batch_size = 3 * 250,
shuffle = False
)
json_string = model.to_json()
open('author2_architecture.json', 'w').write(json_string)
model.save_weights('author2_weights.h5')
tr_preds = model.predict(awl['x'][0], verbose = True, batch_size = 250)
colors = awl['y'].argmax(1)
plt.scatter(tr_preds[:,0], tr_preds[:,1], c = colors)
plt.show()
# ------------------------------------------------
# Load pretrained model
#
# from keras.models import model_from_json
# model = model_from_json(open('author_architecture.json').read())
# model.load_weights('author_weights.h5')
# <<
shp = awl['y'].shape[1]
amax = awl['y'].argmax(1)
sims = np.zeros( (awl['y'].shape[1], awl['y'].shape[1]) )
tmps = [tr_preds[amax == i] for i in range(shp)]
for i in range(shp):
print i
a = tmps[i]
for j in range(shp):
b = tmps[j]
mn = np.mean(np.dot(a, b.T) > .8)
sims[i][j] = mn
np.mean(np.max(sims, 0) - np.diag(sims))
np.mean(np.max(sims, 0) - sims)
np.mean(sims.argmax(1) == np.arange(sims.shape[0]))
# >>
ts_preds = model.predict(tst['x'][0], verbose = True, batch_size = 250)
tmpsel = np.random.choice(ts_preds.shape[0], 5000)
sim = np.dot(ts_preds[tmpsel], tr_preds.T)
np.mean(tst['y'].argmax(1)[tmpsel] == awl['y'].argmax(1)[sim.argmax(1)])
# --
out_preds = model.predict(out['x'][0], verbose = True, batch_size = 250)
outsims = np.dot(out_preds, out_preds.T)
shp = out['y'].shape[1]
amax = out['y'].argmax(1)
sims = np.zeros( (out['y'].shape[1], out['y'].shape[1]) )
tmps = [out_preds[amax == i] for i in range(shp)]
for i in range(shp):
print i
a = tmps[i]
for j in range(shp):
b = tmps[j]
mn = np.mean(np.dot(a, b.T) > .8)
sims[i][j] = mn
sims.argmax(1) == np.arange(sims.shape[0])
np.fill_diagonal(outsims, 0)
rowmax = outsims.argmax(1)
by_user = map(lambda K: np.mean(amax[rowmax[amax == K]] == K), range(out['y'].shape[1]))
pprint(by_user)
# >>
from sklearn.cluster import KMeans
lens = np.array(tdf2.obj.apply(lambda x: len(str(x))))
km = KMeans(n_clusters = 26)
cl = km.fit_predict(out_preds[lens > 100])
amax = out['y'][lens > 100].argmax(1)
pd.crosstab(cl, amax)
# <<
# --
out_preds = model.predict(out['x'][0], verbose = True, batch_size = 250)
sel = np.random.uniform(0, 1, out_preds.shape[0]) > .5
outsims = np.dot(out_preds[sel], out_preds[~sel].T)
amax1 = out['y'].argmax(1)[sel]
amax2 = out['y'].argmax(1)[~sel]
conf = pd.crosstab(amax1, amax2[outsims.argmax(1)])
np.mean(np.array(conf).argmax(1) == range(conf.shape[0]))
| apache-2.0 |
vbraga/irismatch | src/iris_detection.py | 2 | 1707 | #!/usr/bin/env python2.7
# Imported from https://gitorious.org/hough-circular-transform
# License: GPLv3
# Date: Fri, Mar 7 2014
import matplotlib.pyplot as plt
import matplotlib.patches as plt_patches
import houghcirculartransform as hct
import numpy as np
def detect_iris(filename):
"""
    Example function call:
        detect_iris("../working-db/003L_3.png")
    For a trickier example, load 'test2.png'!
    (Can't find the circle? Try the debug mode!)
    ((Pro tip: try lowering the threshold...))
"""
CH = hct.CircularHough()
raw_image = plt.imread(filename)
raw_image = raw_image[:,:,0] # get the first channel
print "[DEBUG] Image shape is: " + str(raw_image.shape)
min_size = min(raw_image.shape)
# 0.1 to 0.8 from Daugman paper
lower_bound = int(0.1 * min_size) / 2
upper_bound = int(0.8 * min_size) / 2
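    # e.g. for a 320x280 image: min_size = 280, so radii from 14 to 112 px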
accumulator, radii = CH(raw_image, radii=np.arange(lower_bound, upper_bound, 3), threshold=0.01, binary=True, method='fft')
print "[DEBUG] Calling imshow"
plt.imshow(raw_image)
plt.title('Raw image (inverted)')
# Add appropriate circular patch to figure (thanks to MZ!):
for i, r in enumerate(radii):
# [Vitor] where i is the accumulator index and r is the radius
# accumulator a list of points
point = np.unravel_index(accumulator[i].argmax(), accumulator[i].shape)
try:
blob_circ = plt_patches.Circle((point[1], point[0]), r, fill=False, ec='red')
plt.gca().add_patch(blob_circ)
except ValueError:
print point, r
continue
# Fix axis distortion:
plt.axis('image')
plt.show()
if __name__ == '__main__':
detect_iris("../working-db/003L_3.png")
| gpl-2.0 |
rubikloud/scikit-learn | benchmarks/bench_plot_lasso_path.py | 301 | 4003 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but has a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
'n_informative': n_features / 10,
'effective_rank': min(n_samples, n_features) / 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
#ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
#ax.legend()
i += 1
plt.show()
| bsd-3-clause |
bendalab/thunderfish | thunderfish/pulseplots.py | 3 | 38137 | """
Plot and save key steps in pulses.py for visualizing the algorithm.
"""
import glob
import numpy as np
from scipy import stats
from matplotlib import rcParams, gridspec, ticker
import matplotlib.pyplot as plt
try:
from matplotlib.colors import colorConverter as cc
except ImportError:
import matplotlib.colors as cc
try:
from matplotlib.colors import to_hex
except ImportError:
from matplotlib.colors import rgb2hex as to_hex
from matplotlib.patches import ConnectionPatch, Rectangle
from matplotlib.lines import Line2D
import warnings
def warn(*args, **kwargs):
"""
Ignore all warnings.
"""
pass
warnings.warn=warn
# plotting parameters and colors:
rcParams['font.family'] = 'monospace'
cmap = plt.get_cmap("Dark2")
c_g = cmap(0)
c_o = cmap(1)
c_grey = cmap(7)
cmap_pts = [cmap(2), cmap(3)]
def darker(color, saturation):
""" Make a color darker.
From bendalab/plottools package.
Parameters
----------
color: dict or matplotlib color spec
A matplotlib color (hex string, name color string, rgb tuple)
or a dictionary with an 'color' or 'facecolor' key.
saturation: float
The smaller the saturation, the darker the returned color.
A saturation of 0 returns black.
A saturation of 1 leaves the color untouched.
A saturation of 2 returns white.
Returns
-------
color: string or dictionary
The darker color as a hexadecimal RGB string (e.g. '#rrggbb').
If `color` is a dictionary, a copy of the dictionary is returned
with the value of 'color' or 'facecolor' set to the darker color.
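    Examples
    --------
    A saturation of 0.5 returns a color halfway toward black, e.g.
    ``darker('#00ff00', 0.5)`` gives roughly '#008000'.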
"""
try:
c = color['color']
cd = dict(**color)
cd['color'] = darker(c, saturation)
return cd
except (KeyError, TypeError):
try:
c = color['facecolor']
cd = dict(**color)
cd['facecolor'] = darker(c, saturation)
return cd
except (KeyError, TypeError):
if saturation > 2:
                saturation = 2
if saturation > 1:
return lighter(color, 2.0-saturation)
if saturation < 0:
saturation = 0
r, g, b = cc.to_rgb(color)
rd = r*saturation
gd = g*saturation
bd = b*saturation
return to_hex((rd, gd, bd)).upper()
def lighter(color, lightness):
"""Make a color lighter
From bendalab/plottools package.
Parameters
----------
color: dict or matplotlib color spec
A matplotlib color (hex string, name color string, rgb tuple)
or a dictionary with an 'color' or 'facecolor' key.
lightness: float
The smaller the lightness, the lighter the returned color.
A lightness of 0 returns white.
A lightness of 1 leaves the color untouched.
A lightness of 2 returns black.
Returns
-------
color: string or dict
The lighter color as a hexadecimal RGB string (e.g. '#rrggbb').
If `color` is a dictionary, a copy of the dictionary is returned
with the value of 'color' or 'facecolor' set to the lighter color.
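    Examples
    --------
    A lightness of 0.5 returns a color halfway toward white, e.g.
    ``lighter('#008000', 0.5)`` gives roughly '#80C080'.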
"""
try:
c = color['color']
cd = dict(**color)
cd['color'] = lighter(c, lightness)
return cd
except (KeyError, TypeError):
try:
c = color['facecolor']
cd = dict(**color)
cd['facecolor'] = lighter(c, lightness)
return cd
except (KeyError, TypeError):
if lightness > 2:
lightness = 2
if lightness > 1:
return darker(color, 2.0-lightness)
if lightness < 0:
lightness = 0
r, g, b = cc.to_rgb(color)
rl = r + (1.0-lightness)*(1.0 - r)
gl = g + (1.0-lightness)*(1.0 - g)
bl = b + (1.0-lightness)*(1.0 - b)
return to_hex((rl, gl, bl)).upper()
def xscalebar(ax, x, y, width, wunit=None, wformat=None, ha='left', va='bottom',
lw=None, color=None, capsize=None, clw=None, **kwargs):
"""Horizontal scale bar with label.
From bendalab/plottools package.
Parameters
----------
ax: matplotlib axes
Axes where to draw the scale bar.
x: float
x-coordinate where to draw the scale bar in relative units of the axes.
y: float
y-coordinate where to draw the scale bar in relative units of the axes.
width: float
Length of the scale bar in units of the data's x-values.
wunit: string or None
Optional unit of the data's x-values.
wformat: string or None
Optional format string for formatting the label of the scale bar
or simply a string used for labeling the scale bar.
ha: 'left', 'right', or 'center'
Scale bar aligned left, right, or centered to (x, y)
va: 'top' or 'bottom'
Label of the scale bar either above or below the scale bar.
lw: int, float, None
Line width of the scale bar.
color: matplotlib color
Color of the scalebar.
capsize: float or None
If larger then zero draw cap lines at the ends of the bar.
The length of the lines is given in points (same unit as linewidth).
clw: int, float, None
Line width of the cap lines.
kwargs: key-word arguments
Passed on to `ax.text()` used to print the scale bar label.
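    Examples
    --------
    Draw a 10 ms scale bar in the lower right corner of an axes
    (coordinates are relative to the axes, values illustrative):

    >>> xscalebar(ax, 1.0, 0.0, 10.0, wunit='ms', ha='right')  # doctest: +SKIP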
"""
ax.autoscale(False)
# ax dimensions:
pixelx = np.abs(np.diff(ax.get_window_extent().get_points()[:,0]))[0]
pixely = np.abs(np.diff(ax.get_window_extent().get_points()[:,1]))[0]
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
unitx = xmax - xmin
unity = ymax - ymin
dxu = np.abs(unitx)/pixelx
dyu = np.abs(unity)/pixely
# transform x, y from relative units to axis units:
x = xmin + x*unitx
y = ymin + y*unity
# bar length:
if wformat is None:
wformat = '%.0f'
if width < 1.0:
wformat = '%.1f'
try:
ls = wformat % width
width = float(ls)
except TypeError:
ls = wformat
# bar:
if ha == 'left':
x0 = x
x1 = x+width
elif ha == 'right':
x0 = x-width
x1 = x
else:
x0 = x-0.5*width
x1 = x+0.5*width
# line width:
if lw is None:
lw = 2
# color:
if color is None:
color = 'k'
# scalebar:
lh = ax.plot([x0, x1], [y, y], '-', color=color, lw=lw,
solid_capstyle='butt', clip_on=False)
# get y position of line in figure pixel coordinates:
ly = np.array(lh[0].get_window_extent(ax.get_figure().canvas.get_renderer()))[0,1]
# caps:
if capsize is None:
capsize = 0
if clw is None:
clw = 0.5
if capsize > 0.0:
dy = capsize*dyu
ax.plot([x0, x0], [y-dy, y+dy], '-', color=color, lw=clw,
solid_capstyle='butt', clip_on=False)
ax.plot([x1, x1], [y-dy, y+dy], '-', color=color, lw=clw,
solid_capstyle='butt', clip_on=False)
# label:
if wunit:
ls += u'\u2009%s' % wunit
if va == 'top':
th = ax.text(0.5*(x0+x1), y, ls, clip_on=False,
ha='center', va='bottom', **kwargs)
# get y coordinate of text bottom in figure pixel coordinates:
ty = np.array(th.get_window_extent(ax.get_figure().canvas.get_renderer()))[0,1]
dty = ly+0.5*lw + 2.0 - ty
else:
th = ax.text(0.5*(x0+x1), y, ls, clip_on=False,
ha='center', va='top', **kwargs)
# get y coordinate of text bottom in figure pixel coordinates:
ty = np.array(th.get_window_extent(ax.get_figure().canvas.get_renderer()))[1,1]
dty = ly-0.5*lw - 2.0 - ty
th.set_position((0.5*(x0+x1), y+dyu*dty))
return x0, x1, y
def yscalebar(ax, x, y, height, hunit=None, hformat=None, ha='left', va='bottom',
lw=None, color=None, capsize=None, clw=None, **kwargs):
"""Vertical scale bar with label.
From bendalab/plottools package.
Parameters
----------
ax: matplotlib axes
Axes where to draw the scale bar.
x: float
x-coordinate where to draw the scale bar in relative units of the axes.
y: float
y-coordinate where to draw the scale bar in relative units of the axes.
height: float
Length of the scale bar in units of the data's y-values.
hunit: string
Unit of the data's y-values.
hformat: string or None
Optional format string for formatting the label of the scale bar
or simply a string used for labeling the scale bar.
ha: 'left' or 'right'
Label of the scale bar either to the left or to the right
of the scale bar.
va: 'top', 'bottom', or 'center'
Scale bar aligned above, below, or centered on (x, y).
lw: int, float, None
Line width of the scale bar.
color: matplotlib color
Color of the scalebar.
capsize: float or None
If larger then zero draw cap lines at the ends of the bar.
The length of the lines is given in points (same unit as linewidth).
clw: int, float
Line width of the cap lines.
kwargs: key-word arguments
Passed on to `ax.text()` used to print the scale bar label.
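    Examples
    --------
    Draw a 1 mV scale bar centered on the left edge of an axes
    (values illustrative):

    >>> yscalebar(ax, 0.0, 0.5, 1.0, hunit='mV', va='center')  # doctest: +SKIP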
"""
ax.autoscale(False)
# ax dimensions:
pixelx = np.abs(np.diff(ax.get_window_extent().get_points()[:,0]))[0]
pixely = np.abs(np.diff(ax.get_window_extent().get_points()[:,1]))[0]
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
unitx = xmax - xmin
unity = ymax - ymin
dxu = np.abs(unitx)/pixelx
dyu = np.abs(unity)/pixely
# transform x, y from relative units to axis units:
x = xmin + x*unitx
y = ymin + y*unity
# bar length:
if hformat is None:
hformat = '%.0f'
if height < 1.0:
hformat = '%.1f'
try:
ls = hformat % height
        height = float(ls)
except TypeError:
ls = hformat
# bar:
if va == 'bottom':
y0 = y
y1 = y+height
elif va == 'top':
y0 = y-height
y1 = y
else:
y0 = y-0.5*height
y1 = y+0.5*height
# line width:
if lw is None:
lw = 2
# color:
if color is None:
color = 'k'
# scalebar:
lh = ax.plot([x, x], [y0, y1], '-', color=color, lw=lw,
solid_capstyle='butt', clip_on=False)
# get x position of line in figure pixel coordinates:
lx = np.array(lh[0].get_window_extent(ax.get_figure().canvas.get_renderer()))[0,0]
# caps:
if capsize is None:
capsize = 0
if clw is None:
clw = 0.5
if capsize > 0.0:
dx = capsize*dxu
ax.plot([x-dx, x+dx], [y0, y0], '-', color=color, lw=clw, solid_capstyle='butt',
clip_on=False)
ax.plot([x-dx, x+dx], [y1, y1], '-', color=color, lw=clw, solid_capstyle='butt',
clip_on=False)
# label:
if hunit:
ls += u'\u2009%s' % hunit
if ha == 'right':
th = ax.text(x, 0.5*(y0+y1), ls, clip_on=False, rotation=90.0,
ha='left', va='center', **kwargs)
# get x coordinate of text bottom in figure pixel coordinates:
tx = np.array(th.get_window_extent(ax.get_figure().canvas.get_renderer()))[0,0]
dtx = lx+0.5*lw + 2.0 - tx
else:
th = ax.text(x, 0.5*(y0+y1), ls, clip_on=False, rotation=90.0,
ha='right', va='center', **kwargs)
# get x coordinate of text bottom in figure pixel coordinates:
tx = np.array(th.get_window_extent(ax.get_figure().canvas.get_renderer()))[1,0]
dtx = lx-0.5*lw - 1.0 - tx
th.set_position((x+dxu*dtx, 0.5*(y0+y1)))
return x, y0, y1
def arrowed_spines(ax, ms=10):
""" Spine with arrow on the y-axis of a plot.
Parameters
----------
ax : matplotlib figure axis
Axis on which the arrow should be plot.
"""
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
ax.scatter([xmin], [ymax], s=ms, marker='^', clip_on=False, color='k')
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
def loghist(ax, x, bmin, bmax, n, c, orientation='vertical', label=''):
""" Plot histogram with logarithmic scale.
Parameters
----------
ax : matplotlib axis
Axis to plot the histogram on.
x : numpy array
Input data for histogram.
bmin : float
Minimum value for the histogram bins.
bmax : float
Maximum value for the histogram bins.
n : int
Number of bins.
c : matplotlib color
Color of histogram.
orientation : string (optional)
Histogram orientation.
Defaults to 'vertical'.
label : string (optional)
Label for x.
Defaults to '' (no label).
Returns
-------
n : array
The values of the histogram bins.
bins : array
The edges of the bins.
patches : BarContainer
Container of individual artists used to create the histogram.
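    Examples
    --------
    Histogram EOD widths on a logarithmic axis (names illustrative):

    >>> counts, bins, patches = loghist(ax, widths, widths.min(),
    ...                                 widths.max(), 100, 'k')  # doctest: +SKIP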
"""
return ax.hist(x, bins=np.exp(np.linspace(np.log(bmin), np.log(bmax), n)),
color=c, orientation=orientation, label=label)
def plot_all(data, eod_p_times, eod_tr_times, fs, mean_eods):
"""Quick way to view the output of extract_pulsefish in a single plot.
Parameters
----------
data: array
Recording data.
eod_p_times: array of ints
EOD peak indices.
eod_tr_times: array of ints
EOD trough indices.
fs: float
Samplerate.
mean_eods: list of numpy arrays
Mean EODs of each pulsefish found in the recording.
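    Examples
    --------
    Visualize detections returned by `extract_pulsefish` (names illustrative):

    >>> plot_all(data, eod_p_times, eod_tr_times, fs, mean_eods)  # doctest: +SKIP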
"""
fig = plt.figure(figsize=(10, 5))
if len(eod_p_times) > 0:
gs = gridspec.GridSpec(2, len(eod_p_times))
ax = fig.add_subplot(gs[0,:])
ax.plot(np.arange(len(data))/fs, data, c='k', alpha=0.3)
for i, (pt, tt) in enumerate(zip(eod_p_times, eod_tr_times)):
ax.plot(pt, data[(pt*fs).astype('int')], 'o', label=i+1, ms=10, c=cmap(i))
ax.plot(tt, data[(tt*fs).astype('int')], 'o', label=i+1, ms=10, c=cmap(i))
ax.set_xlabel('time [s]')
ax.set_ylabel('amplitude [V]')
for i, m in enumerate(mean_eods):
ax = fig.add_subplot(gs[1,i])
ax.plot(1000*m[0], 1000*m[1], c='k')
ax.fill_between(1000*m[0], 1000*(m[1]-m[2]), 1000*(m[1]+m[2]), color=cmap(i))
ax.set_xlabel('time [ms]')
ax.set_ylabel('amplitude [mV]')
else:
plt.plot(np.arange(len(data))/fs, data, c='k', alpha=0.3)
plt.tight_layout()
def plot_clustering(samplerate, eod_widths, eod_hights, eod_shapes, disc_masks, merge_masks):
"""Plot all clustering steps.
Plot clustering steps on width, height and shape. Then plot the remaining EODs after
the EOD assessment step and the EODs after the merge step.
Parameters
----------
samplerate : float
Samplerate of EOD snippets.
eod_widths : list of three 1D numpy arrays
The first list entry gives the unique labels of all width clusters as a list of ints.
The second list entry gives the width values for each EOD in samples as a
1D numpy array of ints.
The third list entry gives the width labels for each EOD as a 1D numpy array of ints.
eod_hights : nested lists (2 layers) of three 1D numpy arrays
The first list entry gives the unique labels of all height clusters as a list of ints
for each width cluster.
The second list entry gives the height values for each EOD as a 1D numpy array
of floats for each width cluster.
The third list entry gives the height labels for each EOD as a 1D numpy array
of ints for each width cluster.
eod_shapes : nested lists (3 layers) of three 1D numpy arrays
The first list entry gives the raw EOD snippets as a 2D numpy array for each
height cluster in a width cluster.
The second list entry gives the snippet PCA values for each EOD as a 2D numpy array
of floats for each height cluster in a width cluster.
The third list entry gives the shape labels for each EOD as a 1D numpy array of ints
for each height cluster in a width cluster.
disc_masks : Nested lists (two layers) of 1D numpy arrays
The masks of EODs that are discarded by the discarding step of the algorithm.
The masks are 1D boolean arrays where
instances that are set to True are discarded by the algorithm. Discarding masks
are saved in nested lists that represent the width and height clusters.
merge_masks : Nested lists (two layers) of 2D numpy arrays
The masks of EODs that are discarded by the merging step of the algorithm.
The masks are 2D boolean arrays where
for each sample point `i` either `merge_mask[i,0]` or `merge_mask[i,1]` is set to True.
Here, merge_mask[:,0] represents the
peak-centered clusters and `merge_mask[:,1]` represents the trough-centered clusters.
Merge masks are saved in nested lists
that represent the width and height clusters.
"""
# create figure + transparant figure.
fig = plt.figure(figsize=(12, 7))
transFigure = fig.transFigure.inverted()
# set up the figure layout
outer = gridspec.GridSpec(1, 5, width_ratios=[1, 1, 2, 1, 2], left=0.05, right=0.95)
# set titles for each clustering step
titles = ['1. Widths', '2. Heights', '3. Shape', '4. Pulse EODs', '5. Merge']
for i, title in enumerate(titles):
title_ax = gridspec.GridSpecFromSubplotSpec(1, 1, subplot_spec = outer[i])
ax = fig.add_subplot(title_ax[0])
ax.text(0, 110, title, ha='center', va='bottom', clip_on=False)
ax.set_xlim(-100, 100)
ax.set_ylim(-100, 100)
ax.axis('off')
# compute sizes for each axis
w_size = 1
h_size = len(eod_hights[1])
shape_size = np.sum([len(sl) for sl in eod_shapes[0]])
# count required axes sized for the last two plot columns.
disc_size = 0
merge_size= 0
for shapelabel, dmasks, mmasks in zip(eod_shapes[2], disc_masks, merge_masks):
for sl, dm, mm in zip(shapelabel, dmasks, mmasks):
uld1 = np.unique((sl[0]+1)*np.invert(dm[0]))
uld2 = np.unique((sl[1]+1)*np.invert(dm[1]))
disc_size = disc_size+len(uld1[uld1>0])+len(uld2[uld2>0])
uld1 = np.unique((sl[0]+1)*mm[0])
uld2 = np.unique((sl[1]+1)*mm[1])
merge_size = merge_size+len(uld1[uld1>0])+len(uld2[uld2>0])
# set counters to keep track of the plot axes
disc_block = 0
merge_block = 0
shape_count = 0
# create all axes
width_hist_ax = gridspec.GridSpecFromSubplotSpec(w_size, 1, subplot_spec = outer[0])
hight_hist_ax = gridspec.GridSpecFromSubplotSpec(h_size, 1, subplot_spec = outer[1])
shape_ax = gridspec.GridSpecFromSubplotSpec(shape_size, 1, subplot_spec = outer[2])
shape_windows = [gridspec.GridSpecFromSubplotSpec(2, 2, hspace=0.0, wspace=0.0,
subplot_spec=shape_ax[i])
for i in range(shape_size)]
EOD_delete_ax = gridspec.GridSpecFromSubplotSpec(disc_size, 1, subplot_spec=outer[3])
EOD_merge_ax = gridspec.GridSpecFromSubplotSpec(merge_size, 1, subplot_spec=outer[4])
# plot width labels histogram
ax1 = fig.add_subplot(width_hist_ax[0])
# set axes features.
ax1.set_xscale('log')
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.spines['bottom'].set_visible(False)
ax1.axes.xaxis.set_visible(False)
ax1.set_yticklabels([])
# indices for plot colors (dark to light)
colidxsw = -np.linspace(-1.25, -0.5, h_size)
for i, (wl, colw, uhl, eod_h, eod_h_labs, w_snip, w_feat, w_lab, w_dm, w_mm) in enumerate(zip(eod_widths[0], colidxsw, eod_hights[0], eod_hights[1], eod_hights[2], eod_shapes[0], eod_shapes[1], eod_shapes[2], disc_masks, merge_masks)):
# plot width hist
hw, _, _ = ax1.hist(eod_widths[1][eod_widths[2]==wl],
bins=np.linspace(np.min(eod_widths[1]), np.max(eod_widths[1]), 100),
color=lighter(c_o, colw), orientation='horizontal')
# set arrow when the last hist is plot so the size of the axes are known.
if i == h_size-1:
arrowed_spines(ax1, ms=20)
        # determine the total size of the height histograms now.
my, b = np.histogram(eod_h, bins=np.exp(np.linspace(np.min(np.log(eod_h)),
np.max(np.log(eod_h)), 100)))
maxy = np.max(my)
        # set axes features for height hist.
ax2 = fig.add_subplot(hight_hist_ax[h_size-i-1])
ax2.set_xscale('log')
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['bottom'].set_visible(False)
ax2.set_xlim(0.9, maxy)
ax2.axes.xaxis.set_visible(False)
ax2.set_yscale('log')
ax2.yaxis.set_major_formatter(ticker.NullFormatter())
ax2.yaxis.set_minor_formatter(ticker.NullFormatter())
# define colors for plots
colidxsh = -np.linspace(-1.25, -0.5, len(uhl))
for n, (hl, hcol, snippets, features, labels, dmasks, mmasks) in enumerate(zip(uhl, colidxsh, w_snip, w_feat, w_lab, w_dm, w_mm)):
hh, _, _ = loghist(ax2, eod_h[eod_h_labs==hl], np.min(eod_h), np.max(eod_h), 100,
lighter(c_g, hcol), orientation='horizontal')
# set arrow spines only on last plot
if n == len(uhl)-1:
arrowed_spines(ax2, ms=10)
# plot line from the width histogram to the height histogram.
if n == 0:
coord1 = transFigure.transform(ax1.transData.transform([np.median(hw[hw!=0]),
np.median(eod_widths[1][eod_widths[2]==wl])]))
coord2 = transFigure.transform(ax2.transData.transform([0.9, np.mean(eod_h)]))
line = Line2D((coord1[0], coord2[0]), (coord1[1], coord2[1]),
transform=fig.transFigure, color='grey', linewidth=0.5)
fig.lines.append(line)
# compute sizes of the eod_discarding and merge steps
s1 = np.unique((labels[0]+1)*(~dmasks[0]))
s2 = np.unique((labels[1]+1)*(~dmasks[1]))
disc_block = disc_block + len(s1[s1>0]) + len(s2[s2>0])
s1 = np.unique((labels[0]+1)*(mmasks[0]))
s2 = np.unique((labels[1]+1)*(mmasks[1]))
merge_block = merge_block + len(s1[s1>0]) + len(s2[s2>0])
axs = []
disc_count = 0
merge_count = 0
# now plot the clusters for peak and trough centerings
for pt, cmap_pt in zip([0, 1], cmap_pts):
ax3 = fig.add_subplot(shape_windows[shape_size-1-shape_count][pt,0])
ax4 = fig.add_subplot(shape_windows[shape_size-1-shape_count][pt,1])
# remove axes
ax3.axes.xaxis.set_visible(False)
ax4.axes.yaxis.set_visible(False)
ax3.axes.yaxis.set_visible(False)
ax4.axes.xaxis.set_visible(False)
# set color indices
colidxss = -np.linspace(-1.25, -0.5, len(np.unique(labels[pt][labels[pt]>=0])))
j=0
for c in np.unique(labels[pt]):
if c<0:
# plot noise features + snippets
ax3.plot(features[pt][labels[pt]==c,0], features[pt][labels[pt]==c,1],
'.', color='lightgrey', label='-1', rasterized=True)
ax4.plot(snippets[pt][labels[pt]==c].T, linewidth=0.1,
color='lightgrey', label='-1', rasterized=True)
else:
# plot cluster features and snippets
ax3.plot(features[pt][labels[pt]==c,0], features[pt][labels[pt]==c,1],
'.', color=lighter(cmap_pt, colidxss[j]), label=c,
rasterized=True)
ax4.plot(snippets[pt][labels[pt]==c].T, linewidth=0.1,
color=lighter(cmap_pt, colidxss[j]), label=c, rasterized=True)
# check if the current cluster is an EOD, if yes, plot it.
if np.sum(dmasks[pt][labels[pt]==c]) == 0:
ax = fig.add_subplot(EOD_delete_ax[disc_size-disc_block+disc_count])
ax.axis('off')
# plot mean EOD snippet
ax.plot(np.mean(snippets[pt][labels[pt]==c], axis=0),
color=lighter(cmap_pt, colidxss[j]))
disc_count = disc_count + 1
# match colors and draw line..
coord1 = transFigure.transform(ax4.transData.transform([ax4.get_xlim()[1],
ax4.get_ylim()[0] + 0.5*(ax4.get_ylim()[1]-ax4.get_ylim()[0])]))
coord2 = transFigure.transform(ax.transData.transform([ax.get_xlim()[0],ax.get_ylim()[0] + 0.5*(ax.get_ylim()[1]-ax.get_ylim()[0])]))
line = Line2D((coord1[0], coord2[0]), (coord1[1], coord2[1]),
transform=fig.transFigure, color='grey',
linewidth=0.5)
fig.lines.append(line)
axs.append(ax)
# check if the current EOD survives the merge step
# if so, plot it.
if np.sum(mmasks[pt, labels[pt]==c])>0:
ax = fig.add_subplot(EOD_merge_ax[merge_size-merge_block+merge_count])
ax.axis('off')
ax.plot(np.mean(snippets[pt][labels[pt]==c], axis=0),
color=lighter(cmap_pt, colidxss[j]))
merge_count = merge_count + 1
j=j+1
if pt==0:
                # draw line from height cluster to EOD shape clusters.
coord1 = transFigure.transform(ax2.transData.transform([np.median(hh[hh!=0]),
np.median(eod_h[eod_h_labs==hl])]))
coord2 = transFigure.transform(ax3.transData.transform([ax3.get_xlim()[0],
ax3.get_ylim()[0]]))
line = Line2D((coord1[0], coord2[0]), (coord1[1], coord2[1]),
transform=fig.transFigure, color='grey', linewidth=0.5)
fig.lines.append(line)
shape_count = shape_count + 1
if len(axs)>0:
# plot lines that indicate the merged clusters.
coord1 = transFigure.transform(axs[0].transData.transform([axs[0].get_xlim()[1]+0.1*(axs[0].get_xlim()[1]-axs[0].get_xlim()[0]),
axs[0].get_ylim()[1]-0.25*(axs[0].get_ylim()[1]-axs[0].get_ylim()[0])]))
coord2 = transFigure.transform(axs[-1].transData.transform([axs[-1].get_xlim()[1]+0.1*(axs[-1].get_xlim()[1]-axs[-1].get_xlim()[0]),
axs[-1].get_ylim()[0]+0.25*(axs[-1].get_ylim()[1]-axs[-1].get_ylim()[0])]))
line = Line2D((coord1[0], coord2[0]), (coord1[1], coord2[1]),
transform=fig.transFigure, color='grey', linewidth=1)
fig.lines.append(line)
def plot_bgm(x, means, variances, weights, use_log, labels, labels_am, xlab):
"""Plot a BGM clustering step either on EOD width or height.
Parameters
----------
x : 1D numpy array of floats
BGM input values.
means : list of floats
BGM Gaussian means
variances : list of floats
BGM Gaussian variances.
weights : list of floats
BGM Gaussian weights.
use_log : boolean
True if the z-scored logarithm of the data was used as BGM input.
labels : 1D numpy array of ints
Labels defined by BGM model (before merging based on merge factor).
labels_am : 1D numpy array of ints
Labels defined by BGM model (after merging based on merge factor).
xlab : string
Label for plot (defines the units of the BGM data).
"""
if 'width' in xlab:
ccol = c_o
elif 'height' in xlab:
ccol = c_g
else:
ccol = 'b'
# get the transform that was used as BGM input
if use_log:
x_transform = stats.zscore(np.log(x))
xplot = np.exp(np.linspace(np.log(np.min(x)), np.log(np.max(x)), 1000))
else:
x_transform = stats.zscore(x)
xplot = np.linspace(np.min(x), np.max(x), 1000)
# compute the x values and gaussians
x2 = np.linspace(np.min(x_transform), np.max(x_transform), 1000)
gaussians = []
gmax = 0
for i, (w, m, std) in enumerate(zip(weights, means, variances)):
gaus = np.sqrt(w*stats.norm.pdf(x2, m, np.sqrt(std)))
gaussians.append(gaus)
gmax = max(np.max(gaus), gmax)
# compute classes defined by gaussian intersections
classes = np.argmax(np.vstack(gaussians), axis=0)
# find the minimum of any gaussian that is within its class
gmin = 100
for i, c in enumerate(np.unique(classes)):
gmin=min(gmin, np.min(gaussians[c][classes==c]))
# set up the figure
fig, ax1 = plt.subplots(figsize=(8, 4.8))
fig_ysize = 4
ax2 = ax1.twinx()
ax1.spines['top'].set_visible(False)
ax2.spines['top'].set_visible(False)
ax1.set_xlabel('x [a.u.]')
ax1.set_ylabel('#')
ax2.set_ylabel('Likelihood')
ax2.set_yscale('log')
ax1.set_yscale('log')
if use_log:
ax1.set_xscale('log')
ax1.set_xlabel(xlab)
# define colors for plotting gaussians
colidxs = -np.linspace(-1.25, -0.5, len(np.unique(classes)))
# plot the gaussians
for i, c in enumerate(np.unique(classes)):
ax2.plot(xplot, gaussians[c], c=lighter(c_grey, colidxs[i]), linewidth=2,
label=r'$N(\mu_%i, \sigma_%i)$'%(c, c))
# plot intersection lines
ax2.vlines(xplot[1:][np.diff(classes)!=0], 0, gmax/gmin, color='k', linewidth=2,
linestyle='--')
ax2.set_ylim(gmin, np.max(np.vstack(gaussians))*1.1)
# plot data distributions and classes
colidxs = -np.linspace(-1.25, -0.5, len(np.unique(labels)))
for i, l in enumerate(np.unique(labels)):
if use_log:
h, binn, _ = loghist(ax1, x[labels==l], np.min(x), np.max(x), 100,
lighter(ccol, colidxs[i]), label=r'$x_%i$'%l)
else:
h, binn, _ = ax1.hist(x[labels==l], bins=np.linspace(np.min(x), np.max(x), 100),
color=lighter(ccol, colidxs[i]), label=r'$x_%i$'%l)
# annotate merged clusters
for l in np.unique(labels_am):
maps = np.unique(labels[labels_am==l])
if len(maps) > 1:
x1 = x[labels==maps[0]]
x2 = x[labels==maps[1]]
print(np.median(x1))
print(np.median(x2))
print(gmax)
ax2.plot([np.median(x1), np.median(x2)], [1.2*gmax, 1.2*gmax], c='k', clip_on=False)
ax2.plot([np.median(x1), np.median(x1)], [1.1*gmax, 1.2*gmax], c='k', clip_on=False)
ax2.plot([np.median(x2), np.median(x2)], [1.1*gmax, 1.2*gmax], c='k', clip_on=False)
ax2.annotate(r'$\frac{|{\tilde{x}_%i-\tilde{x}_%i}|}{max(\tilde{x}_%i, \tilde{x}_%i)} < \epsilon$' % (maps[0], maps[1], maps[0], maps[1]), [np.median(x1)*1.1, gmax*1.2], xytext=(10, 10), textcoords='offset points', fontsize=12, annotation_clip=False, ha='center')
# add legends and plot.
ax2.legend(loc='lower left', frameon=False, bbox_to_anchor=(-0.05, 1.3),
ncol=len(np.unique(classes)))
ax1.legend(loc='upper left', frameon=False, bbox_to_anchor=(-0.05, 1.3),
ncol=len(np.unique(labels)))
plt.tight_layout()
def plot_feature_extraction(raw_snippets, normalized_snippets, features, labels, dt, pt):
"""Plot clustering step on EOD shape.
Parameters
----------
raw_snippets : 2D numpy array
Raw EOD snippets.
normalized_snippets : 2D numpy array
Normalized EOD snippets.
features : 2D numpy array
PCA values for each normalized EOD snippet.
labels : 1D numpy array of ints
Cluster labels.
dt : float
Sample interval of snippets.
pt : int
Set to 0 for peak-centered EODs and set to 1 for trough-centered EODs.
"""
ccol = cmap_pts[pt]
# set up the figure layout
fig = plt.figure(figsize=(((2+0.2)*4.8), 4.8))
outer = gridspec.GridSpec(1, 2, wspace=0.2, hspace=0)
x = np.arange(-dt*1000*raw_snippets.shape[1]/2, dt*1000*raw_snippets.shape[1]/2, dt*1000)
snip_ax = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec = outer[0], hspace=0.35)
pc_ax = gridspec.GridSpecFromSubplotSpec(features.shape[1]-1, features.shape[1]-1,
subplot_spec = outer[1], hspace=0, wspace=0)
# 3 plots: raw snippets, normalized, pcs.
ax_raw_snip = fig.add_subplot(snip_ax[0])
ax_normalized_snip = fig.add_subplot(snip_ax[1])
colidxs = -np.linspace(-1.25, -0.5, len(np.unique(labels[labels>=0])))
j=0
for c in np.unique(labels):
if c<0:
color='lightgrey'
else:
color = lighter(ccol, colidxs[j])
j=j+1
ax_raw_snip.plot(x, raw_snippets[labels==c].T, color=color, label='-1',
rasterized=True, alpha=0.25)
ax_normalized_snip.plot(x, normalized_snippets[labels==c].T, color=color, alpha=0.25)
ax_raw_snip.spines['top'].set_visible(False)
ax_raw_snip.spines['right'].set_visible(False)
ax_raw_snip.get_xaxis().set_ticklabels([])
ax_raw_snip.set_title('Raw snippets')
ax_raw_snip.set_ylabel('Amplitude [a.u.]')
ax_normalized_snip.spines['top'].set_visible(False)
ax_normalized_snip.spines['right'].set_visible(False)
ax_normalized_snip.set_title('Normalized snippets')
ax_normalized_snip.set_ylabel('Amplitude [a.u.]')
ax_normalized_snip.set_xlabel('Time [ms]')
ax_raw_snip.axis('off')
ax_normalized_snip.axis('off')
ax_overlay = fig.add_subplot(pc_ax[:,:])
ax_overlay.set_title('Features')
ax_overlay.axis('off')
for n in range(features.shape[1]):
for m in range(n):
ax = fig.add_subplot(pc_ax[n-1,m])
ax.scatter(features[labels==c,m], features[labels==c,n], marker='.',
color=color, alpha=0.25)
ax.set_xlim(np.min(features), np.max(features))
ax.set_ylim(np.min(features), np.max(features))
ax.get_xaxis().set_ticklabels([])
ax.get_yaxis().set_ticklabels([])
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
if m==0:
ax.set_ylabel('PC %i'%(n+1))
if n==features.shape[1]-1:
ax.set_xlabel('PC %i'%(m+1))
ax = fig.add_subplot(pc_ax[0,features.shape[1]-2])
ax.set_xlim(np.min(features), np.max(features))
ax.set_ylim(np.min(features), np.max(features))
size = max(1, int(np.ceil(-np.log10(np.max(features)-np.min(features)))))
wbar = np.floor((np.max(features)-np.min(features))*10**size)/10**size
    # TODO: the scale bar should be smaller than the feature range,
    # e.g. a fixed fraction of it.
xscalebar(ax, 0, 0, wbar, wformat='%%.%if'%size)
yscalebar(ax, 0, 0, wbar, hformat='%%.%if'%size)
ax.axis('off')
def plot_moving_fish(ws, dts, clusterss, ts, fishcounts, T, ignore_stepss):
"""Plot moving fish detection step.
Parameters
----------
ws : list of floats
Median width for each width cluster that the moving fish algorithm is computed on
(in seconds).
dts : list of floats
Sliding window size (in seconds) for each width cluster.
clusterss : list of 1D numpy int arrays
Cluster labels for each EOD cluster in a width cluster.
ts : list of 1D numpy float arrays
EOD emission times for each EOD in a width cluster.
fishcounts : list of lists
Sliding window timepoints and fishcounts for each width cluster.
T : float
        Length of the analyzed recording in seconds.
ignore_stepss : list of 1D int arrays
Mask for fishcounts that were ignored (ignored if True) in the moving_fish analysis.
"""
fig = plt.figure()
# create gridspec
outer = gridspec.GridSpec(len(ws), 1)
for i, (w, dt, clusters, t, fishcount, ignore_steps) in enumerate(zip(ws, dts, clusterss, ts, fishcounts, ignore_stepss)):
gs = gridspec.GridSpecFromSubplotSpec(2, 1, subplot_spec = outer[i])
# axis for clusters
ax1 = fig.add_subplot(gs[0])
# axis for fishcount
ax2 = fig.add_subplot(gs[1])
# plot clusters as eventplot
for cnum, c in enumerate(np.unique(clusters[clusters>=0])):
ax1.eventplot(t[clusters==c], lineoffsets=cnum, linelengths=0.5, color=cmap(i))
cnum = cnum + 1
# Plot the sliding window
rect=Rectangle((0, -0.5), dt, cnum, linewidth=1, linestyle='--', edgecolor='k',
facecolor='none', clip_on=False)
ax1.add_patch(rect)
ax1.arrow(dt+0.1, -0.5, 0.5, 0, head_width=0.1, head_length=0.1, facecolor='k',
edgecolor='k')
# plot parameters
ax1.set_title(r'$\tilde{w}_%i = %.3f ms$'%(i, 1000*w))
ax1.set_ylabel('cluster #')
ax1.set_yticks(range(0, cnum))
ax1.set_xlabel('time')
ax1.set_xlim(0, T)
ax1.axes.xaxis.set_visible(False)
ax1.spines['bottom'].set_visible(False)
ax1.spines['top'].set_visible(False)
ax1.spines['right'].set_visible(False)
ax1.spines['left'].set_visible(False)
# plot for fishcount
x = fishcount[0]
y = fishcount[1]
ax2 = fig.add_subplot(gs[1])
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.spines['bottom'].set_visible(False)
ax2.axes.xaxis.set_visible(False)
yplot = np.copy(y)
ax2.plot(x+dt/2, yplot, linestyle='-', marker='.', c=cmap(i), alpha=0.25)
yplot[ignore_steps.astype(bool)] = np.NaN
ax2.plot(x+dt/2, yplot, linestyle='-', marker='.', c=cmap(i))
ax2.set_ylabel('Fish count')
ax2.set_yticks(range(int(np.min(y)), 1+int(np.max(y))))
ax2.set_xlim(0, T)
        ax2.axes.xaxis.set_visible(False)
        if i == len(ws)-1:
            # add the time scale bar to the bottom panel only
            xscalebar(ax2, 1, 0, 1, wunit='s', ha='right')
con = ConnectionPatch([0, -0.5], [dt/2, y[0]], "data", "data",
axesA=ax1, axesB=ax2, color='k')
ax2.add_artist(con)
con = ConnectionPatch([dt, -0.5], [dt/2, y[0]], "data", "data",
axesA=ax1, axesB=ax2, color='k')
ax2.add_artist(con)
plt.xlim(0, T)
| gpl-3.0 |
jerjorg/BZI | BZI/convergence.py | 1 | 6793 | import numpy as np
import matplotlib.pyplot as plt
import time
from BZI.symmetry import make_ptvecs
from BZI.sampling import make_grid
from BZI.pseudopots import Al_PP
from BZI.integration import monte_carlo
from BZI.plots import PlotMesh
class Convergence(object):
""" Compare integrations of pseudo-potentials by creating convergence plots.
Args:
pseudo_potential (function): a pseudo-potential function taken from
BZI.pseudopots
cutoff (float): the energy cutoff of the pseudo-potential
        cell_centering (str): the centering type of the integration cell
        cell_constants (list): the lattice constants of the integration cell
        cell_angles (list): the lattice angles of the integration cell
offset (list): a vector that offsets the grid from the origin and is
given in grid coordinates.
grid_types (list): a list of grid types
grid_constants (list): a list of grid constants
integration_methods (list): a list of integration methods
Attributes:
pseudo_potential (function): a pseudo-potential function taken from
BZI.pseudopots
        cell_centering (str): the centering type of the integration cell.
        cell_constants (list): the lattice constants of the integration cell.
        cell_angles (list): the lattice angles of the integration cell.
        cell_vectors (np.ndarray): an array of vectors as columns of a 3x3 numpy
array that is used to create the cell
grid_types (list): a list of grid types
grid_constants (list): a list of grid constants
integration_methods (list): a list of integration methods
answer (float): the expected result of integration
errors (list): a list of errors for each grid type
nspts (list): a list of the number of sampling points for each grid type
integrals (list): a list of integral value for each grid type and constant
times (list): a list of the amount of time taken computing the grid
generation and integration.
"""
def __init__(self, pseudo_potential=None, cutoff=None, cell_centering=None,
cell_constants=None, cell_angles=None, offset=None,
grid_types=None, grid_constants=None,
integration_methods=None, origin=None, random = None):
self.pseudo_potential = pseudo_potential or Al_PP
self.cutoff = cutoff or 4.
self.cell_centering = cell_centering or "prim"
self.cell_constants = cell_constants or [1.]*3
self.cell_angles = cell_angles or [np.pi/2]*3
self.cell_vectors = make_ptvecs(self.cell_centering, self.cell_constants,
self.cell_angles)
        self.grid_types = grid_types or ["prim", "base", "body", "face"]
self.grid_constants = grid_constants or [1/n for n in range(2,11)]
self.offset = offset or [0.,0.,0.]
        # rectangle_method is not imported here, so no default is set;
        # an integration method must be passed in for compare_grids to run.
        self.integration_methods = integration_methods
self.origin = origin or [0.,0.,0.]
self.random = random or False
def compare_grids(self, answer, plot=False, save=False):
self.answer = answer
if self.random:
nm = len(self.grid_types)
self.nspts = [[] for _ in range(nm + 1)]
self.errors = [[] for _ in range(nm + 1)]
self.integrals = [[] for _ in range(nm + 1)]
self.times = [[] for _ in range(nm + 1)]
npts_list = [2**n for n in range(8,14)]
for npts in npts_list:
time1 = time.time()
integral = monte_carlo(self.pseudo_potential,
self.cell_vectors,
npts,
self.cutoff)
self.nspts[nm].append(npts)
self.integrals[nm].append(integral)
self.times[nm].append((time.time() - time1))
self.errors[nm].append(np.abs(self.integrals[nm][-1] - answer))
else:
self.nspts = [[] for _ in range(len(self.grid_types))]
self.errors = [[] for _ in range(len(self.grid_types))]
self.integrals = [[] for _ in range(len(self.grid_types))]
self.times = [[] for _ in range(len(self.grid_types))]
integration_method = self.integration_methods[0]
            for (i, grid_centering) in enumerate(self.grid_types):
                for grid_consts in self.grid_constants:
                    # assume cubic grid cells: three equal constants, right angles
                    for grid_angles in [[np.pi/2]*3]:
                        grid_vecs = make_ptvecs(grid_centering,
                                                [grid_consts]*3,
                                                grid_angles)
time1 = time.time()
npts, integral = integration_method(self.pseudo_potential,
self.cell_vectors,
grid_vecs,
self.offset,
self.origin,
self.cutoff)
self.nspts[i].append(npts)
self.integrals[i].append(integral)
self.times[i].append((time.time() - time1))
self.errors[i].append(np.abs(self.integrals[i][-1] - answer))
if save:
np.save("%s_times" %self.pseudo_potential, self.times)
np.save("%s_integrals" %self.pseudo_potential, self.integrals)
np.save("%s_errors" %self.pseudo_potential, self.errors)
if plot:
if self.random:
plt.loglog(self.nspts[nm], self.errors[nm], label="random", color="orange")
for i in range(len(self.grid_types)):
plt.loglog(self.nspts[i], self.errors[i], label=self.grid_types[i])
plt.xlabel("Number of samping points")
plt.ylabel("Error")
test = [1./n**(2./3) for n in self.nspts[0]]
plt.loglog(self.nspts[0], test, label="1/n**(2/3)")
lgd = plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.grid()
plt.show()
plt.close()
for i in range(len(self.grid_types)):
plt.loglog(self.nspts[i], self.times[i], label=self.grid_types[i])
plt.xlabel("Number of samping points")
plt.ylabel("Time (s)")
lgd = plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.grid()
plt.show()
plt.close()
def plot_grid(self,i,j):
"""Plot one of the grids in the convergence plot.
"""
        grid_vecs = make_ptvecs(self.grid_types[i], [self.grid_constants[j]]*3,
                                [np.pi/2]*3)  # assume a cubic grid cell
        grid_pts = make_grid(self.cell_vectors, grid_vecs, self.offset)
        PlotMesh(grid_pts, self.cell_vectors, self.offset)
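# Usage sketch (illustrative; an integration method must be supplied):
# conv = Convergence(grid_types=["prim", "body"],
#                    integration_methods=[my_integration_method])
# conv.compare_grids(answer=known_value, plot=True)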
| gpl-3.0 |
chrsrds/scikit-learn | examples/manifold/plot_swissroll.py | 72 | 1295 | """
===================================
Swiss Roll reduction with LLE
===================================
An illustration of Swiss Roll reduction
with locally linear embedding
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause (C) INRIA 2011
print(__doc__)
import matplotlib.pyplot as plt
# This import is needed to modify the way figure behaves
from mpl_toolkits.mplot3d import Axes3D
Axes3D
#----------------------------------------------------------------------
# Locally linear embedding of the swiss roll
from sklearn import manifold, datasets
X, color = datasets.samples_generator.make_swiss_roll(n_samples=1500)
print("Computing LLE embedding")
X_r, err = manifold.locally_linear_embedding(X, n_neighbors=12,
n_components=2)
print("Done. Reconstruction error: %g" % err)
#----------------------------------------------------------------------
# Plot result
fig = plt.figure()
ax = fig.add_subplot(211, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.set_title("Original data")
ax = fig.add_subplot(212)
ax.scatter(X_r[:, 0], X_r[:, 1], c=color, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title('Projected data')
plt.show()
| bsd-3-clause |
perrette/pyglacier | pyglacier/plotting.py | 1 | 1106 | import matplotlib.pyplot as plt
#
# plotting
#
def plot_elevation(ds, ax=None):
if ax is None:
ax = plt.gca()
ds['hs'].plot(ax=ax,label="surface")
ds['hb'].plot(ax=ax,label="bottom")
# add horizontal line to indicate sea level
ax.hlines(0, ds.x[0], ds.x[-1], linestyle='dashed', color='black')
ds['zb'].plot(ax=ax, color='black', linewidth=2, label="bedrock") # add bedrock
ax.legend(frameon=False, loc="upper right")
def plot_velocity(ds, ax=None):
if ax is None:
ax = plt.gca()
ds = ds.copy()
u = 'u' if 'u' in ds else 'U'
ds[u] = ds[u]*3600*24
ds[u].plot(ax=ax)
ax.set_ylabel('velocity [m/d]')
def plot_glacier(ds):
fig,axes=plt.subplots(2,1,sharex=True)
ax=axes[0]
plot_elevation(ds, ax)
ax=axes[1]
plot_velocity(ds, ax)
ax.set_xlim([ds.x[0], ds.x[-1]])
return fig, axes
def plot_stress(ds):
_v = ["driving", "lat", "long", "basal", "residual"]
try:
ds = ds.take(_v)
except KeyError:
ds = ds.take([k + '_stress' for k in _v])
return ds.to_array(axis='stress').T.plot()
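# Usage sketch (illustrative): `ds` is assumed to hold 'hs', 'hb', 'zb',
# 'x' and a velocity field as used above.
# fig, axes = plot_glacier(ds)
# plt.show()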
| mit |
ofgulban/scikit-image | doc/examples/filters/plot_rank_mean.py | 7 | 1525 | """
============
Mean filters
============
This example compares the following mean filters of the rank filter package:
* **local mean**: all pixels belonging to the structuring element are used to
  compute the average gray level.
* **percentile mean**: only use values between percentiles p0 and p1
(here 10% and 90%).
* **bilateral mean**: only use pixels of the structuring element having a gray
level situated inside g-s0 and g+s1 (here g-500 and g+500)
Percentile and usual mean give here similar results, these filters smooth the
complete image (background and details). Bilateral mean exhibits a high
filtering rate for continuous area (i.e. background) while higher image
frequencies remain untouched.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.morphology import disk
from skimage.filters import rank
image = data.coins()
selem = disk(20)
percentile_result = rank.mean_percentile(image, selem=selem, p0=.1, p1=.9)
bilateral_result = rank.mean_bilateral(image, selem=selem, s0=500, s1=500)
normal_result = rank.mean(image, selem=selem)
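# Parameter summary for the three calls above: p0/p1 keep only gray levels
# between the 10th and 90th neighborhood percentiles before averaging, while
# s0/s1 define the gray-level window [g - 500, g + 500] around the center
# pixel value g used by the bilateral mean.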
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(8, 10),
sharex=True, sharey=True)
ax = axes.ravel()
titles = ['Original', 'Percentile mean', 'Bilateral mean', 'Local mean']
imgs = [image, percentile_result, bilateral_result, normal_result]
for n in range(0, len(imgs)):
ax[n].imshow(imgs[n])
ax[n].set_title(titles[n])
ax[n].set_adjustable('box-forced')
ax[n].axis('off')
plt.show()
| bsd-3-clause |
sinhrks/scikit-learn | examples/svm/plot_custom_kernel.py | 171 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
We create a custom kernel:
                 (2  0)
    k(X, Y) = X  (    ) Y.T
                 (0  1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
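# Quick sanity check of the kernel (illustration): with M = diag(2, 1),
# k([1, 2], [3, 4]) = 2*1*3 + 1*2*4 = 14.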
h = .02 # step size in the mesh
# we create an instance of SVM and fit our data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max] x [y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
IsCoolEntertainment/debpkg_python-pyzmq | examples/bench/plot_latency.py | 12 | 2229 | """Plot latency data from messaging benchmarks.
To generate the data for each library, I started the server and then did
the following for each client::
from xmlrpc_client import client
for i in range(9):
s = '0'*10**i
print s
%timeit client.echo(s)
"""
from matplotlib.pylab import *
rawdata = """# Data in milliseconds
Bytes JSONRPC PYRO XMLRPC pyzmq_copy pyzmq_nocopy
1 2.15 0.186 2.07 0.111 0.136
10 2.49 0.187 1.87 0.115 0.137
100 2.5 0.189 1.9 0.126 0.138
1000 2.54 0.196 1.91 0.129 0.141
10000 2.91 0.271 2.77 0.204 0.197
100000 6.65 1.44 9.17 0.961 0.546
1000000 50.2 15.8 81.5 8.39 2.25
10000000 491 159 816 91.7 25.2
100000000 5010 1560 8300 893 248
"""
with open('latency.csv','w') as f:
f.writelines(rawdata)
data = csv2rec('latency.csv',delimiter='\t')
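# Units: the raw measurements are in milliseconds, so multiplying by 1000
# below plots microseconds, and 1000/ms gives messages per second.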
loglog(data.bytes, data.xmlrpc*1000, label='XMLRPC')
loglog(data.bytes, data.jsonrpc*1000, label='JSONRPC')
loglog(data.bytes, data.pyro*1000, label='Pyro')
loglog(data.bytes, data.pyzmq_nocopy*1000, label='PyZMQ')
loglog(data.bytes, len(data.bytes)*[60], label='Ping')
legend(loc=2)
title('Latency')
xlabel('Number of bytes')
ylabel('Round trip latency ($\mu s$)')
grid(True)
show()
savefig('latency.png')
clf()
semilogx(data.bytes, 1000/data.xmlrpc, label='XMLRPC')
semilogx(data.bytes, 1000/data.jsonrpc, label='JSONRPC')
semilogx(data.bytes, 1000/data.pyro, label='Pyro')
semilogx(data.bytes, 1000/data.pyzmq_nocopy, label='PyZMQ')
legend(loc=1)
xlabel('Number of bytes')
ylabel('Message/s')
title('Message Throughput')
grid(True)
show()
savefig('msgs_sec.png')
clf()
loglog(data.bytes, 1000/data.xmlrpc, label='XMLRPC')
loglog(data.bytes, 1000/data.jsonrpc, label='JSONRPC')
loglog(data.bytes, 1000/data.pyro, label='Pyro')
loglog(data.bytes, 1000/data.pyzmq_nocopy, label='PyZMQ')
legend(loc=3)
xlabel('Number of bytes')
ylabel('Message/s')
title('Message Throughput')
grid(True)
show()
savefig('msgs_sec_log.png')
clf()
semilogx(data.bytes, data.pyro/data.pyzmq_nocopy, label="No-copy")
semilogx(data.bytes, data.pyro/data.pyzmq_copy, label="Copy")
xlabel('Number of bytes')
ylabel('Ratio throughputs')
title('PyZMQ Throughput/Pyro Throughput')
grid(True)
legend(loc=2)
show()
savefig('msgs_sec_ratio.png')
| lgpl-3.0 |
piyueh/PetIBM | examples/api_examples/oscillatingcylinder2dRe100_GPU/scripts/plotDragCoefficient.py | 2 | 2057 | """
Plot the drag coefficient over 4 oscillation cycles.
"""
import pathlib
import numpy
from matplotlib import pyplot
from scipy import signal
# Read the drag force from file.
simu_dir = pathlib.Path(__file__).parents[1]
data_dir = simu_dir / 'output'
filepath = data_dir / 'forces-0.txt'
with open(filepath, 'r') as infile:
t, fx = numpy.loadtxt(infile, dtype=numpy.float64,
unpack=True, usecols=(0, 1))
# Set the parameters of the kinematics.
KC = 5.0 # Keulegan-Carpenter number
D = 1.0 # cylinder diameter
f = 0.2 # frequency of oscillation
w = 2 * numpy.pi * f # angular frequency
Am = KC * D / (2 * numpy.pi) # amplitude of oscillation
rho = 1.0 # fluid density
Um = w * Am # maximum translational velocity of cylinder
V = numpy.pi * D**2 / 4 # volume of cylinder
# Add force due to body acceleration.
ax = w**2 * Am * numpy.sin(w * t)
fx += rho * V * ax
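# (Interpretation, not stated in the original script: ax matches a prescribed
# displacement x(t) = -Am*sin(w*t), and rho*V*ax adds back the inertial force
# of the accelerating frame in which the hydrodynamic force was computed.)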
# Get the drag coefficient.
cd = fx / (0.5 * rho * Um**2 * D)
# Compute and print info about the extrema of the drag coefficient.
idx_min = signal.argrelextrema(fx, numpy.less_equal, order=100)[0][1:-1]
t_min = t[idx_min]
print('Non-dimensional time-interval between minima:\n\t{}'
.format(f * (t_min[1:] - t_min[:-1])))
cd_min = cd[idx_min]
print('Drag coefficient valleys: {}'.format(cd_min))
idx_max = signal.argrelextrema(fx, numpy.greater_equal, order=100)[0][1:]
t_max = t[idx_max]
print('Non-dimensional time-interval between maxima:\n\t{}'
.format(f * (t_max[1:] - t_max[:-1])))
cd_max = cd[idx_max]
print('Drag coefficient peaks: {}'.format(cd_max))
# Plot the drag coefficient over the 4 cycles.
pyplot.rcParams['font.size'] = 16
pyplot.rcParams['font.family'] = 'serif'
fig, ax = pyplot.subplots(figsize=(8.0, 4.0))
ax.grid()
ax.set_xlabel('$t / T$')
ax.set_ylabel('$C_D$')
ax.plot(f * t, cd)
ax.axis((0.0, 4.0, -6.0, 6.0))
fig.tight_layout()
pyplot.show()
# Save the figure.
fig_dir = simu_dir / 'figures'
fig_dir.mkdir(parents=True, exist_ok=True)
filepath = fig_dir / 'dragCoefficient.png'
fig.savefig(str(filepath), dpi=300)
| bsd-3-clause |
lammy/artisan | setup-mac3.py | 9 | 8968 | """
This is a setup.py script generated by py2applet
Usage:
python3 setup-mac3.py py2app
"""
# manually remove sample-data mpl subdirectory from Python installation:
# sudo rm -rf /Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/site-packages/matplotlib/mpl-data/sample_data
from distutils import sysconfig
their_parse_makefile = sysconfig.parse_makefile
def my_parse_makefile(filename, g):
their_parse_makefile(filename, g)
g['MACOSX_DEPLOYMENT_TARGET'] = '10.6'
sysconfig.parse_makefile = my_parse_makefile
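# The monkey-patch above forces the deployment target recorded in Python's
# Makefile to 10.6 so that the bundle built by py2app also runs on older
# OS X releases (a build-time workaround, not a py2app option).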
import sys, os
from setuptools import setup
import string
from plistlib import Plist
import artisanlib
# current version of Artisan
VERSION = artisanlib.__version__
LICENSE = 'GNU General Public License (GPL)'
QTDIR = r'/Developer/Applications/Qt/'
APP = ['artisan.py']
DATA_FILES = [
"LICENSE.txt",
("../Resources/qt_plugins/iconengines", [QTDIR + r'/plugins/iconengines/libqsvgicon.dylib']),
("../Resources/qt_plugins/imageformats", [QTDIR + r'/plugins/imageformats/libqsvg.dylib']),
("../Resources/qt_plugins/imageformats", [QTDIR + r'/plugins/imageformats/libqjpeg.dylib']),
("../Resources/qt_plugins/imageformats", [QTDIR + r'/plugins/imageformats/libqgif.dylib']),
("../Resources/qt_plugins/imageformats", [QTDIR + r'/plugins/imageformats/libqtiff.dylib']),
# standard QT translation needed to get the Application menu bar and
# the standard dialog elements translated
("../translations", [QTDIR + r'/translations/qt_de.qm']),
("../translations", [QTDIR + r'/translations/qt_es.qm']),
("../translations", [QTDIR + r'/translations/qt_fr.qm']),
("../translations", [QTDIR + r'/translations/qt_sv.qm']),
("../translations", [QTDIR + r'/translations/qt_zh_CN.qm']),
("../translations", [QTDIR + r'/translations/qt_zh_TW.qm']),
("../translations", [QTDIR + r'/translations/qt_ko.qm']),
("../translations", [QTDIR + r'/translations/qt_pt.qm']),
("../translations", [QTDIR + r'/translations/qt_ru.qm']),
("../translations", [QTDIR + r'/translations/qt_ar.qm']),
("../translations", [QTDIR + r'/translations/qt_ja.qm']),
("../translations", [QTDIR + r'/translations/qt_hu.qm']),
("../translations", [r"translations/artisan_de.qm"]),
("../translations", [r"translations/artisan_es.qm"]),
("../translations", [r"translations/artisan_fr.qm"]),
("../translations", [r"translations/artisan_sv.qm"]),
("../translations", [r'translations/artisan_zh_CN.qm']),
("../translations", [r'translations/artisan_zh_TW.qm']),
("../translations", [r'translations/artisan_ko.qm']),
("../translations", [r'translations/artisan_pt.qm']),
("../translations", [r'translations/artisan_ru.qm']),
("../translations", [r'translations/artisan_ar.qm']),
("../translations", [r"translations/artisan_it.qm"]),
("../translations", [r"translations/artisan_el.qm"]),
("../translations", [r"translations/artisan_no.qm"]),
("../translations", [r"translations/artisan_nl.qm"]),
("../translations", [r"translations/artisan_fi.qm"]),
("../translations", [r"translations/artisan_tr.qm"]),
("../translations", [r"translations/artisan_ja.qm"]),
("../translations", [r"translations/artisan_hu.qm"]),
("../translations", [r"translations/artisan_he.qm"]),
("../Resources", [r"qt.conf"]),
("../Resources", [r"artisanProfile.icns"]),
("../Resources", [r"artisanAlarms.icns"]),
("../Resources", [r"artisanPalettes.icns"]),
("../Resources", [r"artisanWheel.icns"]),
("../Resources", [r"includes/Humor-Sans.ttf"]),
]
plist = Plist.fromFile('Info3.plist')
plist.update({ 'CFBundleDisplayName': 'Artisan',
'CFBundleGetInfoString': 'Artisan, Roast Logger',
'CFBundleIdentifier': 'com.google.code.p.Artisan',
'CFBundleShortVersionString': VERSION,
'CFBundleVersion': 'Artisan ' + VERSION,
'LSMinimumSystemVersion': '10.6',
'LSMultipleInstancesProhibited': 'false',
'LSPrefersPPC': False,
'LSArchitecturePriority': 'x86_64',
'NSHumanReadableCopyright': LICENSE,
})
OPTIONS = {
'strip':True,
    'argv_emulation': False, # this would confuse GUI processing
'semi_standalone': False,
'site_packages': True,
'dylib_excludes': ['phonon','QtDBus','QtDeclarative','QtDesigner',
'QtHelp','QtMultimedia','QtNetwork',
'QtOpenGL','QtScript','QtScriptTools',
'QtSql','QtTest','QtXmlPatterns','QtWebKit'],
# 'packages': ['matplotlib'], # with this the full pkg is copied to Resources/lib/python3.4
'packages': ['yoctopuce'],
'optimize': 2,
'compressed': True,
'iconfile': 'artisan.icns',
'arch': 'x86_64',
'matplotlib_backends': '-', # '-' for only-imported or explicit 'qt4agg'; without this the full pkg is copied to Resources/lib/python3.4
'includes': ['serial',
'PyQt4',
'PyQt4.QtCore',
'PyQt4.QtGui',
'PyQt4.QtSvg',
'PyQt4.QtXml'],
'excludes' : ['_tkagg','_ps','_fltkagg','Tkinter','Tkconstants',
'_agg','_cairo','_gtk','gtkcairo','pydoc','sqlite3',
'bsddb','curses','tcl',
'_wxagg','_gtagg','_cocoaagg','_wx'],
'plist' : plist}
setup(
name='Artisan',
version=VERSION,
author='YOUcouldbeTOO',
author_email='zaub.ERASE.org@yahoo.com',
license=LICENSE,
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS},
setup_requires=['py2app']
)
os.system(r'cp README.txt dist')
os.system(r'cp LICENSE.txt dist')
os.system(r'mkdir dist/Wheels')
os.system(r'mkdir dist/Wheels/Cupping')
os.system(r'mkdir dist/Wheels/Other')
os.system(r'mkdir dist/Wheels/Roasting')
os.system(r'cp Wheels/Cupping/* dist/Wheels/Cupping')
os.system(r'cp Wheels/Other/* dist/Wheels/Other')
os.system(r'cp Wheels/Roasting/* dist/Wheels/Roasting')
os.chdir('./dist')
# to prevent the error "Artisan.app/Contents/Resources/lib/python3.3/config-3.3m/Makefile'" on startup
# generated by v0.8 of py2app:
#os.system(r'cp /Library/Frameworks/Python.framework/Versions/3.3/lib/python3.3/config-3.3m/Makefile ./Artisan.app/Contents/Resources/lib/python3.3/config-3.3m/')
# delete unused Qt.framework files (py2app exclude does not seem to work)
print('*** Removing unused Qt frameworks ***')
for fw in [
'phonon',
'QtDeclarative',
'QtHelp',
'QtMultimedia',
'QtNetwork',
'QtOpenGL',
'QtScript',
'QtScriptTools',
'QtSql',
'QtTest',
'QtWebKit',
'QtXMLPatterns']:
for root,dirs,files in os.walk('./Artisan.app/Contents/Frameworks/' + fw + ".framework"):
for file in files:
print('Deleting', file)
os.remove(os.path.join(root,file))
print('*** Removing Qt debug libs ***')
for root, dirs, files in os.walk('.'):
for file in files:
if 'debug' in file:
print('Deleting', file)
os.remove(os.path.join(root,file))
elif file.startswith('test_'):
print('Deleting', file)
os.remove(os.path.join(root,file))
elif '_tests' in file:
print('Deleting', file)
os.remove(os.path.join(root,file))
elif file.endswith('.pyc') and file != "site.pyc":
print('Deleting', file)
os.remove(os.path.join(root,file))
# remove also all .h .in .cpp .cc .html files
elif file.endswith('.h') and file != "pyconfig.h":
print('Deleting', file)
os.remove(os.path.join(root,file))
elif file.endswith('.in'):
print('Deleting', file)
os.remove(os.path.join(root,file))
elif file.endswith('.cpp'):
print('Deleting', file)
os.remove(os.path.join(root,file))
elif file.endswith('.cc'):
print('Deleting', file)
os.remove(os.path.join(root,file))
# .afm files should not be removed as without matplotlib will fail on startup
# elif file.endswith('.afm'):
# print('Deleting', file)
# os.remove(os.path.join(root,file))
# remove test files
for dir in dirs:
if 'tests' in dir:
for r,d,f in os.walk(os.path.join(root,dir)):
for fl in f:
print('Deleting', os.path.join(r,fl))
os.remove(os.path.join(r,fl))
os.chdir('..')
os.system(r"rm artisan-mac-" + VERSION + r".dmg")
os.system(r'hdiutil create artisan-mac-' + VERSION + r'.dmg -volname "Artisan" -fs HFS+ -srcfolder "dist"')
# otool -L dist/Artisan.app/Contents/MacOS/Artisan
| gpl-3.0 |
matthew-tucker/mne-python | examples/time_frequency/plot_source_power_spectrum.py | 19 | 1929 | """
=========================================================
Compute power spectrum densities of the sources with dSPM
=========================================================
Returns an STC file containing the PSD (in dB) of each of the sources.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, compute_source_psd
print(__doc__)
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_label = data_path + '/MEG/sample/labels/Aud-lh.label'
# Setup for reading the raw data
raw = io.Raw(raw_fname, verbose=False)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, exclude='bads')
tmin, tmax = 0, 120 # use the first 120s of data
fmin, fmax = 4, 100 # look at frequencies between 4 and 100Hz
n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
label = mne.read_label(fname_label)
stc = compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
pick_ori="normal", n_fft=n_fft, label=label)
stc.save('psd_dSPM')
###############################################################################
# View PSD of sources in label
plt.plot(1e3 * stc.times, stc.data.T)
plt.xlabel('Frequency (Hz)')
plt.ylabel('PSD (dB)')
plt.title('Source Power Spectrum (PSD)')
plt.show()
| bsd-3-clause |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/jupyter_core/tests/dotipython/profile_default/ipython_console_config.py | 24 | 21691 | # Configuration file for ipython-console.
c = get_config()
#------------------------------------------------------------------------------
# ZMQTerminalIPythonApp configuration
#------------------------------------------------------------------------------
# ZMQTerminalIPythonApp will inherit config from: TerminalIPythonApp,
# BaseIPythonApplication, Application, InteractiveShellApp, IPythonConsoleApp,
# ConnectionFileMixin
# Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
# c.ZMQTerminalIPythonApp.hide_initial_ns = True
# set the heartbeat port [default: random]
# c.ZMQTerminalIPythonApp.hb_port = 0
# A list of dotted module names of IPython extensions to load.
# c.ZMQTerminalIPythonApp.extensions = []
# Execute the given command string.
# c.ZMQTerminalIPythonApp.code_to_run = ''
# Path to the ssh key to use for logging in to the ssh server.
# c.ZMQTerminalIPythonApp.sshkey = ''
# The date format used by logging formatters for %(asctime)s
# c.ZMQTerminalIPythonApp.log_datefmt = '%Y-%m-%d %H:%M:%S'
# set the control (ROUTER) port [default: random]
# c.ZMQTerminalIPythonApp.control_port = 0
# Reraise exceptions encountered loading IPython extensions?
# c.ZMQTerminalIPythonApp.reraise_ipython_extension_failures = False
# Set the log level by value or name.
# c.ZMQTerminalIPythonApp.log_level = 30
# Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
# c.ZMQTerminalIPythonApp.exec_PYTHONSTARTUP = True
# Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
# c.ZMQTerminalIPythonApp.pylab = None
# Run the module as a script.
# c.ZMQTerminalIPythonApp.module_to_run = ''
# Whether to display a banner upon starting IPython.
# c.ZMQTerminalIPythonApp.display_banner = True
# dotted module name of an IPython extension to load.
# c.ZMQTerminalIPythonApp.extra_extension = ''
# Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
# c.ZMQTerminalIPythonApp.verbose_crash = False
# Whether to overwrite existing config files when copying
# c.ZMQTerminalIPythonApp.overwrite = False
# The IPython profile to use.
# c.ZMQTerminalIPythonApp.profile = 'default'
# If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
# c.ZMQTerminalIPythonApp.force_interact = False
# List of files to run at IPython startup.
# c.ZMQTerminalIPythonApp.exec_files = []
# Start IPython quickly by skipping the loading of config files.
# c.ZMQTerminalIPythonApp.quick = False
# The Logging format template
# c.ZMQTerminalIPythonApp.log_format = '[%(name)s]%(highlevel)s %(message)s'
# Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
# c.ZMQTerminalIPythonApp.copy_config_files = False
# set the stdin (ROUTER) port [default: random]
# c.ZMQTerminalIPythonApp.stdin_port = 0
# Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
# c.ZMQTerminalIPythonApp.extra_config_file = ''
# lines of code to run at IPython startup.
# c.ZMQTerminalIPythonApp.exec_lines = []
# Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk3', 'osx',
# 'pyglet', 'qt', 'qt5', 'tk', 'wx').
# c.ZMQTerminalIPythonApp.gui = None
# A file to be run
# c.ZMQTerminalIPythonApp.file_to_run = ''
# Configure matplotlib for interactive use with the default matplotlib backend.
# c.ZMQTerminalIPythonApp.matplotlib = None
# Suppress warning messages about legacy config files
# c.ZMQTerminalIPythonApp.ignore_old_config = False
# set the iopub (PUB) port [default: random]
# c.ZMQTerminalIPythonApp.iopub_port = 0
#
# c.ZMQTerminalIPythonApp.transport = 'tcp'
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.ZMQTerminalIPythonApp.connection_file = ''
# The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
# c.ZMQTerminalIPythonApp.ipython_dir = ''
# The SSH server to use to connect to the kernel.
# c.ZMQTerminalIPythonApp.sshserver = ''
# Set to display confirmation dialog on exit. You can always use 'exit' or
# 'quit', to force a direct exit without any confirmation.
# c.ZMQTerminalIPythonApp.confirm_exit = True
# set the shell (ROUTER) port [default: random]
# c.ZMQTerminalIPythonApp.shell_port = 0
# The name of the default kernel to start.
# c.ZMQTerminalIPythonApp.kernel_name = 'python'
# If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
# c.ZMQTerminalIPythonApp.pylab_import_all = True
# Connect to an already running kernel
# c.ZMQTerminalIPythonApp.existing = ''
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.ZMQTerminalIPythonApp.ip = ''
#------------------------------------------------------------------------------
# ZMQTerminalInteractiveShell configuration
#------------------------------------------------------------------------------
# A subclass of TerminalInteractiveShell that uses the 0MQ kernel
# ZMQTerminalInteractiveShell will inherit config from:
# TerminalInteractiveShell, InteractiveShell
#
# c.ZMQTerminalInteractiveShell.history_length = 10000
# auto editing of files with syntax errors.
# c.ZMQTerminalInteractiveShell.autoedit_syntax = False
# If True, anything that would be passed to the pager will be displayed as
# regular output instead.
# c.ZMQTerminalInteractiveShell.display_page = False
#
# c.ZMQTerminalInteractiveShell.debug = False
# 'all', 'last', 'last_expr' or 'none', specifying which nodes should be run
# interactively (displaying output from expressions).
# c.ZMQTerminalInteractiveShell.ast_node_interactivity = 'last_expr'
# Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
# c.ZMQTerminalInteractiveShell.logstart = False
# Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 20 (if you provide a value
# less than 20, it is reset to 0 and a warning is issued). This limit is
# defined because otherwise you'll spend more time re-flushing a too small cache
# than working
# c.ZMQTerminalInteractiveShell.cache_size = 1000
# The shell program to be used for paging.
# c.ZMQTerminalInteractiveShell.pager = 'less'
# The name of the logfile to use.
# c.ZMQTerminalInteractiveShell.logfile = ''
# Save multi-line entries as one entry in readline history
# c.ZMQTerminalInteractiveShell.multiline_history = True
#
# c.ZMQTerminalInteractiveShell.readline_remove_delims = '-/~'
# Enable magic commands to be called without the leading %.
# c.ZMQTerminalInteractiveShell.automagic = True
# Prefix to add to outputs coming from clients other than this one.
#
# Only relevant if include_other_output is True.
# c.ZMQTerminalInteractiveShell.other_output_prefix = '[remote] '
#
# c.ZMQTerminalInteractiveShell.readline_parse_and_bind = ['tab: complete', '"\\C-l": clear-screen', 'set show-all-if-ambiguous on', '"\\C-o": tab-insert', '"\\C-r": reverse-search-history', '"\\C-s": forward-search-history', '"\\C-p": history-search-backward', '"\\C-n": history-search-forward', '"\\e[A": history-search-backward', '"\\e[B": history-search-forward', '"\\C-k": kill-line', '"\\C-u": unix-line-discard']
# Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
# c.ZMQTerminalInteractiveShell.color_info = True
# Callable object called via 'callable' image handler with one argument, `data`,
# which is `msg["content"]["data"]` where `msg` is the message from iopub
# channel. For example, you can find base64-encoded PNG data as
# `data['image/png']`.
# c.ZMQTerminalInteractiveShell.callable_image_handler = None
# Command to invoke an image viewer program when you are using 'stream' image
# handler. This option is a list of strings where the first element is the
# command itself and the remaining elements are its options. Raw image data
# is given to the program on STDIN.
# c.ZMQTerminalInteractiveShell.stream_image_handler = []
#
# c.ZMQTerminalInteractiveShell.separate_out2 = ''
# Autoindent IPython code entered interactively.
# c.ZMQTerminalInteractiveShell.autoindent = True
# The part of the banner to be printed after the profile
# c.ZMQTerminalInteractiveShell.banner2 = ''
# Don't call post-execute functions that have failed in the past.
# c.ZMQTerminalInteractiveShell.disable_failing_post_execute = False
# Deprecated, use PromptManager.out_template
# c.ZMQTerminalInteractiveShell.prompt_out = 'Out[\\#]: '
#
# c.ZMQTerminalInteractiveShell.object_info_string_level = 0
#
# c.ZMQTerminalInteractiveShell.separate_out = ''
# Automatically call the pdb debugger after every exception.
# c.ZMQTerminalInteractiveShell.pdb = False
# Deprecated, use PromptManager.in_template
# c.ZMQTerminalInteractiveShell.prompt_in1 = 'In [\\#]: '
#
# c.ZMQTerminalInteractiveShell.separate_in = '\n'
#
# c.ZMQTerminalInteractiveShell.wildcards_case_sensitive = True
# Enable auto setting the terminal title.
# c.ZMQTerminalInteractiveShell.term_title = False
# Enable deep (recursive) reloading by default. IPython can use the deep_reload
# module which reloads changes in modules recursively (it replaces the reload()
# function, so you don't need to change anything to use it). deep_reload()
# forces a full reload of modules whose code may have changed, which the default
# reload() function does not. When deep_reload is off, IPython will use the
# normal reload(), but deep_reload will still be available as dreload().
# c.ZMQTerminalInteractiveShell.deep_reload = False
# Deprecated, use PromptManager.in2_template
# c.ZMQTerminalInteractiveShell.prompt_in2 = ' .\\D.: '
# Whether to include output from clients other than this one sharing the same
# kernel.
#
# Outputs are not displayed until enter is pressed.
# c.ZMQTerminalInteractiveShell.include_other_output = False
# Preferred object representation MIME type in order. First matched MIME type
# will be used.
# c.ZMQTerminalInteractiveShell.mime_preference = ['image/png', 'image/jpeg', 'image/svg+xml']
#
# c.ZMQTerminalInteractiveShell.readline_use = True
# Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
# c.ZMQTerminalInteractiveShell.autocall = 0
# The part of the banner to be printed before the profile
# c.ZMQTerminalInteractiveShell.banner1 = 'Python 3.4.3 |Continuum Analytics, Inc.| (default, Mar 6 2015, 12:07:41) \nType "copyright", "credits" or "license" for more information.\n\nIPython 3.1.0 -- An enhanced Interactive Python.\nAnaconda is brought to you by Continuum Analytics.\nPlease check out: http://continuum.io/thanks and https://binstar.org\n? -> Introduction and overview of IPython\'s features.\n%quickref -> Quick reference.\nhelp -> Python\'s own help system.\nobject? -> Details about \'object\', use \'object??\' for extra details.\n'
# Handler for image type output. This is useful, for example, when connecting
# to the kernel in which pylab inline backend is activated. There are four
# handlers defined. 'PIL': Use Python Imaging Library to popup image; 'stream':
# Use an external program to show the image. Image will be fed into the STDIN
# of the program. You will need to configure `stream_image_handler`;
# 'tempfile': Use an external program to show the image. Image will be saved in
# a temporally file and the program is called with the temporally file. You
# will need to configure `tempfile_image_handler`; 'callable': You can set any
# Python callable which is called with the image data. You will need to
# configure `callable_image_handler`.
# c.ZMQTerminalInteractiveShell.image_handler = None
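# Illustrative (non-default) values, e.g. using the system viewer on OS X:
# c.ZMQTerminalInteractiveShell.image_handler = 'tempfile'
# c.ZMQTerminalInteractiveShell.tempfile_image_handler = ['open', '{file}']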
# Set the color scheme (NoColor, Linux, or LightBG).
# c.ZMQTerminalInteractiveShell.colors = 'LightBG'
# Set the editor used by IPython (default to $EDITOR/vi/notepad).
# c.ZMQTerminalInteractiveShell.editor = 'mate -w'
# Show rewritten input, e.g. for autocall.
# c.ZMQTerminalInteractiveShell.show_rewritten_input = True
#
# c.ZMQTerminalInteractiveShell.xmode = 'Context'
#
# c.ZMQTerminalInteractiveShell.quiet = False
# A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
# c.ZMQTerminalInteractiveShell.ast_transformers = []
#
# c.ZMQTerminalInteractiveShell.ipython_dir = ''
# Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
# c.ZMQTerminalInteractiveShell.confirm_exit = True
# Deprecated, use PromptManager.justify
# c.ZMQTerminalInteractiveShell.prompts_pad_left = True
# Timeout for giving up on a kernel (in seconds).
#
# On first connect and restart, the console tests whether the kernel is running
# and responsive by sending kernel_info_requests. This sets the timeout in
# seconds for how long the kernel can take before being presumed dead.
# c.ZMQTerminalInteractiveShell.kernel_timeout = 60
# Number of lines of your screen, used to control printing of very long strings.
# Strings longer than this number of lines will be sent through a pager instead
# of directly printed. The default value for this is 0, which means IPython
# will auto-detect your screen size every time it needs to print certain
# potentially long strings (this doesn't change the behavior of the 'print'
# keyword, it's only triggered internally). If for some reason this isn't
# working well (it needs curses support), specify it yourself. Otherwise don't
# change the default.
# c.ZMQTerminalInteractiveShell.screen_length = 0
# Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
# c.ZMQTerminalInteractiveShell.logappend = ''
# Command to invoke an image viewer program when you are using 'tempfile' image
# handler. This option is a list of strings where the first element is the
# command itself and the remaining elements are its options. You can use
# {file} and {format} in the string to represent the location of the generated
# image file and image format.
# c.ZMQTerminalInteractiveShell.tempfile_image_handler = []
#------------------------------------------------------------------------------
# KernelManager configuration
#------------------------------------------------------------------------------
# Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
# KernelManager will inherit config from: ConnectionFileMixin
# set the heartbeat port [default: random]
# c.KernelManager.hb_port = 0
# set the stdin (ROUTER) port [default: random]
# c.KernelManager.stdin_port = 0
#
# c.KernelManager.transport = 'tcp'
# JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
# c.KernelManager.connection_file = ''
# set the control (ROUTER) port [default: random]
# c.KernelManager.control_port = 0
# set the shell (ROUTER) port [default: random]
# c.KernelManager.shell_port = 0
# Should we autorestart the kernel if it dies.
# c.KernelManager.autorestart = False
# DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, IPython does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it is given on the IPython
# command line.
# c.KernelManager.kernel_cmd = []
# Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
# c.KernelManager.ip = ''
# set the iopub (PUB) port [default: random]
# c.KernelManager.iopub_port = 0
#------------------------------------------------------------------------------
# ProfileDir configuration
#------------------------------------------------------------------------------
# An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
# Set the profile location directly. This overrides the logic used by the
# `profile` option.
# c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# Session configuration
#------------------------------------------------------------------------------
# Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters
# ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
# The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
# c.Session.signature_scheme = 'hmac-sha256'
# The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
# c.Session.digest_history_size = 65536
# The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
# c.Session.unpacker = 'json'
# The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
# c.Session.packer = 'json'
# Username for the Session. Default is your system username.
# c.Session.username = 'minrk'
# Debug output in the Session
# c.Session.debug = False
# path to file containing execution key.
# c.Session.keyfile = ''
# The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
# c.Session.item_threshold = 64
# Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
# c.Session.buffer_threshold = 1024
# The UUID identifying this session.
# c.Session.session = ''
# Threshold (in bytes) beyond which a buffer should be sent without copying.
# c.Session.copy_threshold = 65536
# execution key, for signing messages.
# c.Session.key = b''
# Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
# c.Session.metadata = {}
| mit |
wdurhamh/statsmodels | statsmodels/tsa/arima_model.py | 9 | 77514 | # Note: The information criteria add 1 to the number of parameters
# whenever the model has an AR or MA term since, in principle,
# the variance could be treated as a free parameter and restricted
# This code does not allow this, but it adds consistency with other
# packages such as gretl and X12-ARIMA
from __future__ import absolute_import
from statsmodels.compat.python import string_types, range
# for 2to3 with extensions
from datetime import datetime
import numpy as np
from scipy import optimize
from scipy.stats import t, norm
from scipy.signal import lfilter
from numpy import dot, log, zeros, pi
from numpy.linalg import inv
from statsmodels.tools.decorators import (cache_readonly,
resettable_cache)
import statsmodels.tsa.base.tsa_model as tsbase
import statsmodels.base.wrapper as wrap
from statsmodels.regression.linear_model import yule_walker, GLS
from statsmodels.tsa.tsatools import (lagmat, add_trend,
_ar_transparams, _ar_invtransparams,
_ma_transparams, _ma_invtransparams,
unintegrate, unintegrate_levels)
from statsmodels.tsa.vector_ar import util
from statsmodels.tsa.ar_model import AR
from statsmodels.tsa.arima_process import arma2ma
from statsmodels.tools.numdiff import approx_hess_cs, approx_fprime_cs
from statsmodels.tsa.base.datetools import _index_date
from statsmodels.tsa.kalmanf import KalmanFilter
_armax_notes = """
Notes
-----
If exogenous variables are given, then the model that is fit is
.. math::
    \\phi(L)(y_t - X_t\\beta) = \\theta(L)\\epsilon_t
where :math:`\\phi` and :math:`\\theta` are polynomials in the lag
operator, :math:`L`. This is the regression model with ARMA errors,
or ARMAX model. This specification is used, whether or not the model
is fit using conditional sum of square or maximum-likelihood, using
the `method` argument in
:meth:`statsmodels.tsa.arima_model.%(Model)s.fit`. Therefore, for
now, `css` and `mle` refer to estimation methods only. This may
change for the case of the `css` model in future versions.
"""
_arma_params = """\
endog : array-like
The endogenous variable.
order : iterable
The (p,q) order of the model for the number of AR parameters,
differences, and MA parameters to use.
exog : array-like, optional
An optional array of exogenous variables. This should *not* include a
constant or trend. You can specify this in the `fit` method."""
_arma_model = "Autoregressive Moving Average ARMA(p,q) Model"
_arima_model = "Autoregressive Integrated Moving Average ARIMA(p,d,q) Model"
_arima_params = """\
endog : array-like
The endogenous variable.
order : iterable
The (p,d,q) order of the model for the number of AR parameters,
differences, and MA parameters to use.
exog : array-like, optional
An optional array of exogenous variables. This should *not* include a
constant or trend. You can specify this in the `fit` method."""
_predict_notes = """
Notes
-----
Use the results predict method instead.
"""
_results_notes = """
Notes
-----
It is recommended to use dates with the time-series models, as the
below will probably make clear. However, if ARIMA is used without
dates and/or `start` and `end` are given as indices, then these
indices are in terms of the *original*, undifferenced series. Ie.,
given some undifferenced observations::
1970Q1, 1
1970Q2, 1.5
1970Q3, 1.25
1970Q4, 2.25
1971Q1, 1.2
1971Q2, 4.1
1970Q1 is observation 0 in the original series. However, if we fit an
ARIMA(p,1,q) model then we lose this first observation through
differencing. Therefore, the first observation we can forecast (if
using exact MLE) is index 1. In the differenced series this is index
0, but we refer to it as 1 from the original series.
"""
_predict = """
%(Model)s model in-sample and out-of-sample prediction
Parameters
----------
%(params)s
start : int, str, or datetime
        Zero-indexed observation number at which to start forecasting, i.e.,
the first forecast is start. Can also be a date string to
parse or a datetime type.
end : int, str, or datetime
        Zero-indexed observation number at which to end forecasting, i.e.,
        the last forecast is end. Can also be a date string to
parse or a datetime type. However, if the dates index does not
have a fixed frequency, end must be an integer index if you
want out of sample prediction.
exog : array-like, optional
If the model is an ARMAX and out-of-sample forecasting is
requested, exog must be given. Note that you'll need to pass
`k_ar` additional lags for any exogenous variables. E.g., if you
fit an ARMAX(2, q) model and want to predict 5 steps, you need 7
observations to do this.
dynamic : bool, optional
The `dynamic` keyword affects in-sample prediction. If dynamic
is False, then the in-sample lagged values are used for
prediction. If `dynamic` is True, then in-sample forecasts are
used in place of lagged dependent variables. The first forecasted
value is `start`.
%(extra_params)s
Returns
-------
%(returns)s
%(extra_section)s
"""
_predict_returns = """predict : array
The predicted values.
"""
_arma_predict = _predict % {"Model" : "ARMA",
"params" : """
params : array-like
The fitted parameters of the model.""",
"extra_params" : "",
"returns" : _predict_returns,
"extra_section" : _predict_notes}
_arma_results_predict = _predict % {"Model" : "ARMA", "params" : "",
"extra_params" : "",
"returns" : _predict_returns,
"extra_section" : _results_notes}
_arima_predict = _predict % {"Model" : "ARIMA",
"params" : """params : array-like
The fitted parameters of the model.""",
"extra_params" : """typ : str {'linear', 'levels'}
- 'linear' : Linear prediction in terms of the differenced
endogenous variables.
- 'levels' : Predict the levels of the original endogenous
variables.\n""", "returns" : _predict_returns,
"extra_section" : _predict_notes}
_arima_results_predict = _predict % {"Model" : "ARIMA",
"params" : "",
"extra_params" :
"""typ : str {'linear', 'levels'}
- 'linear' : Linear prediction in terms of the differenced
endogenous variables.
- 'levels' : Predict the levels of the original endogenous
variables.\n""",
"returns" : _predict_returns,
"extra_section" : _results_notes}
_arima_plot_predict_example = """ Examples
--------
>>> import statsmodels.api as sm
>>> import matplotlib.pyplot as plt
>>> import pandas as pd
>>>
>>> dta = sm.datasets.sunspots.load_pandas().data[['SUNACTIVITY']]
>>> dta.index = pd.DatetimeIndex(start='1700', end='2009', freq='A')
>>> res = sm.tsa.ARMA(dta, (3, 0)).fit()
>>> fig, ax = plt.subplots()
>>> ax = dta.ix['1950':].plot(ax=ax)
>>> fig = res.plot_predict('1990', '2012', dynamic=True, ax=ax,
... plot_insample=False)
>>> plt.show()
.. plot:: plots/arma_predict_plot.py
"""
_plot_predict = ("""
Plot forecasts
""" + '\n'.join(_predict.split('\n')[2:])) % {
"params" : "",
"extra_params" : """alpha : float, optional
The confidence intervals for the forecasts are (1 - alpha)%
plot_insample : bool, optional
Whether to plot the in-sample series. Default is True.
ax : matplotlib.Axes, optional
Existing axes to plot with.""",
"returns" : """fig : matplotlib.Figure
The plotted Figure instance""",
"extra_section" : ('\n' + _arima_plot_predict_example +
'\n' + _results_notes)
}
_arima_plot_predict = ("""
Plot forecasts
""" + '\n'.join(_predict.split('\n')[2:])) % {
"params" : "",
"extra_params" : """alpha : float, optional
The confidence intervals for the forecasts are (1 - alpha)%
plot_insample : bool, optional
Whether to plot the in-sample series. Default is True.
ax : matplotlib.Axes, optional
Existing axes to plot with.""",
"returns" : """fig : matplotlib.Figure
The plotted Figure instance""",
"extra_section" : ('\n' + _arima_plot_predict_example +
'\n' +
'\n'.join(_results_notes.split('\n')[:3]) +
("""
This is hard-coded to only allow plotting of the forecasts in levels.
""") +
'\n'.join(_results_notes.split('\n')[3:]))
}
def cumsum_n(x, n):
if n:
n -= 1
x = np.cumsum(x)
return cumsum_n(x, n)
else:
return x
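# e.g. cumsum_n([1, 1, 1], 2) -> array([1, 3, 6]): cumsum applied twice.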
def _check_arima_start(start, k_ar, k_diff, method, dynamic):
if start < 0:
raise ValueError("The start index %d of the original series "
"has been differenced away" % start)
elif (dynamic or 'mle' not in method) and start < k_ar:
raise ValueError("Start must be >= k_ar for conditional MLE "
"or dynamic forecast. Got %d" % start)
def _get_predict_out_of_sample(endog, p, q, k_trend, k_exog, start, errors,
trendparam, exparams, arparams, maparams, steps,
method, exog=None):
"""
Returns endog, resid, mu of appropriate length for out of sample
prediction.
"""
if q:
resid = np.zeros(q)
if start and 'mle' in method or (start == p and not start == 0):
resid[:q] = errors[start-q:start]
elif start:
resid[:q] = errors[start-q-p:start-p]
else:
resid[:q] = errors[-q:]
else:
resid = None
y = endog
if k_trend == 1:
# use expectation not constant
if k_exog > 0:
#TODO: technically should only hold for MLE not
# conditional model. See #274.
# ensure 2-d for conformability
if np.ndim(exog) == 1 and k_exog == 1:
# have a 1d series of observations -> 2d
exog = exog[:, None]
elif np.ndim(exog) == 1:
# should have a 1d row of exog -> 2d
if len(exog) != k_exog:
raise ValueError("1d exog given and len(exog) != k_exog")
exog = exog[None, :]
X = lagmat(np.dot(exog, exparams), p, original='in', trim='both')
mu = trendparam * (1 - arparams.sum())
# arparams were reversed in unpack for ease later
mu = mu + (np.r_[1, -arparams[::-1]] * X).sum(1)[:, None]
else:
mu = trendparam * (1 - arparams.sum())
mu = np.array([mu]*steps)
elif k_exog > 0:
X = np.dot(exog, exparams)
#NOTE: you shouldn't have to give in-sample exog!
X = lagmat(X, p, original='in', trim='both')
mu = (np.r_[1, -arparams[::-1]] * X).sum(1)[:, None]
else:
mu = np.zeros(steps)
endog = np.zeros(p + steps - 1)
if p and start:
endog[:p] = y[start-p:start]
elif p:
endog[:p] = y[-p:]
return endog, resid, mu
def _arma_predict_out_of_sample(params, steps, errors, p, q, k_trend, k_exog,
endog, exog=None, start=0, method='mle'):
(trendparam, exparams,
arparams, maparams) = _unpack_params(params, (p, q), k_trend,
k_exog, reverse=True)
endog, resid, mu = _get_predict_out_of_sample(endog, p, q, k_trend, k_exog,
start, errors, trendparam,
exparams, arparams,
maparams, steps, method,
exog)
forecast = np.zeros(steps)
if steps == 1:
if q:
return mu[0] + np.dot(arparams, endog[:p]) + np.dot(maparams,
resid[:q])
else:
return mu[0] + np.dot(arparams, endog[:p])
if q:
i = 0 # if q == 1
else:
i = -1
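    # For the first min(q, steps - 1) forecasts the stored residuals still
    # enter through the MA terms; beyond lag q the recursion is purely AR
    # plus the trend/exog mean mu.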
for i in range(min(q, steps - 1)):
fcast = (mu[i] + np.dot(arparams, endog[i:i + p]) +
np.dot(maparams[:q - i], resid[i:i + q]))
forecast[i] = fcast
endog[i+p] = fcast
for i in range(i + 1, steps - 1):
fcast = mu[i] + np.dot(arparams, endog[i:i+p])
forecast[i] = fcast
endog[i+p] = fcast
#need to do one more without updating endog
forecast[steps - 1] = mu[steps - 1] + np.dot(arparams, endog[steps - 1:])
return forecast
def _arma_predict_in_sample(start, end, endog, resid, k_ar, method):
"""
Pre- and in-sample fitting for ARMA.
"""
if 'mle' in method:
fittedvalues = endog - resid # get them all then trim
else:
fittedvalues = endog[k_ar:] - resid
fv_start = start
if 'mle' not in method:
fv_start -= k_ar # start is in terms of endog index
fv_end = min(len(fittedvalues), end + 1)
return fittedvalues[fv_start:fv_end]
def _validate(start, k_ar, k_diff, dates, method):
if isinstance(start, (string_types, datetime)):
start = _index_date(start, dates)
start -= k_diff
if 'mle' not in method and start < k_ar - k_diff:
raise ValueError("Start must be >= k_ar for conditional "
"MLE or dynamic forecast. Got %s" % start)
return start
def _unpack_params(params, order, k_trend, k_exog, reverse=False):
p, q = order
k = k_trend + k_exog
maparams = params[k+p:]
arparams = params[k:k+p]
trend = params[:k_trend]
exparams = params[k_trend:k]
if reverse:
return trend, exparams, arparams[::-1], maparams[::-1]
return trend, exparams, arparams, maparams
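# Packed parameter layout assumed above (illustration):
# [trend (k_trend) | exog (k_exog) | ar (p) | ma (q)], e.g. an ARMAX(2, 1)
# with a constant and one exog variable packs as
# [const, beta, ar1, ar2, ma1].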
def _unpack_order(order):
k_ar, k_ma, k = order
k_lags = max(k_ar, k_ma+1)
return k_ar, k_ma, order, k_lags
def _make_arma_names(data, k_trend, order, exog_names):
k_ar, k_ma = order
exog_names = exog_names or []
ar_lag_names = util.make_lag_names([data.ynames], k_ar, 0)
ar_lag_names = [''.join(('ar.', i)) for i in ar_lag_names]
ma_lag_names = util.make_lag_names([data.ynames], k_ma, 0)
ma_lag_names = [''.join(('ma.', i)) for i in ma_lag_names]
trend_name = util.make_lag_names('', 0, k_trend)
# ensure exog_names stays unchanged when the `fit` method
# is called multiple times.
if exog_names[-k_ma:] == ma_lag_names and \
exog_names[-(k_ar+k_ma):-k_ma] == ar_lag_names and \
(not exog_names or not trend_name or trend_name[0] == exog_names[0]):
return exog_names
exog_names = trend_name + exog_names + ar_lag_names + ma_lag_names
return exog_names
def _make_arma_exog(endog, exog, trend):
k_trend = 1 # overwritten if no constant
if exog is None and trend == 'c': # constant only
exog = np.ones((len(endog), 1))
elif exog is not None and trend == 'c': # constant plus exogenous
exog = add_trend(exog, trend='c', prepend=True)
elif exog is not None and trend == 'nc':
# make sure it's not holding constant from last run
if exog.var() == 0:
exog = None
k_trend = 0
if trend == 'nc':
k_trend = 0
return k_trend, exog
def _check_estimable(nobs, n_params):
if nobs <= n_params:
raise ValueError("Insufficient degrees of freedom to estimate")
class ARMA(tsbase.TimeSeriesModel):
__doc__ = tsbase._tsa_doc % {"model" : _arma_model,
"params" : _arma_params, "extra_params" : "",
"extra_sections" : _armax_notes %
{"Model" : "ARMA"}}
def __init__(self, endog, order, exog=None, dates=None, freq=None,
missing='none'):
super(ARMA, self).__init__(endog, exog, dates, freq, missing=missing)
exog = self.data.exog # get it after it's gone through processing
_check_estimable(len(self.endog), sum(order))
self.k_ar = k_ar = order[0]
self.k_ma = k_ma = order[1]
self.k_lags = max(k_ar, k_ma+1)
if exog is not None:
if exog.ndim == 1:
exog = exog[:, None]
k_exog = exog.shape[1] # number of exog. variables excl. const
else:
k_exog = 0
self.k_exog = k_exog
def _fit_start_params_hr(self, order):
"""
Get starting parameters for fit.
Parameters
----------
order : iterable
(p,q,k) - AR lags, MA lags, and number of exogenous variables
including the constant.
Returns
-------
start_params : array
A first guess at the starting parameters.
Notes
-----
        If necessary, fits an AR process with the lag length selected according
        to the best BIC, and obtains the residuals. Then fits an ARMA(p,q) model via
OLS using these residuals for a first approximation. Uses a separate
OLS regression to find the coefficients of exogenous variables.
References
----------
Hannan, E.J. and Rissanen, J. 1982. "Recursive estimation of mixed
autoregressive-moving average order." `Biometrika`. 69.1.
"""
p, q, k = order
start_params = zeros((p+q+k))
endog = self.endog.copy() # copy because overwritten
exog = self.exog
if k != 0:
ols_params = GLS(endog, exog).fit().params
start_params[:k] = ols_params
endog -= np.dot(exog, ols_params).squeeze()
if q != 0:
if p != 0:
# make sure we don't run into small data problems in AR fit
nobs = len(endog)
maxlag = int(round(12*(nobs/100.)**(1/4.)))
if maxlag >= nobs:
maxlag = nobs - 1
armod = AR(endog).fit(ic='bic', trend='nc', maxlag=maxlag)
arcoefs_tmp = armod.params
p_tmp = armod.k_ar
# it's possible in small samples that optimal lag-order
# doesn't leave enough obs. No consistent way to fix.
if p_tmp + q >= len(endog):
raise ValueError("Proper starting parameters cannot"
" be found for this order with this "
"number of observations. Use the "
"start_params argument.")
resid = endog[p_tmp:] - np.dot(lagmat(endog, p_tmp,
trim='both'),
arcoefs_tmp)
if p < p_tmp + q:
endog_start = p_tmp + q - p
resid_start = 0
else:
endog_start = 0
resid_start = p - p_tmp - q
lag_endog = lagmat(endog, p, 'both')[endog_start:]
lag_resid = lagmat(resid, q, 'both')[resid_start:]
# stack ar lags and resids
X = np.column_stack((lag_endog, lag_resid))
coefs = GLS(endog[max(p_tmp + q, p):], X).fit().params
start_params[k:k+p+q] = coefs
else:
start_params[k+p:k+p+q] = yule_walker(endog, order=q)[0]
if q == 0 and p != 0:
arcoefs = yule_walker(endog, order=p)[0]
start_params[k:k+p] = arcoefs
# check AR coefficients
if p and not np.all(np.abs(np.roots(np.r_[1, -start_params[k:k + p]]
)) < 1):
raise ValueError("The computed initial AR coefficients are not "
"stationary\nYou should induce stationarity, "
"choose a different model order, or you can\n"
"pass your own start_params.")
# check MA coefficients
elif q and not np.all(np.abs(np.roots(np.r_[1, start_params[k + p:]]
)) < 1):
raise ValueError("The computed initial MA coefficients are not "
"invertible\nYou should induce invertibility, "
"choose a different model order, or you can\n"
"pass your own start_params.")
return start_params
def _fit_start_params(self, order, method):
if method != 'css-mle': # use Hannan-Rissanen to get start params
start_params = self._fit_start_params_hr(order)
else: # use CSS to get start params
func = lambda params: -self.loglike_css(params)
#start_params = [.1]*(k_ar+k_ma+k_exog) # different one for k?
start_params = self._fit_start_params_hr(order)
if self.transparams:
start_params = self._invtransparams(start_params)
bounds = [(None,)*2]*sum(order)
mlefit = optimize.fmin_l_bfgs_b(func, start_params,
approx_grad=True, m=12,
pgtol=1e-7, factr=1e3,
bounds=bounds, iprint=-1)
start_params = self._transparams(mlefit[0])
return start_params
def score(self, params):
"""
Compute the score function at params.
Notes
-----
This is a numerical approximation.
"""
return approx_fprime_cs(params, self.loglike, args=(False,))
def hessian(self, params):
"""
        Compute the Hessian at params.
Notes
-----
This is a numerical approximation.
"""
return approx_hess_cs(params, self.loglike, args=(False,))
def _transparams(self, params):
"""
        Transforms params to induce stationarity/invertibility.
        Reference
        ---------
        Jones (1980)
"""
k_ar, k_ma = self.k_ar, self.k_ma
k = self.k_exog + self.k_trend
newparams = np.zeros_like(params)
# just copy exogenous parameters
if k != 0:
newparams[:k] = params[:k]
# AR Coeffs
if k_ar != 0:
newparams[k:k+k_ar] = _ar_transparams(params[k:k+k_ar].copy())
# MA Coeffs
if k_ma != 0:
newparams[k+k_ar:] = _ma_transparams(params[k+k_ar:].copy())
return newparams
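    # _invtransparams below is the inverse mapping, so
    # _invtransparams(_transparams(x)) recovers x up to floating point.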
def _invtransparams(self, start_params):
"""
Inverse of the Jones reparameterization
"""
k_ar, k_ma = self.k_ar, self.k_ma
k = self.k_exog + self.k_trend
newparams = start_params.copy()
arcoefs = newparams[k:k+k_ar]
macoefs = newparams[k+k_ar:]
# AR coeffs
if k_ar != 0:
newparams[k:k+k_ar] = _ar_invtransparams(arcoefs)
# MA coeffs
if k_ma != 0:
newparams[k+k_ar:k+k_ar+k_ma] = _ma_invtransparams(macoefs)
return newparams
def _get_predict_start(self, start, dynamic):
# do some defaults
method = getattr(self, 'method', 'mle')
k_ar = getattr(self, 'k_ar', 0)
k_diff = getattr(self, 'k_diff', 0)
if start is None:
if 'mle' in method and not dynamic:
start = 0
else:
start = k_ar
self._set_predict_start_date(start) # else it's done in super
elif isinstance(start, int):
start = super(ARMA, self)._get_predict_start(start)
else: # should be on a date
#elif 'mle' not in method or dynamic: # should be on a date
start = _validate(start, k_ar, k_diff, self.data.dates,
method)
start = super(ARMA, self)._get_predict_start(start)
_check_arima_start(start, k_ar, k_diff, method, dynamic)
return start
def _get_predict_end(self, end, dynamic=False):
# pass through so predict works for ARIMA and ARMA
return super(ARMA, self)._get_predict_end(end)
def geterrors(self, params):
"""
Get the errors of the ARMA process.
Parameters
----------
params : array-like
The fitted ARMA parameters
"""
#start = self._get_predict_start(start) # will be an index of a date
#end, out_of_sample = self._get_predict_end(end)
params = np.asarray(params)
k_ar, k_ma = self.k_ar, self.k_ma
k = self.k_exog + self.k_trend
method = getattr(self, 'method', 'mle')
if 'mle' in method: # use KalmanFilter to get errors
(y, k, nobs, k_ar, k_ma, k_lags, newparams, Z_mat, m, R_mat,
T_mat, paramsdtype) = KalmanFilter._init_kalman_state(params,
self)
errors = KalmanFilter.geterrors(y, k, k_ar, k_ma, k_lags, nobs,
Z_mat, m, R_mat, T_mat,
paramsdtype)
if isinstance(errors, tuple):
errors = errors[0] # non-cython version returns a tuple
else: # use scipy.signal.lfilter
y = self.endog.copy()
k = self.k_exog + self.k_trend
if k > 0:
y -= dot(self.exog, params[:k])
k_ar = self.k_ar
k_ma = self.k_ma
(trendparams, exparams,
arparams, maparams) = _unpack_params(params, (k_ar, k_ma),
self.k_trend, self.k_exog,
reverse=False)
b, a = np.r_[1, -arparams], np.r_[1, maparams]
zi = zeros((max(k_ar, k_ma)))
for i in range(k_ar):
zi[i] = sum(-b[:i+1][::-1]*y[:i+1])
e = lfilter(b, a, y, zi=zi)
errors = e[0][k_ar:]
return errors.squeeze()
def predict(self, params, start=None, end=None, exog=None, dynamic=False):
method = getattr(self, 'method', 'mle') # don't assume fit
#params = np.asarray(params)
# will return an index of a date
start = self._get_predict_start(start, dynamic)
end, out_of_sample = self._get_predict_end(end, dynamic)
if out_of_sample and (exog is None and self.k_exog > 0):
raise ValueError("You must provide exog for ARMAX")
endog = self.endog
resid = self.geterrors(params)
k_ar = self.k_ar
if exog is not None:
# Note: we currently ignore the index of exog if it is available
exog = np.asarray(exog)
if self.k_exog == 1 and exog.ndim == 1:
exog = exog[:, None]
if out_of_sample != 0 and self.k_exog > 0:
# we need the last k_ar exog for the lag-polynomial
if self.k_exog > 0 and k_ar > 0 and not dynamic:
exog = np.vstack((self.exog[-k_ar:, self.k_trend:], exog))
if dynamic:
if self.k_exog > 0:
# need the last k_ar exog for the lag-polynomial
exog = np.vstack((self.exog[start - k_ar:, self.k_trend:], exog))
#TODO: now that predict does dynamic in-sample it should
# also return error estimates and confidence intervals
# but how? len(endog) is not tot_obs
out_of_sample += end - start + 1
return _arma_predict_out_of_sample(params, out_of_sample, resid,
k_ar, self.k_ma, self.k_trend,
self.k_exog, endog, exog,
start, method)
predictedvalues = _arma_predict_in_sample(start, end, endog, resid,
k_ar, method)
if out_of_sample:
forecastvalues = _arma_predict_out_of_sample(params, out_of_sample,
resid, k_ar,
self.k_ma,
self.k_trend,
self.k_exog, endog,
exog, method=method)
predictedvalues = np.r_[predictedvalues, forecastvalues]
return predictedvalues
predict.__doc__ = _arma_predict
def loglike(self, params, set_sigma2=True):
"""
Compute the log-likelihood for ARMA(p,q) model
Notes
-----
Likelihood used depends on the method set in fit
"""
method = self.method
if method in ['mle', 'css-mle']:
return self.loglike_kalman(params, set_sigma2)
elif method == 'css':
return self.loglike_css(params, set_sigma2)
else:
raise ValueError("Method %s not understood" % method)
def loglike_kalman(self, params, set_sigma2=True):
"""
Compute exact loglikelihood for ARMA(p,q) model by the Kalman Filter.
"""
return KalmanFilter.loglike(params, self, set_sigma2)
def loglike_css(self, params, set_sigma2=True):
"""
Conditional Sum of Squares likelihood function.
"""
k_ar = self.k_ar
k_ma = self.k_ma
k = self.k_exog + self.k_trend
y = self.endog.copy().astype(params.dtype)
nobs = self.nobs
# how to handle if empty?
if self.transparams:
newparams = self._transparams(params)
else:
newparams = params
if k > 0:
y -= dot(self.exog, newparams[:k])
# the AR order p determines how many zero errors to set for lfilter
b, a = np.r_[1, -newparams[k:k + k_ar]], np.r_[1, newparams[k + k_ar:]]
zi = np.zeros((max(k_ar, k_ma)), dtype=params.dtype)
for i in range(k_ar):
zi[i] = sum(-b[:i + 1][::-1] * y[:i + 1])
errors = lfilter(b, a, y, zi=zi)[0][k_ar:]
ssr = np.dot(errors, errors)
sigma2 = ssr/nobs
if set_sigma2:
self.sigma2 = sigma2
llf = -nobs/2.*(log(2*pi) + log(sigma2)) - ssr/(2*sigma2)
return llf
def fit(self, start_params=None, trend='c', method="css-mle",
transparams=True, solver='lbfgs', maxiter=50, full_output=1,
disp=5, callback=None, **kwargs):
"""
Fits ARMA(p,q) model using exact maximum likelihood via Kalman filter.
Parameters
----------
start_params : array-like, optional
Starting parameters for ARMA(p,q). If None, the default is given
by ARMA._fit_start_params. See there for more information.
transparams : bool, optional
Whether or not to transform the parameters to ensure stationarity.
Uses the transformation suggested in Jones (1980). If False,
no checking for stationarity or invertibility is done.
method : str {'css-mle','mle','css'}
This is the loglikelihood to maximize. If "css-mle", the
conditional sum of squares likelihood is maximized and its values
are used as starting values for the computation of the exact
likelihood via the Kalman filter. If "mle", the exact likelihood
is maximized via the Kalman Filter. If "css" the conditional sum
of squares likelihood is maximized. All three methods use
`start_params` as starting parameters. See above for more
information.
trend : str {'c','nc'}
Whether to include a constant or not. 'c' includes constant,
'nc' no constant.
solver : str or None, optional
Solver to be used. The default is 'lbfgs' (limited memory
Broyden-Fletcher-Goldfarb-Shanno). Other choices are 'bfgs',
'newton' (Newton-Raphson), 'nm' (Nelder-Mead), 'cg'
(conjugate gradient), 'ncg' (Newton conjugate gradient), and
'powell'. By default, the limited memory BFGS uses m=12 to
approximate the Hessian, projected gradient tolerance of 1e-8 and
factr = 1e2. You can change these by using kwargs.
maxiter : int, optional
The maximum number of function evaluations. Default is 50.
tol : float
The convergence tolerance. Default is 1e-08.
full_output : bool, optional
If True, all output from solver will be available in
the Results object's mle_retvals attribute. Output is dependent
on the solver. See Notes for more information.
disp : bool, optional
If True, convergence information is printed. For the default
l_bfgs_b solver, disp controls the frequency of the output during
the iterations. disp < 0 means no output in this case.
callback : function, optional
Called after each iteration as callback(xk) where xk is the current
parameter vector.
kwargs
See Notes for keyword arguments that can be passed to fit.
Returns
-------
statsmodels.tsa.arima_model.ARMAResults class
See also
--------
statsmodels.base.model.LikelihoodModel.fit : for more information
on using the solvers.
ARMAResults : results class returned by fit
Notes
-----
If fit by 'mle', it is assumed for the Kalman Filter that the initial
unknown state is zero, and that the initial variance is
P = dot(inv(identity(m**2)-kron(T,T)),dot(R,R.T).ravel('F')).reshape(r,
r, order = 'F')
"""
k_ar = self.k_ar
k_ma = self.k_ma
# enforce invertibility
self.transparams = transparams
endog, exog = self.endog, self.exog
k_exog = self.k_exog
self.nobs = len(endog) # this is overwritten if method is 'css'
# (re)set trend and handle exogenous variables
# always pass original exog
k_trend, exog = _make_arma_exog(endog, self.exog, trend)
# Check has something to estimate
if k_ar == 0 and k_ma == 0 and k_trend == 0 and k_exog == 0:
raise ValueError("Estimation requires the inclusion of at least one "
"AR term, MA term, a constant or an exogenous "
"variable.")
# check again now that we know the trend
_check_estimable(len(endog), k_ar + k_ma + k_exog + k_trend)
self.k_trend = k_trend
self.exog = exog # overwrites original exog from __init__
# (re)set names for this model
self.exog_names = _make_arma_names(self.data, k_trend, (k_ar, k_ma),
self.exog_names)
k = k_trend + k_exog
# choose objective function
if k_ma == 0 and k_ar == 0:
method = "css" # Always CSS when no AR or MA terms
self.method = method = method.lower()
# adjust nobs for css
if method == 'css':
self.nobs = len(self.endog) - k_ar
if start_params is not None:
start_params = np.asarray(start_params)
else: # estimate starting parameters
start_params = self._fit_start_params((k_ar, k_ma, k), method)
if transparams:  # map start params into the unconstrained optimization space
start_params = self._invtransparams(start_params)
if solver == 'lbfgs':
kwargs.setdefault('pgtol', 1e-8)
kwargs.setdefault('factr', 1e2)
kwargs.setdefault('m', 12)
kwargs.setdefault('approx_grad', True)
mlefit = super(ARMA, self).fit(start_params, method=solver,
maxiter=maxiter,
full_output=full_output, disp=disp,
callback=callback, **kwargs)
params = mlefit.params
if transparams: # transform parameters back
params = self._transparams(params)
self.transparams = False # so methods don't expect transf.
normalized_cov_params = None # TODO: fix this
armafit = ARMAResults(self, params, normalized_cov_params)
armafit.mle_retvals = mlefit.mle_retvals
armafit.mle_settings = mlefit.mle_settings
return ARMAResultsWrapper(armafit)
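# Hedged usage sketch for the class above: simulate an ARMA(1, 1) series
# and fit it by exact MLE.  arma_generate_sample comes from
# statsmodels.tsa.arima_process (also used in the __main__ block below);
# the helper name and the chosen coefficients are illustrative, and the
# order is passed positionally as in ARIMA.__new__ below.
def _example_arma_fit():
    import numpy as np
    from statsmodels.tsa.arima_process import arma_generate_sample
    np.random.seed(0)
    y = arma_generate_sample([1., -.6], [1., .3], nsample=500)
    res = ARMA(y, (1, 1)).fit(trend='nc', method='mle', disp=0)
    fcast, stderr, conf = res.forecast(steps=10)  # 10-step forecast
    return res.params, fcast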
#NOTE: the length of endog changes when we give a difference to fit
#so model methods are not the same on unfit models as fit ones
#starting to think that order of model should be put in instantiation...
class ARIMA(ARMA):
__doc__ = tsbase._tsa_doc % {"model" : _arima_model,
"params" : _arima_params, "extra_params" : "",
"extra_sections" : _armax_notes %
{"Model" : "ARIMA"}}
def __new__(cls, endog, order, exog=None, dates=None, freq=None,
missing='none'):
p, d, q = order
if d == 0: # then we just use an ARMA model
return ARMA(endog, (p, q), exog, dates, freq, missing)
else:
mod = super(ARIMA, cls).__new__(cls)
mod.__init__(endog, order, exog, dates, freq, missing)
return mod
def __init__(self, endog, order, exog=None, dates=None, freq=None,
missing='none'):
p, d, q = order
if d > 2:
#NOTE: to make more general, need to address the d == 2 stuff
# in the predict method
raise ValueError("d > 2 is not supported")
super(ARIMA, self).__init__(endog, (p, q), exog, dates, freq, missing)
self.k_diff = d
self._first_unintegrate = unintegrate_levels(self.endog[:d], d)
self.endog = np.diff(self.endog, n=d)
#NOTE: will check in ARMA but check again since differenced now
_check_estimable(len(self.endog), p+q)
if exog is not None:
self.exog = self.exog[d:]
if d == 1:
self.data.ynames = 'D.' + self.endog_names
else:
self.data.ynames = 'D{0:d}.'.format(d) + self.endog_names
# what about exog, should we difference it automatically before
# super call?
def _get_predict_start(self, start, dynamic):
"""
"""
#TODO: remove all these getattr and move order specification to
# class constructor
k_diff = getattr(self, 'k_diff', 0)
method = getattr(self, 'method', 'mle')
k_ar = getattr(self, 'k_ar', 0)
if start is None:
if 'mle' in method and not dynamic:
start = 0
else:
start = k_ar
elif isinstance(start, int):
start -= k_diff
try: # catch when given an integer outside of dates index
start = super(ARIMA, self)._get_predict_start(start,
dynamic)
except IndexError:
raise ValueError("start must be in series. "
"got %d" % (start + k_diff))
else: # received a date
start = _validate(start, k_ar, k_diff, self.data.dates,
method)
start = super(ARIMA, self)._get_predict_start(start, dynamic)
# reset date for k_diff adjustment
self._set_predict_start_date(start + k_diff)
return start
def _get_predict_end(self, end, dynamic=False):
"""
Returns last index to be forecast of the differenced array.
Handling of inclusiveness should be done in the predict function.
"""
end, out_of_sample = super(ARIMA, self)._get_predict_end(end, dynamic)
if 'mle' not in self.method and not dynamic:
end -= self.k_ar
return end - self.k_diff, out_of_sample
def fit(self, start_params=None, trend='c', method="css-mle",
transparams=True, solver='lbfgs', maxiter=50, full_output=1,
disp=5, callback=None, **kwargs):
"""
Fits ARIMA(p,d,q) model by exact maximum likelihood via Kalman filter.
Parameters
----------
start_params : array-like, optional
Starting parameters for ARMA(p,q). If None, the default is given
by ARMA._fit_start_params. See there for more information.
transparams : bool, optional
Whether or not to transform the parameters to ensure stationarity.
Uses the transformation suggested in Jones (1980). If False,
no checking for stationarity or invertibility is done.
method : str {'css-mle','mle','css'}
This is the loglikelihood to maximize. If "css-mle", the
conditional sum of squares likelihood is maximized and its values
are used as starting values for the computation of the exact
likelihood via the Kalman filter. If "mle", the exact likelihood
is maximized via the Kalman Filter. If "css" the conditional sum
of squares likelihood is maximized. All three methods use
`start_params` as starting parameters. See above for more
information.
trend : str {'c','nc'}
Whether to include a constant or not. 'c' includes constant,
'nc' no constant.
solver : str or None, optional
Solver to be used. The default is 'lbfgs' (limited memory
Broyden-Fletcher-Goldfarb-Shanno). Other choices are 'bfgs',
'newton' (Newton-Raphson), 'nm' (Nelder-Mead), 'cg'
(conjugate gradient), 'ncg' (Newton conjugate gradient), and
'powell'. By default, the limited memory BFGS uses m=12 to
approximate the Hessian, projected gradient tolerance of 1e-8 and
factr = 1e2. You can change these by using kwargs.
maxiter : int, optional
The maximum number of function evaluations. Default is 50.
tol : float
The convergence tolerance. Default is 1e-08.
full_output : bool, optional
If True, all output from solver will be available in
the Results object's mle_retvals attribute. Output is dependent
on the solver. See Notes for more information.
disp : bool, optional
If True, convergence information is printed. For the default
l_bfgs_b solver, disp controls the frequency of the output during
the iterations. disp < 0 means no output in this case.
callback : function, optional
Called after each iteration as callback(xk) where xk is the current
parameter vector.
kwargs
See Notes for keyword arguments that can be passed to fit.
Returns
-------
`statsmodels.tsa.arima.ARIMAResults` class
See also
--------
statsmodels.base.model.LikelihoodModel.fit : for more information
on using the solvers.
ARIMAResults : results class returned by fit
Notes
-----
If fit by 'mle', it is assumed for the Kalman Filter that the initial
unknown state is zero, and that the initial variance is
P = dot(inv(identity(m**2)-kron(T,T)),dot(R,R.T).ravel('F')).reshape(r,
r, order = 'F')
"""
mlefit = super(ARIMA, self).fit(start_params, trend,
method, transparams, solver,
maxiter, full_output, disp,
callback, **kwargs)
normalized_cov_params = None # TODO: fix this?
arima_fit = ARIMAResults(self, mlefit._results.params,
normalized_cov_params)
arima_fit.k_diff = self.k_diff
arima_fit.mle_retvals = mlefit.mle_retvals
arima_fit.mle_settings = mlefit.mle_settings
return ARIMAResultsWrapper(arima_fit)
def predict(self, params, start=None, end=None, exog=None, typ='linear',
dynamic=False):
# go ahead and convert to an index for easier checking
if isinstance(start, (string_types, datetime)):
start = _index_date(start, self.data.dates)
if typ == 'linear':
if not dynamic or (start != self.k_ar + self.k_diff and
start is not None):
return super(ARIMA, self).predict(params, start, end, exog,
dynamic)
else:
# need to assume pre-sample residuals are zero
# do this by a hack
q = self.k_ma
self.k_ma = 0
predictedvalues = super(ARIMA, self).predict(params, start,
end, exog,
dynamic)
self.k_ma = q
return predictedvalues
elif typ == 'levels':
endog = self.data.endog
if not dynamic:
predict = super(ARIMA, self).predict(params, start, end, exog,
dynamic)
start = self._get_predict_start(start, dynamic)
end, out_of_sample = self._get_predict_end(end)
d = self.k_diff
if 'mle' in self.method:
start += d - 1 # for case where d == 2
end += d - 1
# add each predicted diff to lagged endog
if out_of_sample:
fv = predict[:-out_of_sample] + endog[start:end+1]
if d == 2: #TODO: make a general solution to this
fv += np.diff(endog[start - 1:end + 1])
levels = unintegrate_levels(endog[-d:], d)
fv = np.r_[fv,
unintegrate(predict[-out_of_sample:],
levels)[d:]]
else:
fv = predict + endog[start:end + 1]
if d == 2:
fv += np.diff(endog[start - 1:end + 1])
else:
k_ar = self.k_ar
if out_of_sample:
fv = (predict[:-out_of_sample] +
endog[max(start, self.k_ar-1):end+k_ar+1])
if d == 2:
fv += np.diff(endog[start - 1:end + 1])
levels = unintegrate_levels(endog[-d:], d)
fv = np.r_[fv,
unintegrate(predict[-out_of_sample:],
levels)[d:]]
else:
fv = predict + endog[max(start, k_ar):end+k_ar+1]
if d == 2:
fv += np.diff(endog[start - 1:end + 1])
else:
#IFF we need to use pre-sample values assume pre-sample
# residuals are zero, do this by a hack
if start == self.k_ar + self.k_diff or start is None:
# do the first k_diff+1 separately
p = self.k_ar
q = self.k_ma
k_exog = self.k_exog
k_trend = self.k_trend
k_diff = self.k_diff
(trendparam, exparams,
arparams, maparams) = _unpack_params(params, (p, q),
k_trend,
k_exog,
reverse=True)
# this is the hack
self.k_ma = 0
predict = super(ARIMA, self).predict(params, start, end,
exog, dynamic)
if not start:
start = self._get_predict_start(start, dynamic)
start += k_diff
self.k_ma = q
return endog[start-1] + np.cumsum(predict)
else:
predict = super(ARIMA, self).predict(params, start, end,
exog, dynamic)
return endog[start-1] + np.cumsum(predict)
return fv
else: # pragma : no cover
raise ValueError("typ %s not understood" % typ)
predict.__doc__ = _arima_predict
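# Hedged usage sketch for ARIMA: fit a once-differenced (d=1) model and
# predict in the levels of the original series via typ='levels'.  The
# helper name and the simulated random walk are illustrative only.
def _example_arima_fit():
    import numpy as np
    np.random.seed(0)
    y = np.cumsum(0.5 + np.random.randn(300))  # integrated of order one
    res = ARIMA(y, (1, 1, 1)).fit(disp=0)
    levels = res.predict(typ='levels')  # in-sample, undifferenced scale
    fcast, stderr, conf = res.forecast(steps=5)
    return levels, fcast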
class ARMAResults(tsbase.TimeSeriesModelResults):
"""
Class to hold results from fitting an ARMA model.
Parameters
----------
model : ARMA instance
The fitted model instance
params : array
Fitted parameters
normalized_cov_params : array, optional
The normalized variance covariance matrix
scale : float, optional
Optional argument to scale the variance covariance matrix.
Returns
-------
**Attributes**
aic : float
Akaike Information Criterion
:math:`-2*llf + 2*df_model`
where `df_model` includes all AR parameters, MA parameters, parameters
on constant terms, and the variance.
arparams : array
The parameters associated with the AR coefficients in the model.
arroots : array
The roots of the AR coefficients are the solution to
(1 - arparams[0]*z - arparams[1]*z**2 - ... - arparams[k_ar-1]*z**k_ar) = 0
Stability requires that the roots in modulus lie outside the unit
circle.
bic : float
Bayes Information Criterion
-2*llf + log(nobs)*df_model
Where if the model is fit using conditional sum of squares, the
number of observations `nobs` does not include the `p` pre-sample
observations.
bse : array
The standard errors of the parameters. These are computed using the
numerical Hessian.
df_model : array
The model degrees of freedom = `k_exog` + `k_trend` + `k_ar` + `k_ma`
df_resid : array
The residual degrees of freedom = `nobs` - `df_model`
fittedvalues : array
The predicted values of the model.
hqic : float
Hannan-Quinn Information Criterion
-2*llf + 2*(`df_model`)*log(log(nobs))
Like `bic` if the model is fit using conditional sum of squares then
the `k_ar` pre-sample observations are not counted in `nobs`.
k_ar : int
The number of AR coefficients in the model.
k_exog : int
The number of exogenous variables included in the model. Does not
include the constant.
k_ma : int
The number of MA coefficients.
k_trend : int
This is 0 for no constant or 1 if a constant is included.
llf : float
The value of the log-likelihood function evaluated at `params`.
maparams : array
The value of the moving average coefficients.
maroots : array
The roots of the MA coefficients are the solution to
(1 + maparams[0]*z + maparams[1]*z**2 + ... + maparams[k_ma-1]*z**k_ma) = 0
Invertibility requires that the roots in modulus lie outside the unit
circle.
model : ARMA instance
A reference to the model that was fit.
nobs : float
The number of observations used to fit the model. If the model is fit
using exact maximum likelihood this is equal to the total number of
observations, `n_totobs`. If the model is fit using conditional
maximum likelihood this is equal to `n_totobs` - `k_ar`.
n_totobs : float
The total number of observations for `endog`. This includes all
observations, even pre-sample values if the model is fit using `css`.
params : array
The parameters of the model. The order of variables is the trend
coefficients and the `k_exog` exognous coefficients, then the
`k_ar` AR coefficients, and finally the `k_ma` MA coefficients.
pvalues : array
The p-values associated with the t-values of the coefficients. Note
that the coefficients are assumed to have a Student's T distribution.
resid : array
The model residuals. If the model is fit using 'mle' then the
residuals are created via the Kalman Filter. If the model is fit
using 'css' then the residuals are obtained via `scipy.signal.lfilter`
adjusted such that the first `k_ma` residuals are zero. These zero
residuals are not returned.
scale : float
This is currently set to 1.0 and not used by the model or its results.
sigma2 : float
The variance of the residuals. If the model is fit by 'css',
sigma2 = ssr/nobs, where ssr is the sum of squared residuals. If
the model is fit by 'mle', then sigma2 = 1/nobs * sum(v**2 / F)
where v is the one-step forecast error and F is the forecast error
variance. See `nobs` for the difference in definitions depending on the
fit.
"""
_cache = {}
#TODO: use this for docstring when we fix nobs issue
def __init__(self, model, params, normalized_cov_params=None, scale=1.):
super(ARMAResults, self).__init__(model, params, normalized_cov_params,
scale)
self.sigma2 = model.sigma2
nobs = model.nobs
self.nobs = nobs
k_exog = model.k_exog
self.k_exog = k_exog
k_trend = model.k_trend
self.k_trend = k_trend
k_ar = model.k_ar
self.k_ar = k_ar
self.n_totobs = len(model.endog)
k_ma = model.k_ma
self.k_ma = k_ma
df_model = k_exog + k_trend + k_ar + k_ma
self._ic_df_model = df_model + 1
self.df_model = df_model
self.df_resid = self.nobs - df_model
self._cache = resettable_cache()
@cache_readonly
def arroots(self):
return np.roots(np.r_[1, -self.arparams])**-1
@cache_readonly
def maroots(self):
return np.roots(np.r_[1, self.maparams])**-1
@cache_readonly
def arfreq(self):
r"""
Returns the frequency of the AR roots.
This is the solution, x, to z = abs(z)*exp(2j*np.pi*x) where z are the
roots.
"""
z = self.arroots
if not z.size:
return
return np.arctan2(z.imag, z.real) / (2*pi)
@cache_readonly
def mafreq(self):
r"""
Returns the frequency of the MA roots.
This is the solution, x, to z = abs(z)*exp(2j*np.pi*x) where z are the
roots.
"""
z = self.maroots
if not z.size:
return
return np.arctan2(z.imag, z.real) / (2*pi)
@cache_readonly
def arparams(self):
k = self.k_exog + self.k_trend
return self.params[k:k+self.k_ar]
@cache_readonly
def maparams(self):
k = self.k_exog + self.k_trend
k_ar = self.k_ar
return self.params[k+k_ar:]
@cache_readonly
def llf(self):
return self.model.loglike(self.params)
@cache_readonly
def bse(self):
params = self.params
hess = self.model.hessian(params)
if len(params) == 1: # can't take an inverse, ensure 1d
return np.sqrt(-1./hess[0])
return np.sqrt(np.diag(-inv(hess)))
def cov_params(self): # add scale argument?
params = self.params
hess = self.model.hessian(params)
return -inv(hess)
@cache_readonly
def aic(self):
return -2 * self.llf + 2 * self._ic_df_model
@cache_readonly
def bic(self):
nobs = self.nobs
return -2 * self.llf + np.log(nobs) * self._ic_df_model
@cache_readonly
def hqic(self):
nobs = self.nobs
return -2 * self.llf + 2 * np.log(np.log(nobs)) * self._ic_df_model
@cache_readonly
def fittedvalues(self):
model = self.model
endog = model.endog.copy()
k_ar = self.k_ar
exog = model.exog # this is a copy
if exog is not None:
if model.method == "css" and k_ar > 0:
exog = exog[k_ar:]
if model.method == "css" and k_ar > 0:
endog = endog[k_ar:]
fv = endog - self.resid
# add deterministic part back in
#k = self.k_exog + self.k_trend
#TODO: this needs to be commented out for MLE with constant
#if k != 0:
# fv += dot(exog, self.params[:k])
return fv
@cache_readonly
def resid(self):
return self.model.geterrors(self.params)
@cache_readonly
def pvalues(self):
#TODO: same for conditional and unconditional?
df_resid = self.df_resid
return t.sf(np.abs(self.tvalues), df_resid) * 2
def predict(self, start=None, end=None, exog=None, dynamic=False):
return self.model.predict(self.params, start, end, exog, dynamic)
predict.__doc__ = _arma_results_predict
def _forecast_error(self, steps):
sigma2 = self.sigma2
ma_rep = arma2ma(np.r_[1, -self.arparams],
np.r_[1, self.maparams], nobs=steps)
fcasterr = np.sqrt(sigma2 * np.cumsum(ma_rep**2))
return fcasterr
def _forecast_conf_int(self, forecast, fcasterr, alpha):
const = norm.ppf(1 - alpha / 2.)
conf_int = np.c_[forecast - const * fcasterr,
forecast + const * fcasterr]
return conf_int
def forecast(self, steps=1, exog=None, alpha=.05):
"""
Out-of-sample forecasts
Parameters
----------
steps : int
The number of out of sample forecasts from the end of the
sample.
exog : array
If the model is an ARMAX, you must provide out of sample
values for the exogenous variables. This should not include
the constant.
alpha : float
The confidence intervals for the forecasts are (1 - alpha) %
Returns
-------
forecast : array
Array of out of sample forecasts
stderr : array
Array of the standard error of the forecasts.
conf_int : array
2d array of the confidence interval for the forecast
"""
if exog is not None:
#TODO: make a convenience function for this. we're using the
# pattern elsewhere in the codebase
exog = np.asarray(exog)
if self.k_exog == 1 and exog.ndim == 1:
exog = exog[:, None]
elif exog.ndim == 1:
if len(exog) != self.k_exog:
raise ValueError("1d exog given and len(exog) != k_exog")
exog = exog[None, :]
if exog.shape[0] != steps:
raise ValueError("new exog needed for each step")
# prepend in-sample exog observations
if self.k_ar > 0:
exog = np.vstack((self.model.exog[-self.k_ar:, self.k_trend:],
exog))
forecast = _arma_predict_out_of_sample(self.params,
steps, self.resid, self.k_ar,
self.k_ma, self.k_trend,
self.k_exog, self.model.endog,
exog, method=self.model.method)
# compute the standard errors
fcasterr = self._forecast_error(steps)
conf_int = self._forecast_conf_int(forecast, fcasterr, alpha)
return forecast, fcasterr, conf_int
def summary(self, alpha=.05):
"""Summarize the Model
Parameters
----------
alpha : float, optional
Significance level for the confidence intervals.
Returns
-------
smry : Summary instance
This holds the summary table and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary.Summary
"""
from statsmodels.iolib.summary import Summary
model = self.model
title = model.__class__.__name__ + ' Model Results'
method = model.method
# get sample TODO: make better sample machinery for estimation
k_diff = getattr(self, 'k_diff', 0)
if 'mle' in method:
start = k_diff
else:
start = k_diff + self.k_ar
if self.data.dates is not None:
dates = self.data.dates
sample = [dates[start].strftime('%m-%d-%Y')]
sample += ['- ' + dates[-1].strftime('%m-%d-%Y')]
else:
sample = [str(start), ' - ' + str(len(self.data.orig_endog))]
k_ar, k_ma = self.k_ar, self.k_ma
if not k_diff:
order = str((k_ar, k_ma))
else:
order = str((k_ar, k_diff, k_ma))
top_left = [('Dep. Variable:', None),
('Model:', [model.__class__.__name__ + order]),
('Method:', [method]),
('Date:', None),
('Time:', None),
('Sample:', [sample[0]]),
('', [sample[1]])
]
top_right = [
('No. Observations:', [str(len(self.model.endog))]),
('Log Likelihood', ["%#5.3f" % self.llf]),
('S.D. of innovations', ["%#5.3f" % self.sigma2**.5]),
('AIC', ["%#5.3f" % self.aic]),
('BIC', ["%#5.3f" % self.bic]),
('HQIC', ["%#5.3f" % self.hqic])]
smry = Summary()
smry.add_table_2cols(self, gleft=top_left, gright=top_right,
title=title)
smry.add_table_params(self, alpha=alpha, use_t=False)
# Make the roots table
from statsmodels.iolib.table import SimpleTable
if k_ma and k_ar:
arstubs = ["AR.%d" % i for i in range(1, k_ar + 1)]
mastubs = ["MA.%d" % i for i in range(1, k_ma + 1)]
stubs = arstubs + mastubs
roots = np.r_[self.arroots, self.maroots]
freq = np.r_[self.arfreq, self.mafreq]
elif k_ma:
mastubs = ["MA.%d" % i for i in range(1, k_ma + 1)]
stubs = mastubs
roots = self.maroots
freq = self.mafreq
elif k_ar:
arstubs = ["AR.%d" % i for i in range(1, k_ar + 1)]
stubs = arstubs
roots = self.arroots
freq = self.arfreq
else: # 0,0 model
stubs = []
if len(stubs): # not 0, 0
modulus = np.abs(roots)
data = np.column_stack((roots.real, roots.imag, modulus, freq))
roots_table = SimpleTable(data,
headers=[' Real',
' Imaginary',
' Modulus',
' Frequency'],
title="Roots",
stubs=stubs,
data_fmts=["%17.4f", "%+17.4fj",
"%17.4f", "%17.4f"])
smry.tables.append(roots_table)
return smry
def summary2(self, title=None, alpha=.05, float_format="%.4f"):
"""Experimental summary function for ARIMA Results
Parameters
----------
title : string, optional
Title for the top table. If not None, then this replaces the
default title
alpha : float
significance level for the confidence intervals
float_format: string
print format for floats in parameters summary
Returns
-------
smry : Summary instance
This holds the summary table and text, which can be printed or
converted to various output formats.
See Also
--------
statsmodels.iolib.summary2.Summary : class to hold summary
results
"""
from pandas import DataFrame
# get sample TODO: make better sample machinery for estimation
k_diff = getattr(self, 'k_diff', 0)
if 'mle' in self.model.method:
start = k_diff
else:
start = k_diff + self.k_ar
if self.data.dates is not None:
dates = self.data.dates
sample = [dates[start].strftime('%m-%d-%Y')]
sample += [dates[-1].strftime('%m-%d-%Y')]
else:
sample = [str(start), str(len(self.data.orig_endog))]
k_ar, k_ma = self.k_ar, self.k_ma
# Roots table
if k_ma and k_ar:
arstubs = ["AR.%d" % i for i in range(1, k_ar + 1)]
mastubs = ["MA.%d" % i for i in range(1, k_ma + 1)]
stubs = arstubs + mastubs
roots = np.r_[self.arroots, self.maroots]
freq = np.r_[self.arfreq, self.mafreq]
elif k_ma:
mastubs = ["MA.%d" % i for i in range(1, k_ma + 1)]
stubs = mastubs
roots = self.maroots
freq = self.mafreq
elif k_ar:
arstubs = ["AR.%d" % i for i in range(1, k_ar + 1)]
stubs = arstubs
roots = self.arroots
freq = self.arfreq
else: # 0, 0 order
stubs = []
if len(stubs):
modulus = np.abs(roots)
data = np.column_stack((roots.real, roots.imag, modulus, freq))
data = DataFrame(data)
data.columns = ['Real', 'Imaginary', 'Modulus', 'Frequency']
data.index = stubs
# Summary
from statsmodels.iolib import summary2
smry = summary2.Summary()
# Model info
model_info = summary2.summary_model(self)
model_info['Method:'] = self.model.method
model_info['Sample:'] = sample[0]
model_info[' '] = sample[-1]
model_info['S.D. of innovations:'] = "%#5.3f" % self.sigma2**.5
model_info['HQIC:'] = "%#5.3f" % self.hqic
model_info['No. Observations:'] = str(len(self.model.endog))
# Parameters
params = summary2.summary_params(self)
smry.add_dict(model_info)
smry.add_df(params, float_format=float_format)
if len(stubs):
smry.add_df(data, float_format="%17.4f")
smry.add_title(results=self, title=title)
return smry
def plot_predict(self, start=None, end=None, exog=None, dynamic=False,
alpha=.05, plot_insample=True, ax=None):
from statsmodels.graphics.utils import _import_mpl, create_mpl_ax
_ = _import_mpl()
fig, ax = create_mpl_ax(ax)
# use predict so you set dates
forecast = self.predict(start, end, exog, dynamic)
# doing this twice. just add a plot keyword to predict?
start = self.model._get_predict_start(start, dynamic=False)
end, out_of_sample = self.model._get_predict_end(end, dynamic=False)
if out_of_sample:
steps = out_of_sample
fc_error = self._forecast_error(steps)
conf_int = self._forecast_conf_int(forecast[-steps:], fc_error,
alpha)
if hasattr(self.data, "predict_dates"):
from pandas import TimeSeries
forecast = TimeSeries(forecast, index=self.data.predict_dates)
ax = forecast.plot(ax=ax, label='forecast')
else:
ax.plot(forecast)
x = ax.get_lines()[-1].get_xdata()
if out_of_sample:
label = "{0:.0%} confidence interval".format(1 - alpha)
ax.fill_between(x[-out_of_sample:], conf_int[:, 0], conf_int[:, 1],
color='gray', alpha=.5, label=label)
if plot_insample:
ax.plot(x[:end + 1 - start], self.model.endog[start:end+1],
label=self.model.endog_names)
ax.legend(loc='best')
return fig
plot_predict.__doc__ = _plot_predict
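# Sketch of the forecast-error recursion in _forecast_error above: the
# h-step-ahead standard error of an ARMA forecast is
# sigma * sqrt(sum_{j<h} psi_j**2) with psi_j the MA(infinity) weights.
# Uses the module-level arma2ma helper exactly as the method does; the
# function name is illustrative.
def _psi_weight_stderr(arparams, maparams, sigma2, steps):
    import numpy as np
    psi = arma2ma(np.r_[1, -np.asarray(arparams)],
                  np.r_[1, np.asarray(maparams)], nobs=steps)
    return np.sqrt(sigma2 * np.cumsum(psi ** 2))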
class ARMAResultsWrapper(wrap.ResultsWrapper):
_attrs = {}
_wrap_attrs = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_attrs,
_attrs)
_methods = {}
_wrap_methods = wrap.union_dicts(tsbase.TimeSeriesResultsWrapper._wrap_methods,
_methods)
wrap.populate_wrapper(ARMAResultsWrapper, ARMAResults)
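# Hedged sketch of consuming a fitted ARMAResults object: a 12-step
# forecast with its 95% band stacked into one array for inspection.
# `res` is any ARMAResults instance; the helper name is illustrative.
def _example_forecast_table(res, steps=12):
    import numpy as np
    fcast, stderr, conf = res.forecast(steps=steps, alpha=0.05)
    # columns: point forecast, lower bound, upper bound
    return np.column_stack([fcast, conf[:, 0], conf[:, 1]])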
class ARIMAResults(ARMAResults):
def predict(self, start=None, end=None, exog=None, typ='linear',
dynamic=False):
return self.model.predict(self.params, start, end, exog, typ, dynamic)
predict.__doc__ = _arima_results_predict
def _forecast_error(self, steps):
sigma2 = self.sigma2
ma_rep = arma2ma(np.r_[1, -self.arparams],
np.r_[1, self.maparams], nobs=steps)
fcerr = np.sqrt(np.cumsum(cumsum_n(ma_rep, self.k_diff)**2)*sigma2)
return fcerr
def _forecast_conf_int(self, forecast, fcerr, alpha):
const = norm.ppf(1 - alpha/2.)
conf_int = np.c_[forecast - const*fcerr, forecast + const*fcerr]
return conf_int
def forecast(self, steps=1, exog=None, alpha=.05):
"""
Out-of-sample forecasts
Parameters
----------
steps : int
The number of out of sample forecasts from the end of the
sample.
exog : array
If the model is an ARIMAX, you must provide out of sample
values for the exogenous variables. This should not include
the constant.
alpha : float
The confidence intervals for the forecasts are (1 - alpha) %
Returns
-------
forecast : array
Array of out of sample forecasts
stderr : array
Array of the standard error of the forecasts.
conf_int : array
2d array of the confidence interval for the forecast
Notes
-----
Prediction is done in the levels of the original endogenous variable.
If you would like prediction of differences in levels use `predict`.
"""
if exog is not None:
if self.k_exog == 1 and exog.ndim == 1:
exog = exog[:, None]
if exog.shape[0] != steps:
raise ValueError("new exog needed for each step")
# prepend in-sample exog observations
if self.k_ar > 0:
exog = np.vstack((self.model.exog[-self.k_ar:, self.k_trend:],
exog))
forecast = _arma_predict_out_of_sample(self.params, steps, self.resid,
self.k_ar, self.k_ma,
self.k_trend, self.k_exog,
self.model.endog,
exog, method=self.model.method)
d = self.k_diff
endog = self.model.data.endog[-d:]
forecast = unintegrate(forecast, unintegrate_levels(endog, d))[d:]
# get forecast errors
fcerr = self._forecast_error(steps)
conf_int = self._forecast_conf_int(forecast, fcerr, alpha)
return forecast, fcerr, conf_int
def plot_predict(self, start=None, end=None, exog=None, dynamic=False,
alpha=.05, plot_insample=True, ax=None):
from statsmodels.graphics.utils import _import_mpl, create_mpl_ax
_ = _import_mpl()
fig, ax = create_mpl_ax(ax)
# use predict so you set dates
forecast = self.predict(start, end, exog, 'levels', dynamic)
# doing this twice. just add a plot keyword to predict?
start = self.model._get_predict_start(start, dynamic=dynamic)
end, out_of_sample = self.model._get_predict_end(end, dynamic=dynamic)
if out_of_sample:
steps = out_of_sample
fc_error = self._forecast_error(steps)
conf_int = self._forecast_conf_int(forecast[-steps:], fc_error,
alpha)
if hasattr(self.data, "predict_dates"):
from pandas import TimeSeries
forecast = TimeSeries(forecast, index=self.data.predict_dates)
ax = forecast.plot(ax=ax, label='forecast')
else:
ax.plot(forecast)
x = ax.get_lines()[-1].get_xdata()
if out_of_sample:
label = "{0:.0%} confidence interval".format(1 - alpha)
ax.fill_between(x[-out_of_sample:], conf_int[:, 0], conf_int[:, 1],
color='gray', alpha=.5, label=label)
if plot_insample:
import re
k_diff = self.k_diff
label = re.sub(r"D\d*\.", "", self.model.endog_names)
levels = unintegrate(self.model.endog,
self.model._first_unintegrate)
ax.plot(x[:end + 1 - start],
levels[start + k_diff:end + k_diff + 1], label=label)
ax.legend(loc='best')
return fig
plot_predict.__doc__ = _arima_plot_predict
class ARIMAResultsWrapper(ARMAResultsWrapper):
pass
wrap.populate_wrapper(ARIMAResultsWrapper, ARIMAResults)
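# Standalone sketch of the conditional-sum-of-squares likelihood computed
# by ARMA.loglike_css above, for the no-trend, no-exog case: filter the
# series with the ARMA lag polynomials via scipy.signal.lfilter and
# evaluate the Gaussian likelihood of the residuals.  Illustrative only.
def _css_loglike_sketch(y, arparams, maparams):
    import numpy as np
    from scipy.signal import lfilter
    y = np.asarray(y, dtype=float)
    k_ar, k_ma = len(arparams), len(maparams)
    b = np.r_[1, -np.asarray(arparams)]  # AR lag polynomial
    a = np.r_[1, np.asarray(maparams)]   # MA lag polynomial
    zi = np.zeros(max(k_ar, k_ma))       # zero pre-sample errors, as above
    for i in range(k_ar):
        zi[i] = sum(-b[:i + 1][::-1] * y[:i + 1])
    errors = lfilter(b, a, y, zi=zi)[0][k_ar:]
    nobs = len(errors)
    ssr = errors.dot(errors)
    sigma2 = ssr / nobs
    return -nobs / 2. * (np.log(2 * np.pi) + np.log(sigma2)) - ssr / (2 * sigma2)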
if __name__ == "__main__":
import statsmodels.api as sm
# simulate arma process
from statsmodels.tsa.arima_process import arma_generate_sample
y = arma_generate_sample([1., -.75], [1., .25], nsample=1000)
arma = ARMA(y, (1, 1))
res = arma.fit(trend='nc')
np.random.seed(12345)
y_arma22 = arma_generate_sample([1., -.85, .35], [1, .25, -.9],
nsample=1000)
arma22 = ARMA(y_arma22, (2, 2))
res22 = arma22.fit(trend='nc')
# test CSS
arma22_css = ARMA(y_arma22, (2, 2))
res22css = arma22_css.fit(trend='nc', method='css')
data = sm.datasets.sunspots.load()
ar = ARMA(data.endog, (9, 0))
resar = ar.fit(trend='nc')
y_arma31 = arma_generate_sample([1, -.75, -.35, .25], [.1],
nsample=1000)
arma31css = ARMA(y_arma31, (3, 1))
res31css = arma31css.fit(method="css", trend="nc", transparams=True)
y_arma13 = arma_generate_sample([1., -.75], [1, .25, -.5, .8],
nsample=1000)
arma13css = ARMA(y_arma13, (1, 3))
res13css = arma13css.fit(method='css', trend='nc')
# check css for p < q and q < p
y_arma41 = arma_generate_sample([1., -.75, .35, .25, -.3], [1, -.35],
nsample=1000)
arma41css = ARMA(y_arma41, (4, 1))
res41css = arma41css.fit(trend='nc', method='css')
y_arma14 = arma_generate_sample([1, -.25], [1., -.75, .35, .25, -.3],
nsample=1000)
arma14css = ARMA(y_arma14, (1, 4))
res14css = arma14css.fit(trend='nc', method='css')
# ARIMA Model
from statsmodels.datasets import webuse
dta = webuse('wpi1')
wpi = dta['wpi']
mod = ARIMA(wpi, (1, 1, 1)).fit()
| bsd-3-clause |
codingpoets/tigl | misc/math-scripts/ms_componentSegmentGeom.py | 2 | 19336 | #
# Copyright (C) 2007-2013 German Aerospace Center (DLR/SC)
#
# Created: 2012-12-17 Martin Siggel <Martin.Siggel@dlr.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @file ms_componentSegmentGeom.py
# @brief Implements coordinate transforms on the component segment geometry
#
from numpy import *
from ms_optAlgs import *
from ms_segmentGeometry import *
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
class Elongation:
Left, No, Right, LeftRight = range(4)
class ComponentSegment:
def __init__(self):
self.segments = []
def addSegment(self, p1, p2, p3, p4):
self.segments.append(ComponentSegmentGeometry(p1,p2,p3,p4))
self.__calcEtaRanges()
def __calcEtaRanges(self):
size = len(self.segments)
# calculate total projected length
len_tot = 0
for item in self.segments:
len_tot = len_tot + item.calcProjectedLeadingEdgeLength(Elongation.No)
len_tot = len_tot + self.segments[0].calcProjectedLeadingEdgeLength(Elongation.Left) \
- self.segments[0].calcProjectedLeadingEdgeLength(Elongation.No) \
+ self.segments[size-1].calcProjectedLeadingEdgeLength(Elongation.Right) \
- self.segments[size-1].calcProjectedLeadingEdgeLength(Elongation.No)
#calculate inner eta of first segment
etastart = (self.segments[0].calcProjectedLeadingEdgeLength(Elongation.Left) \
- self.segments[0].calcProjectedLeadingEdgeLength(Elongation.No) )/len_tot
for item in self.segments:
etastop = etastart + item.calcProjectedLeadingEdgeLength(Elongation.No)/len_tot
item.setLeadingEdgeEtas(etastart, etastop)
etastart = etastop
def draw(self, axis):
for item in self.segments:
item.drawSegment(axis)
def calcPoint(self, eta, xsi):
for item in self.segments:
if item.checkCoordValidity(eta,xsi) == True:
return item.calcCSPoint(eta,xsi)
def drawPoint(self, axis, eta, xsi):
point = self.calcPoint(eta, xsi)
axis.plot([point[0]], [point[1]], [point[2]],'rx')
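# Illustrative numeric sketch of the eta allocation in __calcEtaRanges
# above: each segment gets an eta interval proportional to its projected
# leading-edge length, and the elongations of the first and last segment
# shift/shrink the usable range.  Plain numbers, no geometry; the helper
# name is made up for illustration.  Pass floats to avoid integer division.
def _eta_ranges_sketch(lengths, left_extra=0.0, right_extra=0.0):
    len_tot = float(sum(lengths) + left_extra + right_extra)
    etastart = left_extra / len_tot
    ranges = []
    for l in lengths:
        etastop = etastart + l / len_tot
        ranges.append((etastart, etastop))
        etastart = etastop
    return ranges
# e.g. _eta_ranges_sketch([2.0, 1.0], left_extra=0.5)
# -> [(0.143, 0.714), (0.714, 1.0)] (rounded)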
class ComponentSegmentGeometry:
def __init__(self, p1, p2, p3, p4, etamin = 0, etamax = 1):
self.setPoints(p1, p2, p3, p4, etamin, etamax)
def setPoints(self, p1, p2, p3, p4, etamin, etamax):
self.__p1 = p1
self.__p2 = p2
self.__p3 = p3
self.__p4 = p4
self.__etamin = etamin
self.__etamax = etamax
sv = p2 - p1
sh = p4 - p3
# normal vector of plane
n = array([0, -sv[1], -sv[2]])
## calculate extended leading edge and trailing edge points, this has to be done
# only once per wing segment.
#calculate point where le intersects plane
avo = dot(p4-p1,n) / dot(p2-p1,n)
if avo > 1:
self.__p2p = avo*sv + p1
self.__p4p = p4
else:
self.__p2p = p2;
# check trailing edge
aho = dot(p2-p3,n) / dot(p4-p3,n)
assert aho >= 1
self.__p4p = sh*aho + p3
# now the inner section, the normal vector should be still the same
avi = dot(p3-p1,n)/dot(p2-p1,n)
if avi < 0:
# leading edge has to be extended
self.__p3p = p3
self.__p1p = avi*sv + p1
else:
self.__p1p = p1;
ahi = dot(p1-p3,n)/dot(p4-p3,n)
self.__p3p = p3 + ahi*sh
assert ahi <= 0
#calculate eta values of the segment edges; these values also define when a given cs coordinate is outside the wing segment
self.__eta1 = dot(p1-self.__p1p,n) / dot(self.__p2p-self.__p1p,n)
self.__eta2 = dot(p2-self.__p1p,n) / dot(self.__p2p-self.__p1p,n)
self.__eta3 = dot(p3-self.__p1p,n) / dot(self.__p2p-self.__p1p,n)
self.__eta4 = dot(p4-self.__p1p,n) / dot(self.__p2p-self.__p1p,n)
# sets the eta range from inner segment tip to the outer tip
def setEtaMinMax(self, etamin, etamax):
self.__etamax = etamax
self.__etamin = etamin
def getEtaMinMax(self):
return self.__etamin, self.__etamax
# sets the eta range of the leading edge. if e.g. the trailing edge is longer than
# the leading edge, this makes a difference to setEtaMinMax
def setLeadingEdgeEtas(self, eta_in, eta_out):
# we need to scale etamax, etamin accordingly
etamin = (self.__eta2*eta_in - self.__eta1*eta_out)/(self.__eta2 - self.__eta1)
etamax = etamin + (eta_out - eta_in)/(self.__eta2 - self.__eta1)
self.setEtaMinMax(etamin, etamax)
# only valid with calcCSPoint (not calcCSPoint2/3)
def checkCoordValidity(self, eta, xsi):
if eta < self.__etamin or eta > self.__etamax or xsi < 0 or xsi > 1:
return False
else:
actetamin = (1-xsi)*self.__eta1 + xsi*self.__eta3
actetamax = (1-xsi)*self.__eta2 + xsi*self.__eta4
if (eta>= actetamin) and (eta <= actetamax):
return True
else:
return False
def calcProjectedLeadingEdgeLength(self, elongation):
p1 = self.__p1;
p2 = self.__p2;
if elongation == Elongation.Left:
p1 = self.__p1p
elif elongation == Elongation.Right:
p2 = self.__p2p
elif elongation == Elongation.LeftRight:
p1 = self.__p1p
p2 = self.__p2p
# project leading edge into the z-y plane
vProj = array([0, 1, 1])
return linalg.norm(vProj*(p2-p1))
def calcCSPoint(self, eta, xsi):
eta_ = (eta - self.__etamin)/(self.__etamax - self.__etamin)
#calculate eta values at given xsi
eta1p = self.__eta1*(1-xsi) + self.__eta3*xsi
eta2p = self.__eta2*(1-xsi) + self.__eta4*xsi
pbeg = self.__p1*(1-xsi) + self.__p3*xsi
pend = self.__p2*(1-xsi) + self.__p4*xsi
p = pbeg + (pend-pbeg)*(eta_ - eta1p)/(eta2p-eta1p)
return p
def calcCSPoint2(self, eta, xsi):
eta_ = (eta - self.__etamin)/(self.__etamax - self.__etamin)
p = self.__p1p + (self.__p2p-self.__p1p)*(eta_);
a = -self.__p1+self.__p2;
b = -self.__p1+self.__p3;
c = self.__p1-self.__p2-self.__p3+self.__p4;
d = self.__p1;
n = array([0, -a[1], -a[2]])
# calc some constants
a1 = dot(p - d, n);
a2 = -dot(b, n);
a3 = dot(a, n);
a4 = dot(c, n);
# this calculates the intersection curve from the segment with a plane (normal vector n, point p2)
al = lambda beta: (a1 + a2*beta)/(a3 + a4*beta);
# diff( eta(xi). xi), tangent in eta xsi space
alp = lambda beta: (a2*a3 - a1*a4)/((a3 + a4*beta)**2);
# 3d intersection curve, parametrized by beta [0,1]
cu = lambda beta: outer(a,al(beta)) + outer(b,beta) + outer(c,al(beta)*beta) + outer(d,ones(size(beta)));
# tangent in 3d space
cup = lambda beta: (outer(a,ones(size(beta))) + outer(c,beta))*outer(ones(3),alp(beta)) + outer(c,al(beta)) + outer(b,ones(size(beta)));
#norm of tangent curve
f = lambda beta: sqrt(sum(cup(beta)**2.,0));
# we want to integrate int f(x)*dx, we do gaussian method
# substitution of f to range [-1,1]
g = lambda x,beta: beta/2.*f((x+1.)*beta/2.);
# Gauss-Legendre abscissae, 5-point rule on [-1, 1]
x = array([9.06179845938664e-01,
5.38469310105683e-01,
0.00000000000000e+00,
-5.38469310105683e-01,
-9.06179845938664e-01])
# gauss weights
w = array([2.36926885056189e-01,
4.78628670499366e-01,
5.68888888888889e-01,
4.78628670499366e-01,
2.36926885056189e-01])
# calculate total length of iso eta curve
ltot = dot(g(x,1.),w);
#now we use Newton-Raphson to find beta such that F(beta) == 0, i.e. the arc length up to beta equals xsi*ltot
F = lambda beta: dot(g(x,beta),w)/ltot - xsi
beta = xsi
diff = F(beta)
while abs(diff) > 1e-12:
dir = -diff/(f(beta)/ltot)
beta = beta + dir
diff = F(beta)
return cu(beta)
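# Self-contained sketch of the 5-point Gauss-Legendre quadrature used in
# calcCSPoint2 above: the speed |c'(t)| on [0, beta] is mapped onto
# [-1, 1] and summed with the tabulated weights.  Relies on the wildcard
# numpy import at the top of this file; the helper name is illustrative.
def _arc_length_sketch(speed, beta):
    # speed: callable returning |c'(t)| for an array of t; beta in [0, 1]
    x = array([9.06179845938664e-01, 5.38469310105683e-01, 0.0,
               -5.38469310105683e-01, -9.06179845938664e-01])
    w = array([2.36926885056189e-01, 4.78628670499366e-01,
               5.68888888888889e-01, 4.78628670499366e-01,
               2.36926885056189e-01])
    # substitution t = (x + 1)*beta/2 maps [-1, 1] onto [0, beta]
    return dot(beta / 2. * speed((x + 1.) * beta / 2.), w)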
# alternative implementation, where xsi represents the relative coordinate
# between intersection point of the leading edge and the intersection point
# of the trailing edge. This intermediate point will then be projected onto
# the true intersection curve
def calcCSPoint3(self, eta, xsi):
debug = True
eta_ = (eta - self.__etamin)/(self.__etamax - self.__etamin)
p = self.__p1p + (self.__p2p-self.__p1p)*(eta_);
a = -self.__p1+self.__p2;
b = -self.__p1+self.__p3;
c = self.__p1-self.__p2-self.__p3+self.__p4;
d = self.__p1;
n = array([0, -a[1], -a[2]])
# calc some constants
a1 = dot(p - d, n);
a2 = -dot(b, n);
a3 = dot(a, n);
a4 = dot(c, n);
# this calculates the intersection curve from the segment with a plane (normal vector n, point p2)
al = lambda beta: (a1 + a2*beta)/(a3 + a4*beta);
# diff( eta(xi). xi), tangent in eta xsi space
alp = lambda beta: (a2*a3 - a1*a4)/((a3 + a4*beta)**2);
# 3d intersection curve, parametrized by beta [0,1]
cu = lambda beta: outer(a,al(beta)) + outer(b,beta) + outer(c,al(beta)*beta) + outer(d,ones(size(beta)));
# tangent in 3d space
cup = lambda beta: (outer(a,ones(size(beta))) + outer(c,beta))*outer(ones(3),alp(beta)) + outer(c,al(beta)) + outer(b,ones(size(beta)));
# calculate intersection with leading and trailing edge
pbeg = cu(0)[:,0]
pend = cu(1)[:,0]
reflen = linalg.norm(pbeg-pend);
# go along this line
pact = (1-xsi)*pbeg + xsi*pend
# project this point onto intersection curve i.e. find beta so that (cu(beta) - pact) * (pbeg-pend) == 0
# as cu(beta) is not linear, we try to find the solution with newton raphson method
f = lambda beta: dot(cu(beta)[:,0] - pact, pend - pbeg)/reflen
fp = lambda beta: dot(cup(beta)[:,0], pend - pbeg)/reflen
beta = xsi
diff = f(beta)
iter = 0;
if debug: print 'Iter:', iter, ' Error=', abs(diff), ' @ Beta=', beta
while abs(diff) > 1e-12 and iter < 20:
iter += 1
dir = -diff/(fp(beta))
# maybe we need a linesearch here...
beta = beta + dir
diff = f(beta)
if debug: print 'Iter:', iter, ' Error=', abs(diff), ' @ Beta=', beta
if iter >= 20:
print "ERROR: could not project intersection curve onto line"
if debug == True:
myb = linspace(-1.,1., 1000)
val = 0*myb
for i in range(len(myb)):
val[i] = f(myb[i]);
fig = plt.figure()
ax2 = fig.gca()
ax2.plot(myb, val)
# calculate result
point = cu(beta)
# here we got for free our segment coordinates also, which are
# eta_s = al(beta), xsi_s = beta
return point
# calculates the tangents in eta and xsi direction at the given point
def calcCSPointTangents(self, eta, xsi):
eta_ = (eta - self.__etamin)/(self.__etamax - self.__etamin)
deta_ = 1. /(self.__etamax - self.__etamin)
#calculate eta values at given xsi
eta1p = self.__eta1*(1-xsi) + self.__eta3*xsi
eta2p = self.__eta2*(1-xsi) + self.__eta4*xsi
pbeg = self.__p1*(1-xsi) + self.__p3*xsi
pend = self.__p2*(1-xsi) + self.__p4*xsi
# calculate derivatives
deta1p = self.__eta3 - self.__eta1;
deta2p = self.__eta4 - self.__eta2;
dpbeg = self.__p3 - self.__p1;
dpend = self.__p4 - self.__p2;
J = zeros((3,2))
J[:,0] = (pend-pbeg)/(eta2p-eta1p)*deta_;
J[:,1] = dpbeg + (dpend-dpbeg)*(eta_ - eta1p)/(eta2p-eta1p) + (pend - pbeg)*(-deta1p/(eta2p-eta1p) - (eta_ - eta1p)/((eta2p-eta1p)**2)*(deta2p - deta1p) );
return J
def calcCSPointNormal(self, eta, xsi):
J = self.calcCSPointTangents(eta, xsi);
normal = cross(J[:,1],J[:,0])
return normal/linalg.norm(normal)
def __calcCSHessian(self, eta, xsi, p):
eta_ = (eta - self.__etamin)/(self.__etamax - self.__etamin)
deta_ = 1. /(self.__etamax - self.__etamin)
#calculate eta values at given xsi
eta1p = self.__eta1*(1-xsi) + self.__eta3*xsi
eta2p = self.__eta2*(1-xsi) + self.__eta4*xsi
pbeg = self.__p1*(1-xsi) + self.__p3*xsi
pend = self.__p2*(1-xsi) + self.__p4*xsi
# calculate derivatives
deta1p = self.__eta3 - self.__eta1;
deta2p = self.__eta4 - self.__eta2;
dpbeg = self.__p3 - self.__p1;
dpend = self.__p4 - self.__p2;
# helper variables and their derivatives to xsi
hv1 = pend - pbeg;
dhv1 = dpend - dpbeg;
h2 = eta_ - eta1p;
dh2 = - deta1p;
h3 = 1/(eta2p-eta1p);
dh3 = -1/(eta2p-eta1p)**2*(deta2p-deta1p);
d2h3 = 2/(eta2p-eta1p)**3*(deta2p-deta1p)**2;
# p(eta, xsi)
p_ = pbeg + hv1*h2*h3;
# first derivative, dp(eta, xsi)
J1 = hv1*h3*deta_;
J2 = dpbeg + dhv1*h2*h3 + hv1*(dh2*h3 + h2 * dh3);
# second order derivative d2p(eta, xsi), H11 is zero!
H21 = (dhv1*h3 + hv1*dh3)*deta_;
H22 = dhv1*(dh2*h3 + h2*dh3)*2 + hv1*( 2*dh2*dh3 + h2*d2h3 );
# finally applying for the object function
H = zeros((2,2))
H[0,0] = dot(J1,J1)
H[0,1] = dot(J1,J2) + dot(p_ - p, H21)
H[1,1] = dot(J2,J2) + dot(p_ - p, H22)
H[1,0] = H[0,1];
return 2.*H
def projectOnCS(self, p):
opttype = 'newton'
# calculate initial guess, project onto leading edge and inner section
eta = dot(p - self.__p1p, self.__p2p - self.__p1p)/( linalg.norm(self.__p2p - self.__p1p)**2)
xsi = dot(p - self.__p1, self.__p3 - self.__p1)/( linalg.norm(self.__p3 - self.__p1)**2)
# scale according to local eta range
eta = eta*(self.__etamax-self.__etamin) + self.__etamin
x = array([eta,xsi])
of = lambda x: linalg.norm(self.calcCSPoint(x[0], x[1])-p)**2;
ograd = lambda x: 2.* dot(self.calcCSPointTangents(x[0], x[1]).transpose(), self.calcCSPoint(x[0], x[1])-p);
#ograd = lambda x: ms_numGrad(of, x, 1e-9)
ohess = lambda x: self.__calcCSHessian( x[0], x[1], p);
#ohess = lambda x: ms_numHess(ograd, x, 1e-9)
fig2 = plt.figure();
X, Y = meshgrid(arange(self.__etamin-0.2, self.__etamax+0.2, 0.02), arange(-0.2, 1.2, 0.02))
Z = zeros(X.shape);
for i in range(0,size(X,0)):
for j in range(0,size(X,1)):
Z[i,j] = of([X[i,j], Y[i,j]])
plt.imshow(Z,origin='lower', extent=[self.__etamin-0.2, self.__etamax+0.2,-0.2,1.2], aspect=1./1.)
plt.colorbar();
plt.contour(X,Y,Z)
plt.title('Objective function opt:'+opttype)
plt.xlabel('eta');
plt.ylabel('xsi');
if opttype == 'bfgs':
x_= ms_optQuasiNewton(of,ograd, x, 'bfgs')
elif opttype == 'sr1':
x_= ms_optQuasiNewton(of,ograd, x, 'sr1')
elif opttype == 'gradient':
x_= ms_optSteepestDescent(of,ograd, x)
elif opttype == 'cg':
x_= ms_optCG(of,ograd, x, 'fr')
else:
x_= ms_optNewton(of,ograd,ohess,x)
eta = x_[0]; xsi = x_[1];
return (eta, xsi)
def projectOnCS3(self, p):
segment = SegmentGeometry(self.__p1, self.__p2, self.__p3, self.__p4)
# get the projection point on the, here we get some numerical uncertainty
# if we'd knew that p is already on the plane, we could directly use p
alpha, beta = segment.projectOnSegment(p);
return self.convertAlphaBetaToEtaXsi(alpha, beta)
#return self.convertXYZtoEtaXsi(p[:,0])
def convertAlphaBetaToEtaXsi(self, alpha, beta):
segment = SegmentGeometry(self.__p1, self.__p2, self.__p3, self.__p4)
p_proj = segment.getPoint(alpha, beta)[:,0]
return self.convertXYZtoEtaXsi(p_proj)
# we must ensure that p_proj already lies on the segment, if unsure use projectOnCS3
def convertXYZtoEtaXsi(self, p_proj):
# project leading edge into the z-y plane
vProj = array([0, 1, 1])
n = vProj*(self.__p2p-self.__p1p)
# calc eta koordinate of that point
eta = (dot(p_proj,n)*(self.__eta2 - self.__eta1) + dot(self.__p2,n)*self.__eta1 - dot(self.__p1,n)*self.__eta2) \
/ dot(self.__p2 - self.__p1, n);
# intersection point of plane with leading edge
p_ = 1./(self.__eta2 - self.__eta1)*( (self.__eta2 - eta)*self.__p1 + (eta - self.__eta1)*self.__p2 )
a = -self.__p1+self.__p2;
b = -self.__p1+self.__p3;
c = self.__p1-self.__p2-self.__p3+self.__p4;
d = self.__p1;
# calc some constants
a1 = dot(p_ - d, n);
a2 = -dot(b, n);
a3 = dot(a, n);
a4 = dot(c, n);
# this calculates the intersection curve from the segment with a plane (normal vector n, point p2)
al = lambda beta: (a1 + a2*beta)/(a3 + a4*beta);
# 3d intersection curve, parametrized by beta [0,1]
cu = lambda beta: outer(a,al(beta)) + outer(b,beta) + outer(c,al(beta)*beta) + outer(d,ones(size(beta)));
# now we have to find the xi coordinate, i.e. (pbeg + (pend-pbeg)*xi - p_proj)*(pbeg-pend) == 0
pbeg = cu(0)[:,0]
pend = cu(1)[:,0]
xsi = dot(p_proj - pbeg, pbeg-pend)/dot(pend-pbeg, pbeg-pend);
return eta, xsi
def calcCSIsoXsiLine(self, xsi, extentToGeometry = False):
etamin = self.__etamin
etamax = self.__etamax
if not extentToGeometry:
etamin = xsi * (self.__eta3 - self.__eta1) + self.__eta1
etamin = etamin * (self.__etamax - self.__etamin) + self.__etamin
etamax = xsi * (self.__eta4 - self.__eta2) + self.__eta2
etamax = etamax * (self.__etamax - self.__etamin) + self.__etamin
P1 = self.calcCSPoint(etamin,xsi);
P2 = self.calcCSPoint(etamax,xsi);
return ([P1[0], P2[0]], [P1[1], P2[1]], [P1[2], P2[2]] )
def calcCSIsoEtaLine(self, eta, extentToGeometry = False, npoints = 30):
eta_ = (eta - self.__etamin)/(self.__etamax - self.__etamin)
# calculate bilinear vectors
a = -self.__p1 + self.__p2;
b = -self.__p1 + self.__p3;
c = self.__p1 - self.__p2 - self.__p3 + self.__p4;
d = self.__p1;
# leading edge vector
sv = self.__p2 - self.__p1
# normal vector of intersection plane
n = array([0, -sv[1], -sv[2]])
# calculate eta point on leading edge
p_ = self.__p1p*(1-eta_) + eta_*self.__p2p;
a1 = dot(p_-d,n);
a2 = -dot(b,n);
a3 = dot(a,n);
a4 = dot(c,n);
# this calculates the intersection curve from the segment with a plane (normal vector n, point p2)
al = lambda beta: (a1 + a2*beta)/(a3 + a4*beta);
# 3d intersection curve, parameterized by beta [0,1]
cu = lambda beta: outer(a,al(beta)) + outer(b,beta) + outer(c,al(beta)*beta) + outer(d, ones(size(beta)));
xsistart = 0 if (eta_ <= self.__eta2) else (eta_ - self.__eta2)/(self.__eta4 - self.__eta2)
xsistop = 1 if (eta_ <= self.__eta4) else (eta_ - self.__eta2)/(self.__eta4 - self.__eta2)
xsistart = xsistart if (eta_ >= self.__eta1) else (eta_ - self.__eta1)/(self.__eta3 - self.__eta1)
xsistop = xsistop if (eta_ >= self.__eta3) else (eta_ - self.__eta1)/(self.__eta3 - self.__eta1)
if extentToGeometry:
xsi = linspace(0,1,npoints)
else:
xsi = linspace(xsistart,xsistop,npoints)
points = cu(xsi);
X = points[0,:];
Y = points[1,:];
Z = points[2,:];
return (X,Y,Z)
def drawSegment(self, axis, extentToGeometry = False):
start = math.ceil (self.__etamin*10.)/10.
stop = math.floor(self.__etamax*10.)/10.
alpha = start
while alpha <= stop:
X,Y,Z = self.calcCSIsoEtaLine(alpha, extentToGeometry)
axis.plot(X,Y,Z,'g')
alpha = alpha + 0.1
beta = 0.0
while beta <= 1.0:
X,Y,Z = self.calcCSIsoXsiLine(beta, extentToGeometry)
axis.plot(X,Y,Z,'g');
beta = beta + 0.1
lw = 2
axis.plot([self.__p1[0], self.__p2[0]], [self.__p1[1], self.__p2[1]], [self.__p1[2], self.__p2[2]],'b',linewidth=lw);
axis.plot([self.__p1[0], self.__p3[0]], [self.__p1[1], self.__p3[1]], [self.__p1[2], self.__p3[2]],'b',linewidth=lw);
axis.plot([self.__p4[0], self.__p2[0]], [self.__p4[1], self.__p2[1]], [self.__p4[2], self.__p2[2]],'b',linewidth=lw);
axis.plot([self.__p3[0], self.__p4[0]], [self.__p3[1], self.__p4[1]], [self.__p3[2], self.__p4[2]],'b',linewidth=lw);
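# Hedged usage sketch: build a flat unit-square segment and evaluate a
# component-segment point.  Corner roles follow the constructor (p1/p2
# inner/outer leading edge, p3/p4 inner/outer trailing edge); the
# coordinates are made up for illustration.
def _example_cs_point():
    p1 = array([0.0, 0.0, 0.0])  # inner leading edge
    p2 = array([0.0, 1.0, 0.0])  # outer leading edge
    p3 = array([1.0, 0.0, 0.0])  # inner trailing edge
    p4 = array([1.0, 1.0, 0.0])  # outer trailing edge
    cs = ComponentSegmentGeometry(p1, p2, p3, p4)
    return cs.calcCSPoint(0.5, 0.5)  # mid-span, mid-chord -> (0.5, 0.5, 0)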
| apache-2.0 |
bertdecoensel/noysim | noysim/viewer.py | 1 | 28081 | # Noysim -- Noise simulation tools for Aimsun.
# Copyright (c) 2010-2011 by Bert De Coensel, Ghent University & Griffith University.
#
# Classes for sending and viewing noise levels in real-time
import os
import sys
import socket
import threading
import time
import random
import msvcrt
if not hasattr(sys, 'frozen'):
import wxversion
wxversion.select('2.8-msw-unicode') # version of wxPython
import wx
from wx.lib.agw.floatspin import FloatSpin, EVT_FLOATSPIN
import matplotlib
matplotlib.use('WXAgg')
from matplotlib.figure import Figure
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigCanvas, NavigationToolbar2WxAgg as NavigationToolbar
import numpy
import pylab
USERPYC = True # if set to False, low level sockets are used
if USERPYC:
try:
# check if rpyc is installed
import rpyc
from rpyc.utils.server import ThreadedServer
from rpyc.utils.classic import DEFAULT_SERVER_PORT
from rpyc.utils.registry import UDPRegistryClient
from rpyc.core import SlaveService
except:
# a fallback to low level sockets exists, but is disabled here: rpyc is required
USERPYC = False
raise Exception('rpyc has to be installed')
import version
#---------------------------------------------------------------------------------------------------
# Parameters
#---------------------------------------------------------------------------------------------------
# general parameters
NAME = '%s %s Viewer' % (version.name.capitalize(), version.version)
ABOUT = NAME + '\n\n' + version.copyright.replace(', ', '\n') + '\n' + version.email
# communication with level viewer
RPYCTHREAD = None # global level thread variable (needed to circumvent the rpyc service factory)
HOST = 'localhost'
PORT = 50007
TIMEOUT = 0.01
SLEEP = 0.001
BUFSIZE = 4096
# timing parameters
REDRAWTIME = 100 # number of milliseconds between redraws
FLASHTIME = 1500 # duration of messages on the status bar, in milliseconds
# visualisation parameters
DPI = 100 # dots per inch for plotting and saving
FIGSIZE = (3.0, 3.0) # size of plotting canvas in inches (defaults to 300x300 pixels)
FONTSIZE = 8 # size of font of labels
BGCOLOR = 'black'
GRIDCOLOR = 'gray'
LINECOLOR = 'yellow'
LINEWIDTH = 1
# axes parameters
SPININC = 5.0 # increment of spin controls
XMIN = 10.0 # minimal x-axis range width
XWIDTH = 30.0 # initial value of x-axis range width
YMIN = (0.0, 10.0) # minimal y-axis low and height
YRANGE = (30.0, 60.0) # initial values of y-axis low and height
MARGIN = 1.0 # margin for auto range of levels
# test parameters
TESTDT = 0.5 # simulation timestep in seconds
TESTSLEEP = 0.2 # time between level updates
TESTLOCS = ['(1.00,2.00,3.00)', '(4.00,5.00,6.00)'] # locations of test receivers
randomLevel = lambda: 40.0 + 30.0*random.random() # function that generates a random sound level
#---------------------------------------------------------------------------------------------------
# Communication from plugin to viewer
#---------------------------------------------------------------------------------------------------
class LevelBuffer(object):
""" base interface for sending levels to the viewer, implementing the one-way communication protocol
types of messages:
- command: 'clear'
- levels: 't;loc:level;loc:level'
"""
def __init__(self, host = HOST, port = PORT, active = True, sleep = 0, verbose = False):
object.__init__(self)
self.host = host
self.port = port
self.queue = [] # queue of messages to send
self.active = active # if False, nothing is sent
self.sleep = sleep/1000.0 # time to sleep (in seconds) after sending levels (to slow down a simulation)
self.verbose = verbose # if True, debug code is printed
def sendLevels(self, t, levels):
""" send a series of levels at a particular time at different locations (dict of location:level) """
if self.active:
message = ('%.2f;' % t) + ';'.join([('%s:%.2f' % (str(loc), level)) for loc, level in levels.iteritems()])
self.queue.append(message)
self.flush()
if self.sleep > 0.0:
time.sleep(self.sleep)
def sendClear(self):
""" send a 'clear' message """
if self.active:
message = 'clear'
self.queue.append(message)
self.flush()
def send(self, message):
""" should send a single message string to the viewer (raise an error if not succesful) """
raise NotImplementedError
def flush(self):
""" try to send all message strings in the queue to the viewer """
while (len(self.queue) > 0) and (self.active == True):
message = self.queue[0]
try:
if self.verbose:
print 'trying to send message "%s"' % message
self.send(message)
# remove message from queue
del self.queue[0]
if self.verbose:
print 'sending successful'
except:
if self.verbose:
print 'sending failed - aborting - length of queue: %d' % len(self.queue)
break
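# -- Illustrative sketch (added, not part of the original protocol code):
# composing and parsing one levels message by hand, mirroring
# LevelBuffer.sendLevels on the sending side and
# ViewerLevelThread.processMessage on the receiving side.
def _exampleMessageRoundtrip():
    message = '0.50;(1.00,2.00,3.00):42.17;(4.00,5.00,6.00):55.03'
    tokens = message.split(';')
    t = float(tokens[0])  # simulation time in seconds
    levels = {}
    for token in tokens[1:]:
        loc, level = token.split(':')
        levels[loc] = float(level)  # sound level in dB at the receiver
    return t, levels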
class SocketLevelBuffer(LevelBuffer):
""" implement the level buffer using low level sockets """
def __init__(self, *args, **kwargs):
LevelBuffer.__init__(self, *args, **kwargs)
def send(self, message):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.host, self.port))
s.sendall(message)
s.close()
class RPyCLevelBuffer(LevelBuffer):
""" implement the level buffer using Remote Python Calls (RPyC) """
def __init__(self, *args, **kwargs):
LevelBuffer.__init__(self, *args, **kwargs)
def send(self, message):
conn = rpyc.classic.connect('localhost')
conn.root.processMessage(message)
conn.close()
def createLevelBuffer(*args, **kwargs):
""" create a level buffer according to the defined protocol """
if USERPYC:
return RPyCLevelBuffer(*args, **kwargs)
else:
return SocketLevelBuffer(*args, **kwargs)
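# -- Example usage (illustrative): an Aimsun plugin would typically create a
# buffer once and push levels every timestep, e.g.
# buf = createLevelBuffer()
# buf.sendClear()
# buf.sendLevels(t = 0.5, levels = {'(1.00,2.00,3.00)': 42.17})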
#---------------------------------------------------------------------------------------------------
# Viewer thread for receiving levels
#---------------------------------------------------------------------------------------------------
VIEWERLOCK = threading.Lock()
class BaseLevelThread(threading.Thread):
""" base interface for a thread for receiving levels """
def __init__(self):
threading.Thread.__init__(self)
self.active = True # set this to false for the thread to stop
self.clear()
def clear(self):
""" clear all data """
VIEWERLOCK.acquire()
self.data = {} # dict with received levels, for each receiver location
self.times = [] # list with times
VIEWERLOCK.release()
def locations(self):
""" return the receiver locations """
VIEWERLOCK.acquire()
result = self.data.keys()[:]
VIEWERLOCK.release()
return result
def levels(self, loc):
""" return the times and levels at the given location """
VIEWERLOCK.acquire()
result = (numpy.asarray(self.times).copy(), numpy.asarray(self.data[loc]).copy())
VIEWERLOCK.release()
return result
class DummyLevelThread(BaseLevelThread):
""" dummy interface for receiving levels, which adds levels at regular instances in time """
def __init__(self, dt = TESTDT, sleep = TESTSLEEP, locs = TESTLOCS):
BaseLevelThread.__init__(self)
self.dt = dt
self.sleep = sleep
self.locs = locs
def run(self):
""" instantiate the server """
print 'thread started...'
t = 0.0
while self.active:
t += self.dt
VIEWERLOCK.acquire()
self.times.append(t)
for loc in self.locs:
if not loc in self.data:
self.data[loc] = []
level = randomLevel()
self.data[loc].append(level)
print 'level received successfully: time %.2fs, %s, %.2f dB' % (t, loc, level)
VIEWERLOCK.release()
time.sleep(self.sleep)
class ViewerLevelThread(BaseLevelThread):
""" interface for receiving levels, as a thread that runs a server which listens to new levels """
def __init__(self, frame = None, host = HOST, port = PORT, verbose = False):
BaseLevelThread.__init__(self)
self.frame = frame # frame to which the thread is connected
self.host = host
self.port = port
self.verbose = verbose # if True, debug code is printed
def processMessage(self, message):
""" process an incoming message """
if message == '':
pass
elif message == 'clear':
self.clear()
# clear the frame if applicable
if self.frame != None:
self.frame.clear_choices()
self.frame.clear_plot()
if self.verbose:
print 'levels cleared'
else:
# parse the incoming message
tokens = message.split(';')
t = float(tokens[0])
levels = []
for token in tokens[1:]:
loc, level = token.split(':')
level = float(level)
levels.append((loc, level))
# when parsing is successful, update the data
if (len(self.times) > 0) and (t < self.times[-1]):
if self.verbose:
print 'discarding non-chronological levels: %s' % message
else:
VIEWERLOCK.acquire()
self.times.append(t)
for loc, level in levels:
if not loc in self.data:
self.data[loc] = []
self.data[loc].append(level)
if self.verbose:
print 'level received successfully: time %.2fs, %s, %.2f dB' % (t, loc, level)
VIEWERLOCK.release()
class SocketViewerLevelThread(ViewerLevelThread):
""" implementation of viewer level thread using low level sockets """
def __init__(self, *args, **kwargs):
ViewerLevelThread.__init__(self, *args, **kwargs)
def run(self):
""" instantiate the server """
if self.verbose:
print 'thread started...'
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((self.host, self.port))
s.listen(1)
while self.active:
# wait for a connection from the plugin
try:
s.settimeout(TIMEOUT)
conn, addr = s.accept()
s.settimeout(None)
except:
time.sleep(SLEEP)
continue
# when there is a connection, fetch the message
if self.verbose:
print 'connection established'
data = ''
try:
while True:
temp = conn.recv(BUFSIZE)
if not temp:
break
data += temp
conn.close()
except:
if self.verbose:
print 'socket error, so skipping message'
# update the levels
try:
self.processMessage(data)
except:
if self.verbose:
print 'error with received message: "%s"' % data
s.close()
if USERPYC:
class RPyCViewerService(SlaveService):
""" service for managing received messages using Remote Python Calls (RPyC) """
def __init__(self, conn):
SlaveService.__init__(self, conn)
def exposed_processMessage(self, message):
""" send a message to the parent thread for processing """
global RPYCTHREAD
RPYCTHREAD.processMessage(message)
class RPyCViewerLevelThread(ViewerLevelThread):
""" implementation of viewer level thread using Remote Python Calls (RPyC) """
def __init__(self, *args, **kwargs):
ViewerLevelThread.__init__(self, *args, **kwargs)
def run(self):
""" instantiate the server """
if self.verbose:
print 'thread started...'
global RPYCTHREAD
RPYCTHREAD = self
self.server = ThreadedServer(RPyCViewerService, port = DEFAULT_SERVER_PORT, auto_register = False, registrar = UDPRegistryClient())
self.server.start()
def join(self):
self.server.close()
ViewerLevelThread.join(self)
def createViewerLevelThread(*args, **kwargs):
""" create a viewer level thread according to the defined protocol """
if USERPYC:
return RPyCViewerLevelThread(*args, **kwargs)
else:
return SocketViewerLevelThread(*args, **kwargs)
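# -- Example usage (illustrative), mirroring ViewerFrame and the command
# line mode in the test code below:
# thread = createViewerLevelThread(frame = None, verbose = True)
# thread.start()
# ... # thread.locations() and thread.levels(loc) can be polled meanwhile
# thread.active = False
# thread.join()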
#---------------------------------------------------------------------------------------------------
# Utility GUI controls
#---------------------------------------------------------------------------------------------------
class XAxisRangeBox(wx.Panel):
""" panel for adjusting x-axis range """
def __init__(self, parent, ID, minvalue = XMIN, initvalue = XWIDTH, increment = SPININC):
wx.Panel.__init__(self, parent, ID)
self.minvalue = minvalue
self.value = initvalue # initial x-axis range width (in sliding mode)
# controls
self.radio_full = wx.RadioButton(self, -1, label = 'Full range', style = wx.RB_GROUP)
self.radio_slide = wx.RadioButton(self, -1, label = 'Sliding')
self.slide_width = FloatSpin(self, -1, size = (50, -1), digits = 0, value = self.value, min_val = minvalue, increment = increment)
self.slide_width.GetTextCtrl().SetEditable(False)
# event bindings
self.Bind(wx.EVT_UPDATE_UI, self.on_update_radio_buttons, self.radio_full)
self.Bind(EVT_FLOATSPIN, self.on_float_spin, self.slide_width)
# layout
box = wx.StaticBox(self, -1, 'X-axis')
sizer = wx.StaticBoxSizer(box, wx.VERTICAL)
slide_box = wx.BoxSizer(wx.HORIZONTAL)
slide_box.Add(self.radio_slide, flag=wx.ALIGN_CENTER_VERTICAL)
slide_box.Add(self.slide_width, flag=wx.ALIGN_CENTER_VERTICAL)
sizer.Add(self.radio_full, 0, wx.ALL, 10)
sizer.Add(slide_box, 0, wx.ALL, 10)
self.SetSizer(sizer)
sizer.Fit(self)
def on_update_radio_buttons(self, event):
""" called when the radio buttons are toggled """
self.slide_width.Enable(self.radio_slide.GetValue())
def on_float_spin(self, event):
""" called when the sliding mode spinbox is changed """
self.value = self.slide_width.GetValue()
def is_full(self):
""" return True if full range is checked """
return self.radio_full.GetValue()
class YAxisRangeBox(wx.Panel):
""" panel for adjusting y-axis range """
def __init__(self, parent, ID, minvalue = YMIN, initvalue = YRANGE, increment = SPININC):
wx.Panel.__init__(self, parent, ID)
self.value = initvalue # initial y-axis range (in manual mode), i.e. (min, max-min)
# controls
self.radio_auto = wx.RadioButton(self, -1, label = 'Auto', style = wx.RB_GROUP)
self.radio_manual = wx.RadioButton(self, -1, label = 'Manual')
self.manual_min = FloatSpin(self, -1, size = (50, -1), digits = 0, value = self.value[0], min_val = minvalue[0], increment = increment)
self.manual_min.GetTextCtrl().SetEditable(False)
self.manual_width = FloatSpin(self, -1, size = (50, -1), digits = 0, value = self.value[1], min_val = minvalue[1], increment = increment)
self.manual_width.GetTextCtrl().SetEditable(False)
# event bindings
self.Bind(wx.EVT_UPDATE_UI, self.on_update_radio_buttons, self.radio_auto)
self.Bind(EVT_FLOATSPIN, self.on_float_spin, self.manual_min)
self.Bind(EVT_FLOATSPIN, self.on_float_spin, self.manual_width)
# layout
box = wx.StaticBox(self, -1, 'Y-axis')
sizer = wx.StaticBoxSizer(box, wx.VERTICAL)
manual_box = wx.BoxSizer(wx.HORIZONTAL)
manual_box.Add(self.radio_manual, flag=wx.ALIGN_CENTER_VERTICAL)
manual_box.Add(self.manual_min, flag=wx.ALIGN_CENTER_VERTICAL)
manual_box.Add(self.manual_width, flag=wx.ALIGN_CENTER_VERTICAL)
sizer.Add(self.radio_auto, 0, wx.ALL, 10)
sizer.Add(manual_box, 0, wx.ALL, 10)
self.SetSizer(sizer)
sizer.Fit(self)
def on_update_radio_buttons(self, event):
""" called when the radio buttons are toggled """
toggle = self.radio_manual.GetValue()
self.manual_min.Enable(toggle)
self.manual_width.Enable(toggle)
def on_float_spin(self, event):
""" called when one of the manual mode spinboxes is changed """
self.value = (self.manual_min.GetValue(), self.manual_width.GetValue())
def is_auto(self):
""" return True if auto range is checked """
return self.radio_auto.GetValue()
#---------------------------------------------------------------------------------------------------
# Viewer frame class
#---------------------------------------------------------------------------------------------------
class ViewerFrame(wx.Frame):
""" main frame of the viewer application """
def __init__(self, test = False):
wx.Frame.__init__(self, None, -1, NAME)
self.paused = False
self.locations = []
# creation of controls
self.create_menu()
self.create_status_bar()
self.create_main_panel()
# timer for redrawing
self.redraw_timer = wx.Timer(self)
self.Bind(wx.EVT_TIMER, self.on_redraw_timer, self.redraw_timer)
self.redraw_timer.Start(REDRAWTIME)
# handle closing the frame
self.Bind(wx.EVT_CLOSE, self.on_exit, self)
# manage window style (always on top or not)
self.wstyle = self.GetWindowStyle()
self.SetWindowStyle(self.wstyle | wx.STAY_ON_TOP)
# coordination with data server
if test:
self.thread = DummyLevelThread()
else:
self.thread = createViewerLevelThread(frame = self)
self.thread.start()
def create_menu(self):
""" construction of menu bar """
self.menubar = wx.MenuBar()
# File menu
menu_file = wx.Menu()
m_expt = menu_file.Append(-1, '&Save plot\tCtrl-S')
self.Bind(wx.EVT_MENU, self.on_save_plot, m_expt)
menu_file.AppendSeparator()
m_exit = menu_file.Append(-1, 'E&xit\tCtrl-X')
self.Bind(wx.EVT_MENU, self.on_exit, m_exit)
# View menu
menu_view = wx.Menu()
self.m_ontop = menu_view.Append(-1, '&Stay on top', kind = wx.ITEM_CHECK)
self.m_ontop.Check(True)
self.Bind(wx.EVT_MENU, self.on_ontop, self.m_ontop)
# Help menu
menu_help = wx.Menu()
m_about = menu_help.Append(-1, '&About...')
self.Bind(wx.EVT_MENU, self.on_about, m_about)
# construction of menu bar
self.menubar.Append(menu_file, '&File')
self.menubar.Append(menu_view, '&View')
self.menubar.Append(menu_help, '&Help')
self.SetMenuBar(self.menubar)
def create_status_bar(self):
""" construction of status bar """
self.statusbar = self.CreateStatusBar()
self.statusbar.SetFieldsCount(2)
self.statusbar.SetStatusWidths([50, -1])
def create_main_panel(self):
""" construction of the main controls """
self.panel = wx.Panel(self)
# contruct plotting area
self.fig = Figure(FIGSIZE, dpi = DPI)
# construct axes
self.axes = self.fig.add_subplot(111)
self.axes.set_axis_bgcolor(BGCOLOR)
# adjust font size of axes labels
pylab.setp(self.axes.get_xticklabels(), fontsize = FONTSIZE)
pylab.setp(self.axes.get_yticklabels(), fontsize = FONTSIZE)
# construct canvas with plotting area
self.plot_data = self.axes.plot([], linewidth = LINEWIDTH, color = LINECOLOR)[0]
self.canvas = FigCanvas(self.panel, -1, self.fig)
# construct location choice box
self.location_txt = wx.StaticText(self.panel, -1, label = ' Select location:')
self.location_box = wx.Choice(self.panel, -1, choices = [], size = (150,-1))
self.location_box.Enable(False)
self.Bind(wx.EVT_CHOICE, lambda event: self.draw_plot(), self.location_box)
# layout location choice box
self.hbox0 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox0.Add(self.location_txt, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox0.Add(self.location_box, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
# construct buttons
self.pause_button = wx.Button(self.panel, -1, 'Pause')
self.Bind(wx.EVT_BUTTON, self.on_pause_button, self.pause_button)
self.Bind(wx.EVT_UPDATE_UI, self.on_update_pause_button, self.pause_button)
self.clear_button = wx.Button(self.panel, -1, 'Clear')
self.Bind(wx.EVT_BUTTON, self.on_clear_button, self.clear_button)
self.cb_grid = wx.CheckBox(self.panel, -1, 'Show grid', style=wx.ALIGN_RIGHT)
self.Bind(wx.EVT_CHECKBOX, lambda event: self.draw_plot(), self.cb_grid)
self.cb_grid.SetValue(True)
self.cb_xlab = wx.CheckBox(self.panel, -1, 'X-labels', style=wx.ALIGN_RIGHT)
self.Bind(wx.EVT_CHECKBOX, lambda event: self.draw_plot(), self.cb_xlab)
self.cb_xlab.SetValue(True)
# layout buttons (add space using self.hbox1.AddSpacer(5))
self.hbox1 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox1.Add(self.pause_button, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox1.Add(self.clear_button, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox1.Add(self.cb_grid, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
self.hbox1.Add(self.cb_xlab, border=5, flag=wx.ALL | wx.ALIGN_CENTER_VERTICAL)
# construct axis controls
self.xrange_control = XAxisRangeBox(self.panel, -1)
self.yrange_control = YAxisRangeBox(self.panel, -1)
# layout axis controls
self.hbox2 = wx.BoxSizer(wx.HORIZONTAL)
self.hbox2.Add(self.xrange_control, border=5, flag=wx.ALL)
self.hbox2.Add(self.yrange_control, border=5, flag=wx.ALL)
# finally, create layout of viewer frame
self.vbox = wx.BoxSizer(wx.VERTICAL)
self.vbox.Add(self.canvas, 1, flag=wx.LEFT | wx.TOP | wx.GROW)
self.vbox.Add(self.hbox0, 0, flag=wx.ALIGN_LEFT | wx.TOP)
self.vbox.Add(self.hbox1, 0, flag=wx.ALIGN_LEFT | wx.TOP)
self.vbox.Add(self.hbox2, 0, flag=wx.ALIGN_LEFT | wx.TOP)
self.panel.SetSizer(self.vbox)
self.vbox.Fit(self)
def draw_plot(self):
""" redraw the plot and update the gui if necessary """
if not self.paused:
# check if data is available
if len(self.locations) == 0:
self.locations = sorted(self.thread.locations())
if len(self.locations) > 0:
self.location_box.AppendItems(self.locations)
self.location_box.SetSelection(0)
self.location_box.Enable(True)
self.flash_status_message('Connection established')
if len(self.locations) > 0:
# fetch data at selected receiver location
loc = self.locations[self.location_box.GetSelection()]
times, levels = self.thread.levels(loc)
if (len(times) == len(levels)):
# calculate x-axis limits
if self.xrange_control.is_full():
# show the full range for the x-axis
xmin = times[0]
xmax = max(times[0] + self.xrange_control.minvalue, times[-1])
else:
# show a sliding window
xmax = times[-1]
xmin = xmax - self.xrange_control.value
# calculate y-axis limits
if self.yrange_control.is_auto():
# find the min and max values of the data and add a minimal margin
ymin = round(min(levels), 0) - MARGIN
ymax = round(max(levels), 0) + MARGIN
else:
# use manual interval
ymin = self.yrange_control.value[0]
ymax = ymin + self.yrange_control.value[1]
# set axis limits
self.axes.set_xbound(lower = xmin, upper = xmax)
self.axes.set_ybound(lower = ymin, upper = ymax)
# finally, plot the data and redraw the plot
self.plot_data.set_xdata(numpy.array(times))
self.plot_data.set_ydata(numpy.array(levels))
# draw grid
if self.cb_grid.IsChecked():
self.axes.grid(True, color = GRIDCOLOR)
else:
self.axes.grid(False)
# draw axis labels
pylab.setp(self.axes.get_xticklabels(), visible = self.cb_xlab.IsChecked())
self.canvas.draw()
def clear_plot(self):
""" clear the data on the plot """
self.plot_data.set_xdata([])
self.plot_data.set_ydata([])
self.canvas.draw()
def on_redraw_timer(self, event):
""" redraw the plot """
self.draw_plot()
def on_pause_button(self, event):
""" called when the pause button is clicked """
self.paused = not self.paused
if self.paused:
self.statusbar.SetStatusText('Paused', 0)
else:
self.statusbar.SetStatusText('', 0)
def on_update_pause_button(self, event):
""" called when the pause button is to be updated """
label = 'Resume' if self.paused else 'Pause'
self.pause_button.SetLabel(label)
def on_clear_button(self, event):
""" called when the clear butten is clicked """
self.thread.clear()
self.clear_choices()
self.clear_plot()
def clear_choices(self):
""" clear the choices box """
self.locations = []
self.location_box.Clear()
self.location_box.Enable(False)
self.flash_status_message('Cleared')
def on_save_plot(self, event):
""" show a window for saving a screenshot """
dlg = wx.FileDialog(self, message = 'Save plot as...', defaultDir = os.getcwd(), defaultFile = 'plot.png', wildcard = 'PNG (*.png)|*.png', style = wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self.canvas.print_figure(path, dpi = DPI)
self.flash_status_message('Saved to %s' % path)
def stop_thread(self):
""" stop the level thread """
self.thread.active = False
self.thread.join()
def on_exit(self, event):
""" called when the viewer is closed """
self.stop_thread()
self.Destroy()
def on_ontop(self, event):
""" toggles the stay on top modus """
if self.m_ontop.IsChecked():
self.SetWindowStyle(self.wstyle | wx.STAY_ON_TOP)
else:
self.SetWindowStyle(self.wstyle)
def on_about(self, event):
""" show an about box """
wx.MessageBox(ABOUT, 'About ' + NAME)
def flash_status_message(self, message):
""" flash a message on the status bar """
try:
self.statusbar.SetStatusText(message, 1)
self.timeroff = wx.Timer(self)
self.Bind(wx.EVT_TIMER, lambda event: self.statusbar.SetStatusText('', 1), self.timeroff)
self.timeroff.Start(FLASHTIME, oneShot = True)
except:
pass
#---------------------------------------------------------------------------------------------------
# Test code
#---------------------------------------------------------------------------------------------------
if __name__ == '__main__':
if len(sys.argv) <= 1:
# no command line argument, so run the viewer application
app = wx.PySimpleApp()
app.frame = ViewerFrame()
app.frame.Show()
app.MainLoop()
if (len(sys.argv) == 2) and (sys.argv[1] == 'test'):
# run the viewer in test mode, i.e. generating its own levels for display
app = wx.PySimpleApp()
app.frame = ViewerFrame(test = True)
app.frame.Show()
app.MainLoop()
if (len(sys.argv) == 2) and (sys.argv[1] == 'command'):
# run the viewer in command line mode, i.e. only receiving levels and printing them to the console
print 'Running viewer in command line mode - press any key to stop...'
thread = createViewerLevelThread(frame = None, verbose = True)
thread.start()
# wait until a key is pressed
stop = False
while not stop:
if msvcrt.kbhit():
c = msvcrt.getch()
stop = True
time.sleep(0.1)
# stop the thread
thread.active = False
thread.join()
if (len(sys.argv) == 2) and (sys.argv[1] == 'dummy'):
# run a dummy Aimsun/Noysim2 client that sends random levels (for use with viewer in normal or command line mode)
print 'Running dummy Aimsun/Noysim2 client - press any key to stop...'
client = createLevelBuffer(verbose = True, sleep = 1000*TESTSLEEP)
client.sendClear()
stop = False
(t, dt) = (0.0, TESTDT)
while not stop:
t += dt
client.sendLevels(t = t, levels = dict([(loc, randomLevel()) for loc in TESTLOCS]))
if msvcrt.kbhit():
c = msvcrt.getch()
stop = True
| mit |
claesenm/optunity-benchmark | optimizers/tpe/hyperopt_august2013_mod_src/hyperopt/mongoexp.py | 2 | 60046 | """
Mongo-based Experiment driver and worker client
===============================================
Components involved:
- mongo
e.g. mongod ...
- driver
e.g. hyperopt-mongo-search mongo://address bandit_json bandit_algo_json
- worker
e.g. hyperopt-mongo-worker --loop mongo://address
Mongo
=====
Mongo (daemon process mongod) is used for IPC between the driver and worker.
Configure it as you like, so that hyperopt-mongo-search can communicate with it.
I think there is some support in this file for an ssh+mongo connection type.
The experiment uses the following collections for IPC:
* jobs - documents of a standard form used to store suggested trials and their
results. These documents have keys:
* spec : subdocument returned by bandit_algo.suggest
* exp_key: an identifier of which driver suggested this trial
* cmd: a tuple (protocol, ...) identifying bandit.evaluate
* state: 0, 1, 2, 3 for job state (new, running, ok, fail)
* owner: None for new jobs, (hostname, pid) for started jobs
* book_time: time a job was reserved
* refresh_time: last time the process running the job checked in
* result: the subdocument returned by bandit.evaluate
* error: for jobs of state 3, a reason for failure.
* logs: a dict of sequences of strings received by ctrl object
* info: info messages
* warn: warning messages
* error: error messages
* fs - a gridfs storage collection (used for pickling)
* drivers - documents describing drivers. These are used to prevent two drivers
from using the same exp_key simultaneously, and to attach saved states.
* exp_key
* workdir: [optional] path where workers should chdir to
Attachments:
* pkl: [optional] saved state of experiment class
* bandit_args_kwargs: [optional] pickled (clsname, args, kwargs) to
reconstruct bandit in worker processes
The MongoJobs, MongoExperiment, and CtrlObj classes as well as the main_worker
method form the abstraction barrier around this database layout.
Driver
======
A driver directs an experiment, by calling a bandit_algo to suggest trial
points, and queuing them in mongo so that a worker can evaluate that trial
point.
The hyperopt-mongo-search script creates a single MongoExperiment instance, and
calls its run() method.
Saving and Resuming
-------------------
The command
"hyperopt-mongo-search bandit algo"
creates a new experiment or resumes an existing experiment.
The command
"hyperopt-mongo-search --exp-key=<EXPKEY>"
can only resume an existing experiment.
The command
"hyperopt-mongo-search --clear-existing bandit algo"
can only create a new experiment, and potentially deletes an existing one.
The command
"hyperopt-mongo-search --clear-existing --exp-key=EXPKEY bandit algo"
can only create a new experiment, and potentially deletes an existing one.
By default, MongoExperiment.run will try to save itself before returning. It
does so by pickling itself to a file called 'exp_key' in the fs collection.
Resuming means unpickling that file and calling run again.
The MongoExperiment instance itself is minimal (a key, a bandit, a bandit algo,
a workdir, a poll interval). The only stateful element is the bandit algo. The
difference between resume and start is in the handling of the bandit algo.
Worker
======
A worker looks up a job in a mongo database, maps that job document to a
runnable python object, calls that object, and writes the return value back to
the database.
A worker *reserves* a job by atomically identifying a document in the jobs
collection whose owner is None and whose state is 0, and setting the state to
1. If it fails to identify such a job, it loops with a random sleep interval
of a few seconds and polls the database.
If hyperopt-mongo-worker is called with a --loop argument then it goes back to
the database after finishing a job to identify and perform another one.
CtrlObj
-------
The worker allocates a CtrlObj and passes it to bandit.evaluate in addition to
the subdocument found at job['spec']. A bandit can use ctrl.info, ctrl.warn,
ctrl.error and so on like logger methods, and those messages will be written
to the mongo database (to job['logs']). They are not written synchronously
though, they are written when the bandit.evaluate function calls
ctrl.checkpoint().
Ctrl.checkpoint does several things:
* flushes logging messages to the database
* updates the refresh_time
* optionally updates the result subdocument
The main_worker routine calls Ctrl.checkpoint(rval) once after the
bandit.evaluate function has returned, before setting the state to 2 or 3 to
finalize the job in the database.
"""
__authors__ = ["James Bergstra", "Dan Yamins"]
__license__ = "3-clause BSD License"
__contact__ = "github.com/jaberg/hyperopt"
import copy
try:
import dill as cPickle
except ImportError:
import cPickle
import hashlib
import logging
import optparse
import os
import shutil
import signal
import socket
import subprocess
import sys
import time
import urlparse
import warnings
import numpy
import pymongo
import gridfs
from bson import SON
logger = logging.getLogger(__name__)
from .base import JOB_STATES
from .base import (JOB_STATE_NEW, JOB_STATE_RUNNING, JOB_STATE_DONE,
JOB_STATE_ERROR)
from .base import Experiment
from .base import Trials
from .base import trials_from_docs
from .base import InvalidTrial
from .base import Ctrl
from .base import SONify
from .base import spec_from_misc
from .utils import coarse_utcnow
from .utils import fast_isin
from .utils import get_most_recent_inds
from .utils import json_call
import plotting
class OperationFailure(Exception):
"""Proxy that could be factored out if we also want to use CouchDB and
JobmanDB classes with this interface
"""
class Shutdown(Exception):
"""
Exception for telling mongo_worker loop to quit
"""
class WaitQuit(Exception):
"""
Exception for telling mongo_worker loop to finish the current job, then quit
"""
class InvalidMongoTrial(InvalidTrial):
pass
class BanditSwapError(Exception):
"""Raised when the search program tries to change the bandit attached to
an experiment.
"""
class ReserveTimeout(Exception):
"""No job was reserved in the alotted time
"""
def read_pw():
username = 'hyperopt'
password = open(os.path.join(os.getenv('HOME'), ".hyperopt")).read()[:-1]
return dict(
username=username,
password=password)
def authenticate_for_db(db):
d = read_pw()
db.authenticate(d['username'], d['password'])
def parse_url(url, pwfile=None):
"""Unpacks a url of the form
protocol://[username[:pw]]@hostname[:port]/db/collection
:rtype: tuple of strings
:returns: protocol, username, password, hostname, port, dbname, collection
:note:
If the password is not given in the url but the username is, then
this function will read the password from file by calling
``open(pwfile).read()[:-1]``
"""
protocol=url[:url.find(':')]
ftp_url='ftp'+url[url.find(':'):]
# -- parse the string as if it were an ftp address
tmp = urlparse.urlparse(ftp_url)
logger.info( 'PROTOCOL %s'% protocol)
logger.info( 'USERNAME %s'% tmp.username)
logger.info( 'HOSTNAME %s'% tmp.hostname)
logger.info( 'PORT %s'% tmp.port)
logger.info( 'PATH %s'% tmp.path)
try:
_, dbname, collection = tmp.path.split('/')
except:
print >> sys.stderr, "Failed to parse '%s'"%(str(tmp.path))
raise
logger.info( 'DB %s'% dbname)
logger.info( 'COLLECTION %s'% collection)
if tmp.password is None:
if (tmp.username is not None) and pwfile:
password = open(pwfile).read()[:-1]
else:
password = None
else:
password = tmp.password
logger.info( 'PASS %s'% password)
return (protocol, tmp.username, password, tmp.hostname, tmp.port, dbname,
collection)
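# -- Example (added; the address is hypothetical):
# parse_url('mongo://hyperopt:secret@localhost:27017/mydb/jobs')
# -> ('mongo', 'hyperopt', 'secret', 'localhost', 27017, 'mydb', 'jobs')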
def connection_with_tunnel(host='localhost',
auth_dbname='admin', port=27017,
ssh=False, user='hyperopt', pw=None):
if ssh:
local_port=numpy.random.randint(low=27500, high=28000)
# -- forward from local to remote machine
ssh_tunnel = subprocess.Popen(
['ssh', '-NTf', '-L',
'%i:%s:%i'%(local_port, '127.0.0.1', port),
host],
#stdin=subprocess.PIPE,
#stdout=subprocess.PIPE,
#stderr=subprocess.PIPE,
)
# -- give the subprocess time to set up
time.sleep(.5)
connection = pymongo.Connection('127.0.0.1', local_port,
document_class=SON)
else:
connection = pymongo.Connection(host, port, document_class=SON)
if user:
if user == 'hyperopt':
authenticate_for_db(connection[auth_dbname])
else:
raise NotImplementedError()
ssh_tunnel=None
return connection, ssh_tunnel
def connection_from_string(s):
protocol, user, pw, host, port, db, collection = parse_url(s)
if protocol == 'mongo':
ssh=False
elif protocol in ('mongo+ssh', 'ssh+mongo'):
ssh=True
else:
raise ValueError('unrecognized protocol for MongoJobs', protocol)
connection, tunnel = connection_with_tunnel(
ssh=ssh,
user=user,
pw=pw,
host=host,
port=port,
)
return connection, tunnel, connection[db], connection[db][collection]
class MongoJobs(object):
"""
# Interface to a Jobs database structured like this
#
# Collections:
#
# db.jobs - structured {config_name, 'cmd', 'owner', 'book_time',
# 'refresh_time', 'state', 'exp_key', 'owner', 'result'}
# This is the collection that the worker nodes write to
#
# db.gfs - file storage via gridFS for all collections
#
"""
def __init__(self, db, jobs, gfs, conn, tunnel, config_name):
self.db = db
self.jobs = jobs
self.gfs = gfs
self.conn=conn
self.tunnel=tunnel
self.config_name = config_name
# TODO: rename jobs -> coll throughout
coll = property(lambda s : s.jobs)
@classmethod
def alloc(cls, dbname, host='localhost',
auth_dbname='admin', port=27017,
jobs_coll='jobs', gfs_coll='fs', ssh=False, user=None, pw=None):
connection, tunnel = connection_with_tunnel(
host, auth_dbname, port, ssh, user, pw)
db = connection[dbname]
gfs = gridfs.GridFS(db, collection=gfs_coll)
return cls(db, db[jobs_coll], gfs, connection, tunnel, config_name='spec')
@classmethod
def new_from_connection_str(cls, conn_str, gfs_coll='fs', config_name='spec'):
connection, tunnel, db, coll = connection_from_string(conn_str)
gfs = gridfs.GridFS(db, collection=gfs_coll)
return cls(db, coll, gfs, connection, tunnel, config_name)
def __iter__(self):
return self.jobs.find()
def __len__(self):
try:
return self.jobs.count()
except:
return 0
def create_jobs_indexes(self):
jobs = self.db.jobs
for k in ['exp_key', 'result.loss', 'book_time']:
jobs.create_index(k)
def create_drivers_indexes(self):
drivers = self.db.drivers
drivers.create_index('exp_key', unique=True)
def create_indexes(self):
self.create_jobs_indexes()
self.create_drivers_indexes()
def jobs_complete(self, cursor=False):
c = self.jobs.find(spec=dict(state=JOB_STATE_DONE))
return c if cursor else list(c)
def jobs_error(self, cursor=False):
c = self.jobs.find(spec=dict(state=JOB_STATE_ERROR))
return c if cursor else list(c)
def jobs_running(self, cursor=False):
if cursor:
raise NotImplementedError()
rval = list(self.jobs.find(spec=dict(state=JOB_STATE_RUNNING)))
#TODO: mark some as MIA
rval = [r for r in rval if not r.get('MIA', False)]
return rval
def jobs_dead(self, cursor=False):
if cursor:
raise NotImplementedError()
rval = list(self.jobs.find(spec=dict(state=JOB_STATE_RUNNING)))
#TODO: mark some as MIA
rval = [r for r in rval if r.get('MIA', False)]
return rval
def jobs_queued(self, cursor=False):
c = self.jobs.find(spec=dict(state=JOB_STATE_NEW))
return c if cursor else list(c)
def insert(self, job, safe=True):
"""Return a job dictionary by inserting the job dict into the database"""
try:
cpy = copy.deepcopy(job)
# this call adds an _id field to cpy
_id = self.jobs.insert(cpy, safe=safe, check_keys=True)
# so now we return the dict with the _id field
assert _id == cpy['_id']
return cpy
except pymongo.errors.OperationFailure, e:
raise OperationFailure(e)
def delete(self, job, safe=True):
"""Delete job[s]"""
try:
self.jobs.remove(job, safe=safe)
except pymongo.errors.OperationFailure, e:
raise OperationFailure(e)
def delete_all(self, cond={}, safe=True):
"""Delete all jobs and attachments"""
try:
for d in self.jobs.find(spec=cond, fields=['_id', '_attachments']):
logger.info('deleting job %s' % d['_id'])
for name, file_id in d.get('_attachments', []):
try:
self.gfs.delete(file_id)
except gridfs.errors.NoFile:
logger.error('failed to remove attachment %s:%s' % (
name, file_id))
self.jobs.remove(d, safe=safe)
except pymongo.errors.OperationFailure, e:
raise OperationFailure(e)
def delete_all_error_jobs(self, safe=True):
return self.delete_all(cond={'state': JOB_STATE_ERROR}, safe=safe)
def reserve(self, host_id, cond=None, exp_key=None):
now = coarse_utcnow()
if cond is None:
cond = {}
else:
cond = copy.copy(cond) #copy is important, will be modified, but only the top-level
if exp_key is not None:
cond['exp_key'] = exp_key
#having an owner of None implies state==JOB_STATE_NEW, so this effectively
#acts as a filter to make sure that only new jobs get reserved.
if cond.get('owner') is not None:
raise ValueError('refusing to reserve owned job')
else:
cond['owner'] = None
cond['state'] = JOB_STATE_NEW # in theory redundant, given the owner == None filter above
try:
rval = self.jobs.find_and_modify(
cond,
{'$set':
{'owner': host_id,
'book_time': now,
'state': JOB_STATE_RUNNING,
'refresh_time': now,
}
},
new=True,
safe=True,
upsert=False)
except pymongo.errors.OperationFailure, e:
logger.error('Error during reserve_job: %s'%str(e))
rval = None
return rval
def refresh(self, doc, safe=False):
self.update(doc, dict(refresh_time=coarse_utcnow()), safe=False)
def update(self, doc, dct, safe=True, collection=None):
"""Return union of doc and dct, after making sure that dct has been
added to doc in `collection`.
This function does not modify either `doc` or `dct`.
safe=True means error-checking is done. safe=False means this function will succeed
regardless of what happens with the db.
"""
if collection is None:
collection = self.coll
dct = copy.deepcopy(dct)
if '_id' not in doc:
raise ValueError('doc must have an "_id" key to be updated')
if '_id' in dct:
if dct['_id'] != doc['_id']:
raise ValueError('cannot update the _id field')
del dct['_id']
if 'version' in dct:
if dct['version'] != doc['version']:
warnings.warn('Ignoring "version" field in update dictionary')
if 'version' in doc:
doc_query = dict(_id=doc['_id'], version=doc['version'])
dct['version'] = doc['version']+1
else:
doc_query = dict(_id=doc['_id'])
dct['version'] = 1
try:
# warning - if doc matches nothing then this function succeeds
# N.B. this matches *at most* one entry, and possibly zero
collection.update(
doc_query,
{'$set': dct},
safe=True,
upsert=False,
multi=False,)
except pymongo.errors.OperationFailure, e:
# translate pymongo failure into generic failure
raise OperationFailure(e)
# update doc in-place to match what happened on the server side
doc.update(dct)
if safe:
server_doc = collection.find_one(
dict(_id=doc['_id'], version=doc['version']))
if server_doc is None:
raise OperationFailure('updated doc not found : %s'
% str(doc))
elif server_doc != doc:
if 0:# This is all commented out because it is tripping on the fact that
# str('a') != unicode('a').
# TODO: eliminate false alarms and catch real ones
mismatching_keys = []
for k, v in server_doc.items():
if k in doc:
if doc[k] != v:
mismatching_keys.append((k, v, doc[k]))
else:
mismatching_keys.append((k, v, '<missing>'))
for k,v in doc.items():
if k not in server_doc:
mismatching_keys.append((k, '<missing>', v))
raise OperationFailure('local and server doc documents are out of sync: %s'%
repr((doc, server_doc, mismatching_keys)))
return doc
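# -- Added note: update() implements a light optimistic-concurrency scheme.
# Every write bumps the 'version' field and the update query matches on
# (_id, version), so a concurrent writer that incremented the version first
# makes this update match zero documents; the safe=True verification above
# (find_one on _id and the new version) then surfaces the stale write as an
# OperationFailure instead of silently clobbering the other write.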
def attachment_names(self, doc):
def as_str(name_id):
assert isinstance(name_id[0], basestring), name_id
return str(name_id[0])
return map(as_str, doc.get('_attachments', []))
def set_attachment(self, doc, blob, name, collection=None):
"""Attach potentially large data string `blob` to `doc` by name `name`
blob must be a string
doc must have been saved in some collection (must have an _id), but not
necessarily the jobs collection.
name must be a string
Returns None
"""
# If there is already a file with the given name for this doc, then we will delete it
# after writing the new file
attachments = doc.get('_attachments', [])
name_matches = [a for a in attachments if a[0] == name]
# the filename is set to something so that fs.list() will display the file
new_file_id = self.gfs.put(blob, filename='%s_%s' % (doc['_id'], name))
logger.info('stored blob of %i bytes with id=%s and filename %s_%s' % (
len(blob), str(new_file_id), doc['_id'], name))
new_attachments = ([a for a in attachments if a[0] != name]
+ [(name, new_file_id)])
try:
ii = 0
doc = self.update(doc, {'_attachments': new_attachments},
collection=collection)
# there is a database leak until we actually delete the files that
# are no longer pointed to by new_attachments
while ii < len(name_matches):
self.gfs.delete(name_matches[ii][1])
ii += 1
except:
while ii < len(name_matches):
logger.warning("Leak during set_attachment: old_file_id=%s" % (
name_matches[ii][1]))
ii += 1
raise
assert len([n for n in self.attachment_names(doc) if n == name]) == 1
#return new_file_id
def get_attachment(self, doc, name):
"""Retrieve data attached to `doc` by `attach_blob`.
Raises OperationFailure if `name` does not correspond to an attached blob.
Returns the blob as a string.
"""
attachments = doc.get('_attachments', [])
file_ids = [a[1] for a in attachments if a[0] == name]
if not file_ids:
raise OperationFailure('Attachment not found: %s' % name)
if len(file_ids) > 1:
raise OperationFailure('multiple name matches', (name, file_ids))
return self.gfs.get(file_ids[0]).read()
def delete_attachment(self, doc, name, collection=None):
attachments = doc.get('_attachments', [])
file_id = None
for i,a in enumerate(attachments):
if a[0] == name:
file_id = a[1]
break
if file_id is None:
raise OperationFailure('Attachment not found: %s' % name)
#print "Deleting", file_id
del attachments[i]
self.update(doc, {'_attachments':attachments}, collection=collection)
self.gfs.delete(file_id)
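# -- Usage sketch (added; names are hypothetical): attachments let large
# blobs ride along with a document via GridFS, e.g.
# mj.set_attachment(doc, blob=cPickle.dumps(weights), name='weights')
# blob = mj.get_attachment(doc, name='weights')
# weights = cPickle.loads(blob)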
class MongoTrials(Trials):
"""Trials maps on to an entire mongo collection. It's basically a wrapper
around MongoJobs for now.
As a concession to performance, this object permits trial filtering based
on the exp_key, but I feel that's a hack. The case of `cmd` is similar--
the exp_key and cmd are semantically coupled.
WRITING TO THE DATABASE
-----------------------
The trials object is meant for *reading* a trials database. Writing
to a database is different enough from writing to an in-memory
collection that no attempt has been made to abstract away that
difference. If you want to update the documents within
a MongoTrials collection, then retrieve the `.handle` attribute (a
MongoJobs instance) and use lower-level methods, or pymongo's
interface directly. When you are done writing, call refresh() or
refresh_tids() to bring the MongoTrials up to date.
"""
async = True
def __init__(self, arg, exp_key=None, cmd=None, workdir=None,
refresh=True):
if isinstance(arg, MongoJobs):
self.handle = arg
else:
connection_string = arg
self.handle = MongoJobs.new_from_connection_str(connection_string)
self.handle.create_indexes()
self._exp_key = exp_key
self.cmd = cmd
self.workdir = workdir
if refresh:
self.refresh()
def view(self, exp_key=None, cmd=None, workdir=None, refresh=True):
rval = self.__class__(self.handle,
exp_key=self._exp_key if exp_key is None else exp_key,
cmd=self.cmd if cmd is None else cmd,
workdir=self.workdir if workdir is None else workdir,
refresh=refresh)
return rval
def refresh_tids(self, tids):
""" Sync documents with `['tid']` in the list of `tids` from the
database (not *to* the database).
Local trial documents whose tid is not in `tids` are not
affected by this call. Local trial documents whose tid is in `tids` may
be:
* *deleted* (if db no longer has corresponding document), or
* *updated* (if db has an updated document) or,
* *left alone* (if db document matches local one).
Additionally, if the db has a matching document, but there is no
local trial with a matching tid, then the db document will be
*inserted* into the local collection.
"""
exp_key = self._exp_key
if exp_key is not None:
query = {'exp_key' : exp_key}
else:
query = {}
t0 = time.time()
query['state'] = {'$ne': JOB_STATE_ERROR}
if tids is not None:
query['tid'] = {'$in': list(tids)}
orig_trials = getattr(self, '_trials', [])
_trials = orig_trials[:] #copy to make sure it doesn't get screwed up
if _trials:
db_data = list(self.handle.jobs.find(query,
fields=['_id', 'version']))
# -- pull down a fresh list of ids from mongo
if db_data:
#make numpy data arrays
db_data = numpy.rec.array([(x['_id'], int(x['version']))
for x in db_data],
names=['_id', 'version'])
db_data.sort(order=['_id', 'version'])
db_data = db_data[get_most_recent_inds(db_data)]
existing_data = numpy.rec.array([(x['_id'],
int(x['version'])) for x in _trials],
names=['_id', 'version'])
existing_data.sort(order=['_id', 'version'])
#which records are in db but not in existing, and vice versa
db_in_existing = fast_isin(db_data['_id'], existing_data['_id'])
existing_in_db = fast_isin(existing_data['_id'], db_data['_id'])
#filtering out out-of-date records
_trials = [_trials[_ind] for _ind in existing_in_db.nonzero()[0]]
#new data is what's in db that's not in existing
new_data = db_data[numpy.invert(db_in_existing)]
#having removed the new and out-of-date data,
#concentrating on data in db and existing for state changes
db_data = db_data[db_in_existing]
existing_data = existing_data[existing_in_db]
try:
assert len(db_data) == len(existing_data)
assert (existing_data['_id'] == db_data['_id']).all()
assert (existing_data['version'] <= db_data['version']).all()
except:
reportpath = os.path.join(os.getcwd(),
'hyperopt_refresh_crash_report_' + \
str(numpy.random.randint(1e8)) + '.pkl')
logger.error('HYPEROPT REFRESH ERROR: writing error file to %s' % reportpath)
_file = open(reportpath, 'w')
cPickle.dump({'db_data': db_data,
'existing_data': existing_data},
_file)
_file.close()
raise
same_version = existing_data['version'] == db_data['version']
_trials = [_trials[_ind] for _ind in same_version.nonzero()[0]]
version_changes = existing_data[numpy.invert(same_version)]
#actually get the updated records
update_ids = new_data['_id'].tolist() + version_changes['_id'].tolist()
num_new = len(update_ids)
update_query = copy.deepcopy(query)
update_query['_id'] = {'$in': update_ids}
updated_trials = list(self.handle.jobs.find(update_query))
_trials.extend(updated_trials)
else:
num_new = 0
_trials = []
else:
#this case is for performance, though should be able to be removed
#without breaking correctness.
_trials = list(self.handle.jobs.find(query))
if _trials:
_trials = [_trials[_i] for _i in get_most_recent_inds(_trials)]
num_new = len(_trials)
logger.debug('Refresh data download took %f seconds for %d ids' %
(time.time() - t0, num_new))
if tids is not None:
# -- If tids were given, then _trials only contains
# documents with matching tids. Here we augment these
# fresh matching documents, with our current ones whose
# tids don't match.
new_trials = _trials
tids_set = set(tids)
assert all(t['tid'] in tids_set for t in new_trials)
old_trials = [t for t in orig_trials if t['tid'] not in tids_set]
_trials = new_trials + old_trials
# -- reassign new trials to self, in order of increasing tid
jarray = numpy.array([j['_id'] for j in _trials])
jobsort = jarray.argsort()
self._trials = [_trials[_idx] for _idx in jobsort]
self._specs = [_trials[_idx]['spec'] for _idx in jobsort]
self._results = [_trials[_idx]['result'] for _idx in jobsort]
self._miscs = [_trials[_idx]['misc'] for _idx in jobsort]
def refresh(self):
self.refresh_tids(None)
def _insert_trial_docs(self, docs):
rval = []
for doc in docs:
rval.append(self.handle.jobs.insert(doc, safe=True))
return rval
def count_by_state_unsynced(self, arg):
exp_key = self._exp_key
# TODO: consider searching by SON rather than dict
if isinstance(arg, int):
if arg not in JOB_STATES:
raise ValueError('invalid state', arg)
query = dict(state=arg)
else:
assert hasattr(arg, '__iter__')
states = list(arg)
assert all([x in JOB_STATES for x in states])
query = dict(state={'$in': states})
if exp_key is not None:
query['exp_key'] = exp_key
rval = self.handle.jobs.find(query).count()
return rval
def delete_all(self, cond=None):
if cond is None:
cond = {}
else:
cond = dict(cond)
if self._exp_key:
cond['exp_key'] = self._exp_key
# -- remove all documents matching condition
self.handle.delete_all(cond)
gfs = self.handle.gfs
for filename in gfs.list():
try:
fdoc = gfs.get_last_version(filename=filename, **cond)
except gridfs.errors.NoFile:
continue
gfs.delete(fdoc._id)
self.refresh()
def new_trial_ids(self, N):
db = self.handle.db
# N.B. that the exp key is *not* used here. It was once, but it caused
# a nasty bug: tids were generated by a global experiment
# with exp_key=None, running a BanditAlgo that introduced sub-experiments
# with exp_keys, which ran jobs that did result injection. The tids of
# injected jobs were sometimes unique within an experiment, and
# sometimes not. Hilarious!
#
# Solution: tids are generated to be unique across the db, not just
# within an exp_key.
#
# -- mongo docs say you can't upsert an empty document
query = {'a': 0}
doc = None
while doc is None:
doc = db.job_ids.find_and_modify(
query,
{'$inc' : {'last_id': N}},
upsert=True,
safe=True)
if doc is None:
logger.warning('no last_id found, re-trying')
time.sleep(1.0)
lid = doc.get('last_id', 0)
return range(lid, lid + N)
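# e.g. if the counter document currently holds last_id == 7, then
# new_trial_ids(3) bumps it to 10 and returns [7, 8, 9]; note that
# find_and_modify without new=True returns the pre-increment document.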
def trial_attachments(self, trial):
"""
Attachments to a single trial (e.g. learned weights)
Returns a dictionary interface to the attachments.
"""
# don't offer more here than in MongoCtrl
class Attachments(object):
def __contains__(_self, name):
return name in self.handle.attachment_names(doc=trial)
def __len__(_self):
return len(self.handle.attachment_names(doc=trial))
def __iter__(_self):
return iter(self.handle.attachment_names(doc=trial))
def __getitem__(_self, name):
try:
return self.handle.get_attachment(
doc=trial,
name=name)
except OperationFailure:
raise KeyError(name)
def __setitem__(_self, name, value):
self.handle.set_attachment(
doc=trial,
blob=value,
name=name,
collection=self.handle.db.jobs)
def __delitem__(_self, name):
raise NotImplementedError('delete trial_attachment')
def keys(self):
return [k for k in self]
def values(self):
return [self[k] for k in self]
def items(self):
return [(k, self[k]) for k in self]
return Attachments()
@property
def attachments(self):
"""
Attachments to a Trials set (such as bandit args).
Support syntax for load: self.attachments[name]
Support syntax for store: self.attachments[name] = value
"""
gfs = self.handle.gfs
query = {}
if self._exp_key:
query['exp_key'] = self._exp_key
class Attachments(object):
def __iter__(_self):
if query:
# -- gfs.list does not accept query kwargs
# (at least, as of pymongo 2.4)
filenames = [fname
for fname in gfs.list()
if fname in _self]
else:
filenames = gfs.list()
return iter(filenames)
def __contains__(_self, name):
return gfs.exists(filename=name, **query)
def __getitem__(_self, name):
try:
rval = gfs.get_version(filename=name, **query).read()
return rval
except gridfs.NoFile:
raise KeyError(name)
def __setitem__(_self, name, value):
if gfs.exists(filename=name, **query):
gout = gfs.get_last_version(filename=name, **query)
gfs.delete(gout._id)
gfs.put(value, filename=name, **query)
def __delitem__(_self, name):
gout = gfs.get_last_version(filename=name, **query)
gfs.delete(gout._id)
return Attachments()
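# -- Usage sketch (added; the connection string and exp_key are
# hypothetical): open a view on one experiment and count finished jobs.
def _example_open_trials():
    trials = MongoTrials('mongo://localhost:27017/mydb/jobs',
                         exp_key='my_experiment')
    return trials.count_by_state_unsynced(JOB_STATE_DONE)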
class MongoWorker(object):
poll_interval = 3.0 # -- seconds
workdir = None
def __init__(self, mj,
poll_interval=poll_interval,
workdir=workdir,
exp_key=None,
logfilename='logfile.txt',
):
"""
mj - MongoJobs interface to jobs collection
poll_interval - seconds
workdir - string
exp_key - restrict reservations to this key
"""
self.mj = mj
self.poll_interval = poll_interval
self.workdir = workdir
self.exp_key = exp_key
self.logfilename = logfilename
def make_log_handler(self):
self.log_handler = logging.FileHandler(self.logfilename)
self.log_handler.setFormatter(
logging.Formatter(
fmt='%(levelname)s (%(name)s): %(message)s'))
self.log_handler.setLevel(logging.INFO)
def run_one(self,
host_id=None,
reserve_timeout=None,
erase_created_workdir=False,
):
if host_id is None:
host_id = ('%s:%i' % (socket.gethostname(), os.getpid()),)
job = None
start_time = time.time()
mj = self.mj
while job is None:
if (reserve_timeout
and (time.time() - start_time) > reserve_timeout):
raise ReserveTimeout()
job = mj.reserve(host_id, exp_key=self.exp_key)
if not job:
interval = (1 +
numpy.random.rand()
* (float(self.poll_interval) - 1.0))
logger.info('no job found, sleeping for %.1fs' % interval)
time.sleep(interval)
logger.debug('job found: %s' % str(job))
# -- don't let the cmd mess up our trial object
spec = spec_from_misc(job['misc'])
ctrl = MongoCtrl(
trials=MongoTrials(mj, exp_key=job['exp_key'], refresh=False),
read_only=False,
current_trial=job)
if self.workdir is None:
workdir = job['misc'].get('workdir', os.getcwd())
if workdir is None:
workdir = ''
workdir = os.path.join(workdir, str(job['_id']))
else:
workdir = self.workdir
workdir = os.path.abspath(os.path.expanduser(workdir))
cwd = os.getcwd()
sentinal = None
if not os.path.isdir(workdir):
# -- figure out the closest point to the workdir in the filesystem
closest_dir = ''
for wdi in os.path.split(workdir):
if os.path.isdir(os.path.join(closest_dir, wdi)):
closest_dir = os.path.join(closest_dir, wdi)
else:
break
assert closest_dir != workdir
# -- touch a sentinal file so that recursive directory
# removal stops at the right place
sentinal = os.path.join(closest_dir, wdi + '.inuse')
logger.debug("touching sentinal file: %s" % sentinal)
open(sentinal, 'w').close()
# -- now just make the rest of the folders
logger.debug("making workdir: %s" % workdir)
os.makedirs(workdir)
try:
root_logger = logging.getLogger()
if self.logfilename:
self.make_log_handler()
root_logger.addHandler(self.log_handler)
cmd = job['misc']['cmd']
cmd_protocol = cmd[0]
try:
if cmd_protocol == 'cpickled fn':
worker_fn = cPickle.loads(cmd[1])
elif cmd_protocol == 'call evaluate':
bandit = cPickle.loads(cmd[1])
worker_fn = bandit.evaluate
elif cmd_protocol == 'token_load':
cmd_toks = cmd[1].split('.')
cmd_module = '.'.join(cmd_toks[:-1])
worker_fn = exec_import(cmd_module, cmd[1])
elif cmd_protocol == 'bandit_json evaluate':
bandit = json_call(cmd[1])
worker_fn = bandit.evaluate
elif cmd_protocol == 'driver_attachment':
#name = 'driver_attachment_%s' % job['exp_key']
blob = ctrl.trials.attachments[cmd[1]]
bandit_name, bandit_args, bandit_kwargs = cPickle.loads(blob)
worker_fn = json_call(bandit_name,
args=bandit_args,
kwargs=bandit_kwargs).evaluate
elif cmd_protocol == 'domain_attachment':
blob = ctrl.trials.attachments[cmd[1]]
try:
domain = cPickle.loads(blob)
except BaseException, e:
logger.info('Error while unpickling. Try installing dill via "pip install dill" for enhanced pickling support.')
raise
worker_fn = domain.evaluate
else:
raise ValueError('Unrecognized cmd protocol', cmd_protocol)
result = worker_fn(spec, ctrl)
result = SONify(result)
except BaseException, e:
#XXX: save exception to database, but if this fails, then
# at least raise the original traceback properly
logger.info('job exception: %s' % str(e))
ctrl.checkpoint()
mj.update(job,
{'state': JOB_STATE_ERROR,
'error': (str(type(e)), str(e))},
safe=True)
raise
finally:
if self.logfilename:
root_logger.removeHandler(self.log_handler)
os.chdir(cwd)
logger.info('job finished: %s' % str(job['_id']))
attachments = result.pop('attachments', {})
for aname, aval in attachments.items():
logger.info(
'mongoexp: saving attachment name=%s (%i bytes)' % (
aname, len(aval)))
ctrl.attachments[aname] = aval
ctrl.checkpoint(result)
mj.update(job, {'state': JOB_STATE_DONE}, safe=True)
if sentinal:
if erase_created_workdir:
logger.debug('MongoWorker.run_one: rmtree %s' % workdir)
shutil.rmtree(workdir)
# -- put it back so that recursive removedirs works
os.mkdir(workdir)
# -- recursive backtrack to sentinal
logger.debug('MongoWorker.run_one: removedirs %s'
% workdir)
os.removedirs(workdir)
# -- remove sentinal
logger.debug('MongoWorker.run_one: rm %s' % sentinal)
os.remove(sentinal)
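# -- Summary note (added): one worker pass boils down to
# job = mj.reserve(host_id) -> worker_fn = <unpickled/imported from job cmd>
# -> result = worker_fn(spec, ctrl) -> ctrl.checkpoint(result)
# -> mj.update(job, {'state': JOB_STATE_DONE}); an exception instead sets
# JOB_STATE_ERROR together with an 'error' field on the job document.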
class MongoCtrl(Ctrl):
"""
Attributes:
current_trial - current job document
jobs - MongoJobs object in which current_trial resides
read_only - True means don't change the db
"""
def __init__(self, trials, current_trial, read_only):
self.trials = trials
self.current_trial = current_trial
self.read_only = read_only
def debug(self, *args, **kwargs):
# XXX: This is supposed to log to db
return logger.debug(*args, **kwargs)
def info(self, *args, **kwargs):
# XXX: This is supposed to log to db
return logger.info(*args, **kwargs)
def warn(self, *args, **kwargs):
# XXX: This is supposed to log to db
return logger.warn(*args, **kwargs)
def error(self, *args, **kwargs):
# XXX: This is supposed to log to db
return logger.error(*args, **kwargs)
def checkpoint(self, result=None):
if not self.read_only:
handle = self.trials.handle
handle.refresh(self.current_trial)
if result is not None:
return handle.update(self.current_trial, dict(result=result))
@property
def attachments(self):
"""
Support syntax for load: self.attachments[name]
Support syntax for store: self.attachments[name] = value
"""
return self.trials.trial_attachments(trial=self.current_trial)
@property
def set_attachment(self):
# XXX: Is there a better deprecation error?
raise RuntimeError(
'set_attachment deprecated. Use `self.attachments[name] = value`')
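# Usage sketch (hypothetical variable names): inside a running job, the Ctrl
# instance handed to the bandit exposes per-trial attachments and checkpoints:
#
#     ctrl.attachments['weights.pkl'] = cPickle.dumps(weights)   # store
#     weights = cPickle.loads(ctrl.attachments['weights.pkl'])   # load
#     ctrl.checkpoint(partial_result)  # write an intermediate result
#                                      # (no-op when read_only)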
def exec_import(cmd_module, cmd):
worker_fn = None
exec('import %s; worker_fn = %s' % (cmd_module, cmd))
return worker_fn
def as_mongo_str(s):
if s.startswith('mongo://'):
return s
else:
return 'mongo://%s' % s
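# Examples (illustrative):
#
#     as_mongo_str('localhost:27017/foo_db')    # -> 'mongo://localhost:27017/foo_db'
#     as_mongo_str('mongo://localhost/foo_db')  # -> unchanged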
def main_worker_helper(options, args):
N = int(options.max_jobs)
if options.last_job_timeout is not None:
last_job_timeout = time.time() + float(options.last_job_timeout)
else:
last_job_timeout = None
def sighandler_shutdown(signum, frame):
logger.info('Caught signal %i, shutting down.' % signum)
raise Shutdown(signum)
def sighandler_wait_quit(signum, frame):
logger.info('Caught signal %i, shutting down.' % signum)
raise WaitQuit(signum)
signal.signal(signal.SIGINT, sighandler_shutdown)
signal.signal(signal.SIGHUP, sighandler_shutdown)
signal.signal(signal.SIGTERM, sighandler_shutdown)
signal.signal(signal.SIGUSR1, sighandler_wait_quit)
if N > 1:
proc = None
cons_errs = 0
        while N and cons_errs < int(options.max_consecutive_failures):
            if last_job_timeout and time.time() > last_job_timeout:
                logger.info("Exiting due to last_job_timeout")
                return
            try:
# recursive Popen, dropping N from the argv
# By using another process to run this job
# we protect ourselves from memory leaks, bad cleanup
# and other annoying details.
# The tradeoff is that a large dataset must be reloaded once for
# each subprocess.
sub_argv = [sys.argv[0],
'--poll-interval=%s' % options.poll_interval,
'--max-jobs=1',
'--mongo=%s' % options.mongo,
'--reserve-timeout=%s' % options.reserve_timeout]
if options.workdir is not None:
sub_argv.append('--workdir=%s' % options.workdir)
if options.exp_key is not None:
sub_argv.append('--exp-key=%s' % options.exp_key)
proc = subprocess.Popen(sub_argv)
retcode = proc.wait()
proc = None
except Shutdown:
#this is the normal way to stop the infinite loop (if originally N=-1)
if proc:
#proc.terminate() is only available as of 2.6
os.kill(proc.pid, signal.SIGTERM)
return proc.wait()
else:
return 0
except WaitQuit:
# -- sending SIGUSR1 to a looping process will cause it to
# break out of the loop after the current subprocess finishes
# normally.
if proc:
return proc.wait()
else:
return 0
if retcode != 0:
cons_errs += 1
else:
cons_errs = 0
N -= 1
logger.info("exiting with N=%i after %i consecutive exceptions" %(
N, cons_errs))
elif N == 1:
# XXX: the name of the jobs collection is a parameter elsewhere,
# so '/jobs' should not be hard-coded here
mj = MongoJobs.new_from_connection_str(
as_mongo_str(options.mongo) + '/jobs')
mworker = MongoWorker(mj,
float(options.poll_interval),
workdir=options.workdir,
exp_key=options.exp_key)
mworker.run_one(reserve_timeout=float(options.reserve_timeout))
else:
raise ValueError("N <= 0")
def main_worker():
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--exp-key",
dest='exp_key',
                      default=None,
metavar='str',
help="identifier for this workers's jobs")
parser.add_option("--last-job-timeout",
dest='last_job_timeout',
metavar='T',
default=None,
help="Do not reserve a job after T seconds have passed")
parser.add_option("--max-consecutive-failures",
dest="max_consecutive_failures",
metavar='N',
default=4,
help="stop if N consecutive jobs fail (default: 4)")
parser.add_option("--max-jobs",
dest='max_jobs',
default=sys.maxint,
help="stop after running this many jobs (default: inf)")
parser.add_option("--mongo",
dest='mongo',
default='localhost/hyperopt',
help="<host>[:port]/<db> for IPC and job storage")
parser.add_option("--poll-interval",
dest='poll_interval',
metavar='N',
default=5,
help="check work queue every 1 < T < N seconds (default: 5")
parser.add_option("--reserve-timeout",
dest='reserve_timeout',
metavar='T',
default=120.0,
help="poll database for up to T seconds to reserve a job")
parser.add_option("--workdir",
dest="workdir",
default=None,
help="root workdir (default: load from mongo)",
metavar="DIR")
(options, args) = parser.parse_args()
if args:
parser.print_help()
return -1
return main_worker_helper(options, args)
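# Typical command line (illustrative; `hyperopt-mongo-worker` is the console
# script that wraps main_worker):
#
#     hyperopt-mongo-worker --mongo=localhost:27017/foo_db --poll-interval=1
#
# With the default --max-jobs, main_worker_helper keeps spawning one-job
# subprocesses until a signal arrives (SIGINT/SIGHUP/SIGTERM) or until
# --max-consecutive-failures is hit.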
def bandit_from_options(options):
#
# Construct bandit
#
bandit_name = options.bandit
if options.bandit_argfile:
bandit_argfile_text = open(options.bandit_argfile).read()
bandit_argv, bandit_kwargs = cPickle.loads(bandit_argfile_text)
else:
bandit_argfile_text = ''
bandit_argv, bandit_kwargs = (), {}
bandit = json_call(bandit_name, bandit_argv, bandit_kwargs)
return (bandit,
(bandit_name, bandit_argv, bandit_kwargs),
bandit_argfile_text)
def algo_from_options(options, bandit):
#
# Construct algo
#
algo_name = options.bandit_algo
if options.bandit_algo_argfile:
# in theory this is easy just as above.
# need tests though, and it's just not done yet.
raise NotImplementedError('Option: --bandit-algo-argfile')
else:
algo_argfile_text = ''
algo_argv, algo_kwargs = (), {}
algo = json_call(algo_name, (bandit,) + algo_argv, algo_kwargs)
return (algo,
(algo_name, (bandit,) + algo_argv, algo_kwargs),
algo_argfile_text)
def expkey_from_options(options, bandit_stuff, algo_stuff):
#
# Determine exp_key
#
if None is options.exp_key:
# -- argfile texts
bandit_name = bandit_stuff[1][0]
algo_name = algo_stuff[1][0]
bandit_argfile_text = bandit_stuff[2]
algo_argfile_text = algo_stuff[2]
if bandit_argfile_text or algo_argfile_text:
m = hashlib.md5()
m.update(bandit_argfile_text)
m.update(algo_argfile_text)
exp_key = '%s/%s[arghash:%s]' % (
bandit_name, algo_name, m.hexdigest())
del m
else:
exp_key = '%s/%s' % (bandit_name, algo_name)
else:
exp_key = options.exp_key
return exp_key
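# The generated keys look like (hypothetical names):
#
#     'my_bandits.Quadratic1/hyperopt.Random'                   # no argfiles
#     'my_bandits.Quadratic1/hyperopt.Random[arghash:5e8a...]'  # with argfiles
#
# so runs configured with different constructor arguments never share an
# exp_key unless --exp-key is passed explicitly.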
def main_search_helper(options, args, input=input, cmd_type=None):
"""
input is an argument so that unittest can replace stdin
cmd_type can be set to "D.A." to force interpretation of bandit as driver
attachment. This mechanism is used by unit tests.
"""
assert getattr(options, 'bandit', None) is None
assert getattr(options, 'bandit_algo', None) is None
assert len(args) == 2
options.bandit = args[0]
options.bandit_algo = args[1]
bandit_stuff = bandit_from_options(options)
bandit, bandit_NAK, bandit_argfile_text = bandit_stuff
bandit_name, bandit_args, bandit_kwargs = bandit_NAK
algo_stuff = algo_from_options(options, bandit)
algo, algo_NAK, algo_argfile_text = algo_stuff
algo_name, algo_args, algo_kwargs = algo_NAK
exp_key = expkey_from_options(options, bandit_stuff, algo_stuff)
trials = MongoTrials(as_mongo_str(options.mongo) + '/jobs', exp_key)
if options.clear_existing:
print >> sys.stdout, "Are you sure you want to delete",
print >> sys.stdout, ("all %i jobs with exp_key: '%s' ?"
% (
trials.handle.db.jobs.find({'exp_key':exp_key}).count(),
str(exp_key)))
print >> sys.stdout, '(y/n)'
y, n = 'y', 'n'
if input() != 'y':
print >> sys.stdout, "aborting"
return 1
trials.delete_all()
#
# Construct MongoExperiment
#
if bandit_argfile_text or algo_argfile_text or cmd_type=='D.A.':
aname = 'driver_attachment_%s.pkl' % exp_key
if aname in trials.attachments:
atup = cPickle.loads(trials.attachments[aname])
if bandit_NAK != atup:
raise BanditSwapError((bandit_NAK, atup))
else:
try:
blob = cPickle.dumps(bandit_NAK)
except BaseException, e:
print >> sys.stdout, "Error pickling. Try installing dill via 'pip install dill'."
                raise  # re-raise, preserving the original traceback
trials.attachments[aname] = blob
worker_cmd = ('driver_attachment', aname)
else:
worker_cmd = ('bandit_json evaluate', bandit_name)
algo.cmd = worker_cmd
    algo.workdir = options.workdir
self = Experiment(trials,
bandit_algo=algo,
poll_interval_secs=(int(options.poll_interval))
if options.poll_interval else 5,
max_queue_len=options.max_queue_len)
self.run(options.steps, block_until_done=options.block)
def main_search():
parser = optparse.OptionParser(
usage="%prog [options] [<bandit> <bandit_algo>]")
parser.add_option("--clear-existing",
action="store_true",
dest="clear_existing",
default=False,
help="clear all jobs with the given exp_key")
parser.add_option("--exp-key",
dest='exp_key',
                      default=None,
metavar='str',
help="identifier for this driver's jobs")
parser.add_option('--force-lock',
action="store_true",
dest="force_lock",
default=False,
help="ignore concurrent experiments using same exp_key (only do this after a crash)")
parser.add_option("--mongo",
dest='mongo',
default='localhost/hyperopt',
help="<host>[:port]/<db> for IPC and job storage")
parser.add_option("--poll-interval",
dest='poll_interval',
metavar='N',
default=None,
help="check work queue every N seconds (default: 5")
parser.add_option("--no-save-on-exit",
action="store_false",
dest="save_on_exit",
default=True,
help="save driver state to mongo on exit")
parser.add_option("--steps",
dest='steps',
default=sys.maxint,
help="exit after queuing this many jobs (default: inf)")
parser.add_option("--workdir",
dest="workdir",
default=os.path.expanduser('~/.hyperopt.workdir'),
help="direct hyperopt-mongo-worker to chdir here",
metavar="DIR")
parser.add_option("--block",
dest="block",
action="store_true",
default=False,
help="block return until all queue is empty")
parser.add_option("--bandit-argfile",
dest="bandit_argfile",
default=None,
help="path to file containing arguments bandit constructor\n"
"file format: pickle of dictionary containing two keys,\n"
" {'args' : tuple of positional arguments,\n"
" 'kwargs' : dictionary of keyword arguments}")
parser.add_option("--bandit-algo-argfile",
dest="bandit_algo_argfile",
default=None,
help="path to file containing arguments for bandit_algo "
"constructor. File format is pickled dictionary containing "
"two keys:\n"
" 'args', a tuple of positional arguments, and \n"
" 'kwargs', a dictionary of keyword arguments. \n"
"NOTE: instantiated bandit is pre-pended as first element"
" of arg tuple.")
parser.add_option("--max-queue-len",
dest="max_queue_len",
default=1,
help="maximum number of jobs to allow in queue")
(options, args) = parser.parse_args()
if len(args) > 2:
parser.print_help()
return -1
return main_search_helper(options, args)
def main_show_helper(options, args):
if options.trials_pkl:
trials = cPickle.load(open(options.trials_pkl))
else:
bandit_stuff = bandit_from_options(options)
bandit, (bandit_name, bandit_args, bandit_kwargs), bandit_algo_argfile\
= bandit_stuff
algo_stuff = algo_from_options(options, bandit)
algo, (algo_name, algo_args, algo_kwargs), algo_algo_argfile\
= algo_stuff
exp_key = expkey_from_options(options, bandit_stuff, algo_stuff)
trials = MongoTrials(as_mongo_str(options.mongo) + '/jobs', exp_key)
cmd = args[0]
if 'history' == cmd:
if 0:
import matplotlib.pyplot as plt
self.refresh_trials_results()
yvals, colors = zip(*[(1 - r.get('best_epoch_test', .5), 'g')
for y, r in zip(self.losses(), self.results) if y is not None])
plt.scatter(range(len(yvals)), yvals, c=colors)
return plotting.main_plot_history(trials)
elif 'histogram' == cmd:
return plotting.main_plot_histogram(trials)
elif 'dump' == cmd:
raise NotImplementedError('TODO: dump jobs db to stdout as JSON')
elif 'dump_pickle' == cmd:
cPickle.dump(trials_from_docs(trials.trials),
open(args[1], 'w'))
elif 'vars' == cmd:
return plotting.main_plot_vars(trials, bandit=bandit)
else:
logger.error("Invalid cmd %s" % cmd)
        print """Current supported commands are history, histogram, dump_pickle, vars
        """
return -1
def main_show():
parser = optparse.OptionParser(
usage="%prog [options] cmd [...]")
parser.add_option("--exp-key",
dest='exp_key',
                      default=None,
metavar='str',
help="identifier for this driver's jobs")
parser.add_option("--bandit",
dest='bandit',
                      default=None,
metavar='json',
help="identifier for the bandit solved by the experiment")
parser.add_option("--bandit-argfile",
dest="bandit_argfile",
default=None,
help="path to file containing arguments bandit constructor\n"
"file format: pickle of dictionary containing two keys,\n"
" {'args' : tuple of positional arguments,\n"
" 'kwargs' : dictionary of keyword arguments}")
parser.add_option("--bandit-algo",
dest='bandit_algo',
                      default=None,
metavar='json',
help="identifier for the optimization algorithm for experiment")
parser.add_option("--bandit-algo-argfile",
dest="bandit_algo_argfile",
default=None,
help="path to file containing arguments for bandit_algo "
"constructor. File format is pickled dictionary containing "
"two keys:\n"
" 'args', a tuple of positional arguments, and \n"
" 'kwargs', a dictionary of keyword arguments. \n"
"NOTE: instantiated bandit is pre-pended as first element"
" of arg tuple.")
parser.add_option("--mongo",
dest='mongo',
default='localhost/hyperopt',
help="<host>[:port]/<db> for IPC and job storage")
parser.add_option("--trials",
dest="trials_pkl",
default="",
help="local trials file (e.g. created by dump_pickle command)")
parser.add_option("--workdir",
dest="workdir",
default=os.path.expanduser('~/.hyperopt.workdir'),
help="check for worker files here",
metavar="DIR")
(options, args) = parser.parse_args()
try:
cmd = args[0]
except:
parser.print_help()
return -1
return main_show_helper(options, args)
| gpl-3.0 |
sellberg/SACLA2016B8055 | scripts/03_plot_h5.py | 2 | 1294 | #!/home/doniach/dermen/epd731/bin/python
import numpy as np
import h5py
import matplotlib
import matplotlib.pyplot as plt
import argparse
import time
import pandas as pd
import sys
# -- default parameters
run = 448539
file_folder = '/UserData/fperakis/2016_6/01_test/' # h5 files folder
src_folder = '/home/fperakis/2016_06/python_scripts/src' # src files folder
# -- files and folders
file_name = '%d.h5'%(run)
file_path = file_folder+file_name
sys.path.insert(0, src_folder)
from img_class import *
# -- import data
fh5 = h5py.File(file_path, 'r')
run_key = [ k for k in fh5.keys() if k.startswith('run_') ][0]
tags = fh5['/%s/detector_2d_assembled_1'%run_key].keys()[1:]
# -- image generator
num_im = len(tags)
img_gen = ( fh5['%s/detector_2d_assembled_1/%s/detector_data'%(run_key,tag) ].value for tag in tags )
mean_int = np.zeros(num_im)
# -- average image
im = img_gen.next()
mean_int[0] = np.average(im.flatten())
i = 1
for im_next in img_gen:
    t1 = time.time()
    mean_int[i] = np.average(im_next.flatten())
    im += im_next
    i += 1
    print 'R.%d | S.%d/%d | %.1f Hz'%(run,i,num_im,1.0/(time.time() - t1))
im /= num_im
# -- run mean
total_mean = np.average(im.flatten())
# -- plot
title = 'r.%d - average %d shots'%(run,num_im)
img = img_class(im, title)
img.draw_img()
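# Optional follow-up (illustrative; reuses the variables computed above):
# plot the shot-to-shot mean intensity to spot drifting or dropped shots.
plt.figure()
plt.plot(mean_int)
plt.xlabel('shot number')
plt.ylabel('mean intensity')
plt.title('r.%d - shot-to-shot mean intensity'%(run))
plt.show()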
| bsd-2-clause |
pkruskal/scikit-learn | examples/cluster/plot_cluster_iris.py | 350 | 2593 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
K-means Clustering
=========================================================
The plots display, first, what a K-means algorithm would yield
using three clusters. It is then shown what the effect of a bad
initialization is on the classification process:
by setting n_init to only 1 (default is 10), the number of
times that the algorithm is run with different centroid
seeds is reduced.
The next plot displays what using eight clusters would deliver,
and finally the ground truth.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import KMeans
from sklearn import datasets
np.random.seed(5)
centers = [[1, 1], [-1, -1], [1, -1]]
iris = datasets.load_iris()
X = iris.data
y = iris.target
estimators = {'k_means_iris_3': KMeans(n_clusters=3),
'k_means_iris_8': KMeans(n_clusters=8),
'k_means_iris_bad_init': KMeans(n_clusters=3, n_init=1,
init='random')}
fignum = 1
for name, est in estimators.items():
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
est.fit(X)
labels = est.labels_
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=labels.astype(np.float))
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
fignum = fignum + 1
# Plot the ground truth
fig = plt.figure(fignum, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=48, azim=134)
plt.cla()
for name, label in [('Setosa', 0),
('Versicolour', 1),
('Virginica', 2)]:
ax.text3D(X[y == label, 3].mean(),
X[y == label, 0].mean() + 1.5,
X[y == label, 2].mean(), name,
horizontalalignment='center',
bbox=dict(alpha=.5, edgecolor='w', facecolor='w'))
# Reorder the labels to have colors matching the cluster results
y = np.choose(y, [1, 2, 0]).astype(np.float)
ax.scatter(X[:, 3], X[:, 0], X[:, 2], c=y)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
ax.set_xlabel('Petal width')
ax.set_ylabel('Sepal length')
ax.set_zlabel('Petal length')
plt.show()
| bsd-3-clause |
berkeley-stat159/project-epsilon | code/utils/scripts/t_test_plot_script.py | 1 | 3981 | """
Purpose:
-----------------------------------------------------------
This script creates t-test graphs for 4 conditions.
For each subject, each run, and each condition, it plots the t-statistics.
-----------------------------------------------------------
"""
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/"))
from t_stat import *
from smoothing import *
from matplotlib import colors
from plot_mosaic import *
import numpy as np
import nibabel as nib
import matplotlib.pyplot as plt
import matplotlib
# Create the necessary directories if they do not exist
dirs = ['../../../fig','../../../fig/t-test']
for d in dirs:
if not os.path.exists(d):
os.makedirs(d)
# locate the different paths
project_path = '../../../'
data_path = project_path + 'data/'
txt_path = project_path + 'txt_output/conv_high_res/'
#txt_path = project_path + 'txt_output/conv_normal/'
path_dict = {'data_filtered':{
'folder' : 'ds005/',
'bold_img_name' : 'filtered_func_data_mni.nii.gz',
'run_path' : 'model/model001/',
'feat' : '.feat/'
},
'data_original':{
'folder' : 'ds005/',
'bold_img_name' : 'bold.nii.gz',
'run_path' : 'BOLD/',
'feat' : '/'
}}
# TODO: uncomment for final version
#subject_list = [str(i) for i in range(1,17)]
subject_list = ['1','5']
run_list = [str(i) for i in range(1,2)]
cond_list = [str(i) for i in range(1,5)]
#TODO: Change to relevant path for data or other thing
d = path_dict['data_original']
#OR
#d = path_dict['data_filtered']
images_paths = [('ds005' +'_sub' + s.zfill(3) + '_t1r' + r, \
data_path + d['folder'] + 'sub%s/'%(s.zfill(3)) + d['run_path'] \
+ 'task001_run%s'%(r.zfill(3))+d['feat']+'%s'%( d['bold_img_name'])) \
for r in run_list \
for s in subject_list]
print("\n=====================================================")
thres = 375 #from analysis of the histograms
for image_path in images_paths:
name = image_path[0]
print("Starting t-test analysis and plot for subject "+name[9:12])
img = nib.load(image_path[1])
data_int = img.get_data()
data = data_int.astype(float)
vol_shape = data.shape[:-1]
n_trs = data.shape[-1]
#get the mean value
mean_data = np.mean(data, axis = -1)
#build the mask
    in_brain_mask = mean_data > thres
#smooth the data set
smooth_data = smoothing(data, 1, range(n_trs))
#initialize design matrix for t test
p = 7
X_matrix = np.ones((data.shape[-1], p))
#build our design matrix
for cond in range(1,5):
convolved = np.loadtxt(txt_path + name + '_conv_' + str(cond).zfill(3) + '_high_res.txt')
#convolved = np.loadtxt(txt_path + name + '_conv_' + str(cond).zfill(3) + '_canonical.txt')
X_matrix[:,cond] = convolved
linear_drift = np.linspace(-1, 1, n_trs)
X_matrix[:,5] = linear_drift
quadratic_drift = linear_drift ** 2
quadratic_drift -= np.mean(quadratic_drift)
X_matrix[:,6] = quadratic_drift
beta, t, df, p = t_stat(smooth_data, X_matrix)
for cond in range(0,4):
print("Starting test for condition " + str(cond+1))
t_newshape = np.reshape(t[cond,:],vol_shape)
t_newshape[~in_brain_mask]=np.nan
t_T = np.zeros(vol_shape)
for z in range(vol_shape[2]):
t_T[:, :, z] = t_newshape[:,:, z].T
t_plot = plot_mosaic(t_T)
plt.imshow(t_plot,interpolation='nearest', cmap='seismic')
zero_out=max(abs(np.nanmin(t_T)),np.nanmax(t_T))
plt.title(name+'_t_statistics'+'_cond_'+'_%s'%(cond+1))
plt.clim(-zero_out,zero_out)
plt.colorbar()
plt.savefig(dirs[1]+'/'+ name +'_t-test_'+'cond'+str(cond+1)+'.png')
plt.close()
print("\nT-test analysis and plots done for selected subjects")
print("See mosaic plots in project-epsilon/fig/t-test/")
| bsd-3-clause |
MatthieuBizien/scikit-learn | examples/exercises/plot_cv_digits.py | 135 | 1223 | """
=============================================
Cross-validation on Digits Dataset Exercise
=============================================
A tutorial exercise using Cross-validation with an SVM on the Digits dataset.
This exercise is used in the :ref:`cv_generators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
from sklearn.model_selection import cross_val_score
from sklearn import datasets, svm
digits = datasets.load_digits()
X = digits.data
y = digits.target
svc = svm.SVC(kernel='linear')
C_s = np.logspace(-10, 0, 10)
scores = list()
scores_std = list()
for C in C_s:
svc.C = C
this_scores = cross_val_score(svc, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
# Do the plotting
import matplotlib.pyplot as plt
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.semilogx(C_s, scores)
plt.semilogx(C_s, np.array(scores) + np.array(scores_std), 'b--')
plt.semilogx(C_s, np.array(scores) - np.array(scores_std), 'b--')
locs, labels = plt.yticks()
plt.yticks(locs, list(map(lambda x: "%g" % x, locs)))
plt.ylabel('CV score')
plt.xlabel('Parameter C')
plt.ylim(0, 1.1)
plt.show()
| bsd-3-clause |
gprMax/gprMax | setup.py | 1 | 7984 | # Copyright (C) 2015-2020: The University of Edinburgh
# Authors: Craig Warren and Antonis Giannopoulos
#
# This file is part of gprMax.
#
# gprMax is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gprMax is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gprMax. If not, see <http://www.gnu.org/licenses/>.
try:
from setuptools import setup, Extension
except ImportError:
from distutils.core import setup
from distutils.extension import Extension
try:
import numpy as np
except ImportError:
raise ImportError('gprMax requires the NumPy package.')
import glob
import os
import pathlib
import re
import shutil
import sys
# Importing _version.py before building can cause issues, so parse the version as text.
with open('gprMax/_version.py', 'r') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
# Parse package name from init file. Importing __init__.py / gprMax will break as gprMax depends on compiled .pyx files.
with open('gprMax/__init__.py', 'r') as fd:
packagename = re.search(r'^__name__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
packages = [packagename, 'tests', 'tools', 'user_libs']
# Parse long_description from README.rst file.
with open('README.rst','r') as fd:
long_description = fd.read()
# Python version (keep in sync with python_requires below)
if sys.version_info[:2] < (3, 7):
    sys.exit('\nExited: Requires Python 3.7 or newer!\n')
# Process 'build' command line argument
if 'build' in sys.argv:
print("Running 'build_ext --inplace'")
sys.argv.remove('build')
sys.argv.append('build_ext')
sys.argv.append('--inplace')
# Process '--no-cython' command line argument - either Cythonize or just compile the .c files
if '--no-cython' in sys.argv:
USE_CYTHON = False
sys.argv.remove('--no-cython')
else:
USE_CYTHON = True
# Build a list of all the files that need to be Cythonized looking in gprMax directory
cythonfiles = []
for root, dirs, files in os.walk(os.path.join(os.getcwd(), packagename), topdown=True):
for file in files:
if file.endswith('.pyx'):
cythonfiles.append(os.path.relpath(os.path.join(root, file)))
# Process 'cleanall' command line argument - cleanup Cython files
if 'cleanall' in sys.argv:
USE_CYTHON = False
for file in cythonfiles:
filebase = os.path.splitext(file)[0]
# Remove Cython C files
if os.path.isfile(filebase + '.c'):
try:
os.remove(filebase + '.c')
print('Removed: {}'.format(filebase + '.c'))
except OSError:
print('Could not remove: {}'.format(filebase + '.c'))
# Remove compiled Cython modules
libfile = glob.glob(os.path.join(os.getcwd(), os.path.splitext(file)[0]) + '*.pyd') + glob.glob(os.path.join(os.getcwd(), os.path.splitext(file)[0]) + '*.so')
if libfile:
libfile = libfile[0]
try:
os.remove(libfile)
print('Removed: {}'.format(os.path.abspath(libfile)))
except OSError:
print('Could not remove: {}'.format(os.path.abspath(libfile)))
# Remove build, dist, egg and __pycache__ directories
shutil.rmtree(os.path.join(os.getcwd(), 'build'), ignore_errors=True)
shutil.rmtree(os.path.join(os.getcwd(), 'dist'), ignore_errors=True)
shutil.rmtree(os.path.join(os.getcwd(), 'gprMax.egg-info'), ignore_errors=True)
for p in pathlib.Path(os.getcwd()).rglob('__pycache__'):
shutil.rmtree(p, ignore_errors=True)
print('Removed: {}'.format(p))
# Now do a normal clean
sys.argv[1] = 'clean' # this is what distutils understands
# Set compiler options
# Windows
if sys.platform == 'win32':
compile_args = ['/O2', '/openmp', '/w'] # No static linking as no static version of OpenMP library; /w disables warnings
linker_args = []
extra_objects = []
libraries=[]
# Mac OS X - needs gcc (usually via HomeBrew) because the default compiler LLVM (clang) does not support OpenMP
# - with gcc -fopenmp option implies -pthread
elif sys.platform == 'darwin':
gccpath = glob.glob('/usr/local/bin/gcc-[4-9]*')
    gccpath += glob.glob('/usr/local/bin/gcc-1[0-1]*')  # gcc-10, gcc-11
if gccpath:
# Use newest gcc found
os.environ['CC'] = gccpath[-1].split(os.sep)[-1]
rpath = '/usr/local/opt/gcc/lib/gcc/' + gccpath[-1].split(os.sep)[-1][-1] + '/'
else:
        raise EnvironmentError('Cannot find gcc 4-11 in /usr/local/bin. gprMax requires gcc to be installed - easily done through the Homebrew package manager (http://brew.sh). Note: gcc with OpenMP support is required.')
compile_args = ['-O3', '-w', '-fopenmp', '-march=native'] # Sometimes worth testing with '-fstrict-aliasing', '-fno-common'
linker_args = ['-fopenmp', '-Wl,-rpath,' + rpath]
libraries = ['iomp5', 'pthread']
extra_objects = []
# Linux
elif sys.platform == 'linux':
compile_args = ['-O3', '-w', '-fopenmp', '-march=native']
linker_args = ['-fopenmp']
extra_objects = []
libraries=[]
# Build a list of all the extensions
extensions = []
for file in cythonfiles:
tmp = os.path.splitext(file)
if USE_CYTHON:
fileext = tmp[1]
else:
fileext = '.c'
extension = Extension(tmp[0].replace(os.sep, '.'),
[tmp[0] + fileext],
language='c',
include_dirs=[np.get_include()],
libraries=libraries,
extra_compile_args=compile_args,
extra_link_args=linker_args,
extra_objects=extra_objects)
extensions.append(extension)
# Cythonize (build .c files)
if USE_CYTHON:
from Cython.Build import cythonize
extensions = cythonize(extensions,
compiler_directives={
'boundscheck': False,
'wraparound': False,
'initializedcheck': False,
'embedsignature': True,
'language_level': 3
},
annotate=False)
# SetupTools Required to make package
import setuptools
setup(name=packagename,
version=version,
author='Craig Warren and Antonis Giannopoulos',
url='http://www.gprmax.com',
description='Electromagnetic Modelling Software based on the Finite-Difference Time-Domain (FDTD) method',
long_description=long_description,
long_description_content_type="text/x-rst",
license='GPLv3+',
classifiers=[
'Environment :: Console',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Operating System :: MacOS',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Cython',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering'
],
#requirements
python_requires=">3.6",
install_requires=[
"colorama",
"cython",
"h5py",
"jupyter",
"matplotlib",
"numpy",
"psutil",
"scipy",
"terminaltables",
"tqdm",
],
ext_modules=extensions,
packages=packages,
include_package_data=True,
include_dirs=[np.get_include()],
zip_safe=False)
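# Typical invocations, as interpreted by the argument handling above
# (run from the gprMax source root):
#
#     python setup.py build              # rewritten to 'build_ext --inplace'
#     python setup.py build --no-cython  # compile the shipped .c files, skip Cython
#     python setup.py cleanall           # remove generated .c files, modules and build dirs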
| gpl-3.0 |
jstoxrocky/statsmodels | statsmodels/graphics/functional.py | 31 | 14477 | """Module for functional boxplots."""
from statsmodels.compat.python import combinations, range
import numpy as np
from scipy import stats
from scipy.misc import factorial
from . import utils
__all__ = ['fboxplot', 'rainbowplot', 'banddepth']
def fboxplot(data, xdata=None, labels=None, depth=None, method='MBD',
wfactor=1.5, ax=None, plot_opts={}):
"""Plot functional boxplot.
A functional boxplot is the analog of a boxplot for functional data.
Functional data is any type of data that varies over a continuum, i.e.
curves, probabillity distributions, seasonal data, etc.
The data is first ordered, the order statistic used here is `banddepth`.
Plotted are then the median curve, the envelope of the 50% central region,
the maximum non-outlying envelope and the outlier curves.
Parameters
----------
data : sequence of ndarrays or 2-D ndarray
The vectors of functions to create a functional boxplot from. If a
sequence of 1-D arrays, these should all be the same size.
The first axis is the function index, the second axis the one along
which the function is defined. So ``data[0, :]`` is the first
functional curve.
xdata : ndarray, optional
The independent variable for the data. If not given, it is assumed to
be an array of integers 0..N with N the length of the vectors in
`data`.
labels : sequence of scalar or str, optional
The labels or identifiers of the curves in `data`. If given, outliers
are labeled in the plot.
depth : ndarray, optional
A 1-D array of band depths for `data`, or equivalent order statistic.
If not given, it will be calculated through `banddepth`.
method : {'MBD', 'BD2'}, optional
The method to use to calculate the band depth. Default is 'MBD'.
wfactor : float, optional
Factor by which the central 50% region is multiplied to find the outer
region (analog of "whiskers" of a classical boxplot).
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
plot_opts : dict, optional
A dictionary with plotting options. Any of the following can be
provided, if not present in `plot_opts` the defaults will be used::
- 'cmap_outliers', a Matplotlib LinearSegmentedColormap instance.
- 'c_inner', valid MPL color. Color of the central 50% region
- 'c_outer', valid MPL color. Color of the non-outlying region
- 'c_median', valid MPL color. Color of the median.
- 'lw_outliers', scalar. Linewidth for drawing outlier curves.
- 'lw_median', scalar. Linewidth for drawing the median curve.
- 'draw_nonout', bool. If True, also draw non-outlying curves.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
depth : ndarray
1-D array containing the calculated band depths of the curves.
ix_depth : ndarray
1-D array of indices needed to order curves (or `depth`) from most to
least central curve.
ix_outliers : ndarray
1-D array of indices of outlying curves in `data`.
See Also
--------
banddepth, rainbowplot
Notes
-----
The median curve is the curve with the highest band depth.
Outliers are defined as curves that fall outside the band created by
multiplying the central region by `wfactor`. Note that the range over
which they fall outside this band doesn't matter, a single data point
outside the band is enough. If the data is noisy, smoothing may therefore
be required.
The non-outlying region is defined as the band made up of all the
non-outlying curves.
References
----------
[1] Y. Sun and M.G. Genton, "Functional Boxplots", Journal of Computational
and Graphical Statistics, vol. 20, pp. 1-19, 2011.
    [2] R.J. Hyndman and H.L. Shang, "Rainbow Plots, Bagplots, and Boxplots for
        Functional Data", Journal of Computational and Graphical Statistics,
        vol. 19, pp. 29-45, 2010.
Examples
--------
Load the El Nino dataset. Consists of 60 years worth of Pacific Ocean sea
surface temperature data.
>>> import matplotlib.pyplot as plt
>>> import statsmodels.api as sm
>>> data = sm.datasets.elnino.load()
Create a functional boxplot. We see that the years 1982-83 and 1997-98 are
outliers; these are the years where El Nino (a climate pattern
characterized by warming up of the sea surface and higher air pressures)
occurred with unusual intensity.
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> res = sm.graphics.fboxplot(data.raw_data[:, 1:], wfactor=2.58,
... labels=data.raw_data[:, 0].astype(int),
... ax=ax)
>>> ax.set_xlabel("Month of the year")
>>> ax.set_ylabel("Sea surface temperature (C)")
>>> ax.set_xticks(np.arange(13, step=3) - 1)
>>> ax.set_xticklabels(["", "Mar", "Jun", "Sep", "Dec"])
>>> ax.set_xlim([-0.2, 11.2])
>>> plt.show()
.. plot:: plots/graphics_functional_fboxplot.py
"""
fig, ax = utils.create_mpl_ax(ax)
if plot_opts.get('cmap_outliers') is None:
from matplotlib.cm import rainbow_r
plot_opts['cmap_outliers'] = rainbow_r
data = np.asarray(data)
if xdata is None:
xdata = np.arange(data.shape[1])
# Calculate band depth if required.
if depth is None:
if method not in ['MBD', 'BD2']:
raise ValueError("Unknown value for parameter `method`.")
depth = banddepth(data, method=method)
else:
if depth.size != data.shape[0]:
raise ValueError("Provided `depth` array is not of correct size.")
# Inner area is 25%-75% region of band-depth ordered curves.
ix_depth = np.argsort(depth)[::-1]
median_curve = data[ix_depth[0], :]
ix_IQR = data.shape[0] // 2
lower = data[ix_depth[0:ix_IQR], :].min(axis=0)
upper = data[ix_depth[0:ix_IQR], :].max(axis=0)
# Determine region for outlier detection
inner_median = np.median(data[ix_depth[0:ix_IQR], :], axis=0)
lower_fence = inner_median - (inner_median - lower) * wfactor
upper_fence = inner_median + (upper - inner_median) * wfactor
# Find outliers.
ix_outliers = []
ix_nonout = []
for ii in range(data.shape[0]):
if np.any(data[ii, :] > upper_fence) or np.any(data[ii, :] < lower_fence):
ix_outliers.append(ii)
else:
ix_nonout.append(ii)
ix_outliers = np.asarray(ix_outliers)
# Plot envelope of all non-outlying data
lower_nonout = data[ix_nonout, :].min(axis=0)
upper_nonout = data[ix_nonout, :].max(axis=0)
ax.fill_between(xdata, lower_nonout, upper_nonout,
color=plot_opts.get('c_outer', (0.75,0.75,0.75)))
# Plot central 50% region
ax.fill_between(xdata, lower, upper,
color=plot_opts.get('c_inner', (0.5,0.5,0.5)))
# Plot median curve
ax.plot(xdata, median_curve, color=plot_opts.get('c_median', 'k'),
lw=plot_opts.get('lw_median', 2))
# Plot outliers
    cmap = plot_opts.get('cmap_outliers')
    # guard against division by zero when there is exactly one outlier
    denom = max(len(ix_outliers) - 1, 1)
    for ii, ix in enumerate(ix_outliers):
        label = str(labels[ix]) if labels is not None else None
        ax.plot(xdata, data[ix, :],
                color=cmap(float(ii) / denom), label=label,
                lw=plot_opts.get('lw_outliers', 1))
if plot_opts.get('draw_nonout', False):
for ix in ix_nonout:
ax.plot(xdata, data[ix, :], 'k-', lw=0.5)
if labels is not None:
ax.legend()
return fig, depth, ix_depth, ix_outliers
def rainbowplot(data, xdata=None, depth=None, method='MBD', ax=None,
cmap=None):
"""Create a rainbow plot for a set of curves.
A rainbow plot contains line plots of all curves in the dataset, colored in
order of functional depth. The median curve is shown in black.
Parameters
----------
data : sequence of ndarrays or 2-D ndarray
The vectors of functions to create a functional boxplot from. If a
sequence of 1-D arrays, these should all be the same size.
The first axis is the function index, the second axis the one along
which the function is defined. So ``data[0, :]`` is the first
functional curve.
xdata : ndarray, optional
The independent variable for the data. If not given, it is assumed to
be an array of integers 0..N with N the length of the vectors in
`data`.
depth : ndarray, optional
A 1-D array of band depths for `data`, or equivalent order statistic.
If not given, it will be calculated through `banddepth`.
method : {'MBD', 'BD2'}, optional
The method to use to calculate the band depth. Default is 'MBD'.
ax : Matplotlib AxesSubplot instance, optional
If given, this subplot is used to plot in instead of a new figure being
created.
cmap : Matplotlib LinearSegmentedColormap instance, optional
The colormap used to color curves with. Default is a rainbow colormap,
with red used for the most central and purple for the least central
curves.
Returns
-------
fig : Matplotlib figure instance
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
banddepth, fboxplot
References
----------
    [1] R.J. Hyndman and H.L. Shang, "Rainbow Plots, Bagplots, and Boxplots for
        Functional Data", Journal of Computational and Graphical Statistics,
        vol. 19, pp. 29-45, 2010.
Examples
--------
Load the El Nino dataset. Consists of 60 years worth of Pacific Ocean sea
surface temperature data.
>>> import matplotlib.pyplot as plt
>>> import statsmodels.api as sm
>>> data = sm.datasets.elnino.load()
Create a rainbow plot:
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> res = sm.graphics.rainbowplot(data.raw_data[:, 1:], ax=ax)
>>> ax.set_xlabel("Month of the year")
>>> ax.set_ylabel("Sea surface temperature (C)")
>>> ax.set_xticks(np.arange(13, step=3) - 1)
>>> ax.set_xticklabels(["", "Mar", "Jun", "Sep", "Dec"])
>>> ax.set_xlim([-0.2, 11.2])
>>> plt.show()
.. plot:: plots/graphics_functional_rainbowplot.py
"""
fig, ax = utils.create_mpl_ax(ax)
if cmap is None:
from matplotlib.cm import rainbow_r
cmap = rainbow_r
data = np.asarray(data)
if xdata is None:
xdata = np.arange(data.shape[1])
# Calculate band depth if required.
if depth is None:
if method not in ['MBD', 'BD2']:
raise ValueError("Unknown value for parameter `method`.")
depth = banddepth(data, method=method)
else:
if depth.size != data.shape[0]:
raise ValueError("Provided `depth` array is not of correct size.")
ix_depth = np.argsort(depth)[::-1]
# Plot all curves, colored by depth
num_curves = data.shape[0]
for ii in range(num_curves):
ax.plot(xdata, data[ix_depth[ii], :], c=cmap(ii / (num_curves - 1.)))
# Plot the median curve
median_curve = data[ix_depth[0], :]
ax.plot(xdata, median_curve, 'k-', lw=2)
return fig
def banddepth(data, method='MBD'):
"""Calculate the band depth for a set of functional curves.
Band depth is an order statistic for functional data (see `fboxplot`), with
a higher band depth indicating larger "centrality". In analog to scalar
data, the functional curve with highest band depth is called the median
curve, and the band made up from the first N/2 of N curves is the 50%
central region.
Parameters
----------
data : ndarray
The vectors of functions to create a functional boxplot from.
The first axis is the function index, the second axis the one along
which the function is defined. So ``data[0, :]`` is the first
functional curve.
method : {'MBD', 'BD2'}, optional
Whether to use the original band depth (with J=2) of [1]_ or the
modified band depth. See Notes for details.
Returns
-------
depth : ndarray
Depth values for functional curves.
Notes
-----
Functional band depth as an order statistic for functional data was
proposed in [1]_ and applied to functional boxplots and bagplots in [2]_.
The method 'BD2' checks for each curve whether it lies completely inside
bands constructed from two curves. All permutations of two curves in the
set of curves are used, and the band depth is normalized to one. Due to
the complete curve having to fall within the band, this method yields a lot
of ties.
The method 'MBD' is similar to 'BD2', but checks the fraction of the curve
falling within the bands. It therefore generates very few ties.
References
----------
.. [1] S. Lopez-Pintado and J. Romo, "On the Concept of Depth for
Functional Data", Journal of the American Statistical Association,
vol. 104, pp. 718-734, 2009.
.. [2] Y. Sun and M.G. Genton, "Functional Boxplots", Journal of
Computational and Graphical Statistics, vol. 20, pp. 1-19, 2011.
"""
def _band2(x1, x2, curve):
xb = np.vstack([x1, x2])
if np.any(curve < xb.min(axis=0)) or np.any(curve > xb.max(axis=0)):
res = 0
else:
res = 1
return res
def _band_mod(x1, x2, curve):
xb = np.vstack([x1, x2])
res = np.logical_and(curve >= xb.min(axis=0),
curve <= xb.max(axis=0))
return np.sum(res) / float(res.size)
if method == 'BD2':
band = _band2
elif method == 'MBD':
band = _band_mod
else:
raise ValueError("Unknown input value for parameter `method`.")
    num = data.shape[0]
    ix = np.arange(num)
    # number of 2-curve combinations; the normalization is the same for every curve
    normfactor = factorial(num) / 2. / factorial(num - 2)
    depth = []
    for ii in range(num):
        res = 0
        for ix1, ix2 in combinations(ix, 2):
            res += band(data[ix1, :], data[ix2, :], data[ii, :])
        # Normalize by number of combinations to get band depth
        depth.append(float(res) / normfactor)
return np.asarray(depth)
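if __name__ == '__main__':
    # Minimal smoke test on synthetic curves (illustrative sketch only; the
    # data, seed and shift below are arbitrary choices, not part of the
    # statsmodels API or test suite).
    import matplotlib.pyplot as plt
    np.random.seed(0)
    x = np.linspace(0, 2 * np.pi, 50)
    # 20 noisy, phase-shifted sine curves
    curves = np.array([np.sin(x + 0.2 * ii) + 0.1 * np.random.randn(x.size)
                       for ii in range(20)])
    curves[0] += 2.0  # shift one curve upwards to create an obvious outlier
    fig, depth, ix_depth, ix_outliers = fboxplot(curves, xdata=x)
    print('most central curve: %d' % ix_depth[0])
    print('outlying curves: %s' % ix_outliers)
    plt.show()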
| bsd-3-clause |
necozay/tulip-control | tulip/transys/export/graph2dot.py | 1 | 17106 | # Copyright (c) 2013-2014 by California Institute of Technology
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the California Institute of Technology nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL CALTECH
# OR THE CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
"""Convert labeled graph to dot using
pydot and custom filtering
"""
from __future__ import division
import logging
import re
from collections import Iterable
from textwrap import fill
from cStringIO import StringIO
import numpy as np
import networkx as nx
from networkx.utils import make_str
import pydot
# inline:
#
# import webcolors
logger = logging.getLogger(__name__)
def _states2dot_str(graph, to_pydot_graph, wrap=10,
tikz=False, rankdir='TB'):
"""Copy nodes to given Pydot graph, with attributes for dot export."""
# TODO generate LaTeX legend table for edge labels
states = graph.states
# get labeling def
if hasattr(graph, '_state_label_def'):
label_def = graph._state_label_def
if hasattr(graph, '_state_dot_label_format'):
label_format = graph._state_dot_label_format
else:
label_format = {'type?label': '', 'separator': '\n'}
for u, d in graph.nodes_iter(data=True):
# initial state ?
is_initial = u in states.initial
is_accepting = _is_accepting(graph, u)
# state annotation
node_dot_label = _form_node_label(
u, d, label_def,
label_format, wrap, tikz=tikz
)
# node_dot_label = fill(str(state), width=wrap)
rim_color = d.get('color', 'black')
if tikz:
_state2tikz(graph, to_pydot_graph, u,
is_initial, is_accepting, rankdir,
rim_color, d, node_dot_label)
else:
_state2dot(graph, to_pydot_graph, u,
is_initial, is_accepting,
rim_color, d, node_dot_label)
def _state2dot(graph, to_pydot_graph, state,
is_initial, is_accepting,
rim_color, d, node_dot_label):
if is_initial:
_add_incoming_edge(to_pydot_graph, state)
normal_shape = graph.dot_node_shape['normal']
accept_shape = graph.dot_node_shape.get('accepting', '')
shape = accept_shape if is_accepting else normal_shape
    corners = 'rounded' if shape == 'rectangle' else ''
    rim_color = '"' + _format_color(rim_color, 'dot') + '"'
    fc = d.get('fillcolor', 'none')
    filled = '' if fc == 'none' else 'filled'
    if fc == 'gradient':
# top/bottom colors not supported for dot
lc = d.get('left_color', d['top_color'])
rc = d.get('right_color', d['bottom_color'])
if isinstance(lc, basestring):
fillcolor = lc
elif isinstance(lc, dict):
fillcolor = lc.keys()[0]
else:
raise TypeError('left_color must be str or dict.')
if isinstance(rc, basestring):
fillcolor += ':' + rc
elif isinstance(rc, dict):
fillcolor += ':' + rc.keys()[0]
else:
raise TypeError('right_color must be str or dict.')
else:
fillcolor = _format_color(fc, 'dot')
if corners and filled:
node_style = '"' + corners + ', ' + filled + '"'
elif corners:
node_style = '"' + corners + '"'
else:
node_style = '"' + filled + '"'
to_pydot_graph.add_node(
state,
label=node_dot_label,
shape=shape,
style=node_style,
color=rim_color,
fillcolor='"' + fillcolor + '"')
def _state2tikz(graph, to_pydot_graph, state,
is_initial, is_accepting, rankdir,
rim_color, d, node_dot_label):
style = 'state'
    if rankdir == 'LR':
        init_dir = 'initial left'
    elif rankdir == 'RL':
        init_dir = 'initial right'
    elif rankdir == 'TB':
        init_dir = 'initial above'
    elif rankdir == 'BT':
        init_dir = 'initial below'
else:
raise ValueError('Unknown rankdir')
if is_initial:
style += ', initial by arrow, ' + init_dir + ', initial text='
if is_accepting:
style += ', accepting'
    if graph.dot_node_shape['normal'] == 'rectangle':
style += ', shape = rectangle, rounded corners'
# darken the rim
if 'black' in rim_color:
c = _format_color(rim_color, 'tikz')
else:
c = _format_color(rim_color, 'tikz') + '!black!30'
style += ', draw = ' + c
fill = d.get('fillcolor')
    if fill == 'gradient':
s = {'top_color', 'bottom_color',
'left_color', 'right_color'}
for x in s:
if x in d:
style += ', ' + x + ' = ' + _format_color(d[x], 'tikz')
elif fill is not None:
# not gradient
style += ', fill = ' + _format_color(fill, 'tikz')
else:
logger.debug('fillcolor is None')
to_pydot_graph.add_node(
state,
texlbl=node_dot_label,
style=style)
def _format_color(color, prog='tikz'):
"""Encode color in syntax for given program.
@type color:
- C{str} for single color or
- C{dict} for weighted color mix
@type prog: 'tikz' or 'dot'
"""
if isinstance(color, basestring):
return color
if not isinstance(color, dict):
raise Exception('color must be str or dict')
    if prog == 'tikz':
        s = '!'.join([k + '!' + str(v) for k, v in color.iteritems()])
    elif prog == 'dot':
t = sum(color.itervalues())
try:
import webcolors
# mix them
result = np.array((0.0, 0.0, 0.0))
for c, w in color.iteritems():
result += w/t * np.array(webcolors.name_to_rgb(c))
s = webcolors.rgb_to_hex(result)
except:
logger.warn('failed to import webcolors')
s = ':'.join([k + ';' + str(v/t) for k, v in color.iteritems()])
else:
raise ValueError('Unknown program: ' + str(prog) + '. '
"Available options are: 'dot' or 'tikz'.")
return s
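# Illustrative examples of the two encodings (exact strings depend on dict
# iteration order, which is arbitrary in Python 2):
#
#     _format_color({'red': 70, 'blue': 30}, prog='tikz')
#         # -> 'red!70!blue!30' (a tikz color mix)
#     _format_color({'red': 70, 'blue': 30}, prog='dot')
#         # -> roughly '#b2004c' when webcolors can mix the names,
#         #    otherwise the weighted list 'red;0.7:blue;0.3'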
def _place_initial_states(trs_graph, pd_graph, tikz):
init_subg = pydot.Subgraph('initial')
init_subg.set_rank('source')
for node in trs_graph.states.initial:
pd_node = pydot.Node(make_str(node))
init_subg.add_node(pd_node)
phantom_node = 'phantominit' + str(node)
pd_node = pydot.Node(make_str(phantom_node))
init_subg.add_node(pd_node)
pd_graph.add_subgraph(init_subg)
def _add_incoming_edge(g, state):
phantom_node = 'phantominit' + str(state)
g.add_node(phantom_node, label='""', shape='none', width='0')
g.add_edge(phantom_node, state)
def _form_node_label(state, state_data, label_def,
label_format, width=10, tikz=False):
# node itself
state_str = str(state)
state_str = state_str.replace("'", "")
# rm parentheses to reduce size of states in fig
if tikz:
state_str = state_str.replace('(', '')
state_str = state_str.replace(')', '')
# make indices subscripts
if tikz:
pattern = '([a-zA-Z]\d+)'
make_subscript = lambda x: x.group(0)[0] + '_' + x.group(0)[1:]
state_str = re.sub(pattern, make_subscript, state_str)
# SVG requires breaking the math environment into
# one math env per line. Just make 1st line math env
# if latex:
# state_str = '$' + state_str + '$'
# state_str = fill(state_str, width=width)
node_dot_label = state_str
# newline between state name and label, only if state is labeled
if len(state_data) != 0:
node_dot_label += r'\n'
# add node annotations from action, AP sets etc
# other key,values in state attr_dict ignored
pieces = list()
for (label_type, label_value) in state_data.iteritems():
if label_type not in label_def:
continue
# label formatting
type_name = label_format[label_type]
sep_type_value = label_format['type?label']
# avoid turning strings to lists,
# or non-iterables to lists
if isinstance(label_value, str):
label_str = fill(label_value, width=width)
elif isinstance(label_value, Iterable): # and not str
s = ', '.join([str(x) for x in label_value])
label_str = r'\\{' + fill(s, width=width) + r'\\}'
else:
label_str = fill(str(label_value), width=width)
pieces.append(type_name + sep_type_value + label_str)
sep_label_sets = label_format['separator']
node_dot_label += sep_label_sets.join(pieces)
if tikz:
# replace LF by latex newline
node_dot_label = node_dot_label.replace(r'\n', r'\\\\ ')
# dot2tex math mode doesn't handle newlines properly
node_dot_label = (
r'$\\begin{matrix} ' + node_dot_label +
r'\\end{matrix}$'
)
return node_dot_label
def _is_accepting(graph, state):
"""accepting state ?"""
# no accepting states defined ?
if not hasattr(graph.states, 'accepting'):
return False
return state in graph.states.accepting
def _transitions2dot_str(trans, to_pydot_graph, tikz=False):
"""Convert transitions to dot str.
@rtype: str
"""
if not hasattr(trans.graph, '_transition_label_def'):
return
if not hasattr(trans.graph, '_transition_dot_label_format'):
return
if not hasattr(trans.graph, '_transition_dot_mask'):
return
# get labeling def
label_def = trans.graph._transition_label_def
label_format = trans.graph._transition_dot_label_format
label_mask = trans.graph._transition_dot_mask
for (u, v, key, edge_data) in trans.graph.edges_iter(
data=True, keys=True
):
edge_dot_label = _form_edge_label(
edge_data, label_def,
label_format, label_mask, tikz
)
edge_color = edge_data.get('color', 'black')
to_pydot_graph.add_edge(u, v, key=key,
label=edge_dot_label,
color=edge_color)
def _form_edge_label(edge_data, label_def,
label_format, label_mask, tikz):
label = '' # dot label for edge
sep_label_sets = label_format['separator']
for label_type, label_value in edge_data.iteritems():
if label_type not in label_def:
continue
# masking defined ?
# custom filter hiding based on value
if label_type in label_mask:
# not show ?
if not label_mask[label_type](label_value):
continue
# label formatting
if label_type in label_format:
type_name = label_format[label_type]
sep_type_value = label_format['type?label']
else:
type_name = ':'
sep_type_value = r',\n'
# format iterable containers using
# mathematical set notation: {...}
if isinstance(label_value, basestring):
# str is Iterable: avoid turning it to list
label_str = label_value
elif isinstance(label_value, Iterable):
s = ', '.join([str(x) for x in label_value])
label_str = r'\\{' + fill(s) + r'\\}'
else:
label_str = str(label_value)
if tikz:
type_name = r'\mathrm' + '{' + type_name + '}'
label += (type_name + sep_type_value +
label_str + sep_label_sets)
if tikz:
label = r'\\begin{matrix}' + label + r'\\end{matrix}'
label = '"' + label + '"'
return label
def _graph2pydot(graph, wrap=10, tikz=False,
rankdir='TB'):
"""Convert (possibly labeled) state graph to dot str.
@type graph: L{LabeledDiGraph}
@rtype: str
"""
dummy_nx_graph = nx.MultiDiGraph()
_states2dot_str(graph, dummy_nx_graph, wrap=wrap, tikz=tikz,
rankdir=rankdir)
_transitions2dot_str(graph.transitions, dummy_nx_graph, tikz=tikz)
pydot_graph = nx.drawing.nx_pydot.to_pydot(dummy_nx_graph)
_place_initial_states(graph, pydot_graph, tikz)
pydot_graph.set_overlap('false')
# pydot_graph.set_size('"0.25,1"')
# pydot_graph.set_ratio('"compress"')
pydot_graph.set_nodesep(0.5)
pydot_graph.set_ranksep(0.1)
return pydot_graph
def graph2dot_str(graph, wrap=10, tikz=False):
"""Convert graph to dot string.
Requires pydot.
@type graph: L{LabeledDiGraph}
@param wrap: textwrap width
@rtype: str
"""
pydot_graph = _graph2pydot(graph, wrap=wrap, tikz=tikz)
return pydot_graph.to_string()
def save_dot(graph, path, fileformat, rankdir, prog, wrap, tikz=False):
"""Save state graph to dot file.
@type graph: L{LabeledDiGraph}
@return: True upon success
@rtype: bool
"""
pydot_graph = _graph2pydot(graph, wrap=wrap, tikz=tikz,
rankdir=rankdir)
if pydot_graph is None:
# graph2dot must have printed warning already
return False
pydot_graph.set_rankdir(rankdir)
pydot_graph.set_splines('true')
# turn off graphviz warnings caused by tikz labels
if tikz:
prog = [prog, '-q 1']
pydot_graph.write(path, format=fileformat, prog=prog)
return True
def plot_pydot(graph, prog='dot', rankdir='LR', wrap=10, ax=None):
"""Plot a networkx or pydot graph using dot.
No files written or deleted from the disk.
Note that all networkx graph classes are inherited
from networkx.Graph
See Also
========
dot & pydot documentation
@param graph: to plot
@type graph: networkx.Graph | pydot.Graph
    @param prog: GraphViz program to use
@type prog: 'dot' | 'neato' | 'circo' | 'twopi'
| 'fdp' | 'sfdp' | etc
@param rankdir: direction to layout nodes
@type rankdir: 'LR' | 'TB'
@param ax: axes
"""
try:
pydot_graph = _graph2pydot(graph, wrap=wrap)
except:
if isinstance(graph, nx.Graph):
pydot_graph = nx.drawing.nx_pydot.to_pydot(graph)
else:
raise TypeError(
'graph not networkx or pydot class.' +
'Got instead: ' + str(type(graph)))
pydot_graph.set_rankdir(rankdir)
pydot_graph.set_splines('true')
pydot_graph.set_bgcolor('gray')
png_str = pydot_graph.create_png(prog=prog)
# installed ?
try:
from IPython.display import display, Image
logger.debug('IPython installed.')
# called by IPython ?
try:
cfg = get_ipython().config
logger.debug('Script called by IPython.')
# Caution!!! : not ordinary dict,
# but IPython.config.loader.Config
# qtconsole ?
if cfg['IPKernelApp']:
logger.debug('Within IPython QtConsole.')
display(Image(data=png_str))
return True
except:
print('IPython installed, but not called from it.')
except ImportError:
logger.warn('IPython not found.\nSo loaded dot images not inline.')
# not called from IPython QtConsole, try Matplotlib...
# installed ?
try:
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
except:
logger.debug('Matplotlib not installed.')
logger.warn('Neither IPython QtConsole nor Matplotlib available.')
return None
logger.debug('Matplotlib installed.')
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
sio = StringIO()
sio.write(png_str)
sio.seek(0)
img = mpimg.imread(sio)
ax.imshow(img, aspect='equal')
plt.show(block=False)
return ax
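if __name__ == '__main__':
    # Quick manual check (illustrative sketch only): render a small plain
    # NetworkX digraph through the pydot fallback path of plot_pydot.
    # Requires GraphViz, pydot and matplotlib to be installed.
    g = nx.MultiDiGraph()
    g.add_edge('s0', 's1')
    g.add_edge('s1', 's2')
    g.add_edge('s2', 's0')
    ax = plot_pydot(g)
    if ax is not None:
        raw_input('press Enter to close the figure...')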
| bsd-3-clause |
jakobworldpeace/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 104 | 3139 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# TASK: print the mean and std for each candidate along with the parameter
# settings for all the candidates explored by grid search.
n_candidates = len(grid_search.cv_results_['params'])
for i in range(n_candidates):
print(i, 'params - %s; mean - %0.2f; std - %0.2f'
% (grid_search.cv_results_['params'][i],
grid_search.cv_results_['mean_test_score'][i],
grid_search.cv_results_['std_test_score'][i]))
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
chungjjang80/FRETBursts | fretbursts/utils/examples/matplotlib_figure_mod_toolbar.py | 2 | 1276 | """
Example on how to add widgets the toolbar of a Matplotlib figure using the
QT backend.
No QT application is created, only the toolbar of the native MPL figure is
modified.
"""
from PySide import QtGui, QtCore
import matplotlib
from matplotlib.pyplot import plot, gcf  # needed by test() below
def test():
plot([1,2,3], lw=2)
q = qt4_interface(gcf())
return q # WARNING: it's paramount to return the object otherwise, with
# no references, python deletes it and the GUI doesn't respond!
class qt4_interface:
def __init__(self,fig):
self.fig = fig
toolbar = fig.canvas.toolbar
self.line_edit = QtGui.QLineEdit()
toolbar.addWidget(self.line_edit)
self.line_edit.editingFinished.connect(self.do_something)
self.spinbox = QtGui.QDoubleSpinBox()
toolbar.addWidget(self.spinbox)
self.spinbox.valueChanged.connect(self.do_something2)
def do_something(self, *args):
self.fig.axes[0].set_title(self.line_edit.text())
self.fig.canvas.draw()
#f = open('l','a'); f.write('yes\n'); f.flush(); f.close()
def do_something2(self, *args):
self.fig.axes[0].set_xlim(0, self.spinbox.value())
self.fig.canvas.draw()
#f = open('l','a'); f.write('yes\n'); f.flush(); f.close()
| gpl-2.0 |
astroclark/bhextractor | bin/bhex_scalemassdemo.py | 1 | 4019 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2015 James Clark <james.clark@ligo.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
bhextractor_plotpca.py
Construct waveform catalogues and PCA for plotting and diagnostics
"""
import numpy as np
from matplotlib import pyplot as pl
import bhextractor_pca as bhex
import pycbc.types
import pycbc.filter
from pycbc.psd import aLIGOZeroDetHighPower
# -------------------------------
# USER INPUT
catalogue_name='Q'
theta=90.0
# END USER INPUT
# -------------------------------
# -------------------------------
# ANALYSIS
catlen=4
#
# Setup and then build the catalogue
#
catalogue = bhex.waveform_catalogue(catalogue_name=catalogue_name, fs=2048,
catalogue_len=catlen, mtotal_ref=250, Dist=1., theta=theta)
oriwave250 = np.copy(catalogue.aligned_catalogue[0,:])
#
# Do the PCA
#
pca = bhex.waveform_pca(catalogue)
#
# Build a 350 solar mass waveform from the 250 Msun PCs.
# Just use the first waveform.
betas = pca.projection_plus[catalogue.waveform_names[0]]
times = np.arange(0,len(catalogue.aligned_catalogue[0,:])/2048.,1./2048)
recwave350 = bhex.reconstruct_waveform(pca.pca_plus, betas, len(catalogue.waveform_names),
mtotal_target=350.0)
#
# Now make a catalogue at 350 solar masses and then compute the overlap
#
catalogue350 = bhex.waveform_catalogue(catalogue_name=catalogue_name, fs=2048,
catalogue_len=catlen, mtotal_ref=350, Dist=1., theta=theta)
oriwave350 = np.copy(catalogue350.aligned_catalogue[0,:])
# Finally, compute the match between the reconstructed 350 Msun system and the
# system we generated at that mass in the first place
recwave350_pycbc = pycbc.types.TimeSeries(np.real(recwave350), delta_t=1./2048)
oriwave250_pycbc = pycbc.types.TimeSeries(np.real(oriwave250), delta_t=1./2048)
oriwave350_pycbc = pycbc.types.TimeSeries(np.real(oriwave350), delta_t=1./2048)
psd = aLIGOZeroDetHighPower(len(recwave350_pycbc.to_frequencyseries()),
recwave350_pycbc.to_frequencyseries().delta_f, low_freq_cutoff=10.0)
match_cat = pycbc.filter.match(oriwave250_pycbc.to_frequencyseries(),
oriwave350_pycbc.to_frequencyseries(), psd=psd,
low_frequency_cutoff=10)[0]
match_rec = pycbc.filter.match(recwave350_pycbc.to_frequencyseries(),
oriwave350_pycbc.to_frequencyseries(), psd=psd,
low_frequency_cutoff=10)[0]
print 'Match between 250 and 350 Msun catalogue waves: ', match_cat
print 'Match between 350 reconstruction and 350 catalogue wave: ', match_rec
#
# Make plots
#
if 1:
print "Plotting reconstructions"
fig, ax = pl.subplots(nrows=2,ncols=1)
ax[0].plot(times,np.real(oriwave250), 'b', label='250 M$_{\odot}$ catalogue')
ax[0].plot(times,np.real(oriwave350), 'g', label='350 M$_{\odot}$ catalogue')
ax[0].set_xlim(0,2.5)
ax[0].set_title('Match = %f'% match_cat)
ax[0].legend(loc='upper left',prop={'size':10})
ax[1].plot(times,np.real(oriwave350), 'g', label='350 M$_{\odot}$ catalogue')
ax[1].plot(times,np.real(recwave350), 'r', label='350 M$_{\odot}$ reconstruction')
ax[1].set_xlim(0,2.5)
ax[1].set_xlabel('Time (s)')
ax[1].set_title('Match = %f'% match_rec)
ax[1].legend(loc='upper left',prop={'size':10})
fig.tight_layout()
fig.savefig('scalemassdemo.png')
| gpl-2.0 |
justrypython/EAST | svm_model_v2.py | 1 | 2801 | #encoding:UTF-8
import os
import numpy as np
import sys
import cv2
import matplotlib.pyplot as plt
from sklearn.svm import NuSVC, SVC
import datetime
import pickle
# Calculate the polygon area via the shoelace formula.
def area(p):
p = p.reshape((-1, 2))
return 0.5 * abs(sum(x0*y1 - x1*y0
for ((x0, y0), (x1, y1)) in segments(p)))
def segments(p):
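    # Pair each vertex with its successor, wrapping around to the first vertex.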
return zip(p, np.concatenate((p[1:], [p[0]])))
def calc_xy(p0, p1, p2):
cos = calc_cos(p0, p1, p2)
dis = calc_dis(p0, p2)
return dis * cos, dis * np.sqrt(1 - np.square(cos))
def calc_dis(p0, p1):
return np.sqrt(np.sum(np.square(p0-p1)))
def calc_cos(p0, p1, p2):
A = p1 - p0
B = p2 - p0
num = np.dot(A, B)
demon = np.linalg.norm(A) * np.linalg.norm(B)
return num / demon
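# Project box0's first corner into a frame anchored at one edge of the ID box,
# normalizing the offsets by that edge's length.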
def calc_new_xy(boxes):
box0 = boxes[:8]
box1 = boxes[8:]
x, y = calc_xy(box1[4:6], box1[6:], box0[:2])
dis = calc_dis(box1[4:6], box1[6:])
area0 = area(box0)
area1 = area(box1)
return x/dis, y/dis
if __name__ == '__main__':
test = True
path = '/media/zhaoke/b0685ee4-63e3-4691-ae02-feceacff6996/data/'
paths = os.listdir(path)
paths = [i for i in paths if '.txt' in i]
boxes = np.empty((800000, 9))
cnt = 0
for txt in paths:
f = open(path+txt, 'r')
lines = f.readlines()
f.close()
lines = [i.replace('\n', '').split(',') for i in lines]
lines = np.array(lines).astype(np.uint32)
boxes[cnt*10:cnt*10+len(lines)] = lines
cnt += 1
zeros = boxes==[0, 0, 0, 0, 0, 0, 0, 0, 0]
zeros_labels = zeros.all(axis=1)
zeros_labels = np.where(zeros_labels==True)
idboxes = boxes[boxes[:, 8]==7]
idboxes = np.tile(idboxes[:, :8], (1, 10))
idboxes = idboxes.reshape((-1, 8))
boxes = np.delete(boxes, zeros_labels[0], axis=0)
idboxes = np.delete(idboxes, zeros_labels[0], axis=0)
boxes_idboxes = np.concatenate((boxes[:, :8], idboxes), axis=1)
start_time = datetime.datetime.now()
print start_time
new_xy = np.apply_along_axis(calc_new_xy, 1, boxes_idboxes)
end_time = datetime.datetime.now()
print end_time - start_time
if test:
with open('clf_address_v2.pickle', 'rb') as f:
clf = pickle.load(f)
cnt = 0
for i, xy in enumerate(new_xy):
cls = int(clf.predict([xy])[0])
if cls == int(boxes[i, 8]):
cnt += 1
if i % 10000 == 0 and i != 0:
print i, ':', float(cnt) / i
else:
clf = SVC()
start_time = datetime.datetime.now()
print start_time
clf.fit(new_xy[:], boxes[:, 8])
end_time = datetime.datetime.now()
print end_time - start_time
with open('clf.pickle', 'wb') as f:
pickle.dump(clf, f)
print 'end' | gpl-3.0 |
mojoboss/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_scalability.py | 225 | 5719 | """
============================================
Scalability of Approximate Nearest Neighbors
============================================
This example studies the scalability profile of approximate 10-neighbors
queries using the LSHForest with ``n_estimators=20`` and ``n_candidates=200``
when varying the number of samples in the dataset.
The first plot demonstrates the relationship between query time and index size
of LSHForest. Query time is compared with the brute force method in exact
nearest neighbor search for the same index sizes. The brute force queries have a
very predictable linear scalability with the index size (full scan). The
LSHForest index has a sub-linear scalability profile but can be slower for
small datasets.
The second plot shows the speedup when using approximate queries vs brute force
exact queries. The speedup tends to increase with the dataset size but should
reach a plateau typically when doing queries on datasets with millions of
samples and a few hundred dimensions. Higher dimensional datasets tend to
benefit more from LSHForest indexing.
The break-even point (speedup = 1) depends on the dimensionality and structure
of the indexed data and the parameters of the LSHForest index.
The precision of approximate queries should decrease slowly with the dataset
size. The speed of the decrease depends mostly on the LSHForest parameters and
the dimensionality of the data.
"""
from __future__ import division
print(__doc__)
# Authors: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
###############################################################################
import time
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Parameters of the study
n_samples_min = int(1e3)
n_samples_max = int(1e5)
n_features = 100
n_centers = 100
n_queries = 100
n_steps = 6
n_iter = 5
# Initialize the range of `n_samples`
n_samples_values = np.logspace(np.log10(n_samples_min),
np.log10(n_samples_max),
n_steps).astype(np.int)
# Generate some structured data
rng = np.random.RandomState(42)
all_data, _ = make_blobs(n_samples=n_samples_max + n_queries,
n_features=n_features, centers=n_centers, shuffle=True,
random_state=0)
queries = all_data[:n_queries]
index_data = all_data[n_queries:]
# Metrics to collect for the plots
average_times_exact = []
average_times_approx = []
std_times_approx = []
accuracies = []
std_accuracies = []
average_speedups = []
std_speedups = []
# Calculate the average query time
for n_samples in n_samples_values:
X = index_data[:n_samples]
# Initialize LSHForest for queries of a single neighbor
lshf = LSHForest(n_estimators=20, n_candidates=200,
n_neighbors=10).fit(X)
nbrs = NearestNeighbors(algorithm='brute', metric='cosine',
n_neighbors=10).fit(X)
time_approx = []
time_exact = []
accuracy = []
for i in range(n_iter):
# pick one query at random to study query time variability in LSHForest
query = queries[rng.randint(0, n_queries)]
t0 = time.time()
exact_neighbors = nbrs.kneighbors(query, return_distance=False)
time_exact.append(time.time() - t0)
t0 = time.time()
approx_neighbors = lshf.kneighbors(query, return_distance=False)
time_approx.append(time.time() - t0)
accuracy.append(np.in1d(approx_neighbors, exact_neighbors).mean())
average_time_exact = np.mean(time_exact)
average_time_approx = np.mean(time_approx)
speedup = np.array(time_exact) / np.array(time_approx)
average_speedup = np.mean(speedup)
mean_accuracy = np.mean(accuracy)
std_accuracy = np.std(accuracy)
print("Index size: %d, exact: %0.3fs, LSHF: %0.3fs, speedup: %0.1f, "
"accuracy: %0.2f +/-%0.2f" %
(n_samples, average_time_exact, average_time_approx, average_speedup,
mean_accuracy, std_accuracy))
accuracies.append(mean_accuracy)
std_accuracies.append(std_accuracy)
average_times_exact.append(average_time_exact)
average_times_approx.append(average_time_approx)
std_times_approx.append(np.std(time_approx))
average_speedups.append(average_speedup)
std_speedups.append(np.std(speedup))
# Plot average query time against n_samples
plt.figure()
plt.errorbar(n_samples_values, average_times_approx, yerr=std_times_approx,
fmt='o-', c='r', label='LSHForest')
plt.plot(n_samples_values, average_times_exact, c='b',
label="NearestNeighbors(algorithm='brute', metric='cosine')")
plt.legend(loc='upper left', fontsize='small')
plt.ylim(0, None)
plt.ylabel("Average query time in seconds")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Impact of index size on response time for first "
"nearest neighbors queries")
# Plot average query speedup versus index size
plt.figure()
plt.errorbar(n_samples_values, average_speedups, yerr=std_speedups,
fmt='o-', c='r')
plt.ylim(0, None)
plt.ylabel("Average speedup")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("Speedup of the approximate NN queries vs brute force")
# Plot average precision versus index size
plt.figure()
plt.errorbar(n_samples_values, accuracies, std_accuracies, fmt='o-', c='c')
plt.ylim(0, 1.1)
plt.ylabel("precision@10")
plt.xlabel("n_samples")
plt.grid(which='both')
plt.title("precision of 10-nearest-neighbors queries with index size")
plt.show()
| bsd-3-clause |
michaelld/gnuradio | gnuradio-runtime/apps/evaluation_random_numbers.py | 7 | 5284 | #!/usr/bin/env python
#
# Copyright 2015 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr
import numpy as np
from scipy.stats import norm, laplace, rayleigh
from matplotlib import pyplot as plt
# NOTE: scipy and matplotlib are optional packages and not included in the default gnuradio dependencies
#*** SETUP ***#
# Number of realisations per histogram
num_tests = 1000000
# Set number of bins in histograms
uniform_num_bins = 31
gauss_num_bins = 31
rayleigh_num_bins = 31
laplace_num_bins = 31
rndm = gr.random() # instance of gnuradio random class (gr::random)
print('All histograms contain',num_tests,'realisations.')
#*** GENERATE DATA ***#
uniform_values = np.zeros(num_tests)
gauss_values = np.zeros(num_tests)
rayleigh_values = np.zeros(num_tests)
laplace_values = np.zeros(num_tests)
for k in range(num_tests):
uniform_values[k] = rndm.ran1()
gauss_values[k] = rndm.gasdev()
rayleigh_values[k] = rndm.rayleigh()
laplace_values[k] = rndm.laplacian()
#*** HISTOGRAM DATA AND CALCULATE EXPECTED COUNTS ***#
uniform_bins = np.linspace(0,1,uniform_num_bins)
gauss_bins = np.linspace(-8,8,gauss_num_bins)
laplace_bins = np.linspace(-8,8,laplace_num_bins)
rayleigh_bins = np.linspace(0,10,rayleigh_num_bins)
uniform_hist = np.histogram(uniform_values,uniform_bins)
gauss_hist = np.histogram(gauss_values,gauss_bins)
rayleigh_hist = np.histogram(rayleigh_values,rayleigh_bins)
laplace_hist = np.histogram(laplace_values,laplace_bins)
uniform_expected = np.zeros(uniform_num_bins-1)
gauss_expected = np.zeros(gauss_num_bins-1)
rayleigh_expected = np.zeros(rayleigh_num_bins-1)
laplace_expected = np.zeros(laplace_num_bins-1)
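# For each distribution, the expected count per bin is N * (F(upper) - F(lower)),
# where F is the CDF; for the uniform case this is simply N / n_bins.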
for k in range(len(uniform_hist[0])):
uniform_expected[k] = num_tests / float(uniform_num_bins-1)
for k in range(len(gauss_hist[0])):
gauss_expected[k] = float(norm.cdf(gauss_hist[1][k+1])-norm.cdf(gauss_hist[1][k]))*num_tests
for k in range(len(rayleigh_hist[0])):
rayleigh_expected[k] = float(rayleigh.cdf(rayleigh_hist[1][k+1])-rayleigh.cdf(rayleigh_hist[1][k]))*num_tests
for k in range(len(laplace_hist[0])):
laplace_expected[k] = float(laplace.cdf(laplace_hist[1][k+1])-laplace.cdf(laplace_hist[1][k]))*num_tests
#*** PLOT HISTOGRAMS AND EXPECTATIONS TAKEN FROM SCIPY ***#
uniform_bins_center = uniform_bins[0:-1]+(uniform_bins[1]-uniform_bins[0]) / 2.0
gauss_bins_center = gauss_bins[0:-1]+(gauss_bins[1]-gauss_bins[0]) / 2.0
rayleigh_bins_center = rayleigh_bins[0:-1]+(rayleigh_bins[1]-rayleigh_bins[0]) / 2.0
laplace_bins_center = laplace_bins[0:-1]+(laplace_bins[1]-laplace_bins[0]) / 2.0
plt.figure(1)
plt.subplot(2,1,1)
plt.plot(uniform_bins_center,uniform_hist[0],'s--',uniform_bins_center,uniform_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Uniform: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(uniform_bins_center,uniform_hist[0] / uniform_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Uniform: Relative deviation to scipy')
plt.figure(2)
plt.subplot(2,1,1)
plt.plot(gauss_bins_center,gauss_hist[0],'s--',gauss_bins_center,gauss_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Gauss: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(gauss_bins_center,gauss_hist[0] / gauss_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Gauss: Relative deviation to scipy')
plt.figure(3)
plt.subplot(2,1,1)
plt.plot(rayleigh_bins_center,rayleigh_hist[0],'s--',rayleigh_bins_center,rayleigh_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Rayleigh: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(rayleigh_bins_center,rayleigh_hist[0] / rayleigh_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Rayleigh: Relative deviation to scipy')
plt.figure(4)
plt.subplot(2,1,1)
plt.plot(laplace_bins_center,laplace_hist[0],'s--',laplace_bins_center,laplace_expected,'o:')
plt.xlabel('Bins'), plt.ylabel('Count'), plt.title('Laplace: Distribution')
plt.legend(['histogram gr::random','calculation scipy'],loc=1)
plt.subplot(2,1,2)
plt.plot(laplace_bins_center,laplace_hist[0] / laplace_expected,'rs--')
plt.xlabel('Bins'), plt.ylabel('Relative deviation'), plt.title('Laplace: Relative deviation to scipy')
plt.show()
| gpl-3.0 |
jzt5132/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score indicates the degree of confidence in
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
| bsd-3-clause |
iamgp/pyCa | pyCa/Graph.py | 1 | 2559 | from . import *
# Graphics Stuff
import matplotlib.pyplot as plt
class Graph(object):
"""docstring for Graph"""
def __init__(self, Experiment):
self.Experiment = Experiment
self.numberOfStimulantsAdded = 0
self.nameToUse = 0
def plot(self):
print ''
log(self.Experiment.name, colour="yellow")
log('==================', colour="yellow")
for i, col in self.Experiment.data.iteritems():
if i == 0:
col.name = "time"
if col.name == "time":
continue
fig, ax = plt.subplots(1)
plt.plot(self.Experiment.data.time, col, '-')
plt.title(col.name)
ax.set_ylim(
col.min() - (0.1 * col.min()), col.max() + (0.1 * col.max()))
self.nameToUse = 0
print ''
log(col.name, colour="red")
log('--------------------------------------', colour="red")
def onclick(event):
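                # The first click stores the 1st point; the second click stores
                # the 2nd point under the current stimulant name and advances
                # to the next one.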
if self.numberOfStimulantsAdded == 0:
x1 = event.xdata
y1 = event.ydata
log(' > 1st point, adding x1:{} y1:{} to {}'.format(
x1, y1, self.Experiment.names[self.nameToUse]),
colour="black")
self.Experiment.currentCell.addFirstPoint(x1, y1)
self.numberOfStimulantsAdded = 1
elif self.numberOfStimulantsAdded == 1:
x2 = event.xdata
y2 = event.ydata
log(' > 2nd point, adding x2:{} y2:{} to {}'.format(
x2, y2, self.Experiment.names[self.nameToUse]),
colour="black")
self.Experiment.currentCell.addSecondPointWithName(
x2, y2, self.Experiment.names[self.nameToUse])
self.numberOfStimulantsAdded = 0
self.nameToUse = self.nameToUse + 1
fig.canvas.mpl_connect('button_press_event', onclick)
for t in self.Experiment.times:
plt.axvspan(t, t + 5, color='red', alpha=0.1)
plt.show()
self.Experiment.currentCell.cellname = col.name
self.Experiment.cells.append(self.Experiment.currentCell)
if self.Experiment.currentCell.describe() is not None:
log(self.Experiment.currentCell.describe(),
colour="black")
self.Experiment.currentCell = Cell()
| gpl-3.0 |
neuropoly/spinalcordtoolbox | spinalcordtoolbox/scripts/sct_maths.py | 1 | 20433 | #!/usr/bin/env python
#########################################################################################
#
# Perform mathematical operations on images
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2015 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: Julien Cohen-Adad, Sara Dupont
#
# About the license: see the file LICENSE.TXT
#########################################################################################
import os
import sys
import pickle
import gzip
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import spinalcordtoolbox.math as sct_math
from spinalcordtoolbox.image import Image
from spinalcordtoolbox.utils.shell import SCTArgumentParser, Metavar, list_type, display_viewer_syntax
from spinalcordtoolbox.utils.sys import init_sct, printv, set_global_loglevel
from spinalcordtoolbox.utils.fs import extract_fname
def get_parser():
parser = SCTArgumentParser(
description='Perform mathematical operations on images. Some inputs can be either a number or a 4d image or '
'several 3d images separated with ","'
)
mandatory = parser.add_argument_group("MANDATORY ARGUMENTS")
mandatory.add_argument(
"-i",
metavar=Metavar.file,
help="Input file. Example: data.nii.gz",
required=True)
mandatory.add_argument(
"-o",
metavar=Metavar.file,
help='Output file. Example: data_mean.nii.gz',
required=True)
optional = parser.add_argument_group("OPTIONAL ARGUMENTS")
optional.add_argument(
"-h",
"--help",
action="help",
help="Show this help message and exit")
basic = parser.add_argument_group('BASIC OPERATIONS')
basic.add_argument(
"-add",
metavar='',
nargs="+",
help='Add following input. Can be a number or multiple images (separated with space).',
required=False)
basic.add_argument(
"-sub",
metavar='',
nargs="+",
help='Subtract following input. Can be a number or an image.',
required=False)
basic.add_argument(
"-mul",
metavar='',
nargs="+",
help='Multiply by following input. Can be a number or multiple images (separated with space).',
required=False)
basic.add_argument(
"-div",
metavar='',
nargs="+",
help='Divide by following input. Can be a number or an image.',
required=False)
basic.add_argument(
'-mean',
help='Average data across dimension.',
required=False,
choices=('x', 'y', 'z', 't'))
basic.add_argument(
'-rms',
help='Compute root-mean-squared across dimension.',
required=False,
choices=('x', 'y', 'z', 't'))
basic.add_argument(
'-std',
help='Compute STD across dimension.',
required=False,
choices=('x', 'y', 'z', 't'))
basic.add_argument(
"-bin",
type=float,
metavar=Metavar.float,
help='Binarize image using specified threshold. Example: 0.5',
required=False)
thresholding = parser.add_argument_group("THRESHOLDING METHODS")
thresholding.add_argument(
'-otsu',
type=int,
metavar=Metavar.int,
help='Threshold image using Otsu algorithm (from skimage). Specify the number of bins (e.g. 16, 64, 128)',
required=False)
thresholding.add_argument(
"-adap",
metavar=Metavar.list,
type=list_type(',', int),
help="R|Threshold image using Adaptive algorithm (from skimage). Provide 2 values separated by ',' that "
"correspond to the parameters below. For example, '-adap 7,0' corresponds to a block size of 7 and an "
"offset of 0.\n"
" - Block size: Odd size of pixel neighborhood which is used to calculate the threshold value. \n"
" - Offset: Constant subtracted from weighted mean of neighborhood to calculate the local threshold "
"value. Suggested offset is 0.",
required=False)
thresholding.add_argument(
"-otsu-median",
metavar=Metavar.list,
type=list_type(',', int),
help="R|Threshold image using Median Otsu algorithm (from dipy). Provide 2 values separated by ',' that "
"correspond to the parameters below. For example, '-otsu-median 3,5' corresponds to a filter size of 3 "
"repeated over 5 iterations.\n"
" - Size: Radius (in voxels) of the applied median filter.\n"
" - Iterations: Number of passes of the median filter.",
required=False)
thresholding.add_argument(
'-percent',
type=int,
help="Threshold image using percentile of its histogram.",
metavar=Metavar.int,
required=False)
thresholding.add_argument(
"-thr",
type=float,
help='Use following number to threshold image (zero below number).',
metavar=Metavar.float,
required=False)
mathematical = parser.add_argument_group("MATHEMATICAL MORPHOLOGY")
mathematical.add_argument(
'-dilate',
type=int,
metavar=Metavar.int,
help="Dilate binary or greyscale image with specified size. If shape={'square', 'cube'}: size corresponds to the length of "
"an edge (size=1 has no effect). If shape={'disk', 'ball'}: size corresponds to the radius, not including "
"the center element (size=0 has no effect).",
required=False)
mathematical.add_argument(
'-erode',
type=int,
metavar=Metavar.int,
help="Erode binary or greyscale image with specified size. If shape={'square', 'cube'}: size corresponds to the length of "
"an edge (size=1 has no effect). If shape={'disk', 'ball'}: size corresponds to the radius, not including "
"the center element (size=0 has no effect).",
required=False)
mathematical.add_argument(
'-shape',
help="R|Shape of the structuring element for the mathematical morphology operation. Default: ball.\n"
"If a 2D shape {'disk', 'square'} is selected, -dim must be specified.",
required=False,
choices=('square', 'cube', 'disk', 'ball'),
default='ball')
mathematical.add_argument(
'-dim',
type=int,
help="Dimension of the array which 2D structural element will be orthogonal to. For example, if you wish to "
"apply a 2D disk kernel in the X-Y plane, leaving Z unaffected, parameters will be: shape=disk, dim=2.",
required=False,
choices=(0, 1, 2))
filtering = parser.add_argument_group("FILTERING METHODS")
filtering.add_argument(
"-smooth",
metavar=Metavar.list,
type=list_type(',', float),
help='Gaussian smoothing filtering. Supply values for standard deviations in mm. If a single value is provided, '
'it will be applied to each axis of the image. If multiple values are provided, there must be one value '
'per image axis. (Examples: "-smooth 2.0,3.0,2.0" (3D image), "-smooth 2.0" (any-D image)).',
required=False)
filtering.add_argument(
'-laplacian',
metavar=Metavar.list,
type=list_type(',', float),
help='Laplacian filtering. Supply values for standard deviations in mm. If a single value is provided, it will '
'be applied to each axis of the image. If multiple values are provided, there must be one value per '
'image axis. (Examples: "-laplacian 2.0,3.0,2.0" (3D image), "-laplacian 2.0" (any-D image)).',
required=False)
filtering.add_argument(
'-denoise',
        help='R|Non-local means adaptive denoising from P. Coupe et al. as implemented in dipy. Separate parameters with ",". Example: p=1,b=3\n'
             '  p: (patch radius) similar patches in the non-local means are searched for locally, inside a cube of side 2*p+1 centered at each voxel of interest. Default: p=1\n'
             '  b: (block radius) the size of the block to be used (2*b+1) in the blockwise non-local means implementation. Default: b=5\n'
             '  (Note: the block radius must be smaller than the smallest image dimension; the default value is lowered for small images.)\n'
'To use default parameters, write -denoise 1',
required=False)
similarity = parser.add_argument_group("SIMILARITY METRIC")
similarity.add_argument(
'-mi',
metavar=Metavar.file,
help='Compute the mutual information (MI) between both input files (-i and -mi) as in: '
'http://scikit-learn.org/stable/modules/generated/sklearn.metrics.mutual_info_score.html',
required=False)
similarity.add_argument(
'-minorm',
metavar=Metavar.file,
help='Compute the normalized mutual information (MI) between both input files (-i and -mi) as in: '
'http://scikit-learn.org/stable/modules/generated/sklearn.metrics.normalized_mutual_info_score.html',
required=False)
similarity.add_argument(
'-corr',
metavar=Metavar.file,
help='Compute the cross correlation (CC) between both input files (-i and -cc).',
required=False)
misc = parser.add_argument_group("MISC")
misc.add_argument(
'-symmetrize',
type=int,
help='Symmetrize data along the specified dimension.',
required=False,
choices=(0, 1, 2))
misc.add_argument(
'-type',
required=False,
help='Output type.',
choices=('uint8', 'int16', 'int32', 'float32', 'complex64', 'float64', 'int8', 'uint16', 'uint32', 'int64',
'uint64'))
optional.add_argument(
'-v',
metavar=Metavar.int,
type=int,
choices=[0, 1, 2],
default=1,
# Values [0, 1, 2] map to logging levels [WARNING, INFO, DEBUG], but are also used as "if verbose == #" in API
help="Verbosity. 0: Display only errors/warnings, 1: Errors/warnings + info messages, 2: Debug mode")
return parser
# MAIN
# ==========================================================================================
def main(argv=None):
"""
Main function
:param argv:
:return:
"""
parser = get_parser()
arguments = parser.parse_args(argv)
verbose = arguments.v
set_global_loglevel(verbose=verbose)
dim_list = ['x', 'y', 'z', 't']
fname_in = arguments.i
fname_out = arguments.o
output_type = arguments.type
# Open file(s)
im = Image(fname_in)
data = im.data # 3d or 4d numpy array
dim = im.dim
# run command
if arguments.otsu is not None:
param = arguments.otsu
data_out = sct_math.otsu(data, param)
elif arguments.adap is not None:
param = arguments.adap
data_out = sct_math.adap(data, param[0], param[1])
elif arguments.otsu_median is not None:
param = arguments.otsu_median
data_out = sct_math.otsu_median(data, param[0], param[1])
elif arguments.thr is not None:
param = arguments.thr
data_out = sct_math.threshold(data, param)
elif arguments.percent is not None:
param = arguments.percent
data_out = sct_math.perc(data, param)
elif arguments.bin is not None:
bin_thr = arguments.bin
data_out = sct_math.binarize(data, bin_thr=bin_thr)
elif arguments.add is not None:
data2 = get_data_or_scalar(arguments.add, data)
data_concat = sct_math.concatenate_along_4th_dimension(data, data2)
data_out = np.sum(data_concat, axis=3)
elif arguments.sub is not None:
data2 = get_data_or_scalar(arguments.sub, data)
data_out = data - data2
elif arguments.laplacian is not None:
sigmas = arguments.laplacian
if len(sigmas) == 1:
            sigmas = [sigmas[0] for i in range(len(data.shape))]
elif len(sigmas) != len(data.shape):
            printv(parser.error('ERROR: -laplacian needs the same number of inputs as the number of image dimensions OR only one input'))
# adjust sigma based on voxel size
sigmas = [sigmas[i] / dim[i + 4] for i in range(3)]
# smooth data
data_out = sct_math.laplacian(data, sigmas)
elif arguments.mul is not None:
data2 = get_data_or_scalar(arguments.mul, data)
data_concat = sct_math.concatenate_along_4th_dimension(data, data2)
data_out = np.prod(data_concat, axis=3)
elif arguments.div is not None:
data2 = get_data_or_scalar(arguments.div, data)
data_out = np.divide(data, data2)
elif arguments.mean is not None:
dim = dim_list.index(arguments.mean)
if dim + 1 > len(np.shape(data)): # in case input volume is 3d and dim=t
data = data[..., np.newaxis]
data_out = np.mean(data, dim)
elif arguments.rms is not None:
dim = dim_list.index(arguments.rms)
if dim + 1 > len(np.shape(data)): # in case input volume is 3d and dim=t
data = data[..., np.newaxis]
data_out = np.sqrt(np.mean(np.square(data.astype(float)), dim))
elif arguments.std is not None:
dim = dim_list.index(arguments.std)
if dim + 1 > len(np.shape(data)): # in case input volume is 3d and dim=t
data = data[..., np.newaxis]
data_out = np.std(data, dim, ddof=1)
elif arguments.smooth is not None:
sigmas = arguments.smooth
if len(sigmas) == 1:
sigmas = [sigmas[0] for i in range(len(data.shape))]
elif len(sigmas) != len(data.shape):
            printv(parser.error('ERROR: -smooth needs the same number of inputs as the number of image dimensions OR only one input'))
# adjust sigma based on voxel size
sigmas = [sigmas[i] / dim[i + 4] for i in range(3)]
# smooth data
data_out = sct_math.smooth(data, sigmas)
elif arguments.dilate is not None:
if arguments.shape in ['disk', 'square'] and arguments.dim is None:
printv(parser.error('ERROR: -dim is required for -dilate with 2D morphological kernel'))
data_out = sct_math.dilate(data, size=arguments.dilate, shape=arguments.shape, dim=arguments.dim)
elif arguments.erode is not None:
if arguments.shape in ['disk', 'square'] and arguments.dim is None:
printv(parser.error('ERROR: -dim is required for -erode with 2D morphological kernel'))
data_out = sct_math.erode(data, size=arguments.erode, shape=arguments.shape, dim=arguments.dim)
elif arguments.denoise is not None:
# parse denoising arguments
p, b = 1, 5 # default arguments
list_denoise = (arguments.denoise).split(",")
for i in list_denoise:
if 'p' in i:
p = int(i.split('=')[1])
if 'b' in i:
b = int(i.split('=')[1])
data_out = sct_math.denoise_nlmeans(data, patch_radius=p, block_radius=b)
elif arguments.symmetrize is not None:
data_out = (data + data[list(range(data.shape[0] - 1, -1, -1)), :, :]) / float(2)
elif arguments.mi is not None:
# input 1 = from flag -i --> im
# input 2 = from flag -mi
im_2 = Image(arguments.mi)
compute_similarity(im, im_2, fname_out, metric='mi', metric_full='Mutual information', verbose=verbose)
data_out = None
elif arguments.minorm is not None:
im_2 = Image(arguments.minorm)
compute_similarity(im, im_2, fname_out, metric='minorm', metric_full='Normalized Mutual information', verbose=verbose)
data_out = None
elif arguments.corr is not None:
# input 1 = from flag -i --> im
# input 2 = from flag -mi
im_2 = Image(arguments.corr)
compute_similarity(im, im_2, fname_out, metric='corr', metric_full='Pearson correlation coefficient', verbose=verbose)
data_out = None
# if no flag is set
else:
data_out = None
printv(parser.error('ERROR: you need to specify an operation to do on the input image'))
if data_out is not None:
# Write output
nii_out = Image(fname_in) # use header of input file
nii_out.data = data_out
nii_out.save(fname_out, dtype=output_type)
# TODO: case of multiple outputs
# assert len(data_out) == n_out
# if n_in == n_out:
# for im_in, d_out, fn_out in zip(nii, data_out, fname_out):
# im_in.data = d_out
# im_in.absolutepath = fn_out
# if arguments.w is not None:
# im_in.hdr.set_intent('vector', (), '')
# im_in.save()
# elif n_out == 1:
# nii[0].data = data_out[0]
# nii[0].absolutepath = fname_out[0]
# if arguments.w is not None:
# nii[0].hdr.set_intent('vector', (), '')
# nii[0].save()
# elif n_out > n_in:
# for dat_out, name_out in zip(data_out, fname_out):
# im_out = nii[0].copy()
# im_out.data = dat_out
# im_out.absolutepath = name_out
# if arguments.w is not None:
# im_out.hdr.set_intent('vector', (), '')
# im_out.save()
# else:
# printv(parser.usage.generate(error='ERROR: not the correct numbers of inputs and outputs'))
# display message
if data_out is not None:
display_viewer_syntax([fname_out], verbose=verbose)
else:
printv('\nDone! File created: ' + fname_out, verbose, 'info')
def get_data(list_fname):
"""
Get data from list of file names
:param list_fname:
:return: 3D or 4D numpy array.
"""
try:
nii = [Image(f_in) for f_in in list_fname]
except Exception as e:
printv(str(e), 1, 'error') # file does not exist, exit program
data0 = nii[0].data
data = nii[0].data
# check that every images have same shape
for i in range(1, len(nii)):
if not np.shape(nii[i].data) == np.shape(data0):
printv('\nWARNING: shape(' + list_fname[i] + ')=' + str(np.shape(nii[i].data)) + ' incompatible with shape(' + list_fname[0] + ')=' + str(np.shape(data0)), 1, 'warning')
printv('\nERROR: All input images must have same dimensions.', 1, 'error')
else:
data = sct_math.concatenate_along_4th_dimension(data, nii[i].data)
return data
def get_data_or_scalar(argument, data_in):
"""
Get data from list of file names (scenario 1) or scalar (scenario 2)
:param argument: list of file names of scalar
:param data_in: if argument is scalar, use data to get np.shape
:return: 3d or 4d numpy array
"""
# try to convert argument in float
try:
# build data2 with same shape as data
data_out = data_in[:, :, :] * 0 + float(argument[0])
# if conversion fails, it should be a string (i.e. file name)
except ValueError:
data_out = get_data(argument)
return data_out
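# Example (illustrative): get_data_or_scalar(['2.5'], data) builds an array of
# 2.5 shaped like data, while get_data_or_scalar(['img.nii.gz'], data) loads
# the image file instead.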
def compute_similarity(img1: Image, img2: Image, fname_out: str, metric: str, metric_full: str, verbose):
"""
Sanitize input and compute similarity metric between two images data.
"""
if img1.data.size != img2.data.size:
raise ValueError(f"Input images don't have the same size! \nPlease use \"sct_register_multimodal -i im1.nii.gz -d im2.nii.gz -identity 1\" to put the input images in the same space")
res, data1_1d, data2_1d = sct_math.compute_similarity(img1.data, img2.data, metric=metric)
if verbose > 1:
matplotlib.use('Agg')
plt.plot(data1_1d, 'b')
plt.plot(data2_1d, 'r')
plt.title('Similarity: ' + metric_full + ' = ' + str(res))
plt.savefig('fig_similarity.png')
path_out, filename_out, ext_out = extract_fname(fname_out)
if ext_out not in ['.txt', '.pkl', '.pklz', '.pickle']:
raise ValueError(f"The output file should a text file or a pickle file. Received extension: {ext_out}")
if ext_out == '.txt':
with open(fname_out, 'w') as f:
f.write(metric_full + ': \n' + str(res))
elif ext_out == '.pklz':
pickle.dump(res, gzip.open(fname_out, 'wb'), protocol=2)
else:
        pickle.dump(res, open(fname_out, 'wb'), protocol=2)
if __name__ == "__main__":
init_sct()
main(sys.argv[1:])
| mit |
rafaelmds/fatiando | gallery/gridder/cutting.py | 6 | 1326 | """
Cutting a section from spacial data
-----------------------------------
The :func:`fatiando.gridder.cut` function extracts points from spatially
distributed data that are inside a given area. It doesn't matter whether or
not the points are on a regular grid.
"""
from fatiando import gridder
import matplotlib.pyplot as plt
import numpy as np
# Generate some synthetic data
area = (-100, 100, -60, 60)
x, y = gridder.scatter(area, 1000, seed=0)
data = np.sin(0.1*x)*np.cos(0.1*y)
# Select the data that fall inside "section"
section = [-40, 40, -25, 25]
# Tip: you can pass more than one data array as input. Use this to cut multiple
# data sources (e.g., gravity + height + topography).
x_sub, y_sub, [data_sub] = gridder.cut(x, y, [data], section)
# Plot the original data besides the cut section
plt.figure(figsize=(8, 6))
plt.subplot(1, 2, 1)
plt.axis('scaled')
plt.title("Whole data")
plt.tricontourf(y, x, data, 30, cmap='RdBu_r')
plt.plot(y, x, 'xk')
x1, x2, y1, y2 = section
plt.plot([y1, y2, y2, y1, y1], [x1, x1, x2, x2, x1], '-k', linewidth=3)
plt.xlim(area[2:])
plt.ylim(area[:2])
plt.subplot(1, 2, 2)
plt.axis('scaled')
plt.title("Subsection")
plt.plot(y_sub, x_sub, 'xk')
plt.tricontourf(y_sub, x_sub, data_sub, 30, cmap='RdBu_r')
plt.xlim(section[2:])
plt.ylim(section[:2])
plt.tight_layout()
plt.show()
| bsd-3-clause |
danielhkl/matplotlib2tikz | matplotlib2tikz/color.py | 1 | 2761 | # -*- coding: utf-8 -*-
#
import matplotlib as mpl
import numpy
def mpl_color2xcolor(data, matplotlib_color):
'''Translates a matplotlib color specification into a proper LaTeX xcolor.
'''
# Convert it to RGBA.
my_col = numpy.array(mpl.colors.ColorConverter().to_rgba(matplotlib_color))
# If the alpha channel is exactly 0, then the color is really 'none'
# regardless of the RGB channels.
if my_col[-1] == 0.0:
return data, 'none', my_col
xcol = None
# RGB values (as taken from xcolor.dtx):
available_colors = {
'red': numpy.array([1, 0, 0]),
'green': numpy.array([0, 1, 0]),
'blue': numpy.array([0, 0, 1]),
'brown': numpy.array([0.75, 0.5, 0.25]),
'lime': numpy.array([0.75, 1, 0]),
'orange': numpy.array([1, 0.5, 0]),
'pink': numpy.array([1, 0.75, 0.75]),
'purple': numpy.array([0.75, 0, 0.25]),
'teal': numpy.array([0, 0.5, 0.5]),
'violet': numpy.array([0.5, 0, 0.5]),
'black': numpy.array([0, 0, 0]),
'darkgray': numpy.array([0.25, 0.25, 0.25]),
'gray': numpy.array([0.5, 0.5, 0.5]),
'lightgray': numpy.array([0.75, 0.75, 0.75]),
'white': numpy.array([1, 1, 1])
# The colors cyan, magenta, yellow, and olive are also
# predefined by xcolor, but their RGB approximation of the
# native CMYK values is not very good. Don't use them here.
}
available_colors.update(data['custom colors'])
# Check if it exactly matches any of the colors already available.
# This case is actually treated below (alpha==1), but that loop
# may pick up combinations with black before finding the exact
# match. Hence, first check all colors.
for name, rgb in available_colors.items():
if all(my_col[:3] == rgb):
xcol = name
return data, xcol, my_col
# Check if my_col is a multiple of a predefined color and 'black'.
for name, rgb in available_colors.items():
if name == 'black':
continue
if rgb[0] != 0.0:
alpha = my_col[0] / rgb[0]
elif rgb[1] != 0.0:
alpha = my_col[1] / rgb[1]
else:
assert rgb[2] != 0.0
alpha = my_col[2] / rgb[2]
# The cases 0.0 (my_col == black) and 1.0 (my_col == rgb) are
# already accounted for by checking in available_colors above.
if all(my_col[:3] == alpha * rgb) and 0.0 < alpha < 1.0:
xcol = name + ('!%r!black' % (alpha * 100))
return data, xcol, my_col
# Lookup failed, add it to the custom list.
xcol = 'color' + str(len(data['custom colors']))
data['custom colors'][xcol] = my_col[:3]
return data, xcol, my_col
| mit |
noahbenson/neuropythy | neuropythy/graphics/__init__.py | 1 | 1109 | ####################################################################################################
# neuropythy/graphics/__init__.py
# Simple tools for making matplotlib/pyplot graphics with neuropythy.
# By Noah C. Benson
'''
The neuropythy.graphics package contains definitions of the various tools for making plots with
cortical data. The primary entry point is the function cortex_plot.
'''
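# Illustrative usage (a minimal sketch; assumes a FreeSurfer subject named
# 'bert' is available to neuropythy):
#   import neuropythy as ny
#   ny.cortex_plot(ny.freesurfer_subject('bert').lh)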
from .core import (
cmap_curvature,
cmap_polar_angle_sym, cmap_polar_angle_lh, cmap_polar_angle_rh, cmap_polar_angle,
cmap_theta_sym, cmap_theta_lh, cmap_theta_rh, cmap_theta,
cmap_eccentricity, cmap_log_eccentricity, cmap_radius, cmap_log_radius,
cmap_cmag, cmap_log_cmag, label_cmap,
vertex_curvature_color, vertex_weight,
vertex_angle, vertex_eccen, vertex_sigma, vertex_varea,
vertex_angle_color, vertex_eccen_color, vertex_sigma_color, vertex_varea_color,
angle_colors, eccen_colors, sigma_colors, radius_colors, varea_colors, to_rgba,
color_overlap, visual_field_legend, curvature_colors, cortex_plot, cortex_plot_colors,
ROIDrawer, trace_roi, scale_for_cmap)
| agpl-3.0 |
google/autocjk | src/model.py | 1 | 14838 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GAN for generating CJK characters.
The vast majority of this code is adapted from the pix2pix GAN described in
https://www.tensorflow.org/tutorials/generative/pix2pix. Changes include the
specific tensor dimensions, some tuning of magic numbers, and some changes to
loss functions.
TODO(ambuc): This file has type annotations because they're useful for a human
reader, but the build system doesn't yet enforce them with a strictly-typed
python build rule.
"""
import time
from typing import List, Text, Tuple
from IPython import display
import matplotlib.pyplot as plt
import tensorflow as tf
_LAMBDA = 100
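# Weight of the L1 reconstruction term in the generator loss; 100 is the value
# used in the pix2pix paper.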
def _load_image(filename: Text) -> List[List[tf.Tensor]]:
"""Given the filename of a PNG, returns a list of three tensors: a, b, a+b.
Args:
filename: Path to a file. The file must be a PNG and greyscale and 256x256.
Returns:
A list of tensors: a, b, and a+b.
"""
image = tf.io.read_file(filename)
image = tf.image.decode_png(image, channels=1) # greyscale
# Our images have a width which is divisible by three.
w = tf.shape(image)[1] // 3
return [
tf.cast(image[:, n * w:(n + 1) * w, :], tf.float32) for n in range(3)
]
def make_datasets(files_glob: Text) -> Tuple[tf.data.Dataset, tf.data.Dataset]:
"""Makes the train/test datasets.
Args:
files_glob: A glob (like "/tmp/folder/*.png") of all the input images.
Returns:
A pair of train, test datasets of type tf.data.Dataset.
"""
ds = tf.data.Dataset.list_files(files_glob).map(
_load_image, num_parallel_calls=tf.data.AUTOTUNE).shuffle(400).batch(1)
train_dataset_a = ds.shard(num_shards=3, index=0)
train_dataset_b = ds.shard(num_shards=3, index=1)
train_ds = train_dataset_a.concatenate(train_dataset_b)
test_ds = ds.shard(num_shards=3, index=2)
return train_ds, test_ds
def _downsample(filters: int,
size: int,
apply_batchnorm: bool = True) -> tf.keras.Sequential:
"""Downsampler from https://www.tensorflow.org/tutorials/generative/pix2pix#build_the_generator.
Args:
filters: The number of filters.
size: The size of the input tensor width at this step.
apply_batchnorm: Whether or not to apply batch normalization. Probably
should be false on the input layer, and true elsewhere.
Returns:
A sequential model.
"""
initializer = tf.random_normal_initializer(0., 0.02)
result = tf.keras.Sequential()
result.add(
tf.keras.layers.Conv2D(filters,
size,
strides=2,
padding='same',
kernel_initializer=initializer,
use_bias=False))
if apply_batchnorm:
result.add(tf.keras.layers.BatchNormalization())
result.add(tf.keras.layers.LeakyReLU())
return result
def _upsample(filters: int,
size: int,
apply_dropout: bool = False) -> tf.keras.Sequential:
"""Upsampler from https://www.tensorflow.org/tutorials/generative/pix2pix#build_the_generator.
Args:
filters: The number of filters.
size: The size of the input tensor width at this step.
apply_dropout: Whether or not to apply dropout. Probably should be true for
the first few layers and false elsewhere.
Returns:
A sequential model.
"""
initializer = tf.random_normal_initializer(0., 0.02)
result = tf.keras.Sequential()
result.add(
tf.keras.layers.Conv2DTranspose(filters,
size,
strides=2,
padding='same',
kernel_initializer=initializer,
use_bias=False))
result.add(tf.keras.layers.BatchNormalization())
if apply_dropout:
result.add(tf.keras.layers.Dropout(0.5))
result.add(tf.keras.layers.ReLU())
return result
def make_generator() -> tf.keras.Model:
"""Creates a generator.
99% of this is copied directly from
https://www.tensorflow.org/tutorials/generative/pix2pix#build_the_generator,
except for the input shape (now two channels, two greyscale images instead of
one RGB image) and output shape (one channel, one greyscale image instead of
one RGB image).
Returns:
a tf.keras.Model which returns a 256x256x1 tensor.
"""
inputs = tf.keras.layers.Input(shape=[256, 256, 2])
up_stack = [
_upsample(512, 4, apply_dropout=True), # (bs, 2, 2, 1024)
_upsample(512, 4, apply_dropout=True), # (bs, 4, 4, 1024)
_upsample(512, 4, apply_dropout=True), # (bs, 8, 8, 1024)
_upsample(512, 4), # (bs, 16, 16, 1024)
_upsample(256, 4), # (bs, 32, 32, 512)
_upsample(128, 4), # (bs, 64, 64, 256)
_upsample(64, 4), # (bs, 128, 128, 128)
]
x = inputs
skips = []
for down in [
_downsample(64, 4, apply_batchnorm=False), # (bs, 128, 128, 64)
_downsample(128, 4), # (bs, 64, 64, 128)
_downsample(256, 4), # (bs, 32, 32, 256)
_downsample(512, 4), # (bs, 16, 16, 512)
_downsample(512, 4), # (bs, 8, 8, 512)
_downsample(512, 4), # (bs, 4, 4, 512)
_downsample(512, 4), # (bs, 2, 2, 512)
_downsample(512, 4), # (bs, 1, 1, 512)
]:
x = down(x)
skips.append(x)
skips = reversed(skips[:-1])
# Upsampling and establishing the skip connections
for up, skip in zip(up_stack, skips):
x = up(x)
x = tf.keras.layers.Concatenate()([x, skip])
last = tf.keras.layers.Conv2DTranspose(
1, # one output channel, i.e. greyscale
4,
strides=2,
padding='same',
kernel_initializer=tf.random_normal_initializer(0., 0.02),
        activation='tanh')  # (bs, 256, 256, 1)
x = last(x)
return tf.keras.Model(inputs=inputs, outputs=x)
def generator_loss(loss_object: tf.keras.losses.Loss, disc_generated_output,
gen_output, target):
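    # Adversarial term: reward the generator when the discriminator scores its
    # output as real (all ones); the L1 term below pulls the output toward the
    # target image.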
gan_loss = loss_object(tf.ones_like(disc_generated_output),
disc_generated_output)
# mean absolute error
l1_loss = tf.reduce_mean(tf.abs(target - gen_output))
total_gen_loss = gan_loss + (_LAMBDA * l1_loss)
return total_gen_loss, gan_loss, l1_loss
def make_discriminator() -> tf.keras.Model:
"""Returns a discriminator.
This is 99% the same as
https://www.tensorflow.org/tutorials/generative/pix2pix#build_the_discriminator,
except that the shape of the input and output tensors are different.
Returns:
A tf.keras.model which accepts a 256x256x2 tensor and compares it to a
target 256x256x1 tensor.
"""
initializer = tf.random_normal_initializer(0., 0.02)
input_img = tf.keras.layers.Input(shape=[256, 256, 2], name='input_image')
target_img = tf.keras.layers.Input(shape=[256, 256, 1],
name='target_image')
x = tf.keras.layers.concatenate([input_img,
target_img]) # (bs, 256, 256, channels*2)
down1 = _downsample(64, 4, False)(x) # (bs, 128, 128, 64)
down2 = _downsample(128, 4)(down1) # (bs, 64, 64, 128)
down3 = _downsample(256, 4)(down2) # (bs, 32, 32, 256)
zero_pad1 = tf.keras.layers.ZeroPadding2D()(down3) # (bs, 34, 34, 256)
conv = tf.keras.layers.Conv2D(512,
4,
strides=1,
kernel_initializer=initializer,
use_bias=False)(
zero_pad1) # (bs, 31, 31, 512)
batchnorm1 = tf.keras.layers.BatchNormalization()(conv)
leaky_relu = tf.keras.layers.LeakyReLU()(batchnorm1)
zero_pad2 = tf.keras.layers.ZeroPadding2D()(
leaky_relu) # (bs, 33, 33, 512)
last = tf.keras.layers.Conv2D(1,
4,
strides=1,
kernel_initializer=initializer)(
zero_pad2) # (bs, 30, 30, 1)
return tf.keras.Model(inputs=[input_img, target_img], outputs=last)
def discriminator_loss(loss_object: tf.keras.losses.Loss, disc_real_output,
disc_generated_output) -> float:
"""Returns discriminator loss.
100% the same as
https://www.tensorflow.org/tutorials/generative/pix2pix#build_the_discriminator.
Args:
loss_object: A reusable loss_object of type
tf.keras.losses.BinaryCrossentropy.
disc_real_output: A set of real images.
disc_generated_output: A set of generator images.
Returns:
The sum of some loss functions.
"""
real_loss = loss_object(tf.ones_like(disc_real_output), disc_real_output)
generated_loss = loss_object(tf.zeros_like(disc_generated_output),
disc_generated_output)
return real_loss + generated_loss
def generate_images(model: tf.keras.Model, input_a: tf.Tensor,
input_b: tf.Tensor, target: tf.Tensor) -> None:
"""In Colab, prints [a | b | real(a,b) | predicted(a,b)] to the display.
Args:
model: The generator to use.
input_a: the LHS image.
input_b: the RHS image.
target: The real(a,b) composition.
"""
x = tf.concat([input_a, input_b], 3)
x = tf.reshape(x, [256, 256, 2])
prediction = model(x[tf.newaxis, ...], training=True)
images = [input_a[0], input_b[0], target[0], prediction[0]]
fig, axes = plt.subplots(1, 4)
titles = [
'Input Image A', 'Input Image B', 'Ground Truth', 'Predicted Image'
]
for image, axis, title in zip(images, axes, titles):
axis.set_title(title)
axis.imshow(image[:, :, 0])
axis.axis('off')
fig.show()
@tf.function
def train_step(generator: tf.keras.Model,
generator_optimizer: tf.keras.optimizers.Optimizer,
discriminator: tf.keras.Model,
discriminator_optimizer: tf.keras.optimizers.Optimizer,
loss_object: tf.keras.losses.Loss, inp_a: tf.Tensor,
inp_b: tf.Tensor, target: tf.Tensor, epoch: int,
summary_writer: tf.summary.SummaryWriter) -> None:
"""Trains the models for one (1) epoch.
See https://www.tensorflow.org/tutorials/generative/pix2pix#training.
Args:
generator: A generator model,
generator_optimizer: and an optimizer for the generator.
discriminator: A discriminator model,
discriminator_optimizer: and an optimizer for the generator.
loss_object: A reusable BinaryCrossentropy object.
inp_a: A full-width image of the left-most component.
inp_b: A full-width image of the right-most component.
target: The human-authored image of the a+b character.
epoch: The index of the epoch we're in.
summary_writer: A SummaryWriter object for writing.... summaries.
"""
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
inp_x = tf.concat([inp_a, inp_b], 3)
gen_output = generator(inp_x, training=True)
disc_real_output = discriminator([inp_x, target], training=True)
disc_generated_output = discriminator([inp_x, gen_output],
training=True)
gen_total_loss, gen_gan_loss, gen_l1_loss = generator_loss(
loss_object, disc_generated_output, gen_output, target)
disc_loss = discriminator_loss(loss_object, disc_real_output,
disc_generated_output)
# TODO(ambuc): Should this simply be gen_l1_loss?
generator_gradients = gen_tape.gradient(gen_total_loss,
generator.trainable_variables)
discriminator_gradients = disc_tape.gradient(
disc_loss, discriminator.trainable_variables)
generator_optimizer.apply_gradients(
zip(generator_gradients, generator.trainable_variables))
discriminator_optimizer.apply_gradients(
zip(discriminator_gradients, discriminator.trainable_variables))
with summary_writer.as_default():
tf.summary.scalar('gen_total_loss', gen_total_loss, step=epoch)
tf.summary.scalar('gen_gan_loss', gen_gan_loss, step=epoch)
tf.summary.scalar('gen_l1_loss', gen_l1_loss, step=epoch)
tf.summary.scalar('disc_loss', disc_loss, step=epoch)
def fit(generator: tf.keras.Model,
generator_optimizer: tf.keras.optimizers.Optimizer,
discriminator: tf.keras.Model,
discriminator_optimizer: tf.keras.optimizers.Optimizer,
loss_object: tf.keras.losses.Loss, train_ds: tf.data.Dataset,
epochs: int, test_ds: tf.data.Dataset, checkpoint: tf.train.Checkpoint,
checkpoint_prefix: Text,
summary_writer: tf.summary.SummaryWriter) -> None:
"""Runs for |epochs| and trains the models.
Args:
generator: A generator model,
generator_optimizer: and an optimizer for the generator.
discriminator: A discriminator model,
    discriminator_optimizer: and an optimizer for the discriminator.
loss_object: A reusable BinaryCrossentropy object.
    train_ds: The training dataset of (input_a, input_b, target) image triplets.
epochs: The number of epochs to train for.
    test_ds: The test dataset, sampled once per epoch to render example images.
    checkpoint: A tf.train.Checkpoint wrapping the models and optimizers.
    checkpoint_prefix: The file path prefix to save checkpoints under.
    summary_writer: A SummaryWriter object for writing summaries.
"""
for epoch in range(epochs):
start = time.time()
display.clear_output(wait=True)
for a, b, ab in test_ds.take(1):
generate_images(generator, a, b, ab)
print('Epoch: ', epoch)
for n, (inp_a, inp_b, target) in train_ds.enumerate():
print('.', end='')
if (n + 1) % 100 == 0:
print()
train_step(generator, generator_optimizer, discriminator,
discriminator_optimizer, loss_object, inp_a, inp_b,
target, epoch, summary_writer)
print()
checkpoint.save(file_prefix=checkpoint_prefix)
print('Time taken for epoch {} is {} sec\n'.format(
epoch + 1,
time.time() - start))
checkpoint.save(file_prefix=checkpoint_prefix)
| apache-2.0 |
nikitasingh981/scikit-learn | examples/preprocessing/plot_scaling_importance.py | 45 | 5269 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Importance of Feature Scaling
=========================================================
Feature scaling through standardization (or Z-score normalization)
can be an important preprocessing step for many machine learning
algorithms. Standardization involves rescaling the features such
that they have the properties of a standard normal distribution
with a mean of zero and a standard deviation of one.
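In symbols, each feature x is rescaled as z = (x - u) / s, where u is the
feature's mean and s its standard deviation.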
While many algorithms (such as SVM, K-nearest neighbors, and logistic
regression) require features to be normalized, intuitively we can
think of Principal Component Analysis (PCA) as being a prime example
of when normalization is important. In PCA we are interested in the
components that maximize the variance. If one component (e.g. human
height) varies less than another (e.g. weight) because of their
respective scales (meters vs. kilos), PCA might determine that the
direction of maximal variance more closely corresponds with the
'weight' axis, if those features are not scaled. As a change in
height of one meter can be considered much more important than the
change in weight of one kilogram, this is clearly incorrect.
To illustrate this, PCA is performed comparing the use of data with
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` applied,
to unscaled data. The results are visualized and a clear difference noted.
Looking at the 1st principal component in the unscaled set, it can be seen
that feature #13 dominates the direction, being a whole two orders of
magnitude above the other features. This is contrasted when observing
the principal component for the scaled version of the data. In the scaled
version, the orders of magnitude are roughly the same across all the features.
The dataset used is the Wine Dataset available at UCI. This dataset
has continuous features that are heterogeneous in scale due to differing
properties that they measure (e.g. alcohol content and malic acid).
The transformed data is then used to train a naive Bayes classifier, and a
clear difference in prediction accuracies is observed wherein the dataset
which is scaled before PCA vastly outperforms the unscaled version.
"""
from __future__ import print_function
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.naive_bayes import GaussianNB
from sklearn import metrics
import matplotlib.pyplot as plt
from sklearn.datasets import load_wine
from sklearn.pipeline import make_pipeline
print(__doc__)
# Code source: Tyler Lanigan <tylerlanigan@gmail.com>
# Sebastian Raschka <mail@sebastianraschka.com>
# License: BSD 3 clause
RANDOM_STATE = 42
FIG_SIZE = (10, 7)
features, target = load_wine(return_X_y=True)
# Make a train/test split using 30% test size
X_train, X_test, y_train, y_test = train_test_split(features, target,
test_size=0.30,
random_state=RANDOM_STATE)
# Fit to data and predict using pipelined PCA and GNB.
unscaled_clf = make_pipeline(PCA(n_components=2), GaussianNB())
unscaled_clf.fit(X_train, y_train)
pred_test = unscaled_clf.predict(X_test)
# Fit to data and predict using pipelined scaling, PCA and GNB.
std_clf = make_pipeline(StandardScaler(), PCA(n_components=2), GaussianNB())
std_clf.fit(X_train, y_train)
pred_test_std = std_clf.predict(X_test)
# Show prediction accuracies in scaled and unscaled data.
print('\nPrediction accuracy for the normal test dataset with PCA')
print('{:.2%}\n'.format(metrics.accuracy_score(y_test, pred_test)))
print('\nPrediction accuracy for the standardized test dataset with PCA')
print('{:.2%}\n'.format(metrics.accuracy_score(y_test, pred_test_std)))
# Extract PCA from pipeline
pca = unscaled_clf.named_steps['pca']
pca_std = std_clf.named_steps['pca']
# Show first principal components
print('\nPC 1 without scaling:\n', pca.components_[0])
print('\nPC 1 with scaling:\n', pca_std.components_[0])
# Scale and use PCA on X_train data for visualization.
scaler = std_clf.named_steps['standardscaler']
X_train_std = pca_std.transform(scaler.transform(X_train))
# visualize standardized vs. untouched dataset with PCA performed
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=FIG_SIZE)
for l, c, m in zip(range(0, 3), ('blue', 'red', 'green'), ('^', 's', 'o')):
ax1.scatter(X_train[y_train == l, 0], X_train[y_train == l, 1],
color=c,
label='class %s' % l,
alpha=0.5,
marker=m
)
for l, c, m in zip(range(0, 3), ('blue', 'red', 'green'), ('^', 's', 'o')):
ax2.scatter(X_train_std[y_train == l, 0], X_train_std[y_train == l, 1],
color=c,
label='class %s' % l,
alpha=0.5,
marker=m
)
ax1.set_title('Training dataset after PCA')
ax2.set_title('Standardized training dataset after PCA')
for ax in (ax1, ax2):
ax.set_xlabel('1st principal component')
ax.set_ylabel('2nd principal component')
ax.legend(loc='upper right')
ax.grid()
plt.tight_layout()
plt.show()
| bsd-3-clause |
loganlinn/mlia | resources/Ch10/kMeans.py | 3 | 6419 | '''
Created on Feb 16, 2011
k Means Clustering for Ch10 of Machine Learning in Action
@author: Peter Harrington
'''
from numpy import *
def loadDataSet(fileName): #general function to parse tab -delimited floats
dataMat = [] #assume last column is target value
fr = open(fileName)
for line in fr.readlines():
curLine = line.strip().split('\t')
fltLine = map(float,curLine) #map all elements to float()
dataMat.append(fltLine)
return dataMat
def distEclud(vecA, vecB):
return sqrt(sum(power(vecA - vecB, 2))) #la.norm(vecA-vecB)
def randCent(dataSet, k):
n = shape(dataSet)[1]
centroids = mat(zeros((k,n)))#create centroid mat
for j in range(n):#create random cluster centers, within bounds of each dimension
minJ = min(dataSet[:,j])
rangeJ = float(max(dataSet[:,j]) - minJ)
centroids[:,j] = mat(minJ + rangeJ * random.rand(k,1))
return centroids
def kMeans(dataSet, k, distMeas=distEclud, createCent=randCent):
m = shape(dataSet)[0]
clusterAssment = mat(zeros((m,2)))#create mat to assign data points
#to a centroid, also holds SE of each point
centroids = createCent(dataSet, k)
clusterChanged = True
while clusterChanged:
clusterChanged = False
for i in range(m):#for each data point assign it to the closest centroid
minDist = inf; minIndex = -1
for j in range(k):
distJI = distMeas(centroids[j,:],dataSet[i,:])
if distJI < minDist:
minDist = distJI; minIndex = j
if clusterAssment[i,0] != minIndex: clusterChanged = True
clusterAssment[i,:] = minIndex,minDist**2
print centroids
for cent in range(k):#recalculate centroids
ptsInClust = dataSet[nonzero(clusterAssment[:,0].A==cent)[0]]#get all the point in this cluster
centroids[cent,:] = mean(ptsInClust, axis=0) #assign centroid to mean
return centroids, clusterAssment
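# Minimal usage sketch (the data file name is hypothetical):
#   datMat = mat(loadDataSet('testSet.txt'))
#   myCentroids, clustAssign = kMeans(datMat, 4)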
def biKmeans(dataSet, k, distMeas=distEclud):
m = shape(dataSet)[0]
clusterAssment = mat(zeros((m,2)))
centroid0 = mean(dataSet, axis=0).tolist()[0]
centList =[centroid0] #create a list with one centroid
for j in range(m):#calc initial Error
clusterAssment[j,1] = distMeas(mat(centroid0), dataSet[j,:])**2
while (len(centList) < k):
lowestSSE = inf
for i in range(len(centList)):
ptsInCurrCluster = dataSet[nonzero(clusterAssment[:,0].A==i)[0],:]#get the data points currently in cluster i
centroidMat, splitClustAss = kMeans(ptsInCurrCluster, 2, distMeas)
            sseSplit = sum(splitClustAss[:,1])#compare the SSE to the current minimum
sseNotSplit = sum(clusterAssment[nonzero(clusterAssment[:,0].A!=i)[0],1])
print "sseSplit, and notSplit: ",sseSplit,sseNotSplit
if (sseSplit + sseNotSplit) < lowestSSE:
bestCentToSplit = i
bestNewCents = centroidMat
bestClustAss = splitClustAss.copy()
lowestSSE = sseSplit + sseNotSplit
bestClustAss[nonzero(bestClustAss[:,0].A == 1)[0],0] = len(centList) #change 1 to 3,4, or whatever
bestClustAss[nonzero(bestClustAss[:,0].A == 0)[0],0] = bestCentToSplit
print 'the bestCentToSplit is: ',bestCentToSplit
print 'the len of bestClustAss is: ', len(bestClustAss)
centList[bestCentToSplit] = bestNewCents[0,:].tolist()[0]#replace a centroid with two best centroids
centList.append(bestNewCents[1,:].tolist()[0])
clusterAssment[nonzero(clusterAssment[:,0].A == bestCentToSplit)[0],:]= bestClustAss#reassign new clusters, and SSE
return mat(centList), clusterAssment
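# biKmeans implements bisecting k-means: it repeatedly 2-way splits whichever
# cluster yields the lowest total SSE after splitting, which makes it less
# sensitive to the random initial centroids used by plain kMeans().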
import urllib
import json
def geoGrab(stAddress, city):
    apiStem = 'http://where.yahooapis.com/geocode?' #create a dict and constants for the geocoder
params = {}
params['flags'] = 'J'#JSON return type
params['appid'] = 'aaa0VN6k'
params['location'] = '%s %s' % (stAddress, city)
url_params = urllib.urlencode(params)
yahooApi = apiStem + url_params #print url_params
print yahooApi
c=urllib.urlopen(yahooApi)
return json.loads(c.read())
from time import sleep
def massPlaceFind(fileName):
fw = open('places.txt', 'w')
for line in open(fileName).readlines():
line = line.strip()
lineArr = line.split('\t')
retDict = geoGrab(lineArr[1], lineArr[2])
if retDict['ResultSet']['Error'] == 0:
lat = float(retDict['ResultSet']['Results'][0]['latitude'])
lng = float(retDict['ResultSet']['Results'][0]['longitude'])
print "%s\t%f\t%f" % (lineArr[0], lat, lng)
fw.write('%s\t%f\t%f\n' % (line, lat, lng))
else: print "error fetching"
sleep(1)
fw.close()
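# Great-circle distance via the spherical law of cosines:
#   d = R * arccos(sin(lat1)*sin(lat2) + cos(lat1)*cos(lat2)*cos(lon2 - lon1)),
# with R = 6371 km; vec[0,1] holds latitude and vec[0,0] longitude in degrees.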
def distSLC(vecA, vecB):#Spherical Law of Cosines
a = sin(vecA[0,1]*pi/180) * sin(vecB[0,1]*pi/180)
b = cos(vecA[0,1]*pi/180) * cos(vecB[0,1]*pi/180) * \
cos(pi * (vecB[0,0]-vecA[0,0]) /180)
return arccos(a + b)*6371.0 #pi is imported with numpy
import matplotlib
import matplotlib.pyplot as plt
def clusterClubs(numClust=5):
datList = []
for line in open('places.txt').readlines():
lineArr = line.split('\t')
datList.append([float(lineArr[4]), float(lineArr[3])])
datMat = mat(datList)
myCentroids, clustAssing = biKmeans(datMat, numClust, distMeas=distSLC)
fig = plt.figure()
rect=[0.1,0.1,0.8,0.8]
scatterMarkers=['s', 'o', '^', '8', 'p', \
'd', 'v', 'h', '>', '<']
axprops = dict(xticks=[], yticks=[])
ax0=fig.add_axes(rect, label='ax0', **axprops)
imgP = plt.imread('Portland.png')
ax0.imshow(imgP)
ax1=fig.add_axes(rect, label='ax1', frameon=False)
for i in range(numClust):
ptsInCurrCluster = datMat[nonzero(clustAssing[:,0].A==i)[0],:]
markerStyle = scatterMarkers[i % len(scatterMarkers)]
ax1.scatter(ptsInCurrCluster[:,0].flatten().A[0], ptsInCurrCluster[:,1].flatten().A[0], marker=markerStyle, s=90)
ax1.scatter(myCentroids[:,0].flatten().A[0], myCentroids[:,1].flatten().A[0], marker='+', s=300)
plt.show()
| epl-1.0 |
frrp/trading-with-python | cookbook/getDataFromYahooFinance.py | 77 | 1391 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 16 18:37:23 2011
@author: jev
"""
from urllib import urlretrieve
from urllib2 import urlopen
from pandas import Index, DataFrame
from datetime import datetime
import matplotlib.pyplot as plt
sDate = (2005,1,1)
eDate = (2011,10,1)
symbol = 'SPY'
fName = symbol+'.csv'
try: # try to load saved csv file, otherwise get from the net
fid = open(fName)
lines = fid.readlines()
fid.close()
print 'Loaded from ' , fName
except Exception as e:
print e
urlStr = 'http://ichart.finance.yahoo.com/table.csv?s={0}&a={1}&b={2}&c={3}&d={4}&e={5}&f={6}'.\
format(symbol.upper(),sDate[1]-1,sDate[2],sDate[0],eDate[1]-1,eDate[2],eDate[0])
print 'Downloading from ', urlStr
urlretrieve(urlStr,symbol+'.csv')
lines = urlopen(urlStr).readlines()
dates = []
data = [[] for i in range(6)]
# header : Date,Open,High,Low,Close,Volume,Adj Close
for line in lines[1:]:
fields = line.rstrip().split(',')
dates.append(datetime.strptime( fields[0],'%Y-%m-%d'))
for i,field in enumerate(fields[1:]):
data[i].append(float(field))
idx = Index(dates)
data = dict(zip(['open','high','low','close','volume','adj_close'],data))
# create a pandas dataframe structure
df = DataFrame(data,index=idx).sort()
df.plot(secondary_y=['volume'])
| bsd-3-clause |
elijah513/scikit-learn | examples/model_selection/plot_validation_curve.py | 229 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
jenfly/atmos-read | scripts/merra-replace-data.py | 1 | 5275 | """
Replace corrupted data files with daily data re-downloaded with wget
"""
import sys
sys.path.append('/home/jwalker/dynamics/python/atmos-tools')
sys.path.append('/home/jwalker/dynamics/python/atmos-read')
import os
import shutil
import xarray as xray
import numpy as np
import collections
import time
import matplotlib.pyplot as plt
import pandas as pd
import atmos as atm
import precipdat
import merra
# ----------------------------------------------------------------------
datadir = '/net/eady/data1/jwalker/datastore/merra2/wget/'
savedir = '/net/eady/data1/jwalker/datastore/merra2/merged/'
probdata = pd.read_csv('scripts/merra_urls/merge_data.csv', index_col=0)
# For each corrupted data file:
# - load the corrupted data file
# - load the new downloaded file for the problem day
# - calculate d/dp and other stuff
# - merge the data for the affected day
# - save into data file for the year
def latlon_filestr(lat1, lat2, lon1, lon2):
"""Return nicely formatted string for lat-lon range."""
latstr = atm.latlon_str(lat1, lat2, 'lat')
lonstr = atm.latlon_str(lon1, lon2, 'lon')
return lonstr + '_' + latstr
def latlon_data(var, lat1, lat2, lon1, lon2, plev=None):
"""Extract lat-lon subset of data."""
name = var.name
varnm = name
subset_dict = {'lat' : (lat1, lat2), 'lon' : (lon1, lon2)}
latlonstr = latlon_filestr(lat1, lat2, lon1, lon2)
if plev is not None:
name = name + '%d' % plev
subset_dict['plev'] = (plev, plev)
var = atm.subset(var, subset_dict, copy=False, squeeze=True)
var.name = name
var.attrs['filestr'] = '%s_%s' % (name, latlonstr)
var.attrs['varnm'] = varnm
return var
def pgradient(var, lat1, lat2, lon1, lon2, plev):
"""Return d/dp of a lat-lon variable."""
pwidth = 100
p1, p2 = plev - pwidth, plev + pwidth
var = atm.subset(var, {'lat' : (lat1, lat2), 'lon' : (lon1, lon2),
'plev' : (p1, p2)}, copy=False, squeeze=True)
latlonstr = latlon_filestr(lat1, lat2, lon1, lon2)
attrs = var.attrs
pname = atm.get_coord(var, 'plev', 'name')
pdim = atm.get_coord(var, 'plev', 'dim')
pres = var[pname]
pres = atm.pres_convert(pres, pres.attrs['units'], 'Pa')
dvar_dp = atm.gradient(var, pres, axis=pdim)
dvar_dp = atm.subset(dvar_dp, {pname : (plev, plev)}, copy=False,
squeeze=True)
varnm = 'D%sDP' % var.name
name = '%s%d' % (varnm, plev)
dvar_dp.name = name
attrs['long_name'] = 'd/dp of ' + var.attrs['long_name']
attrs['standard_name'] = 'd/dp of ' + var.attrs['standard_name']
attrs['units'] = ('(%s)/Pa' % attrs['units'])
attrs[pname] = plev
attrs['filestr'] = '%s_%s' % (name, latlonstr)
attrs['varnm'] = varnm
dvar_dp.attrs = attrs
return dvar_dp
def var_calcs(filenm, varnm, plev, latlon=(-90, 90, 40, 120)):
"""Process a single variable from a single day."""
lat1, lat2, lon1, lon2 = latlon
if varnm == 'DUDP':
nm, dp = 'U', True
elif varnm == 'DOMEGADP':
nm, dp = 'OMEGA', True
else:
nm, dp = varnm, False
with xray.open_dataset(filenm) as ds:
var = ds[nm].load()
if dp:
print('Computing d/dp')
var = pgradient(var, lat1, lat2, lon1, lon2, plev)
else:
var = latlon_data(var, lat1, lat2, lon1, lon2, plev)
return var
def process_row(row, datadir, savedir):
filenm1 = row['filename']
year = row['year']
varnm = row['varnm']
plev = row['plev']
jday = row['jday']
filenm2 = datadir + row['datfile']
savefile1 = filenm1
savefile2 = savedir + os.path.split(filenm1)[1]
print('%d, %s, plev=%d' % (year, varnm, plev))
print('Reading original data from ' + filenm1)
with xray.open_dataset(filenm1) as ds:
var1 = ds[varnm].load()
print('Processing new data from ' + filenm2)
var2 = var_calcs(filenm2, varnm, plev)
print('Merging data for jday %d' % jday)
var = var1.copy()
ind = jday - 1
days = atm.get_coord(var1, 'day')
if not days[ind] == jday:
raise ValueError('Days not indexed from 1, need to edit code to handle')
var[ind] = var2
print('Saving to ' + savefile1)
var.to_netcdf(savefile1)
print('Saving to ' + savefile2)
var.to_netcdf(savefile2)
data = {'orig' : var1, 'new' : var2, 'merged' : var}
return data
# Make a copy of each of the original files -- only run this code once!
# for filenm in probdata['filename']:
# shutil.copyfile(filenm, filenm.replace('.nc', '_orig.nc'))
for i, row in probdata.iterrows():
data = process_row(row, datadir, savedir)
# Plot data to check
def plot_data(probdata, savedir, i):
row = probdata.iloc[i]
filenm = row['filename']
filenm = savedir + os.path.split(filenm)[1]
jday = row['jday']
varnm = row['varnm']
with xray.open_dataset(filenm) as ds:
var = ds[varnm].load()
plt.figure(figsize=(16, 8))
plt.suptitle(os.path.split(filenm)[1])
plt.subplot(1, 3, 1)
atm.pcolor_latlon(var.sel(day=(jday-1)))
plt.title(jday - 1)
plt.subplot(1, 3, 2)
atm.pcolor_latlon(var.sel(day=jday))
plt.title(jday)
plt.subplot(1, 3, 3)
atm.pcolor_latlon(var.sel(day=(jday+1)))
plt.title(jday + 1) | mit |
ezietsman/msc-thesis | images/makeunflat2.py | 1 | 1059 | from pylab import *
import astronomy as ast
# to format the labels better
from matplotlib.ticker import FormatStrFormatter
fmt = FormatStrFormatter('%1.2g') # or whatever
X1 = load('ec2117ans_1_c.dat')
x1 = X1[:,0]
y1 = 10**(X1[:,2]/(-2.5))
y1 /= average(y1)
T0 = 2453964.3307097
P = 0.1545255
figure(figsize=(6,4))
subplots_adjust(hspace=0.6,left=0.16)
ax = subplot(211)
#plot(x1,y1,'.')
scatter((x1-T0)/P,y1,s=0.8,faceted=False)
xlabel('Orbital Phase')
ylabel('Intensity')
title('Original Lightcurve')
#ylim(min(y1)-0.0000005,max(y1)+0.0000005)
ax.yaxis.set_major_formatter(fmt)
ax = subplot(212)
x2,y2 = ast.signal.dft(x1,y1,0,7000,1)
plot(x2,y2,'k-')
xlabel('Frequency (cycles/day)')
ylabel('Amplitude')
#vlines(3560,0.000000025,0.00000003,color='k',linestyle='solid')
#vlines(950,0.000000025,0.00000003,color='k',linestyle='solid')
#text(3350,0.000000035,'DNO',fontsize=10)
#text(700,0.000000035,'lpDNO',fontsize=10)
xlim(0,7000)
ylim(0,0.004)
title('Periodogram')
#ax.yaxis.set_major_formatter(fmt)
savefig('unflattened.png')
show()
| mit |
Parallel-in-Time/pySDC | pySDC/playgrounds/Allen_Cahn/AllenCahn_contracting_circle_standard_integrators.py | 1 | 5930 | import time
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
from pySDC.implementations.datatype_classes.mesh import mesh, imex_mesh
from pySDC.implementations.problem_classes.AllenCahn_2D_FD import allencahn_fullyimplicit, allencahn_semiimplicit
# http://www.personal.psu.edu/qud2/Res/Pre/dz09sisc.pdf
def setup_problem():
problem_params = dict()
problem_params['nu'] = 2
problem_params['nvars'] = (128, 128)
problem_params['eps'] = 0.04
problem_params['newton_maxiter'] = 100
problem_params['newton_tol'] = 1E-07
problem_params['lin_tol'] = 1E-08
problem_params['lin_maxiter'] = 100
problem_params['radius'] = 0.25
return problem_params
def run_implicit_Euler(t0, dt, Tend):
"""
    Routine to run the fully-implicit Euler integrator
    Args:
        t0 (float): initial time
        dt (float): time step size
        Tend (float): end time for dumping
"""
problem = allencahn_fullyimplicit(problem_params=setup_problem(), dtype_u=mesh, dtype_f=mesh)
u = problem.u_exact(t0)
radius = []
exact_radius = []
nsteps = int((Tend - t0) / dt)
startt = time.time()
t = t0
for n in range(nsteps):
u_new = problem.solve_system(rhs=u, factor=dt, u0=u, t=t)
u = u_new
t += dt
r, re = compute_radius(u, problem.dx, t, problem.params.radius)
radius.append(r)
exact_radius.append(re)
print(' ... done with time = %6.4f, step = %i / %i' % (t, n + 1, nsteps))
print('Time to solution: %6.4f sec.' % (time.time() - startt))
fname = 'data/AC_reference_Tend{:.1e}'.format(Tend) + '.npz'
loaded = np.load(fname)
uref = loaded['uend']
err = np.linalg.norm(uref - u, np.inf)
print('Error vs. reference solution: %6.4e' % err)
return err, radius, exact_radius
def run_imex_Euler(t0, dt, Tend):
"""
    Routine to run the IMEX (implicit-explicit) Euler integrator
    Args:
        t0 (float): initial time
        dt (float): time step size
        Tend (float): end time for dumping
"""
problem = allencahn_semiimplicit(problem_params=setup_problem(), dtype_u=mesh, dtype_f=imex_mesh)
u = problem.u_exact(t0)
radius = []
exact_radius = []
nsteps = int((Tend - t0) / dt)
startt = time.time()
t = t0
for n in range(nsteps):
f = problem.eval_f(u, t)
rhs = u + dt * f.expl
u_new = problem.solve_system(rhs=rhs, factor=dt, u0=u, t=t)
u = u_new
t += dt
r, re = compute_radius(u, problem.dx, t, problem.params.radius)
radius.append(r)
exact_radius.append(re)
print(' ... done with time = %6.4f, step = %i / %i' % (t, n + 1, nsteps))
print('Time to solution: %6.4f sec.' % (time.time() - startt))
fname = 'data/AC_reference_Tend{:.1e}'.format(Tend) + '.npz'
loaded = np.load(fname)
uref = loaded['uend']
err = np.linalg.norm(uref - u, np.inf)
print('Error vs. reference solution: %6.4e' % err)
return err, radius, exact_radius
def run_CrankNicholson(t0, dt, Tend):
"""
    Routine to run the Crank-Nicolson integrator
    Args:
        t0 (float): initial time
        dt (float): time step size
        Tend (float): end time for dumping
"""
problem = allencahn_fullyimplicit(problem_params=setup_problem(), dtype_u=mesh, dtype_f=mesh)
u = problem.u_exact(t0)
radius = []
exact_radius = []
nsteps = int((Tend - t0)/dt)
startt = time.time()
t = t0
for n in range(nsteps):
rhs = u + dt / 2 * problem.eval_f(u, t)
u_new = problem.solve_system(rhs=rhs, factor=dt / 2, u0=u, t=t)
u = u_new
t += dt
r, re = compute_radius(u, problem.dx, t, problem.params.radius)
radius.append(r)
exact_radius.append(re)
print(' ... done with time = %6.4f, step = %i / %i' % (t, n + 1, nsteps))
print('Time to solution: %6.4f sec.' % (time.time() - startt))
fname = 'data/AC_reference_Tend{:.1e}'.format(Tend) + '.npz'
loaded = np.load(fname)
uref = loaded['uend']
err = np.linalg.norm(uref - u, np.inf)
print('Error vs. reference solution: %6.4e' % err)
return err, radius, exact_radius
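# In the sharp-interface limit the Allen-Cahn interface moves by mean
# curvature, dR/dt = -1/R for a circle, so the exact radius obeys
# R(t) = sqrt(max(R0**2 - 2*t, 0)), as used in compute_radius below.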
def compute_radius(u, dx, t, init_radius):
c = np.count_nonzero(u >= 0.0)
radius = np.sqrt(c / np.pi) * dx
exact_radius = np.sqrt(max(init_radius ** 2 - 2.0 * t, 0))
return radius, exact_radius
def plot_radius(xcoords, exact_radius, radii):
fig, ax = plt.subplots()
plt.plot(xcoords, exact_radius, color='k', linestyle='--', linewidth=1, label='exact')
for type, radius in radii.items():
plt.plot(xcoords, radius, linestyle='-', linewidth=2, label=type)
ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%1.2f'))
ax.set_ylabel('radius')
ax.set_xlabel('time')
ax.grid()
ax.legend(loc=3)
fname = 'data/AC_contracting_circle_standard_integrators'
plt.savefig('{}.pdf'.format(fname), bbox_inches='tight')
# plt.show()
def main_radius(cwd=''):
"""
Main driver
Args:
cwd (str): current working directory (need this for testing)
"""
# setup parameters "in time"
t0 = 0.0
dt = 0.001
Tend = 0.032
radii = {}
_, radius, exact_radius = run_implicit_Euler(t0=t0, dt=dt, Tend=Tend)
radii['implicit-Euler'] = radius
_, radius, exact_radius = run_imex_Euler(t0=t0, dt=dt, Tend=Tend)
radii['imex-Euler'] = radius
_, radius, exact_radius = run_CrankNicholson(t0=t0, dt=dt, Tend=Tend)
radii['CrankNicholson'] = radius
xcoords = [t0 + i * dt for i in range(int((Tend - t0) / dt))]
plot_radius(xcoords, exact_radius, radii)
def main_error(cwd=''):
t0 = 0
Tend = 0.032
errors = {}
# err, _, _ = run_implicit_Euler(t0=t0, dt=0.001/512, Tend=Tend)
# errors['implicit-Euler'] = err
# err, _, _ = run_imex_Euler(t0=t0, dt=0.001/512, Tend=Tend)
# errors['imex-Euler'] = err
err, _, _ = run_CrankNicholson(t0=t0, dt=0.001/64, Tend=Tend)
errors['CrankNicholson'] = err
if __name__ == "__main__":
main_error()
# main_radius()
| bsd-2-clause |
fluxcapacitor/source.ml | jupyterhub.ml/notebooks/train_deploy/zz_under_construction/zz_old/TensorFlow/Word2Vec/word2vec_basic.py | 8 | 8995 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import os
import random
import zipfile
import numpy as np
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
# Step 1: Download the data.
url = 'http://mattmahoney.net/dc/'
def maybe_download(filename, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
filename = maybe_download('text8.zip', 31344016)
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words"""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
words = read_data(filename)
print('Data size', len(words))
# Step 2: Build the dictionary and replace rare words with UNK token.
vocabulary_size = 50000
def build_dataset(words):
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(vocabulary_size - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words)
del words # Hint to reduce memory.
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10], [reverse_dictionary[i] for i in data[:10]])
data_index = 0
# Step 3: Function to generate a training batch for the skip-gram model.
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window target skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # target label at the center of the buffer
targets_to_avoid = [ skip_window ]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
return batch, labels
batch, labels = generate_batch(batch_size=8, num_skips=2, skip_window=1)
for i in range(8):
print(batch[i], reverse_dictionary[batch[i]],
'->', labels[i, 0], reverse_dictionary[labels[i, 0]])
# Step 4: Build and train a skip-gram model.
batch_size = 128
embedding_size = 128 # Dimension of the embedding vector.
skip_window = 1 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(nce_weights, nce_biases, embed, train_labels,
num_sampled, vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.initialize_all_variables()
# Step 5: Begin training.
num_steps = 100001
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print("Initialized")
average_loss = 0
for step in xrange(num_steps):
batch_inputs, batch_labels = generate_batch(
batch_size, num_skips, skip_window)
feed_dict = {train_inputs : batch_inputs, train_labels : batch_labels}
    # We perform one update step by evaluating the optimizer op (including it
    # in the list of returned values for session.run())
_, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print("Average loss at step ", step, ": ", average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in xrange(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log_str = "Nearest to %s:" % valid_word
for k in xrange(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = "%s %s," % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
# Step 6: Visualize the embeddings.
def plot_with_labels(low_dim_embs, labels, filename='tsne.png'):
assert low_dim_embs.shape[0] >= len(labels), "More labels than embeddings"
plt.figure(figsize=(18, 18)) #in inches
for i, label in enumerate(labels):
x, y = low_dim_embs[i,:]
plt.scatter(x, y)
plt.annotate(label,
xy=(x, y),
xytext=(5, 2),
textcoords='offset points',
ha='right',
va='bottom')
plt.savefig(filename)
try:
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
plot_only = 500
low_dim_embs = tsne.fit_transform(final_embeddings[:plot_only,:])
labels = [reverse_dictionary[i] for i in xrange(plot_only)]
plot_with_labels(low_dim_embs, labels)
except ImportError:
print("Please install sklearn, matplotlib, and scipy to visualize embeddings.")
| apache-2.0 |
INM-6/elephant | elephant/sta.py | 2 | 13537 | # -*- coding: utf-8 -*-
"""
Functions to calculate spike-triggered average and spike-field coherence of
analog signals.
.. autosummary::
:toctree: _toctree/sta
spike_triggered_average
spike_field_coherence
:copyright: Copyright 2014-2020 by the Elephant team, see `doc/authors.rst`.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function, unicode_literals
import warnings
import numpy as np
import quantities as pq
import scipy.signal
from neo.core import AnalogSignal, SpikeTrain
from .conversion import BinnedSpikeTrain
__all__ = [
"spike_triggered_average",
"spike_field_coherence"
]
def spike_triggered_average(signal, spiketrains, window):
"""
Calculates the spike-triggered averages of analog signals in a time window
relative to the spike times of a corresponding spiketrain for multiple
signals each. The function receives n analog signals and either one or
n spiketrains. In case it is one spiketrain this one is muliplied n-fold
and used for each of the n analog signals.
Parameters
----------
signal : neo AnalogSignal object
'signal' contains n analog signals.
spiketrains : one SpikeTrain or one numpy ndarray or a list of n of either of these.
'spiketrains' contains the times of the spikes in the spiketrains.
window : tuple of 2 Quantity objects with dimensions of time.
'window' is the start time and the stop time, relative to a spike, of
the time interval for signal averaging.
If the window size is not a multiple of the sampling interval of the
signal the window will be extended to the next multiple.
Returns
-------
result_sta : neo AnalogSignal object
'result_sta' contains the spike-triggered averages of each of the
analog signals with respect to the spikes in the corresponding
spiketrains. The length of 'result_sta' is calculated as the number
of bins from the given start and stop time of the averaging interval
and the sampling rate of the analog signal. If for an analog signal
no spike was either given or all given spikes had to be ignored
because of a too large averaging interval, the corresponding returned
analog signal has all entries as nan. The number of used spikes and
unused spikes for each analog signal are returned as annotations to
the returned AnalogSignal object.
Examples
--------
>>> signal = neo.AnalogSignal(np.array([signal1, signal2]).T, units='mV',
... sampling_rate=10/ms)
>>> stavg = spike_triggered_average(signal, [spiketrain1, spiketrain2],
... (-5 * ms, 10 * ms))
"""
# checking compatibility of data and data types
# window_starttime: time to specify the start time of the averaging
# interval relative to a spike
# window_stoptime: time to specify the stop time of the averaging
# interval relative to a spike
window_starttime, window_stoptime = window
if not (isinstance(window_starttime, pq.quantity.Quantity) and
window_starttime.dimensionality.simplified ==
pq.Quantity(1, "s").dimensionality):
raise TypeError("The start time of the window (window[0]) "
"must be a time quantity.")
if not (isinstance(window_stoptime, pq.quantity.Quantity) and
window_stoptime.dimensionality.simplified ==
pq.Quantity(1, "s").dimensionality):
raise TypeError("The stop time of the window (window[1]) "
"must be a time quantity.")
if window_stoptime <= window_starttime:
raise ValueError("The start time of the window (window[0]) must be "
"earlier than the stop time of the window (window[1]).")
# checks on signal
if not isinstance(signal, AnalogSignal):
raise TypeError(
"Signal must be an AnalogSignal, not %s." % type(signal))
if len(signal.shape) > 1:
# num_signals: number of analog signals
num_signals = signal.shape[1]
else:
raise ValueError("Empty analog signal, hence no averaging possible.")
if window_stoptime - window_starttime > signal.t_stop - signal.t_start:
raise ValueError("The chosen time window is larger than the "
"time duration of the signal.")
# spiketrains type check
if isinstance(spiketrains, (np.ndarray, SpikeTrain)):
spiketrains = [spiketrains]
elif isinstance(spiketrains, list):
for st in spiketrains:
if not isinstance(st, (np.ndarray, SpikeTrain)):
raise TypeError(
"spiketrains must be a SpikeTrain, a numpy ndarray, or a "
"list of one of those, not %s." % type(spiketrains))
else:
raise TypeError(
"spiketrains must be a SpikeTrain, a numpy ndarray, or a list of "
"one of those, not %s." % type(spiketrains))
# multiplying spiketrain in case only a single spiketrain is given
if len(spiketrains) == 1 and num_signals != 1:
template = spiketrains[0]
spiketrains = []
for i in range(num_signals):
spiketrains.append(template)
# checking for matching numbers of signals and spiketrains
if num_signals != len(spiketrains):
raise ValueError(
"The number of signals and spiketrains has to be the same.")
# checking the times of signal and spiketrains
for i in range(num_signals):
if spiketrains[i].t_start < signal.t_start:
raise ValueError(
"The spiketrain indexed by %i starts earlier than "
"the analog signal." % i)
if spiketrains[i].t_stop > signal.t_stop:
raise ValueError(
"The spiketrain indexed by %i stops later than "
"the analog signal." % i)
# *** Main algorithm: ***
# window_bins: number of bins of the chosen averaging interval
window_bins = int(np.ceil(((window_stoptime - window_starttime) *
signal.sampling_rate).simplified))
# result_sta: array containing finally the spike-triggered averaged signal
result_sta = AnalogSignal(np.zeros((window_bins, num_signals)),
sampling_rate=signal.sampling_rate, units=signal.units)
# setting of correct times of the spike-triggered average
# relative to the spike
result_sta.t_start = window_starttime
used_spikes = np.zeros(num_signals, dtype=int)
unused_spikes = np.zeros(num_signals, dtype=int)
total_used_spikes = 0
for i in range(num_signals):
# summing over all respective signal intervals around spiketimes
for spiketime in spiketrains[i]:
# checks for sufficient signal data around spiketime
if (spiketime + window_starttime >= signal.t_start and
spiketime + window_stoptime <= signal.t_stop):
# calculating the startbin in the analog signal of the
# averaging window for spike
startbin = int(np.floor(((spiketime + window_starttime -
signal.t_start) * signal.sampling_rate).simplified))
# adds the signal in selected interval relative to the spike
result_sta[:, i] += signal[
startbin: startbin + window_bins, i]
# counting of the used spikes
used_spikes[i] += 1
else:
# counting of the unused spikes
unused_spikes[i] += 1
# normalization
result_sta[:, i] = result_sta[:, i] / used_spikes[i]
total_used_spikes += used_spikes[i]
if total_used_spikes == 0:
warnings.warn(
"No spike at all was either found or used for averaging")
result_sta.annotate(used_spikes=used_spikes, unused_spikes=unused_spikes)
return result_sta
def spike_field_coherence(signal, spiketrain, **kwargs):
"""
Calculates the spike-field coherence between a analog signal(s) and a
(binned) spike train.
    The current implementation makes use of scipy.signal.coherence(). Additional
    kwargs will be directly forwarded to scipy.signal.coherence(),
except for the axis parameter and the sampling frequency, which will be
extracted from the input signals.
The spike_field_coherence function receives an analog signal array and
either a binned spike train or a spike train containing the original spike
times. In case of original spike times the spike train is binned according
to the sampling rate of the analog signal array.
The AnalogSignal object can contain one or multiple signal traces. In case
of multiple signal traces, the spike field coherence is calculated
individually for each signal trace and the spike train.
Parameters
----------
signal : neo AnalogSignal object
'signal' contains n analog signals.
spiketrain : SpikeTrain or BinnedSpikeTrain
Single spike train to perform the analysis on. The bin_size of the
binned spike train must match the sampling_rate of signal.
**kwargs:
All kwargs are passed to `scipy.signal.coherence()`.
Returns
-------
coherence : complex Quantity array
contains the coherence values calculated for each analog signal trace
in combination with the spike train. The first dimension corresponds to
the frequency, the second to the number of the signal trace.
frequencies : Quantity array
contains the frequency values corresponding to the first dimension of
the 'coherence' array
Examples
--------
Plot the SFC between a regular spike train at 20 Hz, and two sinusoidal
time series at 20 Hz and 23 Hz, respectively.
>>> import numpy as np
>>> import matplotlib.pyplot as plt
    >>> import quantities as pq
    >>> from quantities import s, ms, mV, Hz, kHz
>>> import neo, elephant
>>> t = pq.Quantity(range(10000),units='ms')
>>> f1, f2 = 20. * Hz, 23. * Hz
>>> signal = neo.AnalogSignal(np.array([
np.sin(f1 * 2. * np.pi * t.rescale(s)),
np.sin(f2 * 2. * np.pi * t.rescale(s))]).T,
units=pq.mV, sampling_rate=1. * kHz)
>>> spiketrain = neo.SpikeTrain(
range(t[0], t[-1], 50), units='ms',
t_start=t[0], t_stop=t[-1])
>>> sfc, freqs = elephant.sta.spike_field_coherence(
signal, spiketrain, window='boxcar')
>>> plt.plot(freqs, sfc[:,0])
>>> plt.plot(freqs, sfc[:,1])
>>> plt.xlabel('Frequency [Hz]')
>>> plt.ylabel('SFC')
>>> plt.xlim((0, 60))
>>> plt.show()
"""
if not hasattr(scipy.signal, 'coherence'):
raise AttributeError('scipy.signal.coherence is not available. The sfc '
'function uses scipy.signal.coherence for '
'the coherence calculation. This function is '
'available for scipy version 0.16 or newer. '
'Please update you scipy version.')
# spiketrains type check
if not isinstance(spiketrain, (SpikeTrain, BinnedSpikeTrain)):
raise TypeError(
"spiketrain must be of type SpikeTrain or BinnedSpikeTrain, "
"not %s." % type(spiketrain))
# checks on analogsignal
if not isinstance(signal, AnalogSignal):
raise TypeError(
"Signal must be an AnalogSignal, not %s." % type(signal))
if len(signal.shape) > 1:
# num_signals: number of individual traces in the analog signal
num_signals = signal.shape[1]
elif len(signal.shape) == 1:
num_signals = 1
else:
raise ValueError("Empty analog signal.")
len_signals = signal.shape[0]
# bin spiketrain if necessary
if isinstance(spiketrain, SpikeTrain):
spiketrain = BinnedSpikeTrain(
spiketrain, bin_size=signal.sampling_period)
# check the start and stop times of signal and spike trains
if spiketrain.t_start < signal.t_start:
raise ValueError(
"The spiketrain starts earlier than the analog signal.")
if spiketrain.t_stop > signal.t_stop:
raise ValueError(
"The spiketrain stops later than the analog signal.")
# check equal time resolution for both signals
if spiketrain.bin_size != signal.sampling_period:
raise ValueError(
"The spiketrain and signal must have a "
"common sampling frequency / bin_size")
# calculate how many bins to add on the left of the binned spike train
delta_t = spiketrain.t_start - signal.t_start
if delta_t % spiketrain.bin_size == 0:
left_edge = int((delta_t / spiketrain.bin_size).magnitude)
else:
raise ValueError("Incompatible binning of spike train and LFP")
right_edge = int(left_edge + spiketrain.n_bins)
# duplicate spike trains
spiketrain_array = np.zeros((1, len_signals))
spiketrain_array[0, left_edge:right_edge] = spiketrain.to_array()
spiketrains_array = np.repeat(spiketrain_array, repeats=num_signals, axis=0).transpose()
# calculate coherence
frequencies, sfc = scipy.signal.coherence(
spiketrains_array, signal.magnitude,
fs=signal.sampling_rate.rescale('Hz').magnitude,
axis=0, **kwargs)
return (pq.Quantity(sfc, units=pq.dimensionless),
pq.Quantity(frequencies, units=pq.Hz))
| bsd-3-clause |
aflaxman/scikit-learn | benchmarks/bench_sgd_regression.py | 50 | 5569 | # Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
"""
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
max_iter = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
max_iter=max_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25, tol=1e-3)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("max_iter", max_iter)
print("- benchmarking A-SGD")
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
max_iter=max_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05, tol=1e-3,
average=(max_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
plt.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
plt.subplot(m, 2, i + 1)
plt.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
plt.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
plt.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
plt.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
plt.legend(prop={"size": 10})
plt.xlabel("n_train")
plt.ylabel("RMSE")
plt.title("Test error - %d features" % list_n_features[j])
i += 1
plt.subplot(m, 2, i + 1)
plt.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
plt.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
plt.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
plt.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
plt.legend(prop={"size": 10})
plt.xlabel("n_train")
plt.ylabel("Time [sec]")
plt.title("Training time - %d features" % list_n_features[j])
i += 1
plt.subplots_adjust(hspace=.30)
plt.show()
| bsd-3-clause |
ThomasSweijen/TPF | examples/adaptiveintegrator/simple-scene-plot-NewtonIntegrator.py | 6 | 2027 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import matplotlib
matplotlib.use('TkAgg')
O.engines=[
ForceResetter(),
InsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Box_Aabb()]),
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom(),Ig2_Box_Sphere_ScGeom()],
[Ip2_FrictMat_FrictMat_FrictPhys()],
[Law2_ScGeom_FrictPhys_CundallStrack()]
),
NewtonIntegrator(damping=0.0,gravity=(0,0,-9.81)),
###
### NOTE this extra engine:
###
### You want snapshot to be taken every 1 sec (realTimeLim) or every 50 iterations (iterLim),
	### whichever comes sooner. virtTimeLim attribute is unset, hence virtual time period is not taken into account.
PyRunner(iterPeriod=20,command='myAddPlotData()')
]
O.bodies.append(box(center=[0,0,0],extents=[.5,.5,.5],fixed=True,color=[1,0,0]))
O.bodies.append(sphere([0,0,2],1,color=[0,1,0]))
O.dt=.002*PWaveTimeStep()
############################################
##### now the part pertaining to plots #####
############################################
from yade import plot
## we will have 2 plots:
## 1. t as function of i (joke test function)
## 2. i as function of t on left y-axis ('|||' makes the separation) and z_sph, v_sph (as green circles connected with line) and z_sph_half again as function of t
plot.plots={'i':('t'),'t':('z_sph',None,('v_sph','go-'),'z_sph_half')}
## this function is called by plotDataCollector
## it should add data with the labels that we will plot
## if a datum is not specified (but exists), it will be NaN and will not be plotted
def myAddPlotData():
sph=O.bodies[1]
## store some numbers under some labels
plot.addData(t=O.time,i=O.iter,z_sph=sph.state.pos[2],z_sph_half=.5*sph.state.pos[2],v_sph=sph.state.vel.norm())
print "Now calling plot.plot() to show the figures. The timestep is artificially low so that you can watch graphs being updated live."
plot.liveInterval=.2
plot.plot(subPlots=False)
O.run(int(5./O.dt));
#plot.saveGnuplot('/tmp/a')
## you can also access the data in plot.data['i'], plot.data['t'] etc, under the labels they were saved.
| gpl-2.0 |
caltech-chimera/pychimera | scripts/multiphot.py | 1 | 9783 | #!/usr/bin/env python
"""
--------------------------------------------------------------------------
Routine to perform aperture photometry on CHIMERA science frames.
Usage: python multiphot.py [options] image coords
Authors:
Navtej Saini, Lee Rosenthal
Organization:
Caltech, Pasadena, CA, USA
Version:
7 January 2016 0.1 Initial implementation
9 February 2016 0.2 User input for photometric zero point
28 July 2017 0.3 Allow processing of multiple stars.
--------------------------------------------------------------------------
"""
import os, sys
import numpy as np, warnings
from StringIO import StringIO
from optparse import OptionParser
try:
import matplotlib.pylab as plt
except ImportError:
plot_flag = False
else:
try:
import seaborn
except ImportError:
pass
plot_flag = True
import chimera
def plotter(phot_data, nframes, exptime, outfile):
"""
Plot light curve.
Parameters
----------
phot_data : numpy array
Photometry array
nframes : int
Number of image cube frames
exptime : float
Kinetic or accumulation time
outfile : string
Name of the out png image
Returns
-------
None
"""
params = {'backend': 'ps',
'font.size': 10,
'axes.labelweight': 'medium',
'figure.dpi' : 300,
'savefig.dpi': 300,
'savefig.jpeg_quality': 100
}
plt.rcParams.update(params)
ts = np.linspace(0, nframes*exptime, nframes)
plt.figure(figsize=(6,4))
plt.title("Normalized Light Curve : %s" %phot_data[0]['DATETIME'].split('T')[0])
plt.xlabel("Time (secs)")
plt.ylabel("Normalized Flux")
plt.plot(ts, phot_data['FLUX_ADU']/np.mean(phot_data['FLUX_ADU']), "r-")
plt.savefig(outfile, dpi = 300, bbox_inches = "tight")
return
def process(infile, coords, method, inner_radius, outer_radius, cen_method, window_size, output, zmag):
"""
Entry point function to process science image.
Parameters
----------
infile : string
Science image or list of science images
coords : string
Input text file with coordinates of stars
    method : string
        Method to use for determining overlap between aperture and pixels
    inner_radius : float
        Inner radius of the sky annulus in pixels
    outer_radius : float
        Outer radius of the sky annulus in pixels
cen_method : string
Centroid method
window_size : int
Centroid finding window size in pixels
output : string
Output file name
zmag : float
Photometric zero point
Returns
-------
None
"""
print "FASTPHOT: CHIMERA Fast Aperture Photometry Routine"
inner_radius = float(inner_radius)
outer_radius = float(outer_radius)
# Check if input is a string of FITS images or a text file with file names
if infile[0] == "@":
infile = infile[1:]
if not os.path.exists(infile):
print "REGISTER: Not able to locate file %s" %infile
image_cubes = []
with open(infile, "r") as fd:
for line in fd.readlines():
if len(line) > 1:
image_cubes.append(line.replace("\n", ""))
else:
image_cubes = infile.split(",")
# Number of images
ncubes = len(image_cubes)
pos = np.loadtxt(coords, ndmin = 2)
nstars = len(pos)
total_phot_data = []
for i in range(ncubes):
sci_file = image_cubes[i]
print " Processing science image %s" %sci_file
# Read FITS image and star coordinate
image = chimera.fitsread(sci_file)
# Instantiate an Aperphot object
ap = chimera.Aperphot(sci_file, coords)
# Set fwhmpsf, sigma, annulus, dannulus and zmag
ap.method = method
ap.inner_radius = inner_radius
ap.outer_radius = outer_radius
if zmag != "":
ap.zmag = float(zmag)
# Determine nominal aperture radius for photometry
if i == 0:
nom_aper = ap.cog(window_size, cen_method)
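            # cog() (curve-of-growth analysis on the first frame) sets the aperture
            # radius that is then reused for every subsequent frame and cube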
print " Nominal aperture radius : %4.1f pixels" %nom_aper
# Perform aperture photometry on all the frames
dtype = [("DATETIME", "S25"),("XCEN", "f4"),("YCEN", "f4"),("MSKY", "f8"),("NSKY", "f8"),("AREA", "f8"),("FLUX_ADU", "f8"),("FLUX_ELEC", "f8"),("FERR", "f8"),("MAG", "f8")]
phot_data = np.zeros([nstars, ap.nframes], dtype = dtype)
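        # phot_data[i, j] holds the photometry record for star i in frame j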
for j in range(ap.nframes):
print " Processing frame number : %d" %(j+1)
objpos = chimera.recenter(image[j,:,:], pos, window_size, cen_method)
aperphot_data = ap.phot(image[j,:,:], objpos, nom_aper)
pos = np.copy(objpos)
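            # Carry the recentered positions forward so the next frame starts from the latest centroids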
phot_data[:,j]['DATETIME'] = ap.addtime(j * ap.kintime).isoformat()
phot_data[:,j]['XCEN'] = aperphot_data["xcenter_raw"]
phot_data[:,j]['YCEN'] = aperphot_data["ycenter_raw"]
phot_data[:,j]['MSKY'] = aperphot_data["msky"]
phot_data[:,j]['NSKY'] = aperphot_data["nsky"]
phot_data[:,j]['AREA'] = aperphot_data["area"]
phot_data[:,j]['FLUX_ADU'] = aperphot_data["flux"]
phot_data[:,j]['FLUX_ELEC'] = phot_data[:,j]['FLUX_ADU'] * ap.epadu
phot_data[:,j]['MAG'] = ap.zmag - 2.5 * np.log10(phot_data[:,j]['FLUX_ELEC']/ap.exptime)
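            # Instrumental magnitude from the count rate: m = zmag - 2.5*log10(e-/s)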
# Calculate error in flux - using the formula
# err = sqrt(flux * gain + npix * (1 + (npix/nsky)) * (flux_sky * gain + R**2))
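            # Here flux*gain is FLUX_ELEC, npix is AREA, nsky is NSKY,
            # flux_sky*gain is MSKY*epadu and R is the detector read noise in electrons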
            phot_data[:,j]['FERR'] = np.sqrt(phot_data[:,j]['FLUX_ELEC'] +
                phot_data[:,j]['AREA'] * (1 + phot_data[:,j]['AREA']/phot_data[:,j]['NSKY']) *
                (phot_data[:,j]['MSKY'] * ap.epadu + ap.readnoise**2))
total_phot_data.append(phot_data)
# Save photometry data in numpy binary format
print " Saving photometry data as numpy binary"
if output != "":
npy_outfile = output + ".npy"
else:
npy_outfile = sci_file.replace(".fits", ".phot.npy")
if os.path.exists(npy_outfile):
os.remove(npy_outfile)
        np.save(npy_outfile, phot_data)
# Plot first pass light curve
if plot_flag:
print " Plotting normalized light curve"
if output != "":
plt_outfile = output + ".png"
else:
plt_outfile = sci_file.replace(".fits", ".lc.png")
plotter(phot_data, ap.nframes, ap.kintime, plt_outfile)
# Convert the total_phot_data to array and reshape it
print ' Saving consolidated photometry data...'
total_phot_data_arr = np.concatenate(total_phot_data, axis=1)
# Save the array as npy file
if output != "":
np.save(output+"phot_total.npy", total_phot_data_arr)
else: np.save("phot_total.npy", total_phot_data_arr)
return
if __name__ == "__main__":
usage = "Usage: python %prog [options] sci_image coords"
description = "Description. Utility to perform fast aperture photometry in CHIMERA science images."
parser = OptionParser(usage = usage, version = "%prog 0.2", description = description)
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose", default = False,
help = "print result messages to stdout"
)
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default = True,
help = "don't print result messages to stdout"
)
parser.add_option("-m", "--method", dest = "method",
action="store", metavar="METHOD", help = "Method to use for determining overlap between aperture and pixels (default is exact)",
default = "exact"
)
parser.add_option("-i", "--inner_radius", dest = "inner_radius",
action="store", metavar="INNER_RADIUS", help = "Inner radius of sky annlus in pixels (default is 14)",
default = 14
)
parser.add_option("-d", "--outer_radius", dest = "outer_radius",
action="store", metavar="OUTER_RADIUS", help = "Radius of sky annulus in pixels (default is 16)",
default = 16
)
parser.add_option("-c", "--cen_method", dest = "cen_method",
action="store", metavar="CEN_METHOD", help = "Centroid method (default is 2dg)",
default = "2dg"
)
parser.add_option("-w", "--window_size", dest = "window_size",
action="store", metavar="WINDOW_SIZE", help = "Window size for centroid (default is 35)",
default = 35
)
parser.add_option("-o", "--output", dest = "output",
action="store", metavar="OUTPUT", help = "Output file name",
default = ""
)
parser.add_option("-z", "--zmag", dest = "zmag",
action="store", metavar="ZMAG", help = "Photometric zeroo point",
default = ""
)
(options, args) = parser.parse_args()
if len(args) != 2:
parser.error("FASTPHOT: Incorrect number of arguments")
# Check verbosity
if not options.verbose:
output = StringIO()
old_stdout = sys.stdout
sys.stdout = output
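        # Redirect stdout to an in-memory buffer so progress messages are suppressed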
# Switch off warnings
warnings.filterwarnings('ignore')
process(args[0], args[1], options.method, options.inner_radius, options.outer_radius, options.cen_method, options.window_size, options.output, options.zmag)
# Reset verbosity
if not options.verbose:
sys.stdout = old_stdout
| mit |