repo_name (stringlengths 7–92) | path (stringlengths 5–129) | copies (stringclasses, 201 values) | size (stringlengths 4–6) | content (stringlengths 1.03k–375k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
btabibian/scikit-learn | examples/cluster/plot_cluster_comparison.py | 46 | 6620 | """
=========================================================
Comparing different clustering algorithms on toy datasets
=========================================================
This example shows characteristics of different
clustering algorithms on datasets that are "interesting"
but still in 2D. With the exception of the last dataset,
the parameters of each of these dataset-algorithm pairs
have been tuned to produce good clustering results. Some
algorithms are more sensitive to parameter values than
others.

The last dataset is an example of a 'null' situation for
clustering: the data is homogeneous, and there is no good
clustering. For this example, the null dataset uses the
same parameters as the dataset in the row above it, which
represents a mismatch between the parameter values and the
data structure.

While these examples give some intuition about the
algorithms, this intuition might not apply to very
high-dimensional data.
"""
print(__doc__)
import time
import warnings
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cluster, datasets, mixture
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import StandardScaler
from itertools import cycle, islice
np.random.seed(0)
# ============
# Generate datasets. We choose a size big enough to see the scalability
# of the algorithms, but not so big that the running times become too long.
# ============
n_samples = 1500
noisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,
noise=.05)
noisy_moons = datasets.make_moons(n_samples=n_samples, noise=.05)
blobs = datasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = np.random.rand(n_samples, 2), None
# Anisotropically distributed data
random_state = 170
X, y = datasets.make_blobs(n_samples=n_samples, random_state=random_state)
transformation = [[0.6, -0.6], [-0.4, 0.8]]
X_aniso = np.dot(X, transformation)
aniso = (X_aniso, y)
# blobs with varied variances
varied = datasets.make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
# ============
# Set up cluster parameters
# ============
plt.figure(figsize=(9 * 2 + 3, 12.5))
plt.subplots_adjust(left=.02, right=.98, bottom=.001, top=.96, wspace=.05,
hspace=.01)
plot_num = 1
default_base = {'quantile': .3,
'eps': .3,
'damping': .9,
'preference': -200,
'n_neighbors': 10,
'n_clusters': 3}
datasets = [
(noisy_circles, {'damping': .77, 'preference': -240,
'quantile': .2, 'n_clusters': 2}),
(noisy_moons, {'damping': .75, 'preference': -220, 'n_clusters': 2}),
(varied, {'eps': .18, 'n_neighbors': 2}),
(aniso, {'eps': .15, 'n_neighbors': 2}),
(blobs, {}),
(no_structure, {})]
for i_dataset, (dataset, algo_params) in enumerate(datasets):
# update parameters with dataset-specific values
params = default_base.copy()
params.update(algo_params)
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# estimate bandwidth for mean shift
bandwidth = cluster.estimate_bandwidth(X, quantile=params['quantile'])
# connectivity matrix for structured Ward
connectivity = kneighbors_graph(
X, n_neighbors=params['n_neighbors'], include_self=False)
# make connectivity symmetric
connectivity = 0.5 * (connectivity + connectivity.T)
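# (added note, my reading of the example: kneighbors_graph returns a directed
# k-NN adjacency matrix, and averaging it with its transpose turns it into an
# undirected, symmetric connectivity, which is what the agglomerative
# estimators below are given)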
# ============
# Create cluster objects
# ============
ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
two_means = cluster.MiniBatchKMeans(n_clusters=params['n_clusters'])
ward = cluster.AgglomerativeClustering(
n_clusters=params['n_clusters'], linkage='ward',
connectivity=connectivity)
spectral = cluster.SpectralClustering(
n_clusters=params['n_clusters'], eigen_solver='arpack',
affinity="nearest_neighbors")
dbscan = cluster.DBSCAN(eps=params['eps'])
affinity_propagation = cluster.AffinityPropagation(
damping=params['damping'], preference=params['preference'])
average_linkage = cluster.AgglomerativeClustering(
linkage="average", affinity="cityblock",
n_clusters=params['n_clusters'], connectivity=connectivity)
birch = cluster.Birch(n_clusters=params['n_clusters'])
gmm = mixture.GaussianMixture(
n_components=params['n_clusters'], covariance_type='full')
clustering_algorithms = (
('MiniBatchKMeans', two_means),
('AffinityPropagation', affinity_propagation),
('MeanShift', ms),
('SpectralClustering', spectral),
('Ward', ward),
('AgglomerativeClustering', average_linkage),
('DBSCAN', dbscan),
('Birch', birch),
('GaussianMixture', gmm)
)
for name, algorithm in clustering_algorithms:
t0 = time.time()
# catch warnings related to kneighbors_graph
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message="the number of connected components of the " +
"connectivity matrix is [0-9]{1,2}" +
" > 1. Completing it to avoid stopping the tree early.",
category=UserWarning)
warnings.filterwarnings(
"ignore",
message="Graph is not fully connected, spectral embedding" +
" may not work as expected.",
category=UserWarning)
algorithm.fit(X)
t1 = time.time()
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(int)
else:
y_pred = algorithm.predict(X)
plt.subplot(len(datasets), len(clustering_algorithms), plot_num)
if i_dataset == 0:
plt.title(name, size=18)
colors = np.array(list(islice(cycle(['#377eb8', '#ff7f00', '#4daf4a',
'#f781bf', '#a65628', '#984ea3',
'#999999', '#e41a1c', '#dede00']),
int(max(y_pred) + 1))))
plt.scatter(X[:, 0], X[:, 1], s=10, color=colors[y_pred])
plt.xlim(-2.5, 2.5)
plt.ylim(-2.5, 2.5)
plt.xticks(())
plt.yticks(())
plt.text(.99, .01, ('%.2fs' % (t1 - t0)).lstrip('0'),
transform=plt.gca().transAxes, size=15,
horizontalalignment='right')
plot_num += 1
plt.show()
| bsd-3-clause |
asteca/ASteCA | packages/best_fit/DEPRECATED/abcpmc_algor_DEPRECATED.py | 1 | 9822 |
import numpy as np
from scipy.optimize import differential_evolution as DE
import time as t
from .abcpmc import sampler, threshold
from ..synth_clust import synth_cluster
from . import likelihood
from .emcee_algor import varPars, closeSol, discreteParams, convergenceVals
def main(
lkl_method, e_max, err_lst, completeness, max_mag_syn,
fundam_params, obs_clust, theor_tracks, R_V, ext_coefs, st_dist_mass,
N_fc, cmpl_rnd, err_rnd, nwalkers_abc, nsteps_abc, nburn_abc,
priors_abc):
varIdxs, ndim, ranges = varPars(fundam_params)
def dist(synth_clust, obs_clust):
lkl = np.inf
if synth_clust:
lkl = likelihood.main(lkl_method, synth_clust, obs_clust)
return lkl
def postfn(model):
# Re-scale z and M
model_scale = [
model[0] / 100., model[1], model[2], model[3] * 10.,
model[4] * 1000., model[5]]
check_ranges = [
r[0] <= p <= r[1] for p, r in zip(*[model_scale, ranges[varIdxs]])]
synth_clust = []
# If some parameter is outside of the given ranges, don't bother
# obtaining the proper model.
if all(check_ranges):
model_proper = closeSol(fundam_params, varIdxs, model_scale)
# Metallicity and age indexes to identify isochrone.
m_i = fundam_params[0].index(model_proper[0])
a_i = fundam_params[1].index(model_proper[1])
isochrone = theor_tracks[m_i][a_i]
# Generate synthetic cluster.
synth_clust = synth_cluster.main(
e_max, err_lst, completeness, max_mag_syn, st_dist_mass,
isochrone, R_V, ext_coefs, N_fc, cmpl_rnd, err_rnd,
model_proper)
return synth_clust
# TODO add these parameters to the input params file
alpha, init_eps = 95, None
N_conv, tol_conv = 50., 0.01
max_secs = 22. * 60. * 60.
# Break out when AF is low.
# af_low, af_min_steps = 0.001, .1
max_t_walker = 30.
# eps_stuck_perc, N_eps_stuck_max = .005, 100
# Start timing.
elapsed = 0.
available_secs = max(30, max_secs)
start_t = t.time()
abcsampler = sampler.Sampler(
N=nwalkers_abc, Y=obs_clust, postfn=postfn, dist=dist)
# Set proposal
# sampler.particle_proposal_cls = sampler.OLCMParticleProposal
if init_eps is None:
# Estimate initial threshold value using DE.
def lnprob(model):
synth_clust = postfn(model)
return dist(synth_clust, obs_clust)
# Scale parameters bounds.
bounds = [
ranges[0] * 100., ranges[1], ranges[2], ranges[3] / 10.,
ranges[4] / 1000., ranges[5]]
result = DE(lnprob, bounds, maxiter=20)
init_eps = 4. * result.fun
print(" Initial threshold value: {:.2f}".format(init_eps))
# old_eps = init_eps
# TODO pass type of threshold from params file
# eps = threshold.LinearEps(T, 5000, init_eps)
eps = threshold.ConstEps(nsteps_abc, init_eps)
# Stddev values as full range.
std = np.eye(ndim) * (ranges.max(axis=1) - ranges.min(axis=1))
# Means as middle points in ranges.
means = (ranges.max(axis=1) + ranges.min(axis=1)) / 2.
# Scale values.
std[0], means[0] = std[0] * 100, means[0] * 100
std[3], means[3] = std[3] / 10, means[3] / 10
std[4], means[4] = std[4] / 1000., means[4] / 1000.
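# (added note, my reading of the code: the *100, /10 and /1000 factors mirror
# the scaling applied in postfn(), i.e. the sampler works in a space where
# metallicity (z) and the parameters at indices 3 and 4 (presumably distance
# modulus and mass, per the "Re-scale z and M" comment) have comparable
# magnitudes, which keeps a single Gaussian prior/proposal well conditioned)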
# Gaussian prior.
print(means)
print(std)
prior = sampler.GaussianPrior(mu=means, sigma=std)
# # We'll track how the average autocorrelation time estimate changes
# tau_index, autocorr_vals = 0, np.empty(nsteps_abc)
# # This will be useful to testing convergence
# old_tau = np.inf
# Check for convergence every 2% of steps or 100, whichever value
# is lower.
# N_steps_conv = min(int(nsteps_abc * 0.02), 100)
map_sol_old, N_models, prob_mean = [[], np.inf], 0, []
# N_eps_stuck = 0
chains_nruns, maf_steps, map_lkl = [], [], []
milestones = list(range(5, 101, 5))
for pool in abcsampler.sample(prior, eps):
print(
pool.t, pool.eps, pool.ratio, np.min(pool.dists),
np.mean(pool.dists))
chains_nruns.append(pool.thetas)
maf = pool.ratio
maf_steps.append([pool.t, maf])
N_models += nwalkers_abc / maf
# reduce eps value
# old_eps = eps.eps
eps.eps = np.percentile(pool.dists, alpha)
# # Check if threshold is stuck.
# if abs(eps.eps - old_eps) < eps_stuck_perc * eps.eps:
# N_eps_stuck += 1
# else:
# N_eps_stuck = 0
# if N_eps_stuck > N_eps_stuck_max:
# print(" Threshold is stuck (runs={}).".format(pool.t + 1))
# break
# if maf < af_low and pool.t > int(af_min_steps * nsteps_abc):
# print(" AF<{} (runs={})".format(af_low, pool.t + 1))
# break
if t.time() - start_t > (max_t_walker * nwalkers_abc):
print(" Sampler is stuck (runs={})".format(pool.t + 1))
break
elapsed += t.time() - start_t
if elapsed >= available_secs:
print(" Time consumed (runs={})".format(pool.t + 1))
break
start_t = t.time()
# # Only check convergence every 'N_steps_conv' steps
# if (pool.t + 1) % N_steps_conv:
# continue
# # Compute the autocorrelation time so far. Using tol=0 means that
# # we'll always get an estimate even if it isn't trustworthy.
# try:
# tau = autocorr.integrated_time(np.array(chains_nruns), tol=0)
# autocorr_vals[tau_index] = np.nanmean(tau)
# tau_index += 1
# # Check convergence
# converged = np.all(tau * N_conv < (pool.t + 1))
# converged &= np.all(np.abs(old_tau - tau) / tau < tol_conv)
# if converged:
# print(" Convergence achieved (runs={}).".format(pool.t + 1))
# break
# old_tau = tau
# except FloatingPointError:
# pass
# Store MAP solution in this iteration.
prob_mean.append([pool.t, np.mean(pool.dists)])
idx_best = np.argmin(pool.dists)
# Update if a new optimal solution was found.
if pool.dists[idx_best] < map_sol_old[1]:
pars = pool.thetas[idx_best]
# pars = scaleParams(model)
pars = [pars[0] / 100., pars[1], pars[2], pars[3] * 10.,
pars[4] * 1000., pars[5]]
map_sol_old = [
closeSol(fundam_params, varIdxs, pars),
pool.dists[idx_best]]
map_lkl.append([pool.t, map_sol_old[1]])
# Print progress.
percentage_complete = (100. * (pool.t + 1) / nsteps_abc)
if len(milestones) > 0 and percentage_complete >= milestones[0]:
map_sol, logprob = map_sol_old
print("{:>3}% ({:.3f}) LP={:.1f} ({:g}, {:g}, {:.3f}, {:.2f}"
", {:g}, {:.2f})".format(
milestones[0], maf, logprob, *map_sol) +
" [{:.0f} m/s]".format(N_models / elapsed))
milestones = milestones[1:]
runs = pool.t + 1
# Evolution of the mean autocorrelation time.
tau_autocorr = np.array([np.nan] * 10) # autocorr_vals[:tau_index]
tau_index = np.nan
N_steps_conv = runs
# Final MAP fit.
idx_best = np.argmin(pool.dists)
pars = pool.thetas[idx_best]
# pars = scaleParams(model)
pars = [
pars[0] / 100., pars[1], pars[2], pars[3] * 10., pars[4] * 1000.,
pars[5]]
map_sol = closeSol(fundam_params, varIdxs, pars)
map_lkl_final = pool.dists[idx_best]
abcsampler.close()
# Shape: (runs, nwalkers, ndim)
chains_nruns = np.array(chains_nruns)
# De-scale parameters.
chains_nruns[:, :, 0] = chains_nruns[:, :, 0] / 100.
chains_nruns[:, :, 3] = chains_nruns[:, :, 3] * 10.
chains_nruns[:, :, 4] = chains_nruns[:, :, 4] * 1000.
# Burn-in range.
Nb = int(runs * nburn_abc)
# Burn-in. Shape: (ndim, nwalkers, runs)
pars_chains_bi = discreteParams(
fundam_params, varIdxs, chains_nruns[:Nb, :, :]).T
# Change values for the discrete parameters with the closest valid values.
chains_nruns = discreteParams(
fundam_params, varIdxs, chains_nruns[Nb:, :, :])
mcmc_trace = chains_nruns.reshape(-1, ndim).T
# import matplotlib.pyplot as plt
# import corner
# corner.corner(
# mcmc_trace.T, quantiles=[0.16, 0.5, 0.84], show_titles=True)
# # levels=(1 - np.exp(-0.5),))
# plt.savefig("corner.png", dpi=300)
# Convergence parameters.
acorr_t, max_at_c, min_at_c, geweke_z, emcee_acorf, mcmc_ess, minESS,\
mESS, mESS_epsilon = convergenceVals(
'abc', ndim, varIdxs, N_conv, chains_nruns, mcmc_trace)
# Store mean solution.
mean_sol = closeSol(fundam_params, varIdxs, np.mean(mcmc_trace, axis=1))
isoch_fit_params = {
'varIdxs': varIdxs, 'nsteps_abc': runs, 'mean_sol': mean_sol,
'nburn_abc': Nb, 'map_sol': map_sol, 'map_lkl': map_lkl,
'map_lkl_final': map_lkl_final, 'prob_mean': prob_mean,
'mcmc_elapsed': elapsed, 'mcmc_trace': mcmc_trace,
'pars_chains_bi': pars_chains_bi, 'pars_chains': chains_nruns.T,
'maf_steps': maf_steps, 'autocorr_time': acorr_t,
'max_at_c': max_at_c, 'min_at_c': min_at_c,
'minESS': minESS, 'mESS': mESS, 'mESS_epsilon': mESS_epsilon,
'emcee_acorf': emcee_acorf, 'geweke_z': geweke_z,
'mcmc_ess': mcmc_ess,
'N_steps_conv': N_steps_conv, 'N_conv': N_conv, 'tol_conv': tol_conv,
'tau_index': tau_index, 'tau_autocorr': tau_autocorr
}
return isoch_fit_params
| gpl-3.0 |
sadimanna/computer_vision | clustering/kmeansppclustering_with_gap_statistic.py | 1 | 2599 | #K-Means++ Clustering with Gap Statistic to determine the optimal number of clusters
import sys
import numpy as np
import scipy.io as sio
#import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.svm import SVC
filename = sys.argv[1]
datafile = sio.loadmat(filename)
data = datafile['bow']
sizedata=[len(data), len(data[0])]
disp = []
optimal_ks = []
#Determining the optimal number of k with gap statistic method
def gap_statistic(data):
sizedata = [len(data),len(data[0])]
SD = []
gap = []
for knum in xrange(1,20):
#I assumed that the number of clusters in my data won't be more than 20, this can be changed accordingly
print knum
#Clustering original Data
kmeanspp = KMeans(n_clusters=knum,init = 'k-means++',max_iter = 100,n_jobs = 1)
kmeanspp.fit(data)
dispersion = kmeanspp.inertia_
#Clustering Reference Data
nrefs = 10
refDisp = np.zeros(nrefs)
for nref in xrange(nrefs):
refdata = np.random.random_sample(tuple(sizedata))
refkmeans = KMeans(n_clusters=knum,init='k-means++',max_iter=100,n_jobs=1)
refkmeans.fit(refdata)
refdisp = refkmeans.inertia_
refDisp[nref]=np.log(refdisp)
mean_log_refdisp = np.mean(refDisp)
gap.append(mean_log_refdisp-np.log(dispersion))
sd = (sum([(r-m)**2 for r,m in zip(refDisp,[mean_log_refdisp]*nrefs)])/nrefs)**0.5
SD.append(sd)
SD = [sd*((1+(1.0/nrefs))**0.5) for sd in SD]
opt_k = None
diff = []
for i in xrange(len(gap)-1):
diff = (SD[i+1]-(gap[i+1]-gap[i]))
if diff>0:
opt_k = i+1
break
if opt_k < 20:
#print opt_k
return opt_k
else:
return 20
#Returning 20 if opt_k is more than 20 in my case, as I wanted not to search more than 20.
# Not required if range is larger.
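# Rough sketch of the quantity computed above (my reading of the code, not
# part of the original file): for each k,
#   Gap(k) = mean_b[ log(W_kb_ref) ] - log(W_k_data)
# where W_k is the within-cluster dispersion (KMeans inertia_) and each
# reference set is a uniform random sample of the same shape as the data.
# The chosen k is the smallest one satisfying Gap(k) >= Gap(k+1) - s_{k+1},
# with s_k = sd_k * sqrt(1 + 1/B) over B reference sets (Tibshirani et al.).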
ntrials = 50
for ntrial in xrange(ntrials):
print 'ntrial: ',ntrial
optimal_ks.append(gap_statistic(data))
#For plotting the gap statistic measure
#plt.plot(np.linspace(10,19,10,True),gap)
#plt.show()
unique_opt_k = list(set(optimal_ks))
k_count = {}
count_opt_k = 0
second_opt_k = 0
opt_k = 0
for u_o_k in unique_opt_k:
count = optimal_ks.count(u_o_k)
k_count[u_o_k]=count
if count>count_opt_k:
count_opt_k = count
opt_k = u_o_k
elif count==count_opt_k:
second_opt_k = u_o_k
print opt_k
print k_count
#Clustering with the optimal number of clusters k
kmeanspp = KMeans(n_clusters = opt_k,init='k-means++',max_iter=100,n_jobs=1)
kmeanspp.fit(data)
centers = kmeanspp.cluster_centers_
clusterlabels = kmeanspp.labels_
print clusterlabels
mdict = {}
mdict['clusterlabels'] = clusterlabels
sio.savemat('clusterlabels.mat',mdict,format = '4',oned_as = 'column')
print 'dan dana dan done...'
| gpl-3.0 |
eusoubrasileiro/fatiando | fatiando/vis/mpl.py | 2 | 35331 | """
Wrappers for :mod:`matplotlib` functions to facilitate plotting grids,
2D objects, etc.
This module loads all functions from :mod:`matplotlib.pyplot`, adds new
functions and overwrites some others (like :func:`~fatiando.vis.mpl.contour`,
:func:`~fatiando.vis.mpl.pcolor`, etc).
**Grids**
* :func:`~fatiando.vis.mpl.contour`
* :func:`~fatiando.vis.mpl.contourf`
* :func:`~fatiando.vis.mpl.pcolor`
Grids are automatically reshaped and interpolated if desired or necessary.
**2D objects**
* :func:`~fatiando.vis.mpl.points`
* :func:`~fatiando.vis.mpl.paths`
* :func:`~fatiando.vis.mpl.square`
* :func:`~fatiando.vis.mpl.squaremesh`
* :func:`~fatiando.vis.mpl.polygon`
* :func:`~fatiando.vis.mpl.layers`
* :func:`~fatiando.vis.mpl.seismic_image`
* :func:`~fatiando.vis.mpl.seismic_wiggle`
**Interactive**
* :func:`~fatiando.vis.mpl.draw_polygon`
* :func:`~fatiando.vis.mpl.draw_layers`
* :func:`~fatiando.vis.mpl.pick_points`
**Basemap (map projections)**
* :func:`~fatiando.vis.mpl.basemap`
* :func:`~fatiando.vis.mpl.draw_geolines`
* :func:`~fatiando.vis.mpl.draw_countries`
* :func:`~fatiando.vis.mpl.draw_coastlines`
**Auxiliary**
* :func:`~fatiando.vis.mpl.set_area`
* :func:`~fatiando.vis.mpl.m2km`
----
"""
import numpy
from matplotlib import pyplot, widgets
# Quick hack so that the docs can build using the mocks for readthedocs
# Ideal would be to log an error message saying that functions from pyplot
# were not imported
try:
from matplotlib.pyplot import *
except:
pass
import fatiando.gridder
# Dummy variable to lazily import the basemap toolkit (slow)
Basemap = None
def draw_polygon(area, axes, style='-', marker='o', color='k', width=2,
alpha=0.5, xy2ne=False):
"""
Draw a polygon by clicking with the mouse.
INSTRUCTIONS:
* Left click to pick the edges of the polygon;
* Draw edges CLOCKWISE;
* Press 'e' to erase the last edge;
* Right click to close the polygon;
* Close the figure window to finish;
Parameters:
* area : list = [x1, x2, y1, y2]
Borders of the area containing the polygon
* axes : matplotlib Axes
The figure to use for drawing the polygon.
To get an Axes instance, just do::
from matplotlib import pyplot
axes = pyplot.figure().add_subplot(1,1,1)
You can plot things to ``axes`` before calling this function so that
they'll appear on the background.
* style : str
Line style (as in matplotlib.pyplot.plot)
* marker : str
Style of the point markers (as in matplotlib.pyplot.plot)
* color : str
Line color (as in matplotlib.pyplot.plot)
* width : float
The line width (as in matplotlib.pyplot.plot)
* alpha : float
Transparency of the fill of the polygon. 0 for transparent, 1 for
opaque (fills the polygon once done drawing)
* xy2ne : True or False
If True, will exchange the x and y axis so that x points north.
Use this when drawing on a map viewed from above. If the y-axis of the
plot is supposed to be z (depth), then use ``xy2ne=False``.
Returns:
* edges : list of lists
List of ``[x, y]`` pairs with the edges of the polygon
"""
axes.set_title("Click to draw polygon. Right click when done.")
if xy2ne:
axes.set_xlim(area[2], area[3])
axes.set_ylim(area[0], area[1])
else:
axes.set_xlim(area[0], area[1])
axes.set_ylim(area[2], area[3])
# start with an empty line
line, = axes.plot([], [], marker=marker, linestyle=style, color=color,
linewidth=width)
tmpline, = axes.plot([], [], marker=marker, linestyle=style, color=color,
linewidth=width)
draw = axes.figure.canvas.draw
x = []
y = []
plotx = []
ploty = []
# Hack because Python 2 doesn't like nonlocal variables that change value.
# Lists it doesn't mind.
picking = [True]
def draw_guide(px, py):
if len(x) != 0:
tmpline.set_data([x[-1], px], [y[-1], py])
def move(event):
if event.inaxes != axes:
return 0
if picking[0]:
draw_guide(event.xdata, event.ydata)
draw()
def pick(event):
if event.inaxes != axes:
return 0
if event.button == 1 and picking[0]:
x.append(event.xdata)
y.append(event.ydata)
plotx.append(event.xdata)
ploty.append(event.ydata)
if (event.button == 3 or event.button == 2) and picking[0]:
if len(x) < 3:
axes.set_title("Need at least 3 points to make a polygon")
else:
picking[0] = False
axes.set_title("Done! You can close the window now.")
plotx.append(x[0])
ploty.append(y[0])
tmpline.set_data([], [])
axes.fill(plotx, ploty, color=color, alpha=alpha)
line.set_data(plotx, ploty)
draw()
def erase(event):
if event.key == 'e' and picking[0]:
x.pop()
y.pop()
plotx.pop()
ploty.pop()
line.set_data(plotx, ploty)
draw_guide(event.xdata, event.ydata)
draw()
line.figure.canvas.mpl_connect('button_press_event', pick)
line.figure.canvas.mpl_connect('key_press_event', erase)
line.figure.canvas.mpl_connect('motion_notify_event', move)
pyplot.show()
if len(x) < 3:
raise ValueError("Need at least 3 points to make a polygon")
if xy2ne:
verts = numpy.transpose([y, x])
else:
verts = numpy.transpose([x, y])
return verts
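# Hypothetical usage sketch for draw_polygon (not part of the original module;
# the area values below are made up for illustration):
#
#     from matplotlib import pyplot
#     axes = pyplot.figure().add_subplot(1, 1, 1)
#     verts = draw_polygon([0, 1000, 0, 1000], axes, xy2ne=True)
#     print(verts)  # array of [x, y] pairs picked with the mouse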
def pick_points(area, axes, marker='o', color='k', size=8, xy2ne=False):
"""
Get the coordinates of points by clicking with the mouse.
INSTRUCTIONS:
* Left click to pick the points;
* Press 'e' to erase the last point picked;
* Close the figure window to finish;
Parameters:
* area : list = [x1, x2, y1, y2]
Borders of the area containing the points
* axes : matplotlib Axes
The figure to use for drawing the polygon.
To get an Axes instance, just do::
from matplotlib import pyplot
axes = pyplot.figure().add_subplot(1,1,1)
You can plot things to ``axes`` before calling this function so that
they'll appear on the background.
* marker : str
Style of the point markers (as in matplotlib.pyplot.plot)
* color : str
Line color (as in matplotlib.pyplot.plot)
* size : float
Marker size (as in matplotlib.pyplot.plot)
* xy2ne : True or False
If True, will exchange the x and y axis so that x points north.
Use this when drawing on a map viewed from above. If the y-axis of the
plot is supposed to be z (depth), then use ``xy2ne=False``.
Returns:
* points : list of lists
List of ``[x, y]`` coordinates of the points
"""
axes.set_title("Click to pick points. Close window when done.")
if xy2ne:
axes.set_xlim(area[2], area[3])
axes.set_ylim(area[0], area[1])
else:
axes.set_xlim(area[0], area[1])
axes.set_ylim(area[2], area[3])
# start with an empty set
line, = axes.plot([], [], marker=marker, color=color, markersize=size)
line.figure.canvas.draw()
x = []
y = []
plotx = []
ploty = []
# Hack because Python 2 doesn't like nonlocal variables that change value.
# Lists it doesn't mind.
picking = [True]
def pick(event):
if event.inaxes != axes:
return 0
if event.button == 1 and picking[0]:
x.append(event.xdata)
y.append(event.ydata)
plotx.append(event.xdata)
ploty.append(event.ydata)
line.set_color(color)
line.set_marker(marker)
line.set_markersize(size)
line.set_linestyle('')
line.set_data(plotx, ploty)
line.figure.canvas.draw()
def erase(event):
if event.key == 'e' and picking[0]:
x.pop()
y.pop()
plotx.pop()
ploty.pop()
line.set_data(plotx, ploty)
line.figure.canvas.draw()
line.figure.canvas.mpl_connect('button_press_event', pick)
line.figure.canvas.mpl_connect('key_press_event', erase)
pyplot.show()
if xy2ne:
points = numpy.transpose([y, x])
else:
points = numpy.transpose([x, y])
return points
def draw_layers(area, axes, style='-', marker='o', color='k', width=2):
"""
Draw series of horizontal layers by clicking with the mouse.
The y-axis is assumed to be depth, the x-axis is the physical property of
each layer.
INSTRUCTIONS:
* Click to make a new layer;
* Press 'e' to erase the last layer;
* Close the figure window to finish;
Parameters:
* area : list = [x1, x2, y1, y2]
Borders of the area containing the polygon
* axes : matplotlib Axes
The figure to use for drawing the polygon.
To get an Axes instance, just do::
from matplotlib import pyplot
axes = pyplot.figure().add_subplot(1,1,1)
You can plot things to ``axes`` before calling this function so that
they'll appear on the background.
* style : str
Line style (as in matplotlib.pyplot.plot)
* marker : str
Style of the point markers (as in matplotlib.pyplot.plot)
* color : str
Line color (as in matplotlib.pyplot.plot)
* width : float
The line width (as in matplotlib.pyplot.plot)
Returns:
* layers : list = [thickness, values]
* thickness : list
The thickness of each layer, in order of increasing depth
* values : list
The physical property value of each layer, in the same order
"""
axes.set_title("Click to set a layer. Close the window when done.")
axes.grid()
vmin, vmax, zmin, zmax = area
axes.set_xlim(vmin, vmax)
axes.set_ylim(zmax, zmin)
# start with an empty line
line, = axes.plot([], [], marker=marker, linestyle=style,
color=color, linewidth=width)
midv = 0.5 * (vmax + vmin)
# this is the line that moves around with the mouse
tmpline, = axes.plot([midv], [zmin], marker=marker, linestyle='--',
color=color, linewidth=width)
# Make a proxy for drawing
draw = axes.figure.canvas.draw
depths = [zmin]
values = []
plotv = []
plotz = []
tmpz = [zmin]
# Hack because Python 2 doesn't like nonlocal variables that change value.
# Lists it doesn't mind.
picking = [True]
def draw_guide(v, z):
if len(values) == 0:
tmpline.set_data([v, v], [tmpz[0], z])
else:
if z > tmpz[0]:
tmpline.set_data([values[-1], v, v], [tmpz[0], tmpz[0], z])
else:
tmpline.set_data([values[-1], v], [tmpz[0], tmpz[0]])
def move(event):
if event.inaxes != axes:
return 0
v, z = event.xdata, event.ydata
if picking[0]:
draw_guide(v, z)
draw()
def pick(event):
if event.inaxes != axes:
return 0
if event.button == 1 and picking[0]:
v, z = event.xdata, event.ydata
if z > tmpz[0]:
depths.append(z)
values.append(v)
plotz.extend([tmpz[0], z])
plotv.extend([v, v])
tmpz[0] = z
line.set_data(plotv, plotz)
draw()
def erase(event):
if picking[0] and len(values) > 0 and event.key == 'e':
depths.pop()
values.pop()
tmpz[0] = depths[-1]
plotv.pop()
plotv.pop()
plotz.pop()
plotz.pop()
line.set_data(plotv, plotz)
draw_guide(event.xdata, event.ydata)
draw()
line.figure.canvas.mpl_connect('button_press_event', pick)
line.figure.canvas.mpl_connect('key_press_event', erase)
line.figure.canvas.mpl_connect('motion_notify_event', move)
pyplot.show()
thickness = [depths[i + 1] - depths[i] for i in xrange(len(depths) - 1)]
return thickness, values
def draw_geolines(area, dlon, dlat, basemap, linewidth=1):
"""
Draw the parallels and meridians on a basemap plot.
Parameters:
* area : list
``[west, east, south, north]``, i.e., the area where the lines will
be plotted
* dlon, dlat : float
The spacing between the lines in the longitude and latitude directions,
respectively (in decimal degrees)
* basemap : mpl_toolkits.basemap.Basemap
The basemap used for plotting (see :func:`~fatiando.vis.mpl.basemap`)
* linewidth : float
The width of the lines
"""
west, east, south, north = area
meridians = basemap.drawmeridians(numpy.arange(west, east, dlon),
labels=[0, 0, 0, 1], linewidth=linewidth)
parallels = basemap.drawparallels(numpy.arange(south, north, dlat),
labels=[1, 0, 0, 0], linewidth=linewidth)
def draw_countries(basemap, linewidth=1, style='dashed'):
"""
Draw the country borders using the given basemap.
Parameters:
* basemap : mpl_toolkits.basemap.Basemap
The basemap used for plotting (see :func:`~fatiando.vis.mpl.basemap`)
* linewidth : float
The width of the lines
* style : str
The style of the lines. Can be: 'solid', 'dashed', 'dashdot' or
'dotted'
"""
lines = basemap.drawcountries(linewidth=linewidth)
lines.set_linestyles(style)
def draw_coastlines(basemap, linewidth=1, style='solid'):
"""
Draw the coastlines using the given basemap.
Parameters:
* basemap : mpl_toolkits.basemap.Basemap
The basemap used for plotting (see :func:`~fatiando.vis.mpl.basemap`)
* linewidth : float
The width of the lines
* style : str
The style of the lines. Can be: 'solid', 'dashed', 'dashdot' or
'dotted'
"""
lines = basemap.drawcoastlines(linewidth=linewidth)
lines.set_linestyles(style)
def basemap(area, projection, resolution='c'):
"""
Make a basemap to use when plotting with map projections.
Uses the matplotlib basemap toolkit.
Parameters:
* area : list
``[west, east, south, north]``, i.e., the area of the data that is
going to be plotted
* projection : str
The name of the projection you want to use. Choose from:
* 'ortho': Orthographic
* 'geos': Geostationary
* 'robin': Robinson
* 'cass': Cassini
* 'merc': Mercator
* 'poly': Polyconic
* 'lcc': Lambert Conformal
* 'stere': Stereographic
* resolution : str
The resolution for the coastlines. Can be 'c' for crude, 'l' for low,
'i' for intermediate, 'h' for high
Returns:
* basemap : mpl_toolkits.basemap.Basemap
The basemap
"""
if projection not in ['ortho', 'aeqd', 'geos', 'robin', 'cass', 'merc',
'poly', 'lcc', 'stere']:
raise ValueError("Unsuported projection '%s'" % (projection))
global Basemap
if Basemap is None:
try:
from mpl_toolkits.basemap import Basemap
except ImportError:
raise
west, east, south, north = area
lon_0 = 0.5 * (east + west)
lat_0 = 0.5 * (north + south)
if projection == 'ortho':
bm = Basemap(projection=projection, lon_0=lon_0, lat_0=lat_0,
resolution=resolution)
elif projection == 'geos' or projection == 'robin':
bm = Basemap(projection=projection, lon_0=lon_0, resolution=resolution)
elif (projection == 'cass' or
projection == 'poly'):
bm = Basemap(projection=projection, llcrnrlon=west, urcrnrlon=east,
llcrnrlat=south, urcrnrlat=north, lat_0=lat_0,
lon_0=lon_0, resolution=resolution)
elif projection == 'merc':
bm = Basemap(projection=projection, llcrnrlon=west, urcrnrlon=east,
llcrnrlat=south, urcrnrlat=north, lat_ts=lat_0,
resolution=resolution)
elif projection == 'lcc':
bm = Basemap(projection=projection, llcrnrlon=west, urcrnrlon=east,
llcrnrlat=south, urcrnrlat=north, lat_0=lat_0,
lon_0=lon_0, rsphere=(6378137.00, 6356752.3142),
lat_1=lat_0, resolution=resolution)
elif projection == 'stere':
bm = Basemap(projection=projection, llcrnrlon=west, urcrnrlon=east,
llcrnrlat=south, urcrnrlat=north, lat_0=lat_0,
lon_0=lon_0, lat_ts=lat_0, resolution=resolution)
return bm
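# Hypothetical usage sketch (not part of the original module; the area and
# grid spacing values are made up for illustration):
#
#     area = [-60, -30, -40, -10]            # west, east, south, north
#     bm = basemap(area, 'merc', resolution='l')
#     draw_coastlines(bm)
#     draw_geolines(area, 10, 10, bm)
#     # the grid plotting functions below accept the same object through their
#     # `basemap` keyword, e.g. contourf(x, y, v, shape, 15, basemap=bm)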
def m2km(axis=None):
"""
Convert the x and y tick labels from meters to kilometers.
Parameters:
* axis : matplotlib axis instance
The plot.
.. tip:: Use ``fatiando.vis.gca()`` to get the current axis. Or the value
returned by ``fatiando.vis.subplot`` or ``matplotlib.pyplot.subplot``.
"""
if axis is None:
axis = pyplot.gca()
axis.set_xticklabels(['%g' % (0.001 * l) for l in axis.get_xticks()])
axis.set_yticklabels(['%g' % (0.001 * l) for l in axis.get_yticks()])
def set_area(area):
"""
Set the area of a Matplotlib plot using xlim and ylim.
Parameters:
* area : list = [x1, x2, y1, y2]
Coordinates of the top right and bottom left corners of the area
"""
x1, x2, y1, y2 = area
pyplot.xlim(x1, x2)
pyplot.ylim(y1, y2)
def points(pts, style='.k', size=10, label=None, xy2ne=False):
"""
Plot a list of points.
Parameters:
* pts : list of lists
List of [x, y] pairs with the coordinates of the points
* style : str
String with the color and line style (as in matplotlib.pyplot.plot)
* size : int
Size of the plotted points
* label : str
If not None, then the string that will show in the legend
* xy2ne : True or False
If True, will exchange the x and y axis so that the x coordinates of
the polygon are north. Use this when drawing on a map viewed from
above. If the y-axis of the plot is supposed to be z (depth), then use
``xy2ne=False``.
Returns:
* axes : ``matplotlib.axes``
The axes element of the plot
"""
x, y = numpy.array(pts).T
if xy2ne:
x, y = y, x
kwargs = {}
if label is not None:
kwargs['label'] = label
return pyplot.plot(x, y, style, markersize=size, **kwargs)
def paths(pts1, pts2, style='-k', linewidth=1, label=None):
"""
Plot paths between the two sets of points.
Parameters:
* pts1 : list of lists
List of (x, y) pairs with the coordinates of the points
* pts2 : list of lists
List of (x, y) pairs with the coordinates of the points
* style : str
String with the color and line style (as in matplotlib.pyplot.plot)
* linewidth : float
The width of the lines representing the paths
* label : str
If not None, then the string that will show in the legend
"""
kwargs = {'linewidth': linewidth}
if label is not None:
kwargs['label'] = label
for p1, p2 in zip(pts1, pts2):
pyplot.plot([p1[0], p2[0]], [p1[1], p2[1]], style, **kwargs)
def layers(thickness, values, style='-k', z0=0., linewidth=1, label=None,
**kwargs):
"""
Plot a series of layers and values associated to each layer.
Parameters:
* thickness : list
The thickness of each layer in order of increasing depth
* values : list
The value associated with each layer in order of increasing
depth
* style : str
String with the color and line style (as in matplotlib.pyplot.plot)
* z0 : float
The depth of the top of the first layer
* linewidth : float
Line width
* label : str
label associated with the square.
Returns:
* axes : ``matplotlib.axes``
The axes element of the plot
"""
if len(thickness) != len(values):
raise ValueError("thickness and values must have same length")
nlayers = len(thickness)
interfaces = [z0 + sum(thickness[:i]) for i in xrange(nlayers + 1)]
ys = [interfaces[0]]
for y in interfaces[1:-1]:
ys.append(y)
ys.append(y)
ys.append(interfaces[-1])
xs = []
for x in values:
xs.append(x)
xs.append(x)
kwargs['linewidth'] = linewidth
if label is not None:
kwargs['label'] = label
plot, = pyplot.plot(xs, ys, style, **kwargs)
return plot
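# Hypothetical usage sketch for layers (not part of the original module; the
# values are made up for illustration):
#
#     thickness = [100, 200, 150]       # in order of increasing depth
#     velocities = [1500, 1800, 2500]   # value associated with each layer
#     layers(thickness, velocities, style='-b', z0=0.)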
def square(area, style='-k', linewidth=1, fill=None, alpha=1., label=None,
xy2ne=False):
"""
Plot a square.
Parameters:
* area : list = [x1, x2, y1, y2]
Borders of the square
* style : str
String with the color and line style (as in matplotlib.pyplot.plot)
* linewidth : float
Line width
* fill : str
A color string used to fill the square. If None, the square is not
filled
* alpha : float
Transparency of the fill (1 >= alpha >= 0). 0 is transparent and 1 is
opaque
* label : str
label associated with the square.
* xy2ne : True or False
If True, will exchange the x and y axis so that the x coordinates of
the polygon are north. Use this when drawing on a map viewed from
above. If the y-axis of the plot is supposed to be z (depth), then use
``xy2ne=False``.
Returns:
* axes : ``matplotlib.axes``
The axes element of the plot
"""
x1, x2, y1, y2 = area
if xy2ne:
x1, x2, y1, y2 = y1, y2, x1, x2
xs = [x1, x1, x2, x2, x1]
ys = [y1, y2, y2, y1, y1]
kwargs = {'linewidth': linewidth}
if label is not None:
kwargs['label'] = label
plot, = pyplot.plot(xs, ys, style, **kwargs)
if fill is not None:
pyplot.fill(xs, ys, color=fill, alpha=alpha)
return plot
def squaremesh(mesh, prop, cmap=pyplot.cm.jet, vmin=None, vmax=None):
"""
Make a pseudo-color plot of a mesh of squares
Parameters:
* mesh : :class:`fatiando.mesher.SquareMesh` or compatible
The mesh (a compatible mesh must implement the methods ``get_xs`` and
``get_ys``)
* prop : str
The physical property of the squares to use as the color scale.
* cmap : colormap
Color map to be used. (see pyplot.cm module)
* vmin, vmax : float
Saturation values of the colorbar.
Returns:
* axes : ``matplotlib.axes``
The axes element of the plot
"""
if prop not in mesh.props:
raise ValueError("Can't plot because 'mesh' doesn't have property '%s'"
% (prop))
xs = mesh.get_xs()
ys = mesh.get_ys()
X, Y = numpy.meshgrid(xs, ys)
V = numpy.reshape(mesh.props[prop], mesh.shape)
plot = pyplot.pcolor(X, Y, V, cmap=cmap, vmin=vmin, vmax=vmax, picker=True)
pyplot.xlim(xs.min(), xs.max())
pyplot.ylim(ys.min(), ys.max())
return plot
def polygon(polygon, style='-k', linewidth=1, fill=None, alpha=1., label=None,
xy2ne=False, linealpha=1.):
"""
Plot a polygon.
Parameters:
* polygon : :class:`fatiando.mesher.Polygon`
The polygon
* style : str
Color and line style string (as in matplotlib.pyplot.plot)
* linewidth : float
Line width
* fill : str
A color string used to fill the polygon. If None, the polygon is not
filled
* alpha : float
Transparency of the fill (1 >= alpha >= 0). 0 is transparent and 1 is
opaque
* linealpha : float
Transparency of the line (1 >= alpha >= 0). 0 is transparent and 1 is
opaque
* label : str
String with the label identifying the polygon in the legend
* xy2ne : True or False
If True, will exchange the x and y axis so that the x coordinates of
the polygon are north. Use this when drawing on a map viewed from
above. If the y-axis of the plot is supposed to be z (depth), then use
``xy2ne=False``.
Returns:
* lines : matplotlib Line object
Line corresponding to the polygon plotted
"""
if xy2ne:
tmpx = [y for y in polygon.y]
tmpx.append(polygon.y[0])
tmpy = [x for x in polygon.x]
tmpy.append(polygon.x[0])
else:
tmpx = [x for x in polygon.x]
tmpx.append(polygon.x[0])
tmpy = [y for y in polygon.y]
tmpy.append(polygon.y[0])
kwargs = {'linewidth': linewidth, 'alpha': linealpha}
if label is not None:
kwargs['label'] = label
line, = pyplot.plot(tmpx, tmpy, style, **kwargs)
if fill is not None:
pyplot.fill(tmpx, tmpy, color=fill, alpha=alpha)
return line
def contour(x, y, v, shape, levels, interp=False, extrapolate=False, color='k',
label=None, clabel=True, style='solid', linewidth=1.0,
basemap=None):
"""
Make a contour plot of the data.
Parameters:
* x, y : array
Arrays with the x and y coordinates of the grid points. If the data is
on a regular grid, then assume x varies first (ie, inner loop), then y.
* v : array
The scalar value assigned to the grid points.
* shape : tuple = (ny, nx)
Shape of the regular grid.
If interpolation is not False, then will use *shape* to grid the data.
* levels : int or list
Number of contours to use or a list with the contour values.
* interp : True or False
Whether or not to interpolate before trying to plot. If the data is not on
a regular grid, set to True!
* extrapolate : True or False
Whether or not to extrapolate the data when interp=True
* color : str
Color of the contour lines.
* label : str
String with the label of the contour that would show in a legend.
* clabel : True or False
Whether or not to print the numerical value of the contour lines
* style : str
The style of the contour lines. Can be ``'dashed'``, ``'solid'`` or
``'mixed'`` (solid lines for positive contours and dashed for negative)
* linewidth : float
Width of the contour lines
* basemap : mpl_toolkits.basemap.Basemap
If not None, will use this basemap for plotting with a map projection
(see :func:`~fatiando.vis.mpl.basemap` for creating basemaps)
Returns:
* levels : list
List with the values of the contour levels
"""
if style not in ['solid', 'dashed', 'mixed']:
raise ValueError("Invalid contour style %s" % (style))
if not (x.shape == y.shape == v.shape):
raise ValueError("Input arrays x, y, and v must have same shape!")
if interp:
x, y, v = fatiando.gridder.interp(x, y, v, shape,
extrapolate=extrapolate)
X = numpy.reshape(x, shape)
Y = numpy.reshape(y, shape)
V = numpy.reshape(v, shape)
kwargs = dict(colors=color, picker=True)
if basemap is None:
ct_data = pyplot.contour(X, Y, V, levels, **kwargs)
pyplot.xlim(X.min(), X.max())
pyplot.ylim(Y.min(), Y.max())
else:
lon, lat = basemap(X, Y)
ct_data = basemap.contour(lon, lat, V, levels, **kwargs)
if clabel:
ct_data.clabel(fmt='%g')
if label is not None:
ct_data.collections[0].set_label(label)
if style != 'mixed':
for c in ct_data.collections:
c.set_linestyle(style)
for c in ct_data.collections:
c.set_linewidth(linewidth)
return ct_data.levels
def contourf(x, y, v, shape, levels, interp=False, extrapolate=False,
vmin=None, vmax=None, cmap=pyplot.cm.jet, basemap=None):
"""
Make a filled contour plot of the data.
Parameters:
* x, y : array
Arrays with the x and y coordinates of the grid points. If the data is
on a regular grid, then assume x varies first (ie, inner loop), then y.
* v : array
The scalar value assigned to the grid points.
* shape : tuple = (ny, nx)
Shape of the regular grid.
If interpolation is not False, then will use *shape* to grid the data.
* levels : int or list
Number of contours to use or a list with the contour values.
* interp : True or False
Whether or not to interpolate before trying to plot. If the data is not on
a regular grid, set to True!
* extrapolate : True or False
Whether or not to extrapolate the data when interp=True
* vmin, vmax
Saturation values of the colorbar. If provided, will overwrite what is
set by *levels*.
* cmap : colormap
Color map to be used. (see pyplot.cm module)
* basemap : mpl_toolkits.basemap.Basemap
If not None, will use this basemap for plotting with a map projection
(see :func:`~fatiando.vis.mpl.basemap` for creating basemaps)
Returns:
* levels : list
List with the values of the contour levels
"""
if not (x.shape == y.shape == v.shape):
raise ValueError("Input arrays x, y, and v must have same shape!")
if interp:
x, y, v = fatiando.gridder.interp(x, y, v, shape,
extrapolate=extrapolate)
X = numpy.reshape(x, shape)
Y = numpy.reshape(y, shape)
V = numpy.reshape(v, shape)
kwargs = dict(vmin=vmin, vmax=vmax, cmap=cmap, picker=True)
if basemap is None:
ct_data = pyplot.contourf(X, Y, V, levels, **kwargs)
pyplot.xlim(X.min(), X.max())
pyplot.ylim(Y.min(), Y.max())
else:
lon, lat = basemap(X, Y)
ct_data = basemap.contourf(lon, lat, V, levels, **kwargs)
return ct_data.levels
def pcolor(x, y, v, shape, interp=False, extrapolate=False, cmap=pyplot.cm.jet,
vmin=None, vmax=None, basemap=None):
"""
Make a pseudo-color plot of the data.
Parameters:
* x, y : array
Arrays with the x and y coordinates of the grid points. If the data is
on a regular grid, then assume x varies first (ie, inner loop), then y.
* v : array
The scalar value assigned to the grid points.
* shape : tuple = (ny, nx)
Shape of the regular grid.
If interpolation is not False, then will use *shape* to grid the data.
* interp : True or False
Whether or not to interpolate before trying to plot. If the data is not on
a regular grid, set to True!
* extrapolate : True or False
Whether or not to extrapolate the data when interp=True
* cmap : colormap
Color map to be used. (see pyplot.cm module)
* vmin, vmax
Saturation values of the colorbar.
* basemap : mpl_toolkits.basemap.Basemap
If not None, will use this basemap for plotting with a map projection
(see :func:`~fatiando.vis.mpl.basemap` for creating basemaps)
Returns:
* axes : ``matplotlib.axes``
The axes element of the plot
"""
if not (x.shape == y.shape == v.shape):
raise ValueError("Input arrays x, y, and v must have same shape!")
if vmin is None:
vmin = v.min()
if vmax is None:
vmax = v.max()
if interp:
x, y, v = fatiando.gridder.interp(x, y, v, shape,
extrapolate=extrapolate)
X = numpy.reshape(x, shape)
Y = numpy.reshape(y, shape)
V = numpy.reshape(v, shape)
if basemap is None:
plot = pyplot.pcolor(X, Y, V, cmap=cmap, vmin=vmin, vmax=vmax,
picker=True)
pyplot.xlim(X.min(), X.max())
pyplot.ylim(Y.min(), Y.max())
else:
lon, lat = basemap(X, Y)
plot = basemap.pcolor(lon, lat, V, cmap=cmap, vmin=vmin, vmax=vmax,
picker=True)
return plot
def seismic_wiggle(section, dt=0.004, ranges=None, scale=1.,
color='k', normalize=False):
"""
Plot a seismic section (numpy 2D array matrix) as wiggles.
Parameters:
* section : 2D array
matrix of traces (first dimension time, second dimension traces)
* dt : float
sample rate in seconds (default 4 ms)
* ranges : (x1, x2)
min and max horizontal values (default trace number)
* scale : float
scale factor multiplied by the section values before plotting
* color : str
Color used to fill the positive lobes of the wiggles.
* normalize : True or False
True to normalize all traces in the section using the global max/min;
data will then be in the range (-0.5, 0.5), zero centered
.. warning::
Slow for more than 200 traces; in that case, decimate your
data or use ``seismic_image``.
"""
npts, ntraces = section.shape # time/traces
if ntraces < 1:
raise IndexError("Nothing to plot")
if npts < 1:
raise IndexError("Nothing to plot")
t = numpy.linspace(0, dt*npts, npts)
amp = 1. # normalization factor
gmin = 0. # global minimum
toffset = 0. # offset in time to make 0 centered
if normalize:
gmax = section.max()
gmin = section.min()
amp = (gmax-gmin)
toffset = 0.5
pyplot.ylim(max(t), 0)
if ranges is None:
ranges = (0, ntraces)
x0, x1 = ranges
# horizontal increment
dx = float((x1-x0)/ntraces)
pyplot.xlim(x0, x1)
for i, trace in enumerate(section.transpose()):
tr = (((trace-gmin)/amp)-toffset)*scale*dx
x = x0+i*dx # x positon for this trace
pyplot.plot(x+tr, t, 'k')
pyplot.fill_betweenx(t, x+tr, x, tr > 0, color=color)
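# Hypothetical usage sketch (not part of the original module; the synthetic
# section is made up for illustration):
#
#     section = numpy.random.randn(500, 50)   # 500 time samples, 50 traces
#     pyplot.figure()
#     seismic_wiggle(section, dt=0.004, scale=2., normalize=True)
#     pyplot.show()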
def seismic_image(section, dt=0.004, ranges=None, cmap=pyplot.cm.gray,
aspect=None, vmin=None, vmax=None):
"""
Plot a seismic section (numpy 2D array matrix) as an image.
Parameters:
* section : 2D array
matrix of traces (first dimension time, second dimension traces)
* dt : float
sample rate in seconds (default 4 ms)
* ranges : (x1, x2)
min and max horizontal values (default trace number)
* cmap : colormap
color map to be used. (see pyplot.cm module)
* aspect : float
matplotlib imshow aspect parameter, ratio between axes
* vmin, vmax : float
min and max values for imshow
"""
npts, maxtraces = section.shape # time/traces
if maxtraces < 1:
raise IndexError("Nothing to plot")
if npts < 1:
raise IndexError("Nothing to plot")
t = numpy.linspace(0, dt*npts, npts)
data = section
if ranges is None:
ranges = (0, maxtraces)
x0, x1 = ranges
extent = (x0, x1, t[-1], t[0])
if aspect is None: # guarantee a rectangular picture
aspect = numpy.round((x1-x0)/numpy.max(t))
aspect -= aspect*0.2
pyplot.imshow(data, aspect=aspect, cmap=cmap, origin='upper',
extent=extent, vmin=vmin, vmax=vmax)
| bsd-3-clause |
mattsep/TDSE | src/animate.py | 1 | 1444 | import scipy as sp
import matplotlib.pyplot as plt
import matplotlib.animation as animation
# animation of the probability density of the wavefunction over the course
# of time
def probabilityDensity(x, t, V, psi):
# convert to the probability density
Nt = len(t)
rho = sp.real(sp.conjugate(psi)*psi)
# set the first frame properties and grab the line handles
fig, ax = plt.subplots()
line1, line2, line3, line4 = ax.plot(x, rho[:,1], 'k',
x, sp.real(psi[:,1]), 'b:',
x, sp.imag(psi[:,1]), 'r:',
x, V, 'm--',
linewidth=2.0)
ax.set_xlabel("Position")
ax.set_ylabel("Probability Density")
ax.set_ylim([-rho.max(), rho.max()])
ax.set_xlim([min(x), max(x)])
# the animation function, to be called repeatedly
def animate(i):
# set the new data each frame
line1.set_ydata(rho[:,i])
line2.set_ydata(sp.real(psi[:,i]))
line3.set_ydata(sp.imag(psi[:,i]))
return line1, line2, line3
# the initialization function, useful when blit=True
def init():
line1.set_ydata(sp.ma.array(x, mask=True))
line2.set_ydata(sp.ma.array(x, mask=True))
line3.set_ydata(sp.ma.array(x, mask=True))
return line1, line2, line3
# perform the animation
ani = animation.FuncAnimation(fig, animate, sp.arange(1,Nt),
init_func=init, interval=25, blit=True)
plt.show()
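# Hypothetical usage sketch (not part of the original file); the shapes are my
# reading of the function above: psi is (Nx, Nt), x and V are (Nx,), t is (Nt,)
#
#     import scipy as sp
#     x = sp.linspace(-10, 10, 500)
#     t = sp.linspace(0, 1, 200)
#     V = 0.5 * x**2
#     psi = sp.exp(-(x[:, None] - 0.1 * t[None, :])**2) + 0j
#     probabilityDensity(x, t, V, psi)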
| gpl-3.0 |
jgoppert/pymola | test/xml_test.py | 1 | 4050 | #!/usr/bin/env python
"""
Test XML backend
"""
import os
import sys
import time
import unittest
import pymoca.parser as mo_parser
from pymoca.backends.xml import analysis, generator, sim_scipy
from pymoca.backends.xml import parser as xml_parser
# get matplotlib from analysis, since logic for plotting
# without display already handled there
from pymoca.backends.xml.analysis import plt
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
MODEL_DIR = os.path.join(TEST_DIR, 'models')
GENERATED_DIR = os.path.join(TEST_DIR, 'generated')
class XmlTest(unittest.TestCase):
"""
Xml tests
"""
def setUp(self):
pass
def tearDown(self):
pass
@staticmethod
def flush():
sys.stdout.flush()
sys.stdout.flush()
time.sleep(0.01)
def test_noise(self):
# compile to ModelicaXML
with open(os.path.join(MODEL_DIR, 'Noise.mo'), 'r') as f:
txt = f.read()
ast_tree = mo_parser.parse(txt)
model_xml = generator.generate(ast_tree, 'Noise')
# save xml model to disk
with open(os.path.join(GENERATED_DIR, 'Noise.xml'), 'w') as f:
f.write(model_xml)
# load xml model
model = xml_parser.parse(model_xml, verbose=False)
print(model)
# convert to ode
model_ode = model.to_ode() # type: model.HybridOde
print(model_ode)
# simulate
data = sim_scipy.sim(model_ode, {'tf': 1, 'dt': 0.001, 'verbose': True})
# plot
analysis.plot(data, fields=['x', 'm'])
plt.draw()
plt.pause(0.1)
plt.close()
def test_simple_circuit(self):
# compile to ModelicaXML
with open(os.path.join(MODEL_DIR, 'SimpleCircuit.mo'), 'r') as f:
txt = f.read()
ast_tree = mo_parser.parse(txt)
model_xml = generator.generate(ast_tree, 'SimpleCircuit')
# save xml model to disk
with open(os.path.join(GENERATED_DIR, 'SimpleCircuit.xml'), 'w') as f:
f.write(model_xml)
# load xml model
model = xml_parser.parse(model_xml, verbose=False)
print(model)
# convert to ode
model_ode = model.to_ode() # type: model.HybridOde
print(model_ode)
# simulate
data = sim_scipy.sim(model_ode, {'tf': 1, 'dt': 0.001, 'verbose': True})
# plot
analysis.plot(data, fields=['x', 'c', 'm'])
plt.draw()
plt.pause(0.1)
plt.close()
def test_bouncing_ball(self):
# generate
with open(os.path.join(MODEL_DIR, 'BouncingBall.mo'), 'r') as f:
txt = f.read()
ast_tree = mo_parser.parse(txt)
generator.generate(ast_tree, 'BouncingBall')
# parse
example_file = os.path.join(
MODEL_DIR, 'bouncing-ball.xml')
model = xml_parser.parse_file(example_file, verbose=False)
print(model)
# convert to ode
model_ode = model.to_ode() # type: model.HybridOde
model_ode.prop['x']['start'] = 1
print(model_ode)
# simulate
data = sim_scipy.sim(model_ode, {'tf': 3.5, 'dt': 0.01, 'verbose': True})
# plot
analysis.plot(data, linewidth=0.5, marker='.', markersize=0.5)
plt.draw()
plt.pause(0.1)
plt.close()
# simulate in soft real-time
do_realtime = False
if do_realtime:
print('\nsoft-realtime simulation')
time_start = time.time()
def realtime_callback(t, x, y, m, p, c):
t_real = time.time() - time_start
lag = t_real - t
if abs(lag) > 0.1:
print("real: {:10f} > sim: {:10f}, lag: {:10f}".format(t_real, t, lag))
elif lag < 0:
time.sleep(-lag)
data = sim_scipy.sim(model_ode, {'tf': 3.5, 'dt': 0.01, 'verbose': True},
user_callback=realtime_callback)
# plt.gca().set_ylim(-2, 2)
self.flush()
| bsd-3-clause |
trankmichael/scipy | scipy/signal/waveforms.py | 64 | 14818 | # Author: Travis Oliphant
# 2003
#
# Feb. 2010: Updated by Warren Weckesser:
# Rewrote much of chirp()
# Added sweep_poly()
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \
exp, cos, sin, polyval, polyint
__all__ = ['sawtooth', 'square', 'gausspulse', 'chirp', 'sweep_poly']
def sawtooth(t, width=1):
"""
Return a periodic sawtooth or triangle waveform.
The sawtooth waveform has a period ``2*pi``, rises from -1 to 1 on the
interval 0 to ``width*2*pi``, then drops from 1 to -1 on the interval
``width*2*pi`` to ``2*pi``. `width` must be in the interval [0, 1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
Time.
width : array_like, optional
Width of the rising ramp as a proportion of the total cycle.
Default is 1, producing a rising ramp, while 0 produces a falling
ramp. `width` = 0.5 produces a triangle wave.
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the sawtooth waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500)
>>> plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))
"""
t, w = asarray(t), asarray(width)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# take t modulo 2*pi
tmod = mod(t, 2 * pi)
# on the interval 0 to width*2*pi function is
# tmod / (pi*w) - 1
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
tsub = extract(mask2, tmod)
wsub = extract(mask2, w)
place(y, mask2, tsub / (pi * wsub) - 1)
# on the interval width*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
tsub = extract(mask3, tmod)
wsub = extract(mask3, w)
place(y, mask3, (pi * (wsub + 1) - tsub) / (pi * (1 - wsub)))
return y
def square(t, duty=0.5):
"""
Return a periodic square-wave waveform.
The square wave has a period ``2*pi``, has value +1 from 0 to
``2*pi*duty`` and -1 from ``2*pi*duty`` to ``2*pi``. `duty` must be in
the interval [0,1].
Note that this is not band-limited. It produces an infinite number
of harmonics, which are aliased back and forth across the frequency
spectrum.
Parameters
----------
t : array_like
The input time array.
duty : array_like, optional
Duty cycle. Default is 0.5 (50% duty cycle).
If an array, causes wave shape to change over time, and must be the
same length as t.
Returns
-------
y : ndarray
Output array containing the square waveform.
Examples
--------
A 5 Hz waveform sampled at 500 Hz for 1 second:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(0, 1, 500, endpoint=False)
>>> plt.plot(t, signal.square(2 * np.pi * 5 * t))
>>> plt.ylim(-2, 2)
A pulse-width modulated sine wave:
>>> plt.figure()
>>> sig = np.sin(2 * np.pi * t)
>>> pwm = signal.square(2 * np.pi * 30 * t, duty=(sig + 1)/2)
>>> plt.subplot(2, 1, 1)
>>> plt.plot(t, sig)
>>> plt.subplot(2, 1, 2)
>>> plt.plot(t, pwm)
>>> plt.ylim(-1.5, 1.5)
"""
t, w = asarray(t), asarray(duty)
w = asarray(w + (t - t))
t = asarray(t + (w - w))
if t.dtype.char in ['fFdD']:
ytype = t.dtype.char
else:
ytype = 'd'
y = zeros(t.shape, ytype)
# width must be between 0 and 1 inclusive
mask1 = (w > 1) | (w < 0)
place(y, mask1, nan)
# on the interval 0 to duty*2*pi function is 1
tmod = mod(t, 2 * pi)
mask2 = (1 - mask1) & (tmod < w * 2 * pi)
place(y, mask2, 1)
# on the interval duty*2*pi to 2*pi function is
# (pi*(w+1)-tmod) / (pi*(1-w))
mask3 = (1 - mask1) & (1 - mask2)
place(y, mask3, -1)
return y
def gausspulse(t, fc=1000, bw=0.5, bwr=-6, tpr=-60, retquad=False,
retenv=False):
"""
Return a Gaussian modulated sinusoid:
``exp(-a t^2) exp(1j*2*pi*fc*t).``
If `retquad` is True, then return the real and imaginary parts
(in-phase and quadrature).
If `retenv` is True, then return the envelope (unmodulated signal).
Otherwise, return the real part of the modulated sinusoid.
Parameters
----------
t : ndarray or the string 'cutoff'
Input array.
fc : int, optional
Center frequency (e.g. Hz). Default is 1000.
bw : float, optional
Fractional bandwidth in frequency domain of pulse (e.g. Hz).
Default is 0.5.
bwr : float, optional
Reference level at which fractional bandwidth is calculated (dB).
Default is -6.
tpr : float, optional
If `t` is 'cutoff', then the function returns the cutoff
time for when the pulse amplitude falls below `tpr` (in dB).
Default is -60.
retquad : bool, optional
If True, return the quadrature (imaginary) as well as the real part
of the signal. Default is False.
retenv : bool, optional
If True, return the envelope of the signal. Default is False.
Returns
-------
yI : ndarray
Real part of signal. Always returned.
yQ : ndarray
Imaginary part of signal. Only returned if `retquad` is True.
yenv : ndarray
Envelope of signal. Only returned if `retenv` is True.
See Also
--------
scipy.signal.morlet
Examples
--------
Plot real component, imaginary component, and envelope for a 5 Hz pulse,
sampled at 100 Hz for 2 seconds:
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> t = np.linspace(-1, 1, 2 * 100, endpoint=False)
>>> i, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True)
>>> plt.plot(t, i, t, q, t, e, '--')
"""
if fc < 0:
raise ValueError("Center frequency (fc=%.2f) must be >=0." % fc)
if bw <= 0:
raise ValueError("Fractional bandwidth (bw=%.2f) must be > 0." % bw)
if bwr >= 0:
raise ValueError("Reference level for bandwidth (bwr=%.2f) must "
"be < 0 dB" % bwr)
# exp(-a t^2) <-> sqrt(pi/a) exp(-pi^2/a * f^2) = g(f)
ref = pow(10.0, bwr / 20.0)
# fdel = fc*bw/2: g(fdel) = ref --- solve this for a
#
# pi^2/a * fc^2 * bw^2 /4=-log(ref)
a = -(pi * fc * bw) ** 2 / (4.0 * log(ref))
if t == 'cutoff': # compute cut_off point
# Solve exp(-a tc**2) = tref for tc
# tc = sqrt(-log(tref) / a) where tref = 10^(tpr/20)
if tpr >= 0:
raise ValueError("Reference level for time cutoff must be < 0 dB")
tref = pow(10.0, tpr / 20.0)
return sqrt(-log(tref) / a)
yenv = exp(-a * t * t)
yI = yenv * cos(2 * pi * fc * t)
yQ = yenv * sin(2 * pi * fc * t)
if not retquad and not retenv:
return yI
if not retquad and retenv:
return yI, yenv
if retquad and not retenv:
return yI, yQ
if retquad and retenv:
return yI, yQ, yenv
def chirp(t, f0, t1, f1, method='linear', phi=0, vertex_zero=True):
"""Frequency-swept cosine generator.
In the following, 'Hz' should be interpreted as 'cycles per unit';
there is no requirement here that the unit is one second. The
important distinction is that the units of rotation are cycles, not
radians. Likewise, `t` could be a measurement of space instead of time.
Parameters
----------
t : array_like
Times at which to evaluate the waveform.
f0 : float
Frequency (e.g. Hz) at time t=0.
t1 : float
Time at which `f1` is specified.
f1 : float
Frequency (e.g. Hz) of the waveform at time `t1`.
method : {'linear', 'quadratic', 'logarithmic', 'hyperbolic'}, optional
Kind of frequency sweep. If not given, `linear` is assumed. See
Notes below for more details.
phi : float, optional
Phase offset, in degrees. Default is 0.
vertex_zero : bool, optional
This parameter is only used when `method` is 'quadratic'.
It determines whether the vertex of the parabola that is the graph
of the frequency is at t=0 or t=t1.
Returns
-------
y : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)`` where `phase` is the integral
(from 0 to `t`) of ``2*pi*f(t)``. ``f(t)`` is defined below.
See Also
--------
sweep_poly
Notes
-----
There are four options for the `method`. The following formulas give
the instantaneous frequency (in Hz) of the signal generated by
`chirp()`. For convenience, the shorter names shown below may also be
used.
linear, lin, li:
``f(t) = f0 + (f1 - f0) * t / t1``
quadratic, quad, q:
The graph of the frequency f(t) is a parabola through (0, f0) and
(t1, f1). By default, the vertex of the parabola is at (0, f0).
If `vertex_zero` is False, then the vertex is at (t1, f1). The
formula is:
if vertex_zero is True:
``f(t) = f0 + (f1 - f0) * t**2 / t1**2``
else:
``f(t) = f1 - (f1 - f0) * (t1 - t)**2 / t1**2``
To use a more general quadratic function, or an arbitrary
polynomial, use the function `scipy.signal.waveforms.sweep_poly`.
logarithmic, log, lo:
``f(t) = f0 * (f1/f0)**(t/t1)``
f0 and f1 must be nonzero and have the same sign.
This signal is also known as a geometric or exponential chirp.
hyperbolic, hyp:
``f(t) = f0*f1*t1 / ((f0 - f1)*t + f1*t1)``
f0 and f1 must be nonzero.
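    Examples
    --------
    A minimal usage sketch (the sample grid and sweep endpoints below are
    arbitrary illustration values):
    >>> import numpy as np
    >>> from scipy.signal import chirp
    >>> t = np.linspace(0, 10, 5001)
    >>> w = chirp(t, f0=6, f1=1, t1=10, method='linear')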
"""
# 'phase' is computed in _chirp_phase, to make testing easier.
phase = _chirp_phase(t, f0, t1, f1, method, vertex_zero)
# Convert phi to radians.
phi *= pi / 180
return cos(phase + phi)
def _chirp_phase(t, f0, t1, f1, method='linear', vertex_zero=True):
"""
    Calculate the phase used by `chirp` to generate its output.
    See `chirp` for a description of the arguments.
"""
t = asarray(t)
f0 = float(f0)
t1 = float(t1)
f1 = float(f1)
if method in ['linear', 'lin', 'li']:
beta = (f1 - f0) / t1
phase = 2 * pi * (f0 * t + 0.5 * beta * t * t)
elif method in ['quadratic', 'quad', 'q']:
beta = (f1 - f0) / (t1 ** 2)
if vertex_zero:
phase = 2 * pi * (f0 * t + beta * t ** 3 / 3)
else:
phase = 2 * pi * (f1 * t + beta * ((t1 - t) ** 3 - t1 ** 3) / 3)
elif method in ['logarithmic', 'log', 'lo']:
if f0 * f1 <= 0.0:
raise ValueError("For a logarithmic chirp, f0 and f1 must be "
"nonzero and have the same sign.")
if f0 == f1:
phase = 2 * pi * f0 * t
else:
beta = t1 / log(f1 / f0)
phase = 2 * pi * beta * f0 * (pow(f1 / f0, t / t1) - 1.0)
elif method in ['hyperbolic', 'hyp']:
if f0 == 0 or f1 == 0:
raise ValueError("For a hyperbolic chirp, f0 and f1 must be "
"nonzero.")
if f0 == f1:
# Degenerate case: constant frequency.
phase = 2 * pi * f0 * t
else:
# Singular point: the instantaneous frequency blows up
# when t == sing.
sing = -f1 * t1 / (f0 - f1)
phase = 2 * pi * (-sing * f0) * log(np.abs(1 - t/sing))
else:
raise ValueError("method must be 'linear', 'quadratic', 'logarithmic',"
" or 'hyperbolic', but a value of %r was given."
% method)
return phase
def sweep_poly(t, poly, phi=0):
"""
Frequency-swept cosine generator, with a time-dependent frequency.
This function generates a sinusoidal function whose instantaneous
frequency varies with time. The frequency at time `t` is given by
the polynomial `poly`.
Parameters
----------
t : ndarray
Times at which to evaluate the waveform.
poly : 1-D array_like or instance of numpy.poly1d
The desired frequency expressed as a polynomial. If `poly` is
a list or ndarray of length n, then the elements of `poly` are
the coefficients of the polynomial, and the instantaneous
frequency is
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of numpy.poly1d, then the
instantaneous frequency is
``f(t) = poly(t)``
phi : float, optional
Phase offset, in degrees, Default: 0.
Returns
-------
sweep_poly : ndarray
A numpy array containing the signal evaluated at `t` with the
requested time-varying frequency. More precisely, the function
returns ``cos(phase + (pi/180)*phi)``, where `phase` is the integral
(from 0 to t) of ``2 * pi * f(t)``; ``f(t)`` is defined above.
See Also
--------
chirp
Notes
-----
.. versionadded:: 0.8.0
If `poly` is a list or ndarray of length `n`, then the elements of
`poly` are the coefficients of the polynomial, and the instantaneous
frequency is:
``f(t) = poly[0]*t**(n-1) + poly[1]*t**(n-2) + ... + poly[n-1]``
If `poly` is an instance of `numpy.poly1d`, then the instantaneous
frequency is:
``f(t) = poly(t)``
Finally, the output `s` is:
``cos(phase + (pi/180)*phi)``
where `phase` is the integral from 0 to `t` of ``2 * pi * f(t)``,
``f(t)`` as defined above.
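    Examples
    --------
    A brief sketch (the cubic below is an arbitrary illustration; any
    coefficient list or `numpy.poly1d` instance may be used):
    >>> import numpy as np
    >>> from scipy.signal import sweep_poly
    >>> p = np.poly1d([0.025, -0.36, 1.25, 2.0])
    >>> t = np.linspace(0, 10, 5001)
    >>> w = sweep_poly(t, p)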
"""
# 'phase' is computed in _sweep_poly_phase, to make testing easier.
phase = _sweep_poly_phase(t, poly)
# Convert to radians.
phi *= pi / 180
return cos(phase + phi)
def _sweep_poly_phase(t, poly):
"""
Calculate the phase used by sweep_poly to generate its output.
See `sweep_poly` for a description of the arguments.
"""
# polyint handles lists, ndarrays and instances of poly1d automatically.
intpoly = polyint(poly)
phase = 2 * pi * polyval(intpoly, t)
return phase
| bsd-3-clause |
trachelr/mne-python | examples/preprocessing/plot_find_ecg_artifacts.py | 19 | 1304 | """
==================
Find ECG artifacts
==================
Locate QRS component of ECG.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
# Setup for reading the raw data
raw = io.Raw(raw_fname)
event_id = 999
ecg_events, _, _ = mne.preprocessing.find_ecg_events(raw, event_id,
ch_name='MEG 1531')
# Read epochs
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False, eog=False,
include=['MEG 1531'], exclude='bads')
tmin, tmax = -0.1, 0.1
epochs = mne.Epochs(raw, ecg_events, event_id, tmin, tmax, picks=picks,
proj=False)
data = epochs.get_data()
print("Number of detected ECG artifacts : %d" % len(data))
###############################################################################
# Plot ECG artifacts
plt.plot(1e3 * epochs.times, np.squeeze(data).T)
plt.xlabel('Times (ms)')
plt.ylabel('ECG')
plt.show()
| bsd-3-clause |
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated | python-packages/mne-python-0.10/mne/fixes.py | 1 | 29568 | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
# XXX : copied from scikit-learn
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <fpedregosa@acm.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD
from __future__ import division
import collections
from operator import itemgetter
import inspect
import warnings
import numpy as np
import scipy
from scipy import linalg, sparse
from math import ceil, log
from numpy.fft import irfft
from distutils.version import LooseVersion
from functools import partial
from .externals import six
from .externals.six.moves import copyreg, xrange
from gzip import GzipFile
###############################################################################
# Misc
class gzip_open(GzipFile): # python2.6 doesn't have context managing
def __enter__(self):
if hasattr(GzipFile, '__enter__'):
return GzipFile.__enter__(self)
else:
return self
def __exit__(self, exc_type, exc_value, traceback):
if hasattr(GzipFile, '__exit__'):
return GzipFile.__exit__(self, exc_type, exc_value, traceback)
else:
return self.close()
class _Counter(collections.defaultdict):
"""Partial replacement for Python 2.7 collections.Counter."""
def __init__(self, iterable=(), **kwargs):
super(_Counter, self).__init__(int, **kwargs)
self.update(iterable)
def most_common(self):
return sorted(six.iteritems(self), key=itemgetter(1), reverse=True)
def update(self, other):
"""Adds counts for elements in other"""
if isinstance(other, self.__class__):
for x, n in six.iteritems(other):
self[x] += n
else:
for x in other:
self[x] += 1
try:
Counter = collections.Counter
except AttributeError:
Counter = _Counter
def _unique(ar, return_index=False, return_inverse=False):
"""A replacement for the np.unique that appeared in numpy 1.4.
While np.unique existed long before, keyword return_inverse was
only added in 1.4.
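    A small sketch of the backported keyword, assuming this module's
    `_unique` is in scope:
    >>> import numpy as np
    >>> vals, inv = _unique(np.array([3, 1, 3, 2]), return_inverse=True)
    >>> vals
    array([1, 2, 3])
    >>> inv
    array([2, 0, 2, 1])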
"""
try:
ar = ar.flatten()
except AttributeError:
if not return_inverse and not return_index:
items = sorted(set(ar))
return np.asarray(items)
else:
ar = np.asarray(ar).flatten()
if ar.size == 0:
if return_inverse and return_index:
return ar, np.empty(0, np.bool), np.empty(0, np.bool)
elif return_inverse or return_index:
return ar, np.empty(0, np.bool)
else:
return ar
if return_inverse or return_index:
perm = ar.argsort()
aux = ar[perm]
flag = np.concatenate(([True], aux[1:] != aux[:-1]))
if return_inverse:
iflag = np.cumsum(flag) - 1
iperm = perm.argsort()
if return_index:
return aux[flag], perm[flag], iflag[iperm]
else:
return aux[flag], iflag[iperm]
else:
return aux[flag], perm[flag]
else:
ar.sort()
flag = np.concatenate(([True], ar[1:] != ar[:-1]))
return ar[flag]
if LooseVersion(np.__version__) < LooseVersion('1.5'):
unique = _unique
else:
unique = np.unique
def _bincount(X, weights=None, minlength=None):
"""Replacing np.bincount in numpy < 1.6 to provide minlength."""
result = np.bincount(X, weights)
if minlength is None or len(result) >= minlength:
return result
out = np.zeros(minlength, np.int)
out[:len(result)] = result
return out
if LooseVersion(np.__version__) < LooseVersion('1.6'):
bincount = _bincount
else:
bincount = np.bincount
def _copysign(x1, x2):
"""Slow replacement for np.copysign, which was introduced in numpy 1.4"""
return np.abs(x1) * np.sign(x2)
if not hasattr(np, 'copysign'):
copysign = _copysign
else:
copysign = np.copysign
def _in1d(ar1, ar2, assume_unique=False, invert=False):
"""Replacement for in1d that is provided for numpy >= 1.4"""
# Ravel both arrays, behavior for the first array could be different
ar1 = np.asarray(ar1).ravel()
ar2 = np.asarray(ar2).ravel()
# This code is significantly faster when the condition is satisfied.
if len(ar2) < 10 * len(ar1) ** 0.145:
if invert:
mask = np.ones(len(ar1), dtype=np.bool)
for a in ar2:
mask &= (ar1 != a)
else:
mask = np.zeros(len(ar1), dtype=np.bool)
for a in ar2:
mask |= (ar1 == a)
return mask
# Otherwise use sorting
if not assume_unique:
ar1, rev_idx = unique(ar1, return_inverse=True)
ar2 = np.unique(ar2)
ar = np.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
if not hasattr(np, 'in1d') or LooseVersion(np.__version__) < '1.8':
in1d = _in1d
else:
in1d = np.in1d
def _digitize(x, bins, right=False):
"""Replacement for digitize with right kwarg (numpy < 1.7).
Notes
-----
This fix is only meant for integer arrays. If ``right==True`` but either
``x`` or ``bins`` are of a different type, a NotImplementedError will be
raised.
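    Examples
    --------
    A short sketch of the integer-only ``right=True`` path:
    >>> import numpy as np
    >>> bins = np.array([0, 5, 10])
    >>> _digitize(np.array([4, 5, 6]), bins, right=True)
    array([1, 1, 2])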
"""
if right:
x = np.asarray(x)
bins = np.asarray(bins)
if (x.dtype.kind not in 'ui') or (bins.dtype.kind not in 'ui'):
raise NotImplementedError("Only implemented for integer input")
return np.digitize(x - 1e-5, bins)
else:
return np.digitize(x, bins)
if LooseVersion(np.__version__) < LooseVersion('1.7'):
digitize = _digitize
else:
digitize = np.digitize
def _tril_indices(n, k=0):
"""Replacement for tril_indices that is provided for numpy >= 1.4"""
mask = np.greater_equal(np.subtract.outer(np.arange(n), np.arange(n)), -k)
indices = np.where(mask)
return indices
if not hasattr(np, 'tril_indices'):
tril_indices = _tril_indices
else:
tril_indices = np.tril_indices
def _unravel_index(indices, dims):
"""Add support for multiple indices in unravel_index that is provided
for numpy >= 1.4"""
indices_arr = np.asarray(indices)
if indices_arr.size == 1:
return np.unravel_index(indices, dims)
else:
if indices_arr.ndim != 1:
raise ValueError('indices should be one dimensional')
ndims = len(dims)
unraveled_coords = np.empty((indices_arr.size, ndims), dtype=np.int)
for coord, idx in zip(unraveled_coords, indices_arr):
coord[:] = np.unravel_index(idx, dims)
return tuple(unraveled_coords.T)
if LooseVersion(np.__version__) < LooseVersion('1.4'):
unravel_index = _unravel_index
else:
unravel_index = np.unravel_index
def _qr_economic_old(A, **kwargs):
"""
Compat function for the QR-decomposition in economic mode
Scipy 0.9 changed the keyword econ=True to mode='economic'
"""
with warnings.catch_warnings(record=True):
return linalg.qr(A, econ=True, **kwargs)
def _qr_economic_new(A, **kwargs):
return linalg.qr(A, mode='economic', **kwargs)
if LooseVersion(scipy.__version__) < LooseVersion('0.9'):
qr_economic = _qr_economic_old
else:
qr_economic = _qr_economic_new
def savemat(file_name, mdict, oned_as="column", **kwargs):
"""MATLAB-format output routine that is compatible with SciPy 0.7's.
0.7.2 (or .1?) added the oned_as keyword arg with 'column' as the default
value. It issues a warning if this is not provided, stating that "This will
change to 'row' in future versions."
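    A minimal usage sketch (the file name is an arbitrary illustration;
    the call writes to disk, so it is skipped when doctests run):
    >>> import numpy as np
    >>> savemat('data.mat', {'x': np.arange(3)})  # doctest: +SKIP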
"""
import scipy.io
try:
return scipy.io.savemat(file_name, mdict, oned_as=oned_as, **kwargs)
except TypeError:
return scipy.io.savemat(file_name, mdict, **kwargs)
if hasattr(np, 'count_nonzero'):
from numpy import count_nonzero
else:
def count_nonzero(X):
return len(np.flatnonzero(X))
# little dance to see if np.copy has an 'order' keyword argument
if 'order' in inspect.getargspec(np.copy)[0]:
def safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
else:
# Before an 'order' argument was introduced, numpy wouldn't muck with
# the ordering
safe_copy = np.copy
def _meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
Make N-D coordinate arrays for vectorized evaluations of
N-D scalar/vector fields over N-D grids, given
one-dimensional coordinate arrays x1, x2,..., xn.
.. versionchanged:: 1.9
1-D and 0-D cases are allowed.
Parameters
----------
x1, x2,..., xn : array_like
1-D arrays representing the coordinates of a grid.
indexing : {'xy', 'ij'}, optional
Cartesian ('xy', default) or matrix ('ij') indexing of output.
See Notes for more details.
.. versionadded:: 1.7.0
sparse : bool, optional
If True a sparse grid is returned in order to conserve memory.
Default is False.
.. versionadded:: 1.7.0
copy : bool, optional
If False, a view into the original arrays are returned in order to
conserve memory. Default is True. Please note that
``sparse=False, copy=False`` will likely return non-contiguous
arrays. Furthermore, more than one element of a broadcast array
may refer to a single memory location. If you need to write to the
arrays, make copies first.
.. versionadded:: 1.7.0
Returns
-------
X1, X2,..., XN : ndarray
For vectors `x1`, `x2`,..., 'xn' with lengths ``Ni=len(xi)`` ,
return ``(N1, N2, N3,...Nn)`` shaped arrays if indexing='ij'
or ``(N2, N1, N3,...Nn)`` shaped arrays if indexing='xy'
with the elements of `xi` repeated to fill the matrix along
the first dimension for `x1`, the second for `x2` and so on.
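    Examples
    --------
    A brief sketch of the default 'xy' indexing with two 1-D inputs,
    assuming this module's `_meshgrid` is in scope:
    >>> import numpy as np
    >>> xv, yv = _meshgrid(np.array([0, 1, 2]), np.array([0, 1]))
    >>> xv.shape, yv.shape
    ((2, 3), (2, 3))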
"""
ndim = len(xi)
copy_ = kwargs.pop('copy', True)
sparse = kwargs.pop('sparse', False)
indexing = kwargs.pop('indexing', 'xy')
if kwargs:
raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
% (list(kwargs)[0],))
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
s0 = (1,) * ndim
output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1::])
for i, x in enumerate(xi)]
shape = [x.size for x in output]
if indexing == 'xy' and ndim > 1:
# switch first and second axis
output[0].shape = (1, -1) + (1,) * (ndim - 2)
output[1].shape = (-1, 1) + (1,) * (ndim - 2)
shape[0], shape[1] = shape[1], shape[0]
if sparse:
if copy_:
return [x.copy() for x in output]
else:
return output
else:
# Return the full N-D matrix (not only the 1-D vector)
if copy_:
mult_fact = np.ones(shape, dtype=int)
return [x * mult_fact for x in output]
else:
return np.broadcast_arrays(*output)
if LooseVersion(np.__version__) < LooseVersion('1.7'):
meshgrid = _meshgrid
else:
meshgrid = np.meshgrid
###############################################################################
# Back porting firwin2 for older scipy
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def _firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0):
"""FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`. If the gain at the Nyquist rate, `gain[-1]`, is not 0,
then `numtaps` must be odd.
freq : array-like, 1D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency can be redefined with the argument
`nyq`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be `nyq`.
gain : array-like
The filter gains at the frequency sampling points.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
        (e.g., 129, 257, etc.). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float
Nyquist frequency. Each frequency in `freq` must be between 0 and
`nyq` (inclusive).
Returns
-------
taps : numpy 1D array of length `numtaps`
The filter coefficients of the FIR filter.
Examples
--------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> taps = firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0]) # doctest: +SKIP
>>> print(taps[72:78]) # doctest: +SKIP
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
See also
--------
scipy.signal.firwin
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The filter is Type I if `numtaps`
is odd and Type II if `numtaps` is even. Because Type II filters always
have a zero at the Nyquist frequency, `numtaps` must be odd if `gain[-1]`
is not zero.
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
"""
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
raise ValueError('ntaps must be less than nfreqs, but firwin2 was '
'called with ntaps=%d and nfreqs=%s'
% (numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with `nyq`.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if numtaps % 2 == 0 and gain[-1] != 0.0:
raise ValueError("A filter with an even number of coefficients must "
"have zero gain at the Nyquist rate.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps, 2)))
# Tweak any repeated values in freq so that interp works.
eps = np.finfo(float).eps
for k in range(len(freq)):
if k < len(freq) - 1 and freq[k] == freq[k + 1]:
freq[k] = freq[k] - eps
freq[k + 1] = freq[k + 1] + eps
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
# Adjust the phases of the coefficients so that the first `ntaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps - 1) / 2. * 1.j * np.pi * x / nyq)
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from scipy.signal.signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
return out
def get_firwin2():
"""Helper to get firwin2"""
try:
from scipy.signal import firwin2
except ImportError:
firwin2 = _firwin2
return firwin2
def _filtfilt(*args, **kwargs):
"""wrap filtfilt, excluding padding arguments"""
from scipy.signal import filtfilt
# cut out filter args
if len(args) > 4:
args = args[:4]
if 'padlen' in kwargs:
del kwargs['padlen']
return filtfilt(*args, **kwargs)
def get_filtfilt():
"""Helper to get filtfilt from scipy"""
from scipy.signal import filtfilt
if 'padlen' in inspect.getargspec(filtfilt)[0]:
return filtfilt
return _filtfilt
def _get_argrelmax():
try:
from scipy.signal import argrelmax
except ImportError:
argrelmax = _argrelmax
return argrelmax
def _argrelmax(data, axis=0, order=1, mode='clip'):
"""Calculate the relative maxima of `data`.
Parameters
----------
data : ndarray
Array in which to find the relative maxima.
axis : int, optional
Axis over which to select from `data`. Default is 0.
order : int, optional
How many points on each side to use for the comparison
to consider ``comparator(n, n+x)`` to be True.
mode : str, optional
How the edges of the vector are treated.
Available options are 'wrap' (wrap around) or 'clip' (treat overflow
as the same as the last (or first) element).
Default 'clip'. See `numpy.take`.
Returns
-------
extrema : tuple of ndarrays
Indices of the maxima in arrays of integers. ``extrema[k]`` is
the array of indices of axis `k` of `data`. Note that the
return value is a tuple even when `data` is one-dimensional.
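    Examples
    --------
    A minimal sketch on a 1-D array (the indices of the relative maxima
    are returned as a one-element tuple):
    >>> import numpy as np
    >>> x = np.array([2, 1, 2, 3, 2, 0, 1, 0])
    >>> _argrelmax(x)
    (array([3, 6]),)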
"""
comparator = np.greater
if((int(order) != order) or (order < 1)):
raise ValueError('Order must be an int >= 1')
datalen = data.shape[axis]
locs = np.arange(0, datalen)
results = np.ones(data.shape, dtype=bool)
main = data.take(locs, axis=axis, mode=mode)
for shift in xrange(1, order + 1):
plus = data.take(locs + shift, axis=axis, mode=mode)
minus = data.take(locs - shift, axis=axis, mode=mode)
results &= comparator(main, plus)
results &= comparator(main, minus)
if(~results.any()):
return results
return np.where(results)
###############################################################################
# Back porting matrix_rank for numpy < 1.7
def _matrix_rank(M, tol=None):
""" Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the array that
are greater than `tol`.
Parameters
----------
M : {(M,), (M, N)} array_like
array of <=2 dimensions
tol : {None, float}, optional
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
of the singular values of `M`. By default, we identify singular values less
than ``S.max() * max(M.shape) * eps`` as indicating rank deficiency (with
the symbols defined above). This is the algorithm MATLAB uses [1]. It also
appears in *Numerical recipes* in the discussion of SVD solutions for
linear least squares [2].
This default threshold is designed to detect rank deficiency accounting
for the numerical errors of the SVD computation. Imagine that there is a
column in `M` that is an exact (in floating point) linear combination of
other columns in `M`. Computing the SVD on `M` will not produce a
singular value exactly equal to 0 in general: any difference of the
smallest SVD value from 0 will be caused by numerical imprecision in the
calculation of the SVD. Our threshold for small SVD values takes this
numerical imprecision into account, and the default threshold will detect
such numerical rank deficiency. The threshold may declare a matrix `M`
rank deficient even if the linear combination of some columns of `M` is
not exactly equal to another column of `M` but only numerically very
close to another column of `M`.
We chose our default threshold because it is in wide use. Other
thresholds are possible. For example, elsewhere in the 2007 edition of
*Numerical recipes* there is an alternative threshold of ``S.max() *
np.finfo(M.dtype).eps / 2. * np.sqrt(m + n + 1.)``. The authors describe
this threshold as being based on "expected roundoff error" (p 71).
The thresholds above deal with floating point roundoff error in the
calculation of the SVD. However, you may have more information about the
sources of error in `M` that would make you consider other tolerance
values to detect *effective* rank deficiency. The most useful measure of
the tolerance depends on the operations you intend to use on your matrix.
For example, if your data come from uncertain measurements with
uncertainties greater than floating point epsilon, choosing a tolerance
near that uncertainty may be preferable. The tolerance may be absolute if
the uncertainties are absolute rather than relative.
References
----------
    .. [1] MATLAB reference documentation, "Rank"
http://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
page 795.
Examples
--------
>>> from numpy.linalg import matrix_rank
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = np.asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return np.int(not all(M == 0))
S = np.linalg.svd(M, compute_uv=False)
if tol is None:
tol = S.max() * np.max(M.shape) * np.finfo(S.dtype).eps
return np.sum(S > tol)
if LooseVersion(np.__version__) > '1.7.1':
from numpy.linalg import matrix_rank
else:
matrix_rank = _matrix_rank
def _reconstruct_partial(func, args, kwargs):
"""Helper to pickle partial functions"""
return partial(func, *args, **(kwargs or {}))
def _reduce_partial(p):
"""Helper to pickle partial functions"""
return _reconstruct_partial, (p.func, p.args, p.keywords)
# This adds pickling functionality to older Python 2.6
# Please always import partial from here.
copyreg.pickle(partial, _reduce_partial)
def normalize_colors(vmin, vmax, clip=False):
"""Helper to handle matplotlib API"""
import matplotlib.pyplot as plt
try:
return plt.Normalize(vmin, vmax, clip=clip)
except AttributeError:
return plt.normalize(vmin, vmax, clip=clip)
def assert_true(expr, msg='False is not True'):
"""Fake assert_true without message"""
if not expr:
raise AssertionError(msg)
def assert_is(expr1, expr2, msg=None):
"""Fake assert_is without message"""
    assert_true(expr1 is expr2, msg)
def assert_is_not(expr1, expr2, msg=None):
"""Fake assert_is_not without message"""
assert_true(expr1 is not expr2, msg)
def _sparse_block_diag(mats, format=None, dtype=None):
"""An implementation of scipy.sparse.block_diag since old versions of
scipy don't have it. Forms a sparse matrix by stacking matrices in block
diagonal form.
Parameters
----------
mats : list of matrices
Input matrices.
format : str, optional
The sparse format of the result (e.g. "csr"). If not given, the
matrix is returned in "coo" format.
dtype : dtype specifier, optional
The data-type of the output matrix. If not given, the dtype is
determined from that of blocks.
Returns
-------
res : sparse matrix
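    Examples
    --------
    A small sketch stacking two dense blocks into one block-diagonal
    sparse matrix (output shown for illustration):
    >>> import numpy as np
    >>> A = np.array([[1, 2], [3, 4]])
    >>> B = np.array([[5]])
    >>> _sparse_block_diag([A, B]).toarray()  # doctest: +SKIP
    array([[1, 2, 0],
           [3, 4, 0],
           [0, 0, 5]])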
"""
nmat = len(mats)
rows = []
for ia, a in enumerate(mats):
row = [None] * nmat
row[ia] = a
rows.append(row)
return sparse.bmat(rows, format=format, dtype=dtype)
try:
from scipy.sparse import block_diag as sparse_block_diag
except Exception:
sparse_block_diag = _sparse_block_diag
def _isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within a
tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in `a` will be
considered equal to NaN's in `b` in the output array.
Returns
-------
y : array_like
Returns a boolean array of where `a` and `b` are equal within the
given tolerance. If both `a` and `b` are scalars, returns a single
boolean value.
See Also
--------
allclose
Notes
-----
.. versionadded:: 1.7.0
For finite values, isclose uses the following equation to test whether
two floating point values are equivalent.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
The above equation is not symmetric in `a` and `b`, so that
`isclose(a, b)` might be different from `isclose(b, a)` in
some rare cases.
Examples
--------
>>> isclose([1e10,1e-7], [1.00001e10,1e-8])
array([ True, False], dtype=bool)
>>> isclose([1e10,1e-8], [1.00001e10,1e-9])
array([ True, True], dtype=bool)
>>> isclose([1e10,1e-8], [1.0001e10,1e-9])
array([False, True], dtype=bool)
>>> isclose([1.0, np.nan], [1.0, np.nan])
array([ True, False], dtype=bool)
>>> isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
array([ True, True], dtype=bool)
"""
def within_tol(x, y, atol, rtol):
with np.errstate(invalid='ignore'):
result = np.less_equal(abs(x - y), atol + rtol * abs(y))
if np.isscalar(a) and np.isscalar(b):
result = bool(result)
return result
x = np.array(a, copy=False, subok=True, ndmin=1)
y = np.array(b, copy=False, subok=True, ndmin=1)
# Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
# This will cause casting of x later. Also, make sure to allow subclasses
# (e.g., for numpy.ma).
dt = np.core.multiarray.result_type(y, 1.)
y = np.array(y, dtype=dt, copy=False, subok=True)
xfin = np.isfinite(x)
yfin = np.isfinite(y)
if np.all(xfin) and np.all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = np.zeros_like(finite, subok=True)
# Because we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * np.ones_like(cond)
y = y * np.ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
both_nan = np.isnan(x) & np.isnan(y)
cond[both_nan] = both_nan[both_nan]
return cond
if LooseVersion(np.__version__) < LooseVersion('1.7'):
isclose = _isclose
else:
isclose = np.isclose
| bsd-3-clause |
rexshihaoren/scikit-learn | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
wongkaiweng/LTLMoP | src/lib/handlers/share/MotionControl/BugControllerHandler.py | 8 | 39771 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
===================================================================
BugController.py - Bug Algorithm Motion Controller
===================================================================
This motion controller uses the Bug 2 algorithm developed by V. Lumelsky and A. Stepanov.
The algorithm assumes the robot is a point operating in the plane with a contact
(zero-range) sensor to detect obstacles. When the robot has a finite (non-zero) sensing
range, as the Pioneer used here does, the range readings stand in for contact detection.
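A minimal sketch of the m-line construction used below (the coordinates and
radii are arbitrary illustration values; ``Polygon`` refers to the Polygon2
package imported by this module):
>>> import Polygon.Shapes as PolyShapes
>>> import Polygon.Utils as PolyUtils
>>> q_hit = PolyShapes.Circle(0.1, (0.0, 0.0))    # point where an obstacle was first met
>>> q_goal = PolyShapes.Circle(0.1, (2.0, 1.0))   # exit point of the current region
>>> m_line = PolyUtils.convexHull(q_hit + q_goal) # thin polygon joining the two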
"""
#import BugControllerHelper
from numpy import *
from __is_inside import is_inside
import Polygon,Polygon.IO
import Polygon.Utils as PolyUtils
import Polygon.Shapes as PolyShapes
import time, math
import sys,os
import matplotlib.pyplot as plt
import copy
from scipy.linalg import norm
from math import *
import random
import matplotlib.animation as animation
import thread
import threading
import lib.handlers.handlerTemplates as handlerTemplates
class BugControllerHandler(handlerTemplates.MotionControlHandler):
def __init__(self, executor, shared_data,robot_type):
"""
Bug alogorithm motion planning controller
robot_type (int): Which robot is used for execution. pioneer is 1, ODE is 2 (default=1)
"""
self.velocity_count_thres = 100;
self.velocity_count = 0;
#operate_system (int): Which operating system is used for execution. Ubuntu and Mac is 1, Windows is 2
if sys.platform in ['win32', 'cygwin']:
self.operate_system = 2
else:
self.operate_system = 1
# Information about the robot
## 1: Pioneer ; 2: 0DE
if robot_type not in [1,2]:
robot_type = 1
self.system = robot_type
#settings for Ubuntu or Mac plotting (only when you set operate_system = 1)
self.PLOT = False # plot with matplot
self.PLOT_M_LINE = False # plot m-line
self.PLOT_EXIT = False # plot exit point of a region
self.PLOT_OVERLAP = False # plot overlap area with the obstacle
#settings for Windows (only when you set operate_system = 2)
self.PLOT_WINDOWS = True
# Get references to handlers we'll need to communicate with
self.drive_handler = executor.hsub.getHandlerInstanceByType(handlerTemplates.DriveHandler)
self.pose_handler = executor.hsub.getHandlerInstanceByType(handlerTemplates.PoseHandler)
if self.system == 1:
self.robocomm = shared_data['robocomm']
# Get information about regions
self.proj = executor.proj
        self.coordmap_map2lab = executor.hsub.coordmap_map2lab
        self.coordmap_lab2map = executor.hsub.coordmap_lab2map
self.last_warning = 0
###################################
########used by Bug algorithm######
###################################
# PARAMETERS
#for plotting
self.time = time.clock()
self.time_thres = 1;
# Pioneer related parameters
self.PioneerWidthHalf = 0.20 #0.25 # (m) width of Pioneer #0.20
        self.PioneerLengthHalf = 0.25 #0.30 (m) length of Pioneer #0.25
# Real Robot polygon related parameters
self.boxRealVertical = self.PioneerLengthHalf*2
self.boxRealHorizontal = self.PioneerWidthHalf*2.5
self.boxRealVertical_shift = self.boxRealVertical/2
self.boxRealHorizontal_shift = self.boxRealHorizontal/2
# Pioneer Range related parameters
self.range = 2*self.PioneerLengthHalf+0.40 # (m) specify the range of the robot (when the normal circle range cannot detect obstacle) #0.85
self.obsRange = self.range*0.7 # (m) range that says the robot detects obstacles #0.25
        self.shift = 0.20 # How far the sensing range is shifted forward so that the sensed region in front of the robot is larger 0.20
self.boxVertical = self.obsRange*2 # box cutting from range of Pioneer
self.boxHorizontal = self.obsRange*2 # box cutting from range of Pioneer
self.boxVertical_shift = self.boxVertical + self.boxRealVertical/2*1.5 # vertical shifting of box
self.boxHorizontal_shift = self.boxHorizontal/2 # horizontal shifting of the box
## 2: 0DE
self.factorODE = 30 # 30 works better than 50
if self.system == 2:
self.PioneerWidthHalf = self.PioneerWidthHalf*self.factorODE
self.PioneerLengthHalf = self.PioneerLengthHalf*self.factorODE
self.range = self.range*self.factorODE
self.obsRange = self.obsRange*self.factorODE
self.shift = self.shift*self.factorODE
self.boxVertical = self.boxVertical*self.factorODE
self.boxHorizontal = self.boxHorizontal*self.factorODE
self.boxVertical_shift = self.boxVertical_shift*self.factorODE
self.boxHorizontal_shift = self.boxHorizontal_shift*self.factorODE
self.boxRealVertical = self.boxRealVertical*self.factorODE
self.boxRealHorizontal = self.boxRealHorizontal*self.factorODE
self.boxRealVertical_shift = self.boxRealVertical_shift*self.factorODE
self.boxRealHorizontal_shift= self.boxRealHorizontal_shift*self.factorODE
self.map = {} # dictionary for all the regions
self.all = Polygon.Polygon() # Polygon with all the regions
self.map_work = Polygon.Polygon() # Polygon of the current region and next region considered
self.ogr = Polygon.Polygon() #Polygon built from occupancy grid data points
self.previous_current_reg = None # previous current region
self.currentRegionPoly = None # current region's polygon
self.nextRegionPoly = None # next region's polygon
self.overlap = None
self.q_g = [0,0] # goal point of the robot heading to
self.q_hit = [0,0] # location where the robot first detect an obstacle
self.boundary_following= False # tracking whether it is in boundary following mode
self.m_line = None # m-line polygon
self.trans_matrix = mat([[0,1],[-1,0]]) # transformation matrix for find the normal to the vector connecting a point on the obstacle to the robot
self.q_hit_count = 0
self.q_hit_Thres = 1000
self.prev_follow = [[],[]]
## Construct robot polygon (for checking overlap)
pose = self.pose_handler.getPose()
self.prev_pose = pose
self.robot = PolyShapes.Rectangle(self.boxHorizontal,self.boxVertical)
self.robot.shift(pose[0]-self.boxHorizontal_shift,pose[1]-self.boxVertical_shift)
self.robot = PolyShapes.Circle(self.obsRange,(pose[0],pose[1])) - self.robot
self.robot.rotate(pose[2]-pi/2,pose[0],pose[1])
self.robot.shift(self.shift*cos(pose[2]),self.shift*sin(pose[2]))
#construct real robot polygon( see if there is overlaping with path to goal
self.realRobot = PolyShapes.Rectangle(self.boxRealHorizontal,self.boxRealVertical )
self.realRobot.shift(pose[0]-self.boxRealHorizontal_shift,pose[1]-self.boxRealVertical_shift)
self.realRobot.rotate(pose[2]-pi/2,pose[0],pose[1])
#constructing polygon of different regions (holes being taken care)
for region in self.proj.rfi.regions:
self.map[region.name] = self.createRegionPolygon(region)
for n in range(len(region.holeList)): # no of holes
self.map[region.name] -= self.createRegionPolygon(region,n)
#construct a polygon that included all the regions
for regionName,regionPoly in self.map.iteritems():
self.all += regionPoly
#setting for plotting
if self.operate_system == 1:
if self.PLOT or self.PLOT_OVERLAP == True:
self.original_figure = 1
plt.figure(self.original_figure)
if self.PLOT_EXIT or self.PLOT_M_LINE == True:
self.overlap_figure = 2
plt.figure(self.overlap_figure)
else:
if self.PLOT_WINDOWS == True:
# start using anmination to plot Pioneer
self.fig = plt.figure()
self.ax = self.fig.add_subplot(111)
self.scope = _Scope(self.ax,self)
thread.start_new_thread(self.jplot,())
#Plot the robot on the map in figure 1
if self.PLOT == True:
plt.figure(self.original_figure)
plt.clf()
self.plotPoly(self.realRobot, 'r')
self.plotPoly(self.robot, 'b')
plt.plot(pose[0],pose[1],'bo')
self.plotPioneer(self.original_figure)
def gotoRegion(self, current_reg, next_reg, last=False):
"""
If ``last`` is True, we will move to the center of the destination region.
Returns ``True`` if we've reached the destination region.
"""
q_gBundle = [[],[]] # goal bundle
q_overlap = [[],[]] # overlapping points with robot range
pose = self.pose_handler.getPose() # Find our current configuration
#Plot the robot on the map in figure 1
if self.PLOT == True:
plt.figure(self.original_figure)
plt.clf()
self.plotPoly(self.realRobot, 'r')
self.plotPoly(self.robot, 'b')
plt.plot(pose[0],pose[1],'bo')
self.plotPioneer(self.original_figure)
if current_reg == next_reg and not last:
# No need to move!
self.drive_handler.setVelocity(0, 0) # So let's stop
return False
# Check if Vicon has cut out
if math.isnan(pose[2]):
print "no vicon pose"
print "WARNING: No Vicon data! Pausing."
#self.drive_handler.setVelocity(0, 0) # So let's stop
time.sleep(1)
return False
###This part is run when the robot goes to a new region, otherwise, the original map will be used.
if not self.previous_current_reg == current_reg:
#print 'getting into bug alogorithm'
#clean up the previous self.map_work
self.map_work = Polygon.Polygon()
# NOTE: Information about region geometry can be found in self.proj.rfi.regions
# create polygon list for regions other than the current_reg and the next_reg
self.map_work += self.map[self.proj.rfi.regions[current_reg].name]
self.map_work += self.map[self.proj.rfi.regions[next_reg].name]
# building current polygon and destination polygon
self.nextRegionPoly = self.map[self.proj.rfi.regions[next_reg].name]
self.currentRegionPoly = self.map[self.proj.rfi.regions[current_reg].name]
#set to zero velocity before finding the tranFace
self.drive_handler.setVelocity(0, 0)
if last:
transFace = None
else:
print "Current reg is " + str(self.proj.rfi.regions[current_reg].name.lower())
print "Next reg is "+ str(self.proj.rfi.regions[next_reg].name.lower())
for i in range(len(self.proj.rfi.transitions[current_reg][next_reg])):
pointArray_transface = [x for x in self.proj.rfi.transitions[current_reg][next_reg][i]]
transFace = asarray(map(self.coordmap_map2lab,pointArray_transface))
bundle_x = (transFace[0,0] +transFace[1,0])/2 #mid-point coordinate x
bundle_y = (transFace[0,1] +transFace[1,1])/2 #mid-point coordinate y
q_gBundle = hstack((q_gBundle,vstack((bundle_x,bundle_y))))
q_gBundle = q_gBundle.transpose()
# Find the closest face to the current position
max_magsq = 1000000
for tf in q_gBundle:
magsq = (tf[0] - pose[0])**2 + (tf[1] - pose[1])**2
if magsq < max_magsq:
connection = 0
tf = tf+(tf-asarray(self.currentRegionPoly.center()))/norm(tf-asarray(self.currentRegionPoly.center()))*2.1*self.PioneerLengthHalf
if not self.nextRegionPoly.covers(PolyShapes.Circle(self.PioneerLengthHalf*2,(tf[0],tf[1]))):
tf = tf-(tf-asarray(self.currentRegionPoly.center()))/norm(tf-asarray(self.currentRegionPoly.center()))*4.2*self.PioneerLengthHalf
if self.nextRegionPoly.covers(PolyShapes.Circle(self.PioneerLengthHalf*2,(tf[0],tf[1]))):
connection = 1
else:
connection = 1
if connection == 1:
pt1 = tf
max_magsq = magsq
transFace = 1
self.q_g[0] = pt1[0]
self.q_g[1] = pt1[1]
else:
sample = False
while not sample:
self.q_g[0],self.q_g[1] = self.nextRegionPoly.sample(random.random)
robo = PolyShapes.Circle(self.PioneerLengthHalf,(self.q_g[0],self.q_g[1]))
if not bool(robo - self.nextRegionPoly):
sample = True
"""
# Push the goal point to somewhere inside the next region to ensure the robot will get there.(CHECK!!)
self.q_g = self.q_g+(self.q_g-asarray(self.currentRegionPoly.center()))/norm(self.q_g-asarray(self.currentRegionPoly.center()))*3*self.PioneerLengthHalf
if not self.nextRegionPoly.isInside(self.q_g[0],self.q_g[1]):
self.q_g = self.q_g-(self.q_g-asarray(self.currentRegionPoly.center()))/norm(self.q_g-asarray(self.currentRegionPoly.center()))*6*self.PioneerLengthHalf
"""
#plot exiting point
if self.PLOT_EXIT == True:
plt.figure(self.overlap_figure)
plt.clf()
plt.plot(q_gBundle[:,0],q_gBundle[:,1],'ko' )
plt.plot(self.q_g[0],self.q_g[1],'ro')
plt.plot(pose[0],pose[1],'bo')
self.plotPioneer(self.overlap_figure,0)
if transFace is None:
print "ERROR: Unable to find transition face between regions %s and %s. Please check the decomposition (try viewing projectname_decomposed.regions in RegionEditor or a text editor)." % (self.proj.rfi.regions[current_reg].name, self.proj.rfi.regions[next_reg].name)
##################################################
#######check whether obstacle is detected#########
##################################################
#Update pose,update self.robot, self.realRobot orientation
self.robot.shift(pose[0]-self.prev_pose[0],pose[1]-self.prev_pose[1])
self.realRobot.shift(pose[0]-self.prev_pose[0],pose[1]-self.prev_pose[1])
self.robot.rotate(pose[2]-self.prev_pose[2],pose[0],pose[1])
self.realRobot.rotate(pose[2]-self.prev_pose[2],pose[0],pose[1])
self.prev_pose = pose
############################
########### STEP 1##########
############################
##Check whether obsRange overlaps with obstacle or the boundary (overlap returns the part of robot not covered by the region
# for real Pioneer robot
if self.system == 1:
# motion controller is not in boundary following mode
if self.boundary_following == False:
if self.robocomm.getReceiveObs() == False:
overlap = self.robot - ( self.map_work)
else:
overlap = self.robot - ( self.map_work - self.robocomm.getObsPoly())
else: #TRUE
# use a robot with full range all around it
Robot = PolyShapes.Circle(self.obsRange,(pose[0],pose[1]))
Robot.shift(self.shift*cos(pose[2]),self.shift*sin(pose[2]))
if self.robocomm.getReceiveObs() == False:
overlap = Robot - ( self.map_work)
else:
overlap = Robot - ( self.map_work - self.robocomm.getObsPoly())
# for ODE
else:
if self.boundary_following == False:
overlap = self.robot - (self.map_work)
else:#TRUE
overlap = self.robot - (self.map_work)
if self.boundary_following == False:
if bool(overlap): ## overlap of obstacles
#print "There MAYBE overlap~~ check connection to goal"
# check whether the real robot or and path to goal overlap with the obstacle
QGoalPoly= PolyShapes.Circle(self.PioneerLengthHalf,(self.q_g[0],self.q_g[1]))
path = PolyUtils.convexHull(self.realRobot + QGoalPoly)
if self.system == 1:
if self.robocomm.getReceiveObs() == False:
pathOverlap = path - ( self.map_work)
else:
pathOverlap = path - ( self.map_work - self.robocomm.getObsPoly())
else:
pathOverlap = path - ( self.map_work)
if bool(pathOverlap): # there is overlapping, go into bounding following mode
#print "There IS overlap"
self.q_hit = mat([pose[0],pose[1]]).T
self.boundary_following = True
#Generate m-line polygon
QHitPoly = PolyShapes.Circle(self.PioneerLengthHalf/4,(pose[0],pose[1]))
QGoalPoly= PolyShapes.Circle(self.PioneerLengthHalf/4,(self.q_g[0],self.q_g[1]))
self.m_line = PolyUtils.convexHull(QHitPoly + QGoalPoly)
#plot the first overlap
if self.PLOT_M_LINE == True:
plt.figure(self.overlap_figure)
plt.clf()
self.plotPoly(QHitPoly,'k')
self.plotPoly(QGoalPoly,'k')
self.plotPoly(overlap,'g')
self.plotPoly(self.m_line,'b')
plt.plot(pose[0],pose[1],'bo')
self.plotPioneer(self.overlap_figure,0)
else: ##head towards the q_goal
if self.system == 1:
if self.robocomm.getReceiveObs() == False: # wait for obstacles from Pioneer
vx = 0
vy = 0
else:
dis_cur = vstack((self.q_g[0],self.q_g[1]))- mat([pose[0],pose[1]]).T
vx = (dis_cur/norm(dis_cur)/3)[0,0]
vy = (dis_cur/norm(dis_cur)/3)[1,0]
else:
dis_cur = vstack((self.q_g[0],self.q_g[1]))- mat([pose[0],pose[1]]).T
vx = (dis_cur/norm(dis_cur)/3)[0,0]
vy = (dis_cur/norm(dis_cur)/3)[1,0]
#print "no obstacles 2-ODE true"
else: ##head towards the q_goal
if self.system == 1:
if self.robocomm.getReceiveObs() == False: # wait for obstacles from Pioneer
vx = 0
vy = 0
else:
dis_cur = vstack((self.q_g[0],self.q_g[1]))- mat([pose[0],pose[1]]).T
vx = (dis_cur/norm(dis_cur)/3)[0,0]
vy = (dis_cur/norm(dis_cur)/3)[1,0]
else:
dis_cur = vstack((self.q_g[0],self.q_g[1]))- mat([pose[0],pose[1]]).T
vx = (dis_cur/norm(dis_cur)/3)[0,0]
vy = (dis_cur/norm(dis_cur)/3)[1,0]
#print "no obstacles 1-ODE true"
if self.boundary_following == True:
self.q_hit_count += 1
# finding the point to go normal to (closest overlapping point)
j = 0
recheck = 0
while not bool(overlap):
                # cannot see the obstacle. Based on the location of the previous point, enlarge the sensing box on the left or on the right of the robot
j += 1
# finding whether the previous obstacle point is on the left side or the right side of the robot
# angle = angle of the previous point from the x-axis of the field
# omega = angle of the current Pioneer orientation from the x-axis of the field
                # cc = difference between angle and omega ( < pi = previous point on the left of robot, else on the right of robot)
x = self.prev_follow[0] -pose[0]
y = self.prev_follow[1] -pose[1]
angle = atan(y/x)
# convert angle to 2pi
if x > 0 and y > 0:
angle = angle
elif x < 0 and y > 0:
angle = pi + angle
elif x <0 and y < 0:
angle = pi + angle
else:
angle = 2*pi + angle
# convert pose to 2pi
if pose[2] < 0:
omega = (2*pi + pose[2])
else:
omega = pose[2]
if omega > angle:
cc = 2*pi - (omega - angle)
else:
cc = angle - omega
# on the left
#if angle - omega > 0 and angle - omega < pi:
if cc < pi:
#print "on the left, angle: "+ str(angle) + " omega: "+ str(omega)+ " angle-omega: "+ str(angle-omega)
Robot = PolyShapes.Rectangle(self.range*2*j,self.range*2*j)
Robot.shift(pose[0]-self.range*j*2,pose[1]-self.range*j)
Robot.rotate(pose[2]-pi/2,pose[0],pose[1])
# on the right
else:
#print "on the right, angle: "+ str(angle) + " omega: "+ str(omega)+ " angle-omega: "+ str(angle-omega)
Robot = PolyShapes.Rectangle(self.range*2*j,self.range*2*j)
Robot.shift(pose[0],pose[1]-self.range*j)
Robot.rotate(pose[2]-pi/2,pose[0],pose[1])
if self.system == 1:
overlap = Robot - ( self.map_work - self.robocomm.getObsPoly())
else:
overlap = Robot - ( self.map_work)
#self.plotPoly(Robot, 'm',2)
                # treat it as a dynamic obstacle and go straight to the goal point
if j >= 2:
dis_cur = vstack((self.q_g[0],self.q_g[1]))- mat([pose[0],pose[1]]).T
vx = (dis_cur/norm(dis_cur)/3)[0,0]
vy = (dis_cur/norm(dis_cur)/3)[1,0]
overlap = None
self.overlap = overlap
self.q_hit_count = 0
self.boundary_following = False
self.m_line = None
self.drive_handler.setVelocity(vx,vy, pose[2])
RobotPoly = PolyShapes.Circle(self.PioneerLengthHalf+0.06,(pose[0],pose[1])) ###0.05
departed = not self.currentRegionPoly.overlaps(self.realRobot)
#departed = not self.currentRegionPoly.overlaps(self.realRobot) and (not (self.nextRegionPoly.overlaps(self.realRobot) and not self.nextRegionPoly.covers(self.realRobot)))
arrived = self.nextRegionPoly.covers(self.realRobot)
return arrived
##extra box plotting in figure 1#
if self.PLOT_OVERLAP == True:
plt.figure(self.original_figure)
plt.clf()
self.plotPoly(self.realRobot, 'r')
self.plotPoly(self.robot, 'b')
self.plotPoly(overlap,'g',3)
plt.plot(pose[0],pose[1],'bo')
self.plotPioneer(self.original_figure)
# find the closest point on the obstacle to the robot
overlap_len = len(overlap)
for j in range(overlap_len):
BoundPolyPoints = asarray(overlap[j])
for i in range(len(BoundPolyPoints)-1):
bundle_x = (BoundPolyPoints[i,0] +BoundPolyPoints[1+i,0])/2 #mid-point coordinate x
bundle_y = (BoundPolyPoints[i,1] +BoundPolyPoints[1+i,1])/2 #mid-point coordinate y
q_overlap = hstack((q_overlap,vstack((bundle_x,bundle_y))))
bundle_x = (BoundPolyPoints[len(BoundPolyPoints)-1,0] +BoundPolyPoints[0,0])/2 #mid-point coordinate x
bundle_y = (BoundPolyPoints[len(BoundPolyPoints)-1,1] +BoundPolyPoints[0,1])/2 #mid-point coordinate y
q_overlap = hstack((q_overlap,vstack((bundle_x,bundle_y))))
q_overlap = q_overlap.transpose()
pt = self.closest_pt([pose[0],pose[1]], vstack((q_overlap,asarray(PolyUtils.pointList(overlap)))))
self.prev_follow = pt
#calculate the vector to follow the obstacle
normal = mat([pose[0],pose[1]] - pt)
#find the distance from the closest point
distance = norm(normal)
velocity = normal * self.trans_matrix
vx = (velocity/norm(velocity)/3)[0,0]
vy = (velocity/norm(velocity)/3)[0,1]
# push or pull the robot towards the obstacle depending on whether the robot is close or far from the obstacle.
turn = pi/4*(distance-0.5*self.obsRange)/(self.obsRange) ### change to 0.6 from 0.5 for more allowance in following
corr_matrix = mat([[cos(turn),-sin(turn)],[sin(turn),cos(turn)]])
v = corr_matrix*mat([[vx],[vy]])
vx = v[0,0]
vy = v[1,0]
##plotting overlap on figure 2
if self.PLOT_OVERLAP == True:
plt.figure(self.overlap_figure)
plt.clf()
self.plotPoly(self.m_line,'b');
self.plotPoly(overlap,'r');
plt.plot(pt[0],pt[1],'ro')
plt.plot(pose[0],pose[1],'bo')
self.plotPioneer(self.overlap_figure,0)
## conditions that the loop will end
#for 11111
RobotPoly = PolyShapes.Circle(self.PioneerLengthHalf+0.06,(pose[0],pose[1])) ####0.05
departed = not self.currentRegionPoly.overlaps(self.realRobot)
#departed = not self.currentRegionPoly.overlaps(self.realRobot) and (not (self.nextRegionPoly.overlaps(self.realRobot) and not self.nextRegionPoly.covers(self.realRobot)))
arrived = self.nextRegionPoly.covers(self.realRobot)
#for 33333
reachMLine= self.m_line.overlaps(RobotPoly)
# 1.reached the next region
if arrived:
self.boundary_following = False
self.m_line = None
self.q_hit_count = 0
print "arriving at the next region. Exit boundary following mode"
vx = 0
vy = 0
"""
# 2.q_hit is reencountered
elif norm(self.q_hit-mat([pose[0],pose[1]]).T) < 0.05 and self.q_hit_count > self.q_hit_Thres:
print "reencounter q_hit. cannot reach q_goal"
vx = 0
vy = 0
"""
# 3.m-line reencountered
elif reachMLine:
#print >>sys.__stdout__, "m-line overlaps RoboPoly, m-line" + str(norm(self.q_g-self.q_hit)-2*self.obsRange) + " distance: " + str(norm(self.q_g-mat([pose[0],pose[1]]).T))
if norm(self.q_g-mat([pose[0],pose[1]]).T) < norm(self.q_g-self.q_hit)-2*self.obsRange:
#print "m-line overlaps RoboPoly, m-line" + str(norm(self.q_g-self.q_hit)-2*self.obsRange) + " distance: " + str(norm(self.q_g-mat([pose[0],pose[1]]).T))
#print "leaving boundary following mode"
self.boundary_following = False
self.m_line = None
self.q_hit_count = 0
leaving = False
# turn the robot till it is facing the goal
while not leaving:
x = self.q_g[0] -self.pose_handler.getPose()[0]
y = self.q_g[1] -self.pose_handler.getPose()[1]
angle = atan(y/x)
if x > 0 and y > 0:
angle = angle
elif x < 0 and y > 0:
angle = pi + angle
elif x <0 and y < 0:
angle = pi + angle
else:
angle = 2*pi + angle
if self.pose_handler.getPose()[2] < 0:
omega = (2*pi + self.pose_handler.getPose()[2])
#print >>sys.__stdout__,"omega<0: "+ str(omega)
else:
omega = self.pose_handler.getPose()[2]
#print >>sys.__stdout__,"omega: "+ str(omega)
if omega > angle:
cc = 2*pi - (omega - angle)
else:
cc = angle - omega
# angle(goal point orientation) on the left of omega(robot orientation)
#if angle - omega > 0 and angle - omega < pi:
if cc < pi:
#print>>sys.__stdout__, "turn left"
vx,vy = self.turnLeft(cc)
# on the right
else:
#print>>sys.__stdout__, "turn right"
vx, vy = self.turnRight(2*pi-cc)
#print>>sys.__stdout__, "omega: "+ str(omega) + " angle: "+ str(angle) + " (omega-angle): " + str(omega-angle)
self.drive_handler.setVelocity(vx,vy, self.pose_handler.getPose()[2])
if omega - angle < pi/6 and omega - angle > -pi/6:
leaving = True
#Check whether the robot can leave now (the robot has to be closer to the goal than when it is at q_hit to leave)
QGoalPoly= PolyShapes.Circle(self.PioneerLengthHalf,(self.q_g[0],self.q_g[1]))
path = PolyUtils.convexHull(self.realRobot + QGoalPoly)
if self.system == 1:
if self.robocomm.getReceiveObs() == False:
pathOverlap = path - ( self.map_work)
else:
pathOverlap = path - ( self.map_work - self.robocomm.getObsPoly())
else:
pathOverlap = path - ( self.map_work)
if not bool(pathOverlap):
#print "There is NO MORE obstacles in front for now."
# check if the robot is closer to the goal compared with q_hit
if norm(self.q_hit-mat(self.q_g).T) > norm(mat([pose[0],pose[1]]).T-mat(self.q_g).T) :
#print "The robot is closer than the leaving point. The robot can leave"
self.boundary_following = False
self.m_line = None
self.q_hit_count = 0
dis_cur = vstack((self.q_g[0],self.q_g[1]))- mat([pose[0],pose[1]]).T
vx = (dis_cur/norm(dis_cur)/3)[0,0]
vy = (dis_cur/norm(dis_cur)/3)[1,0]
else:
lala = 1
#print "not leaving bug algorithm. difference(-farther) =" + str(norm(self.q_hit-mat(self.q_g).T) - norm(mat([pose[0],pose[1]]).T-mat(self.q_g).T))
"""
# Pass this desired velocity on to the drive handler
# Check if there are obstacles within 0.35m of the robot, if so, stop the robot
if self.system == 1:
if self.robocomm.getSTOP() == True:
vx = 0
vy = 0
"""
#vx = 0
#vy = 0
self.overlap = overlap
self.drive_handler.setVelocity(vx,vy, pose[2])
# Set the current region as the previous current region(for checking whether the robot has arrived at the next region)
self.previous_current_reg = current_reg
# check whether robot has arrived at the next region
RobotPoly = PolyShapes.Circle(self.PioneerLengthHalf+0.06,(pose[0],pose[1])) ###0.05
#departed = not self.currentRegionPoly.overlaps(self.realRobot) and (not (self.nextRegionPoly.overlaps(self.realRobot) and not self.nextRegionPoly.covers(self.realRobot)))
departed = not self.currentRegionPoly.overlaps(self.realRobot)
arrived = self.nextRegionPoly.covers(self.realRobot)
if arrived:
self.q_hit_count = 0
self.boundary_following = False
self.m_line = None
if departed and (not arrived) and (time.time()-self.last_warning) > 0.5:
print "WARNING: Left current region but not in expected destination region"
# Figure out what region we think we stumbled into
for r in self.proj.rfi.regions:
pointArray = [self.coordmap_map2lab(x) for x in r.getPoints()]
vertices = mat(pointArray).T
if is_inside([pose[0], pose[1]], vertices):
#print "I think I'm in " + r.name
#print pose
break
self.last_warning = time.time()
return arrived
def plotPioneer(self,number,y = 1):
"""
Plotting regions and obstacles with matplotlib.pyplot
number: figure number (see on top)
y = 0 : plot self.map_work instead of self.map
"""
if self.operate_system == 1:
if not plt.isinteractive():
plt.ion()
plt.hold(True)
for regionName,regionPoly in self.map.iteritems():
self.plotPoly(regionPoly,'k')
if self.system == 1:
if bool(self.robocomm.getObsPoly()):
self.plotPoly(self.robocomm.getObsPoly(),'k')
if self.operate_system == 1:
plt.figure(number).canvas.draw()
def turnLeft(self,a):
"""
Turn left with angle a
"""
vx = cos(self.pose_handler.getPose()[2]+a)* self.PioneerLengthHalf;
vy = sin(self.pose_handler.getPose()[2]+a)* self.PioneerLengthHalf;
return vx,vy
def turnRight(self,a):
"""
Turn right with angle a
"""
vx = cos(self.pose_handler.getPose()[2]-a)* self.PioneerLengthHalf;
vy = sin(self.pose_handler.getPose()[2]-a)* self.PioneerLengthHalf;
return vx,vy
def euclid(self,pt1, pt2):
"""
for calculating the minimum distance from a point on the obstacle
"""
pairs = zip(pt1, pt2) # Form pairs in corresponding dimensions
sum_sq_diffs = sum((a - b)**2 for a, b in pairs) # Find sum of squared diff
return (sum_sq_diffs)**(float(1)/2) # Take sqrt to get euclidean distance
def closest_pt(self,pt, vec):
"""
Returns the point in vec with minimum euclidean distance to pt
"""
return min(vec, key=lambda x: self.euclid(pt, x))
def plotPoly(self,c,string,w = 1):
"""
Plot polygons inside the boundary
c = polygon to be plotted with matlabplot
string = string that specify color
w = width of the line plotting
"""
if bool(c):
for i in range(len(c)):
#toPlot = Polygon.Polygon(c.contour(i))
toPlot = Polygon.Polygon(c.contour(i)) & self.all
if bool(toPlot):
for j in range(len(toPlot)):
#BoundPolyPoints = asarray(PolyUtils.pointList(toPlot.contour(j)))
BoundPolyPoints = asarray(PolyUtils.pointList(Polygon.Polygon(toPlot.contour(j))))
if self.operate_system == 2:
self.ax.plot(BoundPolyPoints[:,0],BoundPolyPoints[:,1],string,linewidth=w)
self.ax.plot([BoundPolyPoints[-1,0],BoundPolyPoints[0,0]],[BoundPolyPoints[-1,1],BoundPolyPoints[0,1]],string,linewidth=w)
else:
plt.plot(BoundPolyPoints[:,0],BoundPolyPoints[:,1],string,linewidth=w)
plt.plot([BoundPolyPoints[-1,0],BoundPolyPoints[0,0]],[BoundPolyPoints[-1,1],BoundPolyPoints[0,1]],string,linewidth=w)
def getOldRegionName(self, regionName):
"""
This function returns the old region name (eg: r1, r2, other etc) when given the new region name (eg: p1, p2..)
"""
for oldRegionName,newRegionNames in self.proj.regionMapping.iteritems():
if regionName in newRegionNames:
return oldRegionName
print 'Cannot find region with sub-region %s' % regionName
return None
def createRegionPolygon(self,region,hole = None):
"""
This function takes in the region points and make it a Polygon.
"""
if hole == None:
pointArray = [x for x in region.getPoints()]
else:
pointArray = [x for x in region.getPoints(hole_id = hole)]
pointArray = map(self.coordmap_map2lab, pointArray)
regionPoints = [(pt[0],pt[1]) for pt in pointArray]
formedPolygon= Polygon.Polygon(regionPoints)
return formedPolygon
def data_gen(self):
self.ax.cla()
self.plotPioneer(1)
self.plotPoly(self.realRobot, 'r')
self.plotPoly(self.robot, 'b')
pose = self.pose_handler.getPose()
self.ax.plot(pose[0],pose[1],'bo')
self.ax.plot(self.q_g[0],self.q_g[1],'ro')
self.plotPoly(self.overlap,'g')
self.plotPoly(self.m_line,'b')
yield(pose[0],pose[1])
self.ax.plot(self.prev_follow[0],self.prev_follow[1],'ko')
def jplot(self):
ani = animation.FuncAnimation(self.fig, self.scope.update, self.data_gen)
plt.show()
class _Scope:
def __init__(self, ax, motion, maxt=2, dt=0.02):
self.i = 0
self.ax = ax
self.line, = self.ax.plot(1)
self.ax.set_ylim(0, 1)
self.motion = motion
def update(self,data):
(data1) = self.motion.data_gen()
a = data1.next()
self.line.set_data(a)
self.ax.relim()
self.ax.autoscale()
return self.line,
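# Standalone sketch of the velocity correction used in the boundary-following
# code above (illustrative only; the sample arguments are made up). The
# commanded velocity is rotated by an angle that is positive when the robot is
# farther than half its sensing range and negative when it is closer, which is
# what pushes it toward or pulls it away from the obstacle.
def _demo_boundary_correction(vx, vy, distance, obsRange):
    from math import cos, sin, pi
    turn = pi/4 * (distance - 0.5*obsRange) / obsRange   # signed correction angle
    cx = cos(turn)*vx - sin(turn)*vy                     # rotate (vx, vy) by turn
    cy = sin(turn)*vx + cos(turn)*vy
    return cx, cy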
| gpl-3.0 |
mgruben/morse-code | python/MorseCodeDecoder.py | 1 | 16481 | import re
import matplotlib.pyplot as plt
import seaborn as sns
MORSE_CODE = {
".-": "A",
"-...": "B",
"-.-.": "C",
"-..": "D",
".": "E",
"..-.": "F",
"--.": "G",
"....": "H",
"..": "I",
".---": "J",
"-.-": "K",
".-..": "L",
"--": "M",
"-.": "N",
"---": "O",
".--.": "P",
"--.-": "Q",
".-.": "R",
"...": "S",
"-": "T",
"..-": "U",
"...-": "V",
".--": "W",
"-..-": "X",
"-.--": "Y",
"--..": "Z",
"-----": "0",
".----": "1",
"..---": "2",
"...--": "3",
"....-": "4",
".....": "5",
"-....": "6",
"--...": "7",
"---..": "8",
"----.": "9",
"...---...": "SOS"
}
heyJude = ".... . -.-- .--- ..- -.. ."
JudeBits = "00011001100110011000000110000001111110011001111110011111100000000000000110011111100111111001111110000001100110011111100000011111100110011000000110000"
fuzzyBits = "0000000011011010011100000110000001111110100111110011111100000000000111011111111011111011111000000101100011111100000111110011101100000100000"
fuzzyTest = "00000000000000011111111000000011111111111100000000000111111111000001111111110100000000111111111111011000011111111011111111111000000000000000000011111111110000110001111111111111000111000000000001111111111110000111111111100001100111111111110000000000111111111111011100001110000000000000000001111111111010111111110110000000000000001111111111100001111111111110000100001111111111111100000000000111111111000000011000000111000000000000000000000000000011110001111100000111100000000111111111100111111111100111111111111100000000011110011111011111110000000000000000000000111111111110000000011111000000011111000000001111111111110000000001111100011111111000000000111111111110000011000000000111110000000111000000000011111111111111000111001111111111001111110000000000000000000001111000111111111100001111111111111100100000000001111111100111111110111111110000000011101111111000111000000001001111111000000001111111111000000000111100001111111000000000000011111111100111111110111111111100000000000111111110000001100000000000000000000111111101010000010000001111111100000000011111000111111111000000111111111110011111111001111111110000000011000111111110000111011111111111100001111100001111111100000000000011110011101110001000111111110000000001111000011111110010110001111111111000000000000000000111111111110000000100000000000000000011110111110000001000011101110000000000011111111100000011111111111100111111111111000111111111000001111111100000000000001110111111111111000000110011111111111101110001111111111100000000111100000111100000111111111100000111111111111000000011111111000000000001000000111100000001000001111100111111111110000000000000000000010001111111100000011111111100000000000000100001111111111110111001111111111100000111111100001111111111000000000000000000000000011100000111111111111011110000000010000000011111111100011111111111100001110000111111111111100000000000000111110000011111001111111100000000000011100011100000000000011111000001111111111101000000001110000000000000000000000000000111110010000000000111111111000011111111110000000000111111111111101111111111100000000010000000000000011111111100100001100000000000000111100111100000000001100000001111111111110000000011111111111000000000111100000000000000000000111101111111111111000000000001111000011111000011110000000001100111111100111000000000100111000000000000111110000010000011111000000000000001111111111100000000110111111111100000000000000111111111111100000111000000000111111110001111000000111111110111111000000001111000000000010000111111111000011110001111111110111110000111111111111000000000000000000000000111111111110000000111011111111100011111110000000001111111110000011111111100111111110000000001111111111100111111111110000000000110000000000000000001000011111111110000000001111111110000000000000000000000011111111111111000000111111111000001111111110000000000111111110000010000000011111111000011111001111111100000001110000000011110000000001011111111000011111011111111110011011111111111000000000000000000100011111111111101111111100000000000000001100000000000000000011110010111110000000011111111100000000001111100011111111111101100000000111110000011110000111111111111000000001111111111100001110111111111110111000000000011111111101111100011111111110000000000000000000000000010000111111111100000000001111111110111110000000000000000000000110000011110000000000001111111111100110001111111100000011100000000000111110000000011111111110000011111000001111000110000000011100000000000000111100001111111111100000111000000001111111111000000111111111100110000000001111000001111111100011100001111111110000010011111
111110000000000000000000111100000011111000001111000000000111111001110000000011111111000100000000000011111111000011001111111100000000000110111000000000000111111111111000100000000111111111110000001111111111011100000000000000000000000000"
class Cluster(object):
def __init__(self, loc):
self.currentPoints = []
self.centroid = None
self.previousPoints = []
self.location = loc
## Methods for claiming currentPoints and calculating centroid.
def addPoint(self, point):
self.currentPoints.append(point)
def didChange(self):
if len(self.currentPoints) != len(self.previousPoints):
return True
else:
return not (self.currentPoints == self.previousPoints)
def clearPoints(self):
self.previousPoints = self.currentPoints[:]
del self.currentPoints[:]
def update(self):
'''
After new points have been assigned to this cluster, this method
calculates the new centroid of the cluster and moves the cluster
to that location.
'''
if len(self.currentPoints) > 0:
s = 0.0
for p in self.currentPoints:
s += p
self.centroid = s / len(self.currentPoints)
self.location = self.centroid
## Getter methods.
def getLocation(self):
return self.location
def getDistance(self, point):
return abs(self.location - point)
## Printer methods.
def printCentroid(self):
print(self.centroid)
def printLocation(self):
print(self.location)
def printPoints(self):
result = ""
for point in self.currentPoints:
result += str(point) + " "
print(result[:-1])
def printPreviousPoints(self):
result = ""
for point in self.previousPoints:
result += str(point) + " "
print(result[:-1])
class KMeans(object):
def __init__(self, stream, numClusters):
self.clusters = []
self.bitCollection = []
self.timeUnits = [0,0,0]
self.dist = {}
self.keys = []
self.converged = False
stream = stream.strip("0")
## Populate this.bitCollection.
if len(stream) == 0:
self.bitCollection.append("")
else:
ones = re.split("0+", stream)
zeros = re.split("1+", stream)
if len(zeros) == 0:
self.bitCollection.append(ones[0])
else:
for i in range(len(ones) - 1):
self.bitCollection.append(ones[i])
self.bitCollection.append(zeros[i + 1])
self.bitCollection.append(ones[-1])
## Populate this.dist.
for bit in self.bitCollection:
l = len(bit)
if l in self.dist:
self.dist[l] += 1
else:
self.dist[l] = 1
self.keys = sorted(self.dist.keys())
## Handle short inputs (i.e. displays fewer than three timing units)
if len(self.keys) == 1 or len(self.keys) == 2:
self.timeUnits[0] = self.keys[0]
self.timeUnits[1] = self.keys[0] * 3
self.timeUnits[2] = self.keys[0] * 7
self.converged = True
## Handle long inputs via KMeans
else:
self.initializeClusters()
def initializeClusters(self):
'''
Populates this.clusters with this.numClusters Cluster objects,
whose initial locations are from this.keys (the minimum, the
maximum, and the middle between the two).
'''
self.clusters.append(Cluster(float(self.keys[0])))
self.clusters.append(Cluster((float(self.keys[0]) + float(self.keys[-1])) / 2))
self.clusters.append(Cluster(float(self.keys[-1])))
def assignToClosestCluster(self):
'''
Assigns cluster-labels to each length-point from the fuzzy input,
which is subsequently used by the clusters to re-calculate their
centroids and move accordingly.
'''
self.clear()
for key in self.keys:
bestCluster = Cluster(5000)
closest = 10000000.0
for c in self.clusters:
d = c.getDistance(key)
if d < closest:
closest = d
bestCluster = c
for i in range(self.dist[key]):
bestCluster.addPoint(key)
def calculateTimeUnits(self):
for i in range(3):
self.timeUnits[i] = self.clusters[i].getLocation()
def clear(self):
for c in self.clusters:
c.clearPoints()
def converge(self):
'''
Assigns the closest Cluster to each point, calculates the centroid
for those Clusters based off of those points, moves the Clusters
to their respective centroids, and repeats until assignment on the next
iteration is the same.
'''
if not self.converged:
self.assignToClosestCluster()
while not self.converged:
self.update()
self.assignToClosestCluster()
if not self.didChange():
self.converged = True
self.calculateTimeUnits()
def didChange(self):
for c in self.clusters:
if c.didChange():
return True
return False
def update(self):
for c in self.clusters:
c.update()
## Getter methods.
def getTimeUnit(self, index):
return self.timeUnits[index]
## Printer methods.
def printBitCollection(self):
for bit in self.bitCollection:
print(bit)
def printClusterPoints(self):
for c in self.clusters:
print("Points for cluster at " + str(c.getLocation()))
c.printPoints()
def printClusters(self):
for c in self.clusters:
print(c.getLocation())
def printDidChange(self):
        print(self.didChange())
def printDistances(self):
for key in self.keys:
best = -1.0
closest = 10000000.0
for c in self.clusters:
d = c.getDistance(key)
print("From cluster at " + str(c.getLocation()) + \
"to point at " + str(key) + " is: " + str(d))
if d < closest:
closest = d
best = c.getLocation()
print("Closest to: " + str(best))
def printDistribution(self):
for key in self.keys:
print("Length: " + str(key) + " occurred " + str(self.dist[key]) + " times")
def printKeys(self):
for key in self.keys:
print(key)
def printTimeUnits(self):
for t in self.timeUnits:
print(t)
## Plotter methods.
def plotDistribution(self):
xmax = max(self.keys)
ymax = max(self.dist.values())
plt.figure()
plt.bar(self.dist.keys(), self.dist.values())
plt.title("Bit Length Frequencies")
plt.xlabel("Number of Characters")
plt.ylabel("Frequency")
plt.axis([0, xmax, 0, ymax])
plt.axvline((self.getTimeUnit(0) + self.getTimeUnit(1)) / 2, color='b', linestyle='dashed', linewidth=2)
plt.axvline((self.getTimeUnit(1) + self.getTimeUnit(2)) / 2, color='b', linestyle='dashed', linewidth=2)
plt.show()
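# Minimal usage sketch for the classes above (illustrative only, not part of
# the original solution): cluster the run lengths of a fuzzy stream and read
# back the three characteristic durations (dot, dash, long pause), which for
# Morse timing should sit roughly in a 1:3:7 ratio.
def _demo_kmeans_timing(bits=fuzzyBits):
    km = KMeans(bits, 3)
    km.converge()
    return [km.getTimeUnit(i) for i in range(3)]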
# Required as per problem API
def decodeBitsAdvanced(fuzzyBits):
'''
input bits, a string of 0s and 1s with variable timing
returns string, a morse code message
'''
morse = ""
fuzzyBits = fuzzyBits.strip("0")
km = KMeans(fuzzyBits, 3)
km.converge()
thresh13 = (km.getTimeUnit(0) + km.getTimeUnit(1)) / 2
thresh37 = (km.getTimeUnit(1) + km.getTimeUnit(2)) / 2
ones = re.split("0+", fuzzyBits)
zeros = re.split("1+", fuzzyBits)
for i in range(len(zeros) - 1):
morse += nextTelePairFuzzy(ones[i], zeros[i + 1], thresh13, thresh37)
return morse
# Required as per problem API
def decodeBits(bits):
'''
input bits, a string of 0s and 1s with fixed timing
returns string, a morse code message
'''
morse = ""
bits = bits.strip("0")
tu = getTimeUnit(bits)
ones = re.split("0+", bits)
zeros = re.split("1+",bits)
for i in range(len(zeros) - 1):
morse += nextTelePair(ones[i], zeros[i + 1], tu)
return morse
# Required as per problem API
def decodeMorse(morseCode):
'''
input morseCode, a string of dots, dashes, and spaces
returns string, a human-readable message
'''
result = ""
morseCode = morseCode.replace(" ", " SPACE ")
morses = morseCode.split()
for morse in morses:
if morse == "SPACE":
result += " "
else:
try:
result += MORSE_CODE[morse]
except KeyError:
result += "(KEYERR: "+morse+")"
return result
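# Usage sketch chaining the three decoders above (illustrative only): bits are
# turned into dots and dashes, then into text. JudeBits and fuzzyBits are the
# sample streams defined near the top of this file; per their names the
# expected output is along the lines of "HEY JUDE".
def _demo_decode_pipeline():
    print(decodeMorse(decodeBits(JudeBits)))            # clean, fixed timing
    print(decodeMorse(decodeBitsAdvanced(fuzzyBits)))   # noisy, variable timing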
# Helper function for 3 of 3 in the series
def nextTelePairFuzzy(one, zero, thresh13, thresh37):
tele = nextTeleSingleFuzzy(one, thresh13)
if len(zero) >= thresh13 and len(zero) < thresh37:
tele += " "
elif len(zero) >= thresh37:
tele += " "
return tele
# Helper function for 3 of 3 in the series
def nextTeleSingleFuzzy(one, thresh13):
tele = ""
if len(one) <= thresh13:
tele += "."
else:
tele += "-"
return tele
# Helper function for 2 of 3 in the series
def nextTelePair(one, zero, tu):
tele = nextTeleSingle(one, tu)
if len(zero) == 3 * tu:
tele += " "
elif len(zero) == 7 * tu:
tele += " "
return tele
# Helper function for 2 of 3 in the series
def nextTeleSingle(one, tu):
tele = ""
if len(one) == tu:
tele += "."
elif len(one) == 3 * tu:
tele += "-"
return tele
# Helper function for 2 of 3 in the series
def getTimeUnit(bits):
'''
input bits, a string of 0s and 1s with fixed timing
returns int, the single timing unit (i.e. dot or shortest pause)
'''
if bits == "":
return 0
o = re.split("0+", bits)
if len(o) == 1:
return len(bits)
if o == ['','']:
return 0
os = len(o[0])
for elem in o:
if len(elem) != os:
os = min(os,len(elem))
break
z = re.split("1+", bits)
zs = len(z[1])
for i in range(1, len(z) - 1):
if zs != len(z[i]):
zs = min(zs, len(z[i]))
break
return min(os, zs)
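# Worked example: for the already-stripped stream "1100110011" the runs of 1s
# are ['11', '11', '11'] and the inner runs of 0s are ['00', '00'], so the
# shortest run on either side has length 2 and getTimeUnit("1100110011") == 2.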
# Function to rapidly iterate through possible thresholds;
# not a valid way of solving 3 of 3.
def bruteThreshholds(fuzzyBits):
fuzzyBits = fuzzyBits.strip("0")
f = open('BruteForceDump', 'w')
ones = re.split("0+", fuzzyBits)
zeros = re.split("1+", fuzzyBits)
lowerStart = 7
lowerStop = 8 # exclusive
lowerStep = 0.25
upperStart = 15
upperStop = 16 # exclusive
upperStep = 0.25
for lower in range(0, int((lowerStop - lowerStart) / lowerStep), 1):
for upper in range(0, int((upperStop - upperStart) / upperStep), 1):
morse = ""
for i in range(len(zeros) - 1):
morse += nextTelePairFuzzy(ones[i], zeros[i + 1],
lowerStart + lower * lowerStep, upperStart + upper * upperStep)
f.write(str(lowerStart + lower * lowerStep) + " " + str(upperStart + upper * upperStep) + '\n')
f.write(decodeMorse(morse))
f.write('\n\n')
| gpl-3.0 |
sanja7s/SR_Twitter | src_general/explain_FORMATION_DELETION_REL.py | 1 | 6415 | #!/usr/bin/env python
# a bar plot with errorbars
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse, Polygon
from pylab import *
width = 0.28 # the width of the bars
font = {'family' : 'sans-serif',
'variant' : 'normal',
'weight' : 'light',
'size' : 14}
matplotlib.rc('font', **font)
# plot with various axes scales
plt.figure(1)
fig = gcf()
def plot_bars_FORMATION_STRONG_REL(PersistingMeans, PersistingStd, Means, Std, PERSreal, PERSstd):
ind = np.arange(N) # the x locations for the groups
#width = 0.3 # the width of the bars
#ax = plt.subplot(321)
ax = plt.subplot2grid((1,2),(0, 0))
#rects1 = ax.bar(ind-0.2, PersistingMeans, width, color='c', yerr=PersistingStd, align='center')
#rects2 = ax.bar(ind+0.2, Means, width, color='cyan', yerr=Std, align='center')
rects1 = ax.bar(ind-width, PersistingMeans, width, color='darkred', \
align='center', yerr=PersistingStd, linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
rects2 = ax.bar(ind, Means, width, color='lightcoral', \
yerr=Std, align='center', linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
rects3 = ax.bar(ind+width, PERSreal, width, color='r',\
yerr=PERSstd, align='center',linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
ax.legend((rects1[0], rects2[0], rects3[0]), \
('Formed and persisting', \
'Formed and non-persisting', 'Persisting average'),\
frameon=False)
# add some text for labels, title and axes ticks
#ax.set_title('Relative status (strong contacts)')
ax.set_xticks(ind )
ax.set_xticklabels(('Before', 'At formation', 'After'))
ax.set_ylim([-0.5, 5])
ax.set_yticks((0,5))
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%.2f' % float(height),
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
autolabel(rects3)
return plt
N = 3
##########################################################################
# NON PERSISTING LINKS
# STRONG contacts REL
formationDeletionMeans = (1.12747979427, 1.56808719079, 1.62160176341)
formationDeletionStd = (1.35650452374, 1.71205560699, 1.83913259462)
# PERSISTING LINKS
# STRONG contacts REL
formationNodeletionMeans = (0.964889222681, 1.44874202028, 1.68794592565)
formationNodeletionStd = (1.30256068643, 1.64860382968, 1.94388833634)
SRMeans = (0.856632, 0.906697, 0.995124, 1.010403, 1.031534)
SRStd = (1.114944, 1.194131, 1.283704, 1.245234, 1.317081)
SRMeansS = (0.96007799999999988,0.96007799999999988,0.96007799999999988)
SRStdS = (1.2310188,1.2310188,1.2310188)
plt1 = plot_bars_FORMATION_STRONG_REL(formationNodeletionMeans, formationNodeletionStd,\
formationDeletionMeans, formationDeletionStd, SRMeansS, SRStdS)
def plot_bars_DELETION_STRONG_REL(PersistingMeans, PersistingStd, Means, Std, PERSreal, PERSstd):
ind = np.arange(N) # the x locations for the groups
#width = 0.3 # the width of the bars
#ax = plt.subplot(321)
ax = plt.subplot2grid((1,2),(0, 1))
#rects1 = ax.bar(ind-0.2, PersistingMeans, width, color='c', yerr=PersistingStd, align='center')
#rects2 = ax.bar(ind+0.2, Means, width, color='cyan', yerr=Std, align='center')
rects1 = ax.bar(ind-width, PersistingMeans, width, color='c', \
align='center', yerr=PersistingStd, linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
rects2 = ax.bar(ind, Means, width, color='cyan', \
yerr=Std, align='center', linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
rects3 = ax.bar(ind+width, PERSreal, width, color='r',\
yerr=PERSstd, align='center',linewidth=0,\
error_kw=dict(ecolor='gray', lw=1.5, capsize=2.7, capthick=1))
ax.legend((rects1[0], rects2[0], rects3[0]), \
('Persisting decommissioned', \
'Non-persisting decommissioned', 'Persisting average'),\
loc='best',frameon=False)
# add some text for labels, title and axes ticks
#ax.set_title('Relative status (strong contacts)')
ax.set_xticks(ind )
ax.set_xticklabels(('Before', 'At decommission', 'After'))
ax.set_ylim([-0.5, 5])
ax.set_yticks((0,5))
def autolabel(rects):
# attach some text labels
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.05*height,
'%.2f' % float(height),
ha='center', va='bottom')
autolabel(rects1)
autolabel(rects2)
autolabel(rects3)
return plt
##########################################################################
# NON PERSISTING LINKS
# STRONG contacts REL
#deletionFormationMeans = (1.35860783095, 1.40335612181, 1.38222498446)
#deletionFormationStd = (1.39698763227, 1.515042018, 1.6001731639)
deletionFormationMeans = (1.21614009307, 1.58645603723, 1.613397012)
deletionFormationStd = (1.39228801763, 1.73298601092, 1.84822380219)
# PERSISTING LINKS
#deletionNoformationMeans = (1.16101995042, 1.52591193484, 1.54066816196)
#deletionNoformationStd = (1.36105887603, 1.69996084625, 1.80123581372)
deletionNoformationMeans = (1.09195402299, 1.16457680251, 1.09717868339)
deletionNoformationStd = (1.25857893939, 1.33146910699, 1.31900439894)
SRMeans = (0.856632, 0.906697, 0.995124, 1.010403, 1.031534)
SRStd = (1.114944, 1.194131, 1.283704, 1.245234, 1.317081)
SRMeansS = (0.96007799999999988,0.96007799999999988,0.96007799999999988)
SRStdS = (1.2310188,1.2310188,1.2310188)
plt1 = plot_bars_DELETION_STRONG_REL(deletionNoformationMeans, deletionNoformationStd,\
deletionFormationMeans, deletionFormationStd, SRMeansS, SRStdS)
##########################################################################
plt.tight_layout()
fig = plt.gcf()
fig.set_size_inches(12.4,4.5)
plt.tight_layout()
#plt.figtext(0.20, 0.49, 'Relative status of the pair: weak contacts')
#plt.figtext(0.27, 0.973, 'Relative status of the pair: strong contacts')
fig.suptitle('Relative status (strong contacts)', verticalalignment='center', horizontalalignment='center', size = 16)
#fig.suptitle('Sum including weak contacts', verticalalignment='center', y=0.5, horizontalalignment='center', size = 16)
plt.savefig("/home/sscepano/Projects7s/Twitter-workspace/DATA/General/explain_FORMATION_DELETION_REL.eps", dpi=710)
| mit |
MarkRegalla27/ThinkStats2 | code/hypothesis.py | 75 | 10162 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2010 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import nsfg
import nsfg2
import first
import thinkstats2
import thinkplot
import copy
import random
import numpy as np
import matplotlib.pyplot as pyplot
class CoinTest(thinkstats2.HypothesisTest):
"""Tests the hypothesis that a coin is fair."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
heads, tails = data
test_stat = abs(heads - tails)
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
heads, tails = self.data
n = heads + tails
sample = [random.choice('HT') for _ in range(n)]
hist = thinkstats2.Hist(sample)
data = hist['H'], hist['T']
return data
class DiffMeansPermute(thinkstats2.HypothesisTest):
"""Tests a difference in means by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
group1, group2 = data
test_stat = abs(group1.mean() - group2.mean())
return test_stat
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
group1, group2 = self.data
self.n, self.m = len(group1), len(group2)
self.pool = np.hstack((group1, group2))
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
np.random.shuffle(self.pool)
data = self.pool[:self.n], self.pool[self.n:]
return data
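# Minimal usage sketch (illustrative; the sample sizes and the 0.3 shift below
# are made up): a permutation test for a difference in means between two
# synthetic groups, using the class defined above.
def _demo_diff_means_permute(iters=1000):
    group1 = np.random.normal(0.0, 1.0, 100)
    group2 = np.random.normal(0.3, 1.0, 100)
    ht = DiffMeansPermute((group1, group2))
    return ht.PValue(iters=iters)   # small p-value: the observed difference is unlikely under the null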
class DiffMeansOneSided(DiffMeansPermute):
"""Tests a one-sided difference in means by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
group1, group2 = data
test_stat = group1.mean() - group2.mean()
return test_stat
class DiffStdPermute(DiffMeansPermute):
"""Tests a one-sided difference in standard deviation by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: data in whatever form is relevant
"""
group1, group2 = data
test_stat = group1.std() - group2.std()
return test_stat
class CorrelationPermute(thinkstats2.HypothesisTest):
"""Tests correlations by permutation."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: tuple of xs and ys
"""
xs, ys = data
test_stat = abs(thinkstats2.Corr(xs, ys))
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
xs, ys = self.data
xs = np.random.permutation(xs)
return xs, ys
class DiceTest(thinkstats2.HypothesisTest):
"""Tests whether a six-sided die is fair."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: list of frequencies
"""
observed = data
n = sum(observed)
expected = np.ones(6) * n / 6
test_stat = sum(abs(observed - expected))
return test_stat
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
n = sum(self.data)
values = [1,2,3,4,5,6]
rolls = np.random.choice(values, n, replace=True)
hist = thinkstats2.Hist(rolls)
freqs = hist.Freqs(values)
return freqs
class DiceChiTest(DiceTest):
"""Tests a six-sided die using a chi-squared statistic."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: list of frequencies
"""
observed = data
n = sum(observed)
expected = np.ones(6) * n / 6
test_stat = sum((observed - expected)**2 / expected)
return test_stat
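# Worked example of the statistic above, using the die frequencies passed to
# DiceChiTest in RunDiceTest below: observed = [8, 9, 19, 5, 8, 11] sums to 60,
# so each expected count is 10 and
#   chi2 = (4 + 1 + 81 + 25 + 4 + 1) / 10 = 11.6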
class PregLengthTest(thinkstats2.HypothesisTest):
"""Tests difference in pregnancy length using a chi-squared statistic."""
def TestStatistic(self, data):
"""Computes the test statistic.
data: pair of lists of pregnancy lengths
"""
firsts, others = data
stat = self.ChiSquared(firsts) + self.ChiSquared(others)
return stat
def ChiSquared(self, lengths):
"""Computes the chi-squared statistic.
lengths: sequence of lengths
returns: float
"""
hist = thinkstats2.Hist(lengths)
observed = np.array(hist.Freqs(self.values))
expected = self.expected_probs * len(lengths)
stat = sum((observed - expected)**2 / expected)
return stat
def MakeModel(self):
"""Build a model of the null hypothesis.
"""
firsts, others = self.data
self.n = len(firsts)
self.pool = np.hstack((firsts, others))
pmf = thinkstats2.Pmf(self.pool)
self.values = range(35, 44)
self.expected_probs = np.array(pmf.Probs(self.values))
def RunModel(self):
"""Run the model of the null hypothesis.
returns: simulated data
"""
np.random.shuffle(self.pool)
data = self.pool[:self.n], self.pool[self.n:]
return data
def RunDiceTest():
"""Tests whether a die is fair.
"""
data = [8, 9, 19, 5, 8, 11]
dt = DiceTest(data)
print('dice test', dt.PValue(iters=10000))
dt = DiceChiTest(data)
print('dice chi test', dt.PValue(iters=10000))
def FalseNegRate(data, num_runs=1000):
"""Computes the chance of a false negative based on resampling.
data: pair of sequences
num_runs: how many experiments to simulate
returns: float false negative rate
"""
group1, group2 = data
count = 0
for i in range(num_runs):
sample1 = thinkstats2.Resample(group1)
sample2 = thinkstats2.Resample(group2)
ht = DiffMeansPermute((sample1, sample2))
p_value = ht.PValue(iters=101)
if p_value > 0.05:
count += 1
return count / num_runs
def PrintTest(p_value, ht):
"""Prints results from a hypothesis test.
p_value: float
ht: HypothesisTest
"""
print('p-value =', p_value)
print('actual =', ht.actual)
print('ts max =', ht.MaxTestStat())
def RunTests(data, iters=1000):
"""Runs several tests on the given data.
data: pair of sequences
iters: number of iterations to run
"""
# test the difference in means
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=iters)
print('\nmeans permute two-sided')
PrintTest(p_value, ht)
ht.PlotCdf()
thinkplot.Save(root='hypothesis1',
title='Permutation test',
xlabel='difference in means (weeks)',
ylabel='CDF',
legend=False)
# test the difference in means one-sided
ht = DiffMeansOneSided(data)
p_value = ht.PValue(iters=iters)
print('\nmeans permute one-sided')
PrintTest(p_value, ht)
# test the difference in std
ht = DiffStdPermute(data)
p_value = ht.PValue(iters=iters)
print('\nstd permute one-sided')
PrintTest(p_value, ht)
def ReplicateTests():
"""Replicates tests with the new NSFG data."""
live, firsts, others = nsfg2.MakeFrames()
# compare pregnancy lengths
print('\nprglngth2')
data = firsts.prglngth.values, others.prglngth.values
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=1000)
print('means permute two-sided')
PrintTest(p_value, ht)
print('\nbirth weight 2')
data = (firsts.totalwgt_lb.dropna().values,
others.totalwgt_lb.dropna().values)
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=1000)
print('means permute two-sided')
PrintTest(p_value, ht)
# test correlation
live2 = live.dropna(subset=['agepreg', 'totalwgt_lb'])
data = live2.agepreg.values, live2.totalwgt_lb.values
ht = CorrelationPermute(data)
p_value = ht.PValue()
print('\nage weight correlation 2')
PrintTest(p_value, ht)
# compare pregnancy lengths (chi-squared)
data = firsts.prglngth.values, others.prglngth.values
ht = PregLengthTest(data)
p_value = ht.PValue()
print('\npregnancy length chi-squared 2')
PrintTest(p_value, ht)
def main():
thinkstats2.RandomSeed(17)
# run the coin test
ct = CoinTest((140, 110))
pvalue = ct.PValue()
print('coin test p-value', pvalue)
# compare pregnancy lengths
print('\nprglngth')
live, firsts, others = first.MakeFrames()
data = firsts.prglngth.values, others.prglngth.values
RunTests(data)
# compare birth weights
print('\nbirth weight')
data = (firsts.totalwgt_lb.dropna().values,
others.totalwgt_lb.dropna().values)
ht = DiffMeansPermute(data)
p_value = ht.PValue(iters=1000)
print('means permute two-sided')
PrintTest(p_value, ht)
# test correlation
live2 = live.dropna(subset=['agepreg', 'totalwgt_lb'])
data = live2.agepreg.values, live2.totalwgt_lb.values
ht = CorrelationPermute(data)
p_value = ht.PValue()
print('\nage weight correlation')
print('n=', len(live2))
PrintTest(p_value, ht)
# run the dice test
RunDiceTest()
# compare pregnancy lengths (chi-squared)
data = firsts.prglngth.values, others.prglngth.values
ht = PregLengthTest(data)
p_value = ht.PValue()
print('\npregnancy length chi-squared')
PrintTest(p_value, ht)
# compute the false negative rate for difference in pregnancy length
data = firsts.prglngth.values, others.prglngth.values
neg_rate = FalseNegRate(data)
print('false neg rate', neg_rate)
# run the tests with new nsfg data
ReplicateTests()
if __name__ == "__main__":
main()
| gpl-3.0 |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/examples/svm/plot_separating_hyperplane.py | 62 | 1274 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machines classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
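# yy above comes from the decision boundary w[0]*x + w[1]*y + intercept = 0,
# solved for y: y = -(w[0]/w[1])*x - intercept/w[1]. The slope a = -w[0]/w[1]
# is shared by the two margin lines computed below.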
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
| bsd-3-clause |
DucQuang1/py-earth | doc/generate_figures.py | 1 | 1933 | import matplotlib as mpl
mpl.use('Agg')
import numpy
from pyearth import Earth
from matplotlib import pyplot
#=========================================================================
# V-Function Example
#=========================================================================
# Create some fake data
numpy.random.seed(0)
m = 1000
n = 10
X = 80 * numpy.random.uniform(size=(m, n)) - 40
y = numpy.abs(X[:, 6] - 4.0) + 1 * numpy.random.normal(size=m)
# Fit an Earth model
model = Earth()
model.fit(X, y)
# Print the model
print model.trace()
print model.summary()
# Plot the model
y_hat = model.predict(X)
pyplot.figure()
pyplot.plot(X[:, 6], y, 'r.')
pyplot.plot(X[:, 6], y_hat, 'b.')
pyplot.xlabel('x_6')
pyplot.ylabel('y')
pyplot.title('Simple Earth Example')
pyplot.savefig('simple_earth_example.png')
#=========================================================================
# Hinge plot
#=========================================================================
from xkcdify import XKCDify
x = numpy.arange(-10, 10, .1)
y = x * (x > 0)
fig = pyplot.figure(figsize=(10, 5))
pyplot.plot(x, y)
ax = pyplot.gca()
pyplot.title('Basic Hinge Function')
pyplot.xlabel('x')
pyplot.ylabel('h(x)')
pyplot.annotate('x=t', (0, 0), xytext=(-30, 30), textcoords='offset points',
arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=.2"))
XKCDify(ax)
pyplot.setp(ax, frame_on=False)
pyplot.savefig('hinge.png')
#=========================================================================
# Piecewise Linear Plot
#=========================================================================
m = 1000
x = numpy.arange(-10, 10, .1)
y = 1 - 2 * (1 - x) * (x < 1) + 0.5 * (x - 1) * (x > 1)
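# In hinge-function form, with h(u) = max(0, u), the curve above is
# y = 1 - 2*h(1 - x) + 0.5*h(x - 1): slope 2 to the left of the knot at x = 1
# and slope 0.5 to its right, the kind of piecewise-linear basis expansion
# that Earth/MARS models are built from.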
pyplot.figure(figsize=(10, 5))
pyplot.plot(x, y)
ax = pyplot.gca()
pyplot.xlabel('x')
pyplot.ylabel('y')
pyplot.title('Piecewise Linear Function')
XKCDify(ax)
pyplot.setp(ax, frame_on=False)
pyplot.savefig('piecewise_linear.png')
| bsd-3-clause |
mihail911/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/axes.py | 69 | 259904 | from __future__ import division, generators
import math, sys, warnings, datetime, new
import numpy as np
from numpy import ma
import matplotlib
rcParams = matplotlib.rcParams
import matplotlib.artist as martist
import matplotlib.axis as maxis
import matplotlib.cbook as cbook
import matplotlib.collections as mcoll
import matplotlib.colors as mcolors
import matplotlib.contour as mcontour
import matplotlib.dates as mdates
import matplotlib.font_manager as font_manager
import matplotlib.image as mimage
import matplotlib.legend as mlegend
import matplotlib.lines as mlines
import matplotlib.mlab as mlab
import matplotlib.patches as mpatches
import matplotlib.quiver as mquiver
import matplotlib.scale as mscale
import matplotlib.table as mtable
import matplotlib.text as mtext
import matplotlib.ticker as mticker
import matplotlib.transforms as mtransforms
iterable = cbook.iterable
is_string_like = cbook.is_string_like
def _process_plot_format(fmt):
"""
Process a matlab(TM) style color/line style format string. Return a
(*linestyle*, *color*) tuple as a result of the processing. Default
values are ('-', 'b'). Example format strings include:
* 'ko': black circles
* '.b': blue dots
* 'r--': red dashed lines
.. seealso::
:func:`~matplotlib.Line2D.lineStyles` and
:func:`~matplotlib.pyplot.colors`:
for all possible styles and color format string.
"""
linestyle = None
marker = None
color = None
# Is fmt just a colorspec?
try:
color = mcolors.colorConverter.to_rgb(fmt)
return linestyle, marker, color # Yes.
except ValueError:
pass # No, not just a color.
# handle the multi char special cases and strip them from the
# string
if fmt.find('--')>=0:
linestyle = '--'
fmt = fmt.replace('--', '')
if fmt.find('-.')>=0:
linestyle = '-.'
fmt = fmt.replace('-.', '')
if fmt.find(' ')>=0:
linestyle = 'None'
fmt = fmt.replace(' ', '')
chars = [c for c in fmt]
for c in chars:
if c in mlines.lineStyles:
if linestyle is not None:
raise ValueError(
'Illegal format string "%s"; two linestyle symbols' % fmt)
linestyle = c
elif c in mlines.lineMarkers:
if marker is not None:
raise ValueError(
'Illegal format string "%s"; two marker symbols' % fmt)
marker = c
elif c in mcolors.colorConverter.colors:
if color is not None:
raise ValueError(
'Illegal format string "%s"; two color symbols' % fmt)
color = c
else:
raise ValueError(
'Unrecognized character %c in format string' % c)
if linestyle is None and marker is None:
linestyle = rcParams['lines.linestyle']
if linestyle is None:
linestyle = 'None'
if marker is None:
marker = 'None'
return linestyle, marker, color
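# Illustrative expectations, read off the parsing above: 'ko' should come back
# as (linestyle, marker, color) = ('None', 'o', 'k'), 'r--' as
# ('--', 'None', 'r'), and a bare colorspec such as 'b' returns early with
# (None, None, (0.0, 0.0, 1.0)).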
def set_default_color_cycle(clist):
"""
Change the default cycle of colors that will be used by the plot
command. This must be called before creating the
:class:`Axes` to which it will apply; it will
apply to all future axes.
*clist* is a sequence of mpl color specifiers
"""
_process_plot_var_args.defaultColors = clist[:]
rcParams['lines.color'] = clist[0]
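# Usage sketch (illustrative): the cycle must be changed before the axes that
# should use it are created, e.g.
#
#     set_default_color_cycle(['r', 'g', 'b'])
#     # any Axes built after this point cycle through red, green, blue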
class _process_plot_var_args:
"""
Process variable length arguments to the plot command, so that
plot commands like the following are supported::
plot(t, s)
plot(t1, s1, t2, s2)
plot(t1, s1, 'ko', t2, s2)
plot(t1, s1, 'ko', t2, s2, 'r--', t3, e3)
an arbitrary number of *x*, *y*, *fmt* are allowed
"""
defaultColors = ['b','g','r','c','m','y','k']
def __init__(self, axes, command='plot'):
self.axes = axes
self.command = command
self._clear_color_cycle()
def _clear_color_cycle(self):
self.colors = _process_plot_var_args.defaultColors[:]
# if the default line color is a color format string, move it up
# in the que
try: ind = self.colors.index(rcParams['lines.color'])
except ValueError:
self.firstColor = rcParams['lines.color']
else:
self.colors[0], self.colors[ind] = self.colors[ind], self.colors[0]
self.firstColor = self.colors[0]
self.Ncolors = len(self.colors)
self.count = 0
def set_color_cycle(self, clist):
self.colors = clist[:]
self.firstColor = self.colors[0]
self.Ncolors = len(self.colors)
self.count = 0
def _get_next_cycle_color(self):
if self.count==0:
color = self.firstColor
else:
color = self.colors[int(self.count % self.Ncolors)]
self.count += 1
return color
def __call__(self, *args, **kwargs):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
xunits = kwargs.pop( 'xunits', self.axes.xaxis.units)
yunits = kwargs.pop( 'yunits', self.axes.yaxis.units)
if xunits!=self.axes.xaxis.units:
self.axes.xaxis.set_units(xunits)
if yunits!=self.axes.yaxis.units:
self.axes.yaxis.set_units(yunits)
ret = self._grab_next_args(*args, **kwargs)
return ret
def set_lineprops(self, line, **kwargs):
assert self.command == 'plot', 'set_lineprops only works with "plot"'
for key, val in kwargs.items():
funcName = "set_%s"%key
if not hasattr(line,funcName):
raise TypeError, 'There is no line property "%s"'%key
func = getattr(line,funcName)
func(val)
def set_patchprops(self, fill_poly, **kwargs):
assert self.command == 'fill', 'set_patchprops only works with "fill"'
for key, val in kwargs.items():
funcName = "set_%s"%key
if not hasattr(fill_poly,funcName):
raise TypeError, 'There is no patch property "%s"'%key
func = getattr(fill_poly,funcName)
func(val)
def _xy_from_y(self, y):
if self.axes.yaxis is not None:
b = self.axes.yaxis.update_units(y)
if b: return np.arange(len(y)), y, False
if not ma.isMaskedArray(y):
y = np.asarray(y)
if len(y.shape) == 1:
y = y[:,np.newaxis]
nr, nc = y.shape
x = np.arange(nr)
if len(x.shape) == 1:
x = x[:,np.newaxis]
return x,y, True
def _xy_from_xy(self, x, y):
if self.axes.xaxis is not None and self.axes.yaxis is not None:
bx = self.axes.xaxis.update_units(x)
by = self.axes.yaxis.update_units(y)
# right now multicol is not supported if either x or y are
# unit enabled but this can be fixed..
if bx or by: return x, y, False
x = ma.asarray(x)
y = ma.asarray(y)
if len(x.shape) == 1:
x = x[:,np.newaxis]
if len(y.shape) == 1:
y = y[:,np.newaxis]
nrx, ncx = x.shape
nry, ncy = y.shape
assert nrx == nry, 'Dimensions of x and y are incompatible'
if ncx == ncy:
return x, y, True
if ncx == 1:
x = np.repeat(x, ncy, axis=1)
if ncy == 1:
y = np.repeat(y, ncx, axis=1)
assert x.shape == y.shape, 'Dimensions of x and y are incompatible'
return x, y, True
def _plot_1_arg(self, y, **kwargs):
assert self.command == 'plot', 'fill needs at least 2 arguments'
ret = []
x, y, multicol = self._xy_from_y(y)
if multicol:
for j in xrange(y.shape[1]):
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y[:,j],
color = color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
else:
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color = color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
return ret
def _plot_2_args(self, tup2, **kwargs):
ret = []
if is_string_like(tup2[1]):
assert self.command == 'plot', ('fill needs at least 2 non-string '
'arguments')
y, fmt = tup2
x, y, multicol = self._xy_from_y(y)
linestyle, marker, color = _process_plot_format(fmt)
def makeline(x, y):
_color = color
if _color is None:
_color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=_color,
linestyle=linestyle, marker=marker,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
if multicol:
for j in xrange(y.shape[1]):
makeline(x[:,j], y[:,j])
else:
makeline(x, y)
return ret
else:
x, y = tup2
x, y, multicol = self._xy_from_xy(x, y)
def makeline(x, y):
color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=color,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
def makefill(x, y):
x = self.axes.convert_xunits(x)
y = self.axes.convert_yunits(y)
facecolor = self._get_next_cycle_color()
seg = mpatches.Polygon(np.hstack(
(x[:,np.newaxis],y[:,np.newaxis])),
facecolor = facecolor,
fill=True,
closed=closed
)
self.set_patchprops(seg, **kwargs)
ret.append(seg)
if self.command == 'plot':
func = makeline
else:
closed = kwargs.get('closed', True)
func = makefill
if multicol:
for j in xrange(y.shape[1]):
func(x[:,j], y[:,j])
else:
func(x, y)
return ret
def _plot_3_args(self, tup3, **kwargs):
ret = []
x, y, fmt = tup3
x, y, multicol = self._xy_from_xy(x, y)
linestyle, marker, color = _process_plot_format(fmt)
def makeline(x, y):
_color = color
if _color is None:
_color = self._get_next_cycle_color()
seg = mlines.Line2D(x, y,
color=_color,
linestyle=linestyle, marker=marker,
axes=self.axes,
)
self.set_lineprops(seg, **kwargs)
ret.append(seg)
def makefill(x, y):
facecolor = color
x = self.axes.convert_xunits(x)
y = self.axes.convert_yunits(y)
seg = mpatches.Polygon(np.hstack(
(x[:,np.newaxis],y[:,np.newaxis])),
facecolor = facecolor,
fill=True,
closed=closed
)
self.set_patchprops(seg, **kwargs)
ret.append(seg)
if self.command == 'plot':
func = makeline
else:
closed = kwargs.get('closed', True)
func = makefill
if multicol:
for j in xrange(y.shape[1]):
func(x[:,j], y[:,j])
else:
func(x, y)
return ret
def _grab_next_args(self, *args, **kwargs):
remaining = args
while 1:
if len(remaining)==0: return
if len(remaining)==1:
for seg in self._plot_1_arg(remaining[0], **kwargs):
yield seg
remaining = []
continue
if len(remaining)==2:
for seg in self._plot_2_args(remaining, **kwargs):
yield seg
remaining = []
continue
if len(remaining)==3:
if not is_string_like(remaining[2]):
raise ValueError, 'third arg must be a format string'
for seg in self._plot_3_args(remaining, **kwargs):
yield seg
remaining=[]
continue
if is_string_like(remaining[2]):
for seg in self._plot_3_args(remaining[:3], **kwargs):
yield seg
remaining=remaining[3:]
else:
for seg in self._plot_2_args(remaining[:2], **kwargs):
yield seg
remaining=remaining[2:]
class Axes(martist.Artist):
"""
The :class:`Axes` contains most of the figure elements:
:class:`~matplotlib.axis.Axis`, :class:`~matplotlib.axis.Tick`,
:class:`~matplotlib.lines.Line2D`, :class:`~matplotlib.text.Text`,
:class:`~matplotlib.patches.Polygon`, etc., and sets the
coordinate system.
The :class:`Axes` instance supports callbacks through a callbacks
attribute which is a :class:`~matplotlib.cbook.CallbackRegistry`
instance. The events you can connect to are 'xlim_changed' and
'ylim_changed' and the callback will be called with func(*ax*)
where *ax* is the :class:`Axes` instance.
"""
name = "rectilinear"
_shared_x_axes = cbook.Grouper()
_shared_y_axes = cbook.Grouper()
def __str__(self):
return "Axes(%g,%g;%gx%g)" % tuple(self._position.bounds)
def __init__(self, fig, rect,
axisbg = None, # defaults to rc axes.facecolor
frameon = True,
sharex=None, # use Axes instance's xaxis info
sharey=None, # use Axes instance's yaxis info
label='',
**kwargs
):
"""
Build an :class:`Axes` instance in
:class:`~matplotlib.figure.Figure` *fig* with
*rect=[left, bottom, width, height]* in
:class:`~matplotlib.figure.Figure` coordinates
Optional keyword arguments:
================ =========================================
Keyword Description
================ =========================================
*adjustable* [ 'box' | 'datalim' ]
*alpha* float: the alpha transparency
*anchor* [ 'C', 'SW', 'S', 'SE', 'E', 'NE', 'N',
'NW', 'W' ]
*aspect* [ 'auto' | 'equal' | aspect_ratio ]
*autoscale_on* [ *True* | *False* ] whether or not to
autoscale the *viewlim*
*axis_bgcolor* any matplotlib color, see
:func:`~matplotlib.pyplot.colors`
*axisbelow* draw the grids and ticks below the other
artists
*cursor_props* a (*float*, *color*) tuple
*figure* a :class:`~matplotlib.figure.Figure`
instance
*frame_on* a boolean - draw the axes frame
*label* the axes label
*navigate* [ *True* | *False* ]
*navigate_mode* [ 'PAN' | 'ZOOM' | None ] the navigation
toolbar button status
*position* [left, bottom, width, height] in
class:`~matplotlib.figure.Figure` coords
*sharex* an class:`~matplotlib.axes.Axes` instance
to share the x-axis with
*sharey* an class:`~matplotlib.axes.Axes` instance
to share the y-axis with
*title* the title string
*visible* [ *True* | *False* ] whether the axes is
visible
*xlabel* the xlabel
*xlim* (*xmin*, *xmax*) view limits
*xscale* [%(scale)s]
*xticklabels* sequence of strings
*xticks* sequence of floats
*ylabel* the ylabel strings
*ylim* (*ymin*, *ymax*) view limits
*yscale* [%(scale)s]
*yticklabels* sequence of strings
*yticks* sequence of floats
================ =========================================
""" % {'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()])}
martist.Artist.__init__(self)
if isinstance(rect, mtransforms.Bbox):
self._position = rect
else:
self._position = mtransforms.Bbox.from_bounds(*rect)
self._originalPosition = self._position.frozen()
self.set_axes(self)
self.set_aspect('auto')
self._adjustable = 'box'
self.set_anchor('C')
self._sharex = sharex
self._sharey = sharey
if sharex is not None:
self._shared_x_axes.join(self, sharex)
if sharex._adjustable == 'box':
sharex._adjustable = 'datalim'
#warnings.warn(
# 'shared axes: "adjustable" is being changed to "datalim"')
self._adjustable = 'datalim'
if sharey is not None:
self._shared_y_axes.join(self, sharey)
if sharey._adjustable == 'box':
sharey._adjustable = 'datalim'
#warnings.warn(
# 'shared axes: "adjustable" is being changed to "datalim"')
self._adjustable = 'datalim'
self.set_label(label)
self.set_figure(fig)
# this call may differ for non-sep axes, eg polar
self._init_axis()
if axisbg is None: axisbg = rcParams['axes.facecolor']
self._axisbg = axisbg
self._frameon = frameon
self._axisbelow = rcParams['axes.axisbelow']
self._hold = rcParams['axes.hold']
self._connected = {} # a dict from events to (id, func)
self.cla()
# funcs used to format x and y - fall back on major formatters
self.fmt_xdata = None
self.fmt_ydata = None
self.set_cursor_props((1,'k')) # set the cursor properties for axes
self._cachedRenderer = None
self.set_navigate(True)
self.set_navigate_mode(None)
if len(kwargs): martist.setp(self, **kwargs)
if self.xaxis is not None:
self._xcid = self.xaxis.callbacks.connect('units finalize',
self.relim)
if self.yaxis is not None:
self._ycid = self.yaxis.callbacks.connect('units finalize',
self.relim)
def get_window_extent(self, *args, **kwargs):
'''
get the axes bounding box in display space; *args* and
*kwargs* are empty
'''
return self.bbox
def _init_axis(self):
"move this out of __init__ because non-separable axes don't use it"
self.xaxis = maxis.XAxis(self)
self.yaxis = maxis.YAxis(self)
self._update_transScale()
def set_figure(self, fig):
"""
Set the class:`~matplotlib.axes.Axes` figure
accepts a class:`~matplotlib.figure.Figure` instance
"""
martist.Artist.set_figure(self, fig)
self.bbox = mtransforms.TransformedBbox(self._position, fig.transFigure)
#these will be updated later as data is added
self.dataLim = mtransforms.Bbox.unit()
self.viewLim = mtransforms.Bbox.unit()
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
self._set_lim_and_transforms()
def _set_lim_and_transforms(self):
"""
set the *dataLim* and *viewLim*
:class:`~matplotlib.transforms.Bbox` attributes and the
*transScale*, *transData*, *transLimits* and *transAxes*
transformations.
"""
self.transAxes = mtransforms.BboxTransformTo(self.bbox)
# Transforms the x and y axis separately by a scale factor
# It is assumed that this part will have non-linear components
self.transScale = mtransforms.TransformWrapper(
mtransforms.IdentityTransform())
# An affine transformation on the data, generally to limit the
# range of the axes
self.transLimits = mtransforms.BboxTransformFrom(
mtransforms.TransformedBbox(self.viewLim, self.transScale))
# The parentheses are important for efficiency here -- they
# group the last two (which are usually affines) separately
# from the first (which, with log-scaling can be non-affine).
self.transData = self.transScale + (self.transLimits + self.transAxes)
self._xaxis_transform = mtransforms.blended_transform_factory(
self.axes.transData, self.axes.transAxes)
self._yaxis_transform = mtransforms.blended_transform_factory(
self.axes.transAxes, self.axes.transData)
def get_xaxis_transform(self):
"""
Get the transformation used for drawing x-axis labels, ticks
and gridlines. The x-direction is in data coordinates and the
y-direction is in axis coordinates.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return self._xaxis_transform
def get_xaxis_text1_transform(self, pad_points):
"""
Get the transformation used for drawing x-axis labels, which
will add the given amount of padding (in points) between the
axes and the label. The x-direction is in data coordinates
and the y-direction is in axis coordinates. Returns a
3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._xaxis_transform +
mtransforms.ScaledTranslation(0, -1 * pad_points / 72.0,
self.figure.dpi_scale_trans),
"top", "center")
def get_xaxis_text2_transform(self, pad_points):
"""
Get the transformation used for drawing the secondary x-axis
labels, which will add the given amount of padding (in points)
between the axes and the label. The x-direction is in data
coordinates and the y-direction is in axis coordinates.
Returns a 3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._xaxis_transform +
mtransforms.ScaledTranslation(0, pad_points / 72.0,
self.figure.dpi_scale_trans),
"bottom", "center")
def get_yaxis_transform(self):
"""
Get the transformation used for drawing y-axis labels, ticks
and gridlines. The x-direction is in axis coordinates and the
y-direction is in data coordinates.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return self._yaxis_transform
def get_yaxis_text1_transform(self, pad_points):
"""
Get the transformation used for drawing y-axis labels, which
will add the given amount of padding (in points) between the
axes and the label. The x-direction is in axis coordinates
and the y-direction is in data coordinates. Returns a 3-tuple
of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._yaxis_transform +
mtransforms.ScaledTranslation(-1 * pad_points / 72.0, 0,
self.figure.dpi_scale_trans),
"center", "right")
def get_yaxis_text2_transform(self, pad_points):
"""
Get the transformation used for drawing the secondary y-axis
labels, which will add the given amount of padding (in points)
between the axes and the label. The x-direction is in axis
coordinates and the y-direction is in data coordinates.
Returns a 3-tuple of the form::
(transform, valign, halign)
where *valign* and *halign* are requested alignments for the
text.
.. note::
This transformation is primarily used by the
:class:`~matplotlib.axis.Axis` class, and is meant to be
overridden by new kinds of projections that may need to
place axis elements in different locations.
"""
return (self._yaxis_transform +
mtransforms.ScaledTranslation(pad_points / 72.0, 0,
self.figure.dpi_scale_trans),
"center", "left")
def _update_transScale(self):
self.transScale.set(
mtransforms.blended_transform_factory(
self.xaxis.get_transform(), self.yaxis.get_transform()))
if hasattr(self, "lines"):
for line in self.lines:
line._transformed_path.invalidate()
def get_position(self, original=False):
'Return the a copy of the axes rectangle as a Bbox'
if original:
return self._originalPosition.frozen()
else:
return self._position.frozen()
def set_position(self, pos, which='both'):
"""
Set the axes position with::
pos = [left, bottom, width, height]
in relative 0,1 coords, or *pos* can be a
:class:`~matplotlib.transforms.Bbox`
There are two position variables: one which is ultimately
used, but which may be modified by :meth:`apply_aspect`, and a
second which is the starting point for :meth:`apply_aspect`.
Optional keyword arguments:
*which*
========== ====================
value description
========== ====================
'active' to change the first
'original' to change the second
'both' to change both
========== ====================
"""
if not isinstance(pos, mtransforms.BboxBase):
pos = mtransforms.Bbox.from_bounds(*pos)
if which in ('both', 'active'):
self._position.set(pos)
if which in ('both', 'original'):
self._originalPosition.set(pos)
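# Usage sketch (illustrative; assumes ``ax`` is an existing Axes):
#
#     >>> ax.set_position([0.1, 0.1, 0.8, 0.8])     # left, bottom, width, height
#     >>> ax.set_position(mtransforms.Bbox.from_bounds(0.1, 0.1, 0.8, 0.8),
#     ...                 which='original')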
def reset_position(self):
'Make the original position the active position'
pos = self.get_position(original=True)
self.set_position(pos, which='active')
def _set_artist_props(self, a):
'set the boilerplate props for artists added to axes'
a.set_figure(self.figure)
if not a.is_transform_set():
a.set_transform(self.transData)
a.set_axes(self)
def _gen_axes_patch(self):
"""
Returns the patch used to draw the background of the axes. It
is also used as the clipping path for any data elements on the
axes.
In the standard axes, this is a rectangle, but in other
projections it may not be.
.. note::
Intended to be overridden by new projection types.
"""
return mpatches.Rectangle((0.0, 0.0), 1.0, 1.0)
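# Projection authors typically override _gen_axes_patch; for example, a
# polar-style projection might return a circle in axes coordinates
# (illustrative sketch only):
#
#     >>> # return mpatches.Circle((0.5, 0.5), 0.5)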
def cla(self):
'Clear the current axes'
# Note: this is called by Axes.__init__()
self.xaxis.cla()
self.yaxis.cla()
self.ignore_existing_data_limits = True
self.callbacks = cbook.CallbackRegistry(('xlim_changed',
'ylim_changed'))
if self._sharex is not None:
# major and minor are class instances with
# locator and formatter attributes
self.xaxis.major = self._sharex.xaxis.major
self.xaxis.minor = self._sharex.xaxis.minor
x0, x1 = self._sharex.get_xlim()
self.set_xlim(x0, x1, emit=False)
self.xaxis.set_scale(self._sharex.xaxis.get_scale())
else:
self.xaxis.set_scale('linear')
if self._sharey is not None:
self.yaxis.major = self._sharey.yaxis.major
self.yaxis.minor = self._sharey.yaxis.minor
y0, y1 = self._sharey.get_ylim()
self.set_ylim(y0, y1, emit=False)
self.yaxis.set_scale(self._sharey.yaxis.get_scale())
else:
self.yaxis.set_scale('linear')
self._autoscaleon = True
self._update_transScale() # needed?
self._get_lines = _process_plot_var_args(self)
self._get_patches_for_fill = _process_plot_var_args(self, 'fill')
self._gridOn = rcParams['axes.grid']
self.lines = []
self.patches = []
self.texts = []
self.tables = []
self.artists = []
self.images = []
self.legend_ = None
self.collections = [] # collection.Collection instances
self.grid(self._gridOn)
props = font_manager.FontProperties(size=rcParams['axes.titlesize'])
self.titleOffsetTrans = mtransforms.ScaledTranslation(
0.0, 5.0 / 72.0, self.figure.dpi_scale_trans)
self.title = mtext.Text(
x=0.5, y=1.0, text='',
fontproperties=props,
verticalalignment='bottom',
horizontalalignment='center',
)
self.title.set_transform(self.transAxes + self.titleOffsetTrans)
self.title.set_clip_box(None)
self._set_artist_props(self.title)
# the patch draws the background of the axes. we want this to
# be below the other artists; the axesPatch name is
# deprecated. We use the frame to draw the edges so we are
# setting the edgecolor to None
self.patch = self.axesPatch = self._gen_axes_patch()
self.patch.set_figure(self.figure)
self.patch.set_facecolor(self._axisbg)
self.patch.set_edgecolor('None')
self.patch.set_linewidth(0)
self.patch.set_transform(self.transAxes)
# the frame draws the border around the axes and we want this
# above the patch. This is a placeholder for a more sophisticated
# artist that might just draw a left/bottom frame or a centered
# frame, etc.; the axesFrame name is deprecated
self.frame = self.axesFrame = self._gen_axes_patch()
self.frame.set_figure(self.figure)
self.frame.set_facecolor('none')
self.frame.set_edgecolor(rcParams['axes.edgecolor'])
self.frame.set_linewidth(rcParams['axes.linewidth'])
self.frame.set_transform(self.transAxes)
self.frame.set_zorder(2.5)
self.axison = True
self.xaxis.set_clip_path(self.patch)
self.yaxis.set_clip_path(self.patch)
self._shared_x_axes.clean()
self._shared_y_axes.clean()
def clear(self):
'clear the axes'
self.cla()
def set_color_cycle(self, clist):
"""
Set the color cycle for any future plot commands on this Axes.
clist is a list of mpl color specifiers.
"""
self._get_lines.set_color_cycle(clist)
def ishold(self):
'return the HOLD status of the axes'
return self._hold
def hold(self, b=None):
"""
call signature::
hold(b=None)
Set the hold state. If *hold* is *None* (default), toggle the
*hold* state. Else set the *hold* state to boolean value *b*.
Examples:
* toggle hold:
>>> hold()
* turn hold on:
>>> hold(True)
* turn hold off
>>> hold(False)
When hold is True, subsequent plot commands will be added to
the current axes. When hold is False, the current axes and
figure will be cleared on the next plot command
"""
if b is None:
self._hold = not self._hold
else:
self._hold = b
def get_aspect(self):
return self._aspect
def set_aspect(self, aspect, adjustable=None, anchor=None):
"""
*aspect*
======== ================================================
value description
======== ================================================
'auto' automatic; fill position rectangle with data
'normal' same as 'auto'; deprecated
'equal' same scaling from data to plot units for x and y
num a circle will be stretched such that the height
is num times the width. aspect=1 is the same as
aspect='equal'.
======== ================================================
*adjustable*
========= ============================
value description
========= ============================
'box' change physical size of axes
'datalim' change xlim or ylim
========= ============================
*anchor*
===== =====================
value description
===== =====================
'C' centered
'SW' lower left corner
'S' middle of bottom edge
'SE' lower right corner
etc.
===== =====================
"""
if aspect in ('normal', 'auto'):
self._aspect = 'auto'
elif aspect == 'equal':
self._aspect = 'equal'
else:
self._aspect = float(aspect) # raise ValueError if necessary
if adjustable is not None:
self.set_adjustable(adjustable)
if anchor is not None:
self.set_anchor(anchor)
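# Usage sketch (illustrative):
#
#     >>> ax.set_aspect('equal', adjustable='datalim')
#     >>> ax.set_aspect(2.0, adjustable='box', anchor='C')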
def get_adjustable(self):
return self._adjustable
def set_adjustable(self, adjustable):
"""
ACCEPTS: [ 'box' | 'datalim' ]
"""
if adjustable in ('box', 'datalim'):
if self in self._shared_x_axes or self in self._shared_y_axes:
if adjustable == 'box':
raise ValueError(
'adjustable must be "datalim" for shared axes')
self._adjustable = adjustable
else:
raise ValueError('argument must be "box", or "datalim"')
def get_anchor(self):
return self._anchor
def set_anchor(self, anchor):
"""
*anchor*
===== ============
value description
===== ============
'C' Center
'SW' bottom left
'S' bottom
'SE' bottom right
'E' right
'NE' top right
'N' top
'NW' top left
'W' left
===== ============
"""
if anchor in mtransforms.Bbox.coefs.keys() or len(anchor) == 2:
self._anchor = anchor
else:
raise ValueError('argument must be among %s' %
', '.join(mtransforms.Bbox.coefs.keys()))
def get_data_ratio(self):
"""
Returns the aspect ratio of the raw data.
This method is intended to be overridden by new projection
types.
"""
xmin,xmax = self.get_xbound()
xsize = max(math.fabs(xmax-xmin), 1e-30)
ymin,ymax = self.get_ybound()
ysize = max(math.fabs(ymax-ymin), 1e-30)
return ysize/xsize
def apply_aspect(self, position=None):
'''
Use the *_aspect* and *_adjustable* attributes to modify the
axes box or the view limits.
'''
if position is None:
position = self.get_position(original=True)
aspect = self.get_aspect()
if aspect == 'auto':
self.set_position( position , which='active')
return
if aspect == 'equal':
A = 1
else:
A = aspect
#Ensure at drawing time that any Axes involved in axis-sharing
# does not have its position changed.
if self in self._shared_x_axes or self in self._shared_y_axes:
if self._adjustable == 'box':
self._adjustable = 'datalim'
warnings.warn(
'shared axes: "adjustable" is being changed to "datalim"')
figW,figH = self.get_figure().get_size_inches()
fig_aspect = figH/figW
if self._adjustable == 'box':
box_aspect = A * self.get_data_ratio()
pb = position.frozen()
pb1 = pb.shrunk_to_aspect(box_aspect, pb, fig_aspect)
self.set_position(pb1.anchored(self.get_anchor(), pb), 'active')
return
# reset active to original in case it had been changed
# by prior use of 'box'
self.set_position(position, which='active')
xmin,xmax = self.get_xbound()
xsize = max(math.fabs(xmax-xmin), 1e-30)
ymin,ymax = self.get_ybound()
ysize = max(math.fabs(ymax-ymin), 1e-30)
l,b,w,h = position.bounds
box_aspect = fig_aspect * (h/w)
data_ratio = box_aspect / A
y_expander = (data_ratio*xsize/ysize - 1.0)
#print 'y_expander', y_expander
# If y_expander > 0, the dy/dx viewLim ratio needs to increase
if abs(y_expander) < 0.005:
#print 'good enough already'
return
dL = self.dataLim
xr = 1.05 * dL.width
yr = 1.05 * dL.height
xmarg = xsize - xr
ymarg = ysize - yr
Ysize = data_ratio * xsize
Xsize = ysize / data_ratio
Xmarg = Xsize - xr
Ymarg = Ysize - yr
xm = 0 # Setting these targets to, e.g., 0.05*xr does not seem to help.
ym = 0
#print 'xmin, xmax, ymin, ymax', xmin, xmax, ymin, ymax
#print 'xsize, Xsize, ysize, Ysize', xsize, Xsize, ysize, Ysize
changex = (self in self._shared_y_axes
and self not in self._shared_x_axes)
changey = (self in self._shared_x_axes
and self not in self._shared_y_axes)
if changex and changey:
warnings.warn("adjustable='datalim' cannot work with shared "
"x and y axes")
return
if changex:
adjust_y = False
else:
#print 'xmarg, ymarg, Xmarg, Ymarg', xmarg, ymarg, Xmarg, Ymarg
if xmarg > xm and ymarg > ym:
adjy = ((Ymarg > 0 and y_expander < 0)
or (Xmarg < 0 and y_expander > 0))
else:
adjy = y_expander > 0
#print 'y_expander, adjy', y_expander, adjy
adjust_y = changey or adjy #(Ymarg > xmarg)
if adjust_y:
yc = 0.5*(ymin+ymax)
y0 = yc - Ysize/2.0
y1 = yc + Ysize/2.0
self.set_ybound((y0, y1))
#print 'New y0, y1:', y0, y1
#print 'New ysize, ysize/xsize', y1-y0, (y1-y0)/xsize
else:
xc = 0.5*(xmin+xmax)
x0 = xc - Xsize/2.0
x1 = xc + Xsize/2.0
self.set_xbound((x0, x1))
#print 'New x0, x1:', x0, x1
#print 'New xsize, ysize/xsize', x1-x0, ysize/(x1-x0)
def axis(self, *v, **kwargs):
'''
Convenience method for manipulating the x and y view limits
and the aspect ratio of the plot.
*kwargs* are passed on to :meth:`set_xlim` and
:meth:`set_ylim`
'''
if len(v)==1 and is_string_like(v[0]):
s = v[0].lower()
if s=='on': self.set_axis_on()
elif s=='off': self.set_axis_off()
elif s in ('equal', 'tight', 'scaled', 'normal', 'auto', 'image'):
self.set_autoscale_on(True)
self.set_aspect('auto')
self.autoscale_view()
# self.apply_aspect()
if s=='equal':
self.set_aspect('equal', adjustable='datalim')
elif s == 'scaled':
self.set_aspect('equal', adjustable='box', anchor='C')
self.set_autoscale_on(False) # Req. by Mark Bakker
elif s=='tight':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
elif s == 'image':
self.autoscale_view(tight=True)
self.set_autoscale_on(False)
self.set_aspect('equal', adjustable='box', anchor='C')
else:
raise ValueError('Unrecognized string %s to axis; '
'try on or off' % s)
xmin, xmax = self.get_xlim()
ymin, ymax = self.get_ylim()
return xmin, xmax, ymin, ymax
try: v[0]
except IndexError:
emit = kwargs.get('emit', True)
xmin = kwargs.get('xmin', None)
xmax = kwargs.get('xmax', None)
xmin, xmax = self.set_xlim(xmin, xmax, emit)
ymin = kwargs.get('ymin', None)
ymax = kwargs.get('ymax', None)
ymin, ymax = self.set_ylim(ymin, ymax, emit)
return xmin, xmax, ymin, ymax
v = v[0]
if len(v) != 4:
raise ValueError('v must contain [xmin xmax ymin ymax]')
self.set_xlim([v[0], v[1]])
self.set_ylim([v[2], v[3]])
return v
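# Usage sketch (illustrative):
#
#     >>> ax.axis('off')                # hide the axis lines and labels
#     >>> ax.axis('equal')              # equal data scaling via the data limits
#     >>> ax.axis([0, 10, -1, 1])       # explicit [xmin, xmax, ymin, ymax]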
def get_child_artists(self):
"""
Return a list of artists the axes contains.
.. deprecated:: 0.98
"""
raise DeprecationWarning('Use get_children instead')
def get_frame(self):
'Return the axes Rectangle frame'
warnings.warn('use ax.patch instead', DeprecationWarning)
return self.patch
def get_legend(self):
'Return the legend.Legend instance, or None if no legend is defined'
return self.legend_
def get_images(self):
'return a list of Axes images contained by the Axes'
return cbook.silent_list('AxesImage', self.images)
def get_lines(self):
'Return a list of lines contained by the Axes'
return cbook.silent_list('Line2D', self.lines)
def get_xaxis(self):
'Return the XAxis instance'
return self.xaxis
def get_xgridlines(self):
'Get the x grid lines as a list of Line2D instances'
return cbook.silent_list('Line2D xgridline', self.xaxis.get_gridlines())
def get_xticklines(self):
'Get the xtick lines as a list of Line2D instances'
return cbook.silent_list('Line2D xtickline', self.xaxis.get_ticklines())
def get_yaxis(self):
'Return the YAxis instance'
return self.yaxis
def get_ygridlines(self):
'Get the y grid lines as a list of Line2D instances'
return cbook.silent_list('Line2D ygridline', self.yaxis.get_gridlines())
def get_yticklines(self):
'Get the ytick lines as a list of Line2D instances'
return cbook.silent_list('Line2D ytickline', self.yaxis.get_ticklines())
#### Adding and tracking artists
def has_data(self):
'''Return *True* if any artists have been added to axes.
This should not be used to determine whether the *dataLim*
needs to be updated, and may not actually be useful for
anything.
'''
return (
len(self.collections) +
len(self.images) +
len(self.lines) +
len(self.patches))>0
def add_artist(self, a):
'Add any :class:`~matplotlib.artist.Artist` to the axes'
a.set_axes(self)
self.artists.append(a)
self._set_artist_props(a)
a.set_clip_path(self.patch)
a._remove_method = lambda h: self.artists.remove(h)
def add_collection(self, collection, autolim=True):
'''
add a :class:`~matplotlib.collections.Collection` instance
to the axes
'''
label = collection.get_label()
if not label:
collection.set_label('collection%d'%len(self.collections))
self.collections.append(collection)
self._set_artist_props(collection)
collection.set_clip_path(self.patch)
if autolim:
if collection._paths and len(collection._paths):
self.update_datalim(collection.get_datalim(self.transData))
collection._remove_method = lambda h: self.collections.remove(h)
def add_line(self, line):
'''
Add a :class:`~matplotlib.lines.Line2D` to the list of plot
lines
'''
self._set_artist_props(line)
line.set_clip_path(self.patch)
self._update_line_limits(line)
if not line.get_label():
line.set_label('_line%d'%len(self.lines))
self.lines.append(line)
line._remove_method = lambda h: self.lines.remove(h)
def _update_line_limits(self, line):
p = line.get_path()
if p.vertices.size > 0:
self.dataLim.update_from_path(p, self.ignore_existing_data_limits,
updatex=line.x_isdata,
updatey=line.y_isdata)
self.ignore_existing_data_limits = False
def add_patch(self, p):
"""
Add a :class:`~matplotlib.patches.Patch` *p* to the list of
axes patches; the clipbox will be set to the Axes clipping
box. If the transform is not set, it will be set to
:attr:`transData`.
"""
self._set_artist_props(p)
p.set_clip_path(self.patch)
self._update_patch_limits(p)
self.patches.append(p)
p._remove_method = lambda h: self.patches.remove(h)
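# Usage sketch (adding ready-made artists directly; assumes ``ax`` exists):
#
#     >>> line = mlines.Line2D([0, 1], [0, 1], color='g')
#     >>> ax.add_line(line)                               # updates dataLim
#     >>> ax.add_patch(mpatches.Circle((0.5, 0.5), 0.1, alpha=0.4))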
def _update_patch_limits(self, patch):
'update the data limits for patch *p*'
# hist can add zero height Rectangles, which is useful to keep
# the bins, counts and patches lined up, but it throws off log
# scaling. We'll ignore rects with zero height or width in
# the auto-scaling
if (isinstance(patch, mpatches.Rectangle) and
(patch.get_width()==0 or patch.get_height()==0)):
return
vertices = patch.get_path().vertices
if vertices.size > 0:
xys = patch.get_patch_transform().transform(vertices)
if patch.get_data_transform() != self.transData:
transform = (patch.get_data_transform() +
self.transData.inverted())
xys = transform.transform(xys)
self.update_datalim(xys, updatex=patch.x_isdata,
updatey=patch.y_isdata)
def add_table(self, tab):
'''
Add a :class:`~matplotlib.table.Table` instance to the
list of axes tables
'''
self._set_artist_props(tab)
self.tables.append(tab)
tab.set_clip_path(self.patch)
tab._remove_method = lambda h: self.tables.remove(h)
def relim(self):
'recompute the data limits based on current artists'
# Collections are deliberately not supported (yet); see
# the TODO note in artists.py.
self.dataLim.ignore(True)
self.ignore_existing_data_limits = True
for line in self.lines:
self._update_line_limits(line)
for p in self.patches:
self._update_patch_limits(p)
def update_datalim(self, xys, updatex=True, updatey=True):
'Update the data lim bbox with seq of xy tups or equiv. 2-D array'
# if no data is set currently, the bbox will ignore its
# limits and set the bound to be the bounds of the xydata.
# Otherwise, it will compute the bounds of its current data
# and the data in xydata
if iterable(xys) and not len(xys): return
if not ma.isMaskedArray(xys):
xys = np.asarray(xys)
self.dataLim.update_from_data_xy(xys, self.ignore_existing_data_limits,
updatex=updatex, updatey=updatey)
self.ignore_existing_data_limits = False
def update_datalim_numerix(self, x, y):
'Update the data lim bbox with seq of xy tups'
# if no data is set currently, the bbox will ignore its
# limits and set the bound to be the bounds of the xydata.
# Otherwise, it will compute the bounds of its current data
# and the data in xydata
if iterable(x) and not len(x): return
self.dataLim.update_from_data(x, y, self.ignore_existing_data_limits)
self.ignore_existing_data_limits = False
def update_datalim_bounds(self, bounds):
'''
Update the datalim to include the given
:class:`~matplotlib.transforms.Bbox` *bounds*
'''
self.dataLim.set(mtransforms.Bbox.union([self.dataLim, bounds]))
def _process_unit_info(self, xdata=None, ydata=None, kwargs=None):
'look for unit *kwargs* and update the axis instances as necessary'
if self.xaxis is None or self.yaxis is None: return
#print 'processing', self.get_geometry()
if xdata is not None:
# we only need to update if there is nothing set yet.
if not self.xaxis.have_units():
self.xaxis.update_units(xdata)
#print '\tset from xdata', self.xaxis.units
if ydata is not None:
# we only need to update if there is nothing set yet.
if not self.yaxis.have_units():
self.yaxis.update_units(ydata)
#print '\tset from ydata', self.yaxis.units
# process kwargs 2nd since these will override default units
if kwargs is not None:
xunits = kwargs.pop( 'xunits', self.xaxis.units)
if xunits!=self.xaxis.units:
#print '\tkw setting xunits', xunits
self.xaxis.set_units(xunits)
# If the units being set imply a different converter,
# we need to update.
if xdata is not None:
self.xaxis.update_units(xdata)
yunits = kwargs.pop('yunits', self.yaxis.units)
if yunits!=self.yaxis.units:
#print '\tkw setting yunits', yunits
self.yaxis.set_units(yunits)
# If the units being set imply a different converter,
# we need to update.
if ydata is not None:
self.yaxis.update_units(ydata)
def in_axes(self, mouseevent):
'''
return *True* if the given *mouseevent* (in display coords)
is in the Axes
'''
return self.patch.contains(mouseevent)[0]
def get_autoscale_on(self):
"""
Get whether autoscaling is applied on plot commands
"""
return self._autoscaleon
def set_autoscale_on(self, b):
"""
Set whether autoscaling is applied on plot commands
accepts: [ *True* | *False* ]
"""
self._autoscaleon = b
def autoscale_view(self, tight=False, scalex=True, scaley=True):
"""
autoscale the view limits using the data limits. You can
selectively autoscale only a single axis, eg, the xaxis by
setting *scaley* to *False*. The autoscaling preserves any
axis direction reversal that has already been done.
"""
# if image data only just use the datalim
if not self._autoscaleon: return
if scalex:
xshared = self._shared_x_axes.get_siblings(self)
dl = [ax.dataLim for ax in xshared]
bb = mtransforms.BboxBase.union(dl)
x0, x1 = bb.intervalx
if scaley:
yshared = self._shared_y_axes.get_siblings(self)
dl = [ax.dataLim for ax in yshared]
bb = mtransforms.BboxBase.union(dl)
y0, y1 = bb.intervaly
if (tight or (len(self.images)>0 and
len(self.lines)==0 and
len(self.patches)==0)):
if scalex:
self.set_xbound(x0, x1)
if scaley:
self.set_ybound(y0, y1)
return
if scalex:
XL = self.xaxis.get_major_locator().view_limits(x0, x1)
self.set_xbound(XL)
if scaley:
YL = self.yaxis.get_major_locator().view_limits(y0, y1)
self.set_ybound(YL)
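# Usage sketch (illustrative):
#
#     >>> ax.set_autoscale_on(True)
#     >>> ax.autoscale_view(tight=True)                 # fit view to data limits
#     >>> ax.autoscale_view(scalex=True, scaley=False)  # rescale only the x-axis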
#### Drawing
def draw(self, renderer=None, inframe=False):
"Draw everything (plot lines, axes, labels)"
if renderer is None:
renderer = self._cachedRenderer
if renderer is None:
raise RuntimeError('No renderer defined')
if not self.get_visible(): return
renderer.open_group('axes')
self.apply_aspect()
# the patch draws the background rectangle -- the frame below
# will draw the edges
if self.axison and self._frameon:
self.patch.draw(renderer)
artists = []
if len(self.images)<=1 or renderer.option_image_nocomposite():
for im in self.images:
im.draw(renderer)
else:
# make a composite image blending alpha
# list of (mimage.Image, ox, oy)
mag = renderer.get_image_magnification()
ims = [(im.make_image(mag),0,0)
for im in self.images if im.get_visible()]
l, b, r, t = self.bbox.extents
width = mag*((round(r) + 0.5) - (round(l) - 0.5))
height = mag*((round(t) + 0.5) - (round(b) - 0.5))
im = mimage.from_images(height,
width,
ims)
im.is_grayscale = False
l, b, w, h = self.bbox.bounds
# composite images need special args so they will not
# respect z-order for now
renderer.draw_image(
round(l), round(b), im, self.bbox,
self.patch.get_path(),
self.patch.get_transform())
artists.extend(self.collections)
artists.extend(self.patches)
artists.extend(self.lines)
artists.extend(self.texts)
artists.extend(self.artists)
if self.axison and not inframe:
if self._axisbelow:
self.xaxis.set_zorder(0.5)
self.yaxis.set_zorder(0.5)
else:
self.xaxis.set_zorder(2.5)
self.yaxis.set_zorder(2.5)
artists.extend([self.xaxis, self.yaxis])
if not inframe: artists.append(self.title)
artists.extend(self.tables)
if self.legend_ is not None:
artists.append(self.legend_)
# the frame draws the edges around the axes patch -- we
# decouple these so the patch can be in the background and the
# frame in the foreground.
if self.axison and self._frameon:
artists.append(self.frame)
dsu = [ (a.zorder, i, a) for i, a in enumerate(artists)
if not a.get_animated() ]
dsu.sort()
for zorder, i, a in dsu:
a.draw(renderer)
renderer.close_group('axes')
self._cachedRenderer = renderer
def draw_artist(self, a):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
a.draw(self._cachedRenderer)
def redraw_in_frame(self):
"""
This method can only be used after an initial draw which
caches the renderer. It is used to efficiently update Axes
data (axis ticks, labels, etc are not updated)
"""
assert self._cachedRenderer is not None
self.draw(self._cachedRenderer, inframe=True)
def get_renderer_cache(self):
return self._cachedRenderer
def __draw_animate(self):
# ignore for now; broken
if self._lastRenderer is None:
raise RuntimeError('You must first call ax.draw()')
dsu = [(a.zorder, a) for a in self.animated.keys()]
dsu.sort()
renderer = self._lastRenderer
renderer.blit()
for tmp, a in dsu:
a.draw(renderer)
#### Axes rectangle characteristics
def get_frame_on(self):
"""
Get whether the axes rectangle patch is drawn
"""
return self._frameon
def set_frame_on(self, b):
"""
Set whether the axes rectangle patch is drawn
ACCEPTS: [ *True* | *False* ]
"""
self._frameon = b
def get_axisbelow(self):
"""
Get whether axis below is true or not
"""
return self._axisbelow
def set_axisbelow(self, b):
"""
Set whether the axis ticks and gridlines are above or below most artists
ACCEPTS: [ *True* | *False* ]
"""
self._axisbelow = b
def grid(self, b=None, **kwargs):
"""
call signature::
grid(self, b=None, **kwargs)
Set the axes grids on or off; *b* is a boolean
If *b* is *None* and ``len(kwargs)==0``, toggle the grid state. If
*kwargs* are supplied, it is assumed that you want a grid and *b*
is thus set to *True*
*kwargs* are used to set the grid line properties, eg::
ax.grid(color='r', linestyle='-', linewidth=2)
Valid :class:`~matplotlib.lines.Line2D` kwargs are
%(Line2D)s
"""
if len(kwargs): b = True
self.xaxis.grid(b, **kwargs)
self.yaxis.grid(b, **kwargs)
grid.__doc__ = cbook.dedent(grid.__doc__) % martist.kwdocd
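# Usage sketch (illustrative):
#
#     >>> ax.grid(True)
#     >>> ax.grid(color='r', linestyle=':', linewidth=0.5)   # kwargs imply b=True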
def ticklabel_format(self, **kwargs):
"""
Convenience method for manipulating the ScalarFormatter
used by default for linear axes.
Optional keyword arguments:
============ =====================================
Keyword Description
============ =====================================
*style* [ 'sci' (or 'scientific') | 'plain' ]
plain turns off scientific notation
*scilimits* (m, n), pair of integers; if *style*
is 'sci', scientific notation will
be used for numbers outside the range
10**m to 10**n.
Use (0,0) to include all numbers.
*axis* [ 'x' | 'y' | 'both' ]
============ =====================================
Only the major ticks are affected.
If the method is called when the
:class:`~matplotlib.ticker.ScalarFormatter` is not the
:class:`~matplotlib.ticker.Formatter` being used, an
:exc:`AttributeError` will be raised.
"""
style = kwargs.pop('style', '').lower()
scilimits = kwargs.pop('scilimits', None)
if scilimits is not None:
try:
m, n = scilimits
m+n+1 # check that both are numbers
except (ValueError, TypeError):
raise ValueError("scilimits must be a sequence of 2 integers")
axis = kwargs.pop('axis', 'both').lower()
if style[:3] == 'sci':
sb = True
elif style in ['plain', 'comma']:
sb = False
if style == 'plain':
cb = False
else:
cb = True
raise NotImplementedError, "comma style remains to be added"
elif style == '':
sb = None
else:
raise ValueError, "%s is not a valid style value" % style
try:
if sb is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_scientific(sb)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_scientific(sb)
if scilimits is not None:
if axis == 'both' or axis == 'x':
self.xaxis.major.formatter.set_powerlimits(scilimits)
if axis == 'both' or axis == 'y':
self.yaxis.major.formatter.set_powerlimits(scilimits)
except AttributeError:
raise AttributeError(
"This method only works with the ScalarFormatter.")
def set_axis_off(self):
"""turn off the axis"""
self.axison = False
def set_axis_on(self):
"""turn on the axis"""
self.axison = True
def get_axis_bgcolor(self):
'Return the axis background color'
return self._axisbg
def set_axis_bgcolor(self, color):
"""
set the axes background color
ACCEPTS: any matplotlib color - see
:func:`~matplotlib.pyplot.colors`
"""
self._axisbg = color
self.patch.set_facecolor(color)
### data limits, ticks, tick labels, and formatting
def invert_xaxis(self):
"Invert the x-axis."
left, right = self.get_xlim()
self.set_xlim(right, left)
def xaxis_inverted(self):
'Returns True if the x-axis is inverted.'
left, right = self.get_xlim()
return right < left
def get_xbound(self):
"""
Returns the x-axis numerical bounds where::
lowerBound < upperBound
"""
left, right = self.get_xlim()
if left < right:
return left, right
else:
return right, left
def set_xbound(self, lower=None, upper=None):
"""
Set the lower and upper numerical bounds of the x-axis.
This method will honor axes inversion regardless of parameter order.
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_xbound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.xaxis_inverted():
if lower < upper:
self.set_xlim(upper, lower)
else:
self.set_xlim(lower, upper)
else:
if lower < upper:
self.set_xlim(lower, upper)
else:
self.set_xlim(upper, lower)
def get_xlim(self):
"""
Get the x-axis range [*xmin*, *xmax*]
"""
return tuple(self.viewLim.intervalx)
def set_xlim(self, xmin=None, xmax=None, emit=True, **kwargs):
"""
call signature::
set_xlim(self, *args, **kwargs)
Set the limits for the xaxis
Returns the current xlimits as a length 2 tuple: [*xmin*, *xmax*]
Examples::
set_xlim((valmin, valmax))
set_xlim(valmin, valmax)
set_xlim(xmin=1) # xmax unchanged
set_xlim(xmax=1) # xmin unchanged
Keyword arguments:
*xmin*: scalar
the min of the xlim
*xmax*: scalar
the max of the xlim
*emit*: [ True | False ]
notify observers of lim change
ACCEPTS: len(2) sequence of floats
"""
if xmax is None and iterable(xmin):
xmin,xmax = xmin
self._process_unit_info(xdata=(xmin, xmax))
if xmin is not None:
xmin = self.convert_xunits(xmin)
if xmax is not None:
xmax = self.convert_xunits(xmax)
old_xmin,old_xmax = self.get_xlim()
if xmin is None: xmin = old_xmin
if xmax is None: xmax = old_xmax
xmin, xmax = mtransforms.nonsingular(xmin, xmax, increasing=False)
xmin, xmax = self.xaxis.limit_range_for_scale(xmin, xmax)
self.viewLim.intervalx = (xmin, xmax)
if emit:
self.callbacks.process('xlim_changed', self)
# Call all of the other x-axes that are shared with this one
for other in self._shared_x_axes.get_siblings(self):
if other is not self:
other.set_xlim(self.viewLim.intervalx, emit=False)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return xmin, xmax
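# Usage sketch (illustrative):
#
#     >>> ax.set_xlim(0, 10)
#     >>> ax.set_xlim(xmax=5)               # leave xmin unchanged
#     >>> ax.set_xlim((2, 8), emit=False)   # do not notify observers/shared axes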
def get_xscale(self):
'return the xaxis scale string: %s' % (
", ".join(mscale.get_scale_names()))
return self.xaxis.get_scale()
def set_xscale(self, value, **kwargs):
"""
call signature::
set_xscale(value)
Set the scaling of the x-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
"""
self.xaxis.set_scale(value, **kwargs)
self.autoscale_view()
self._update_transScale()
set_xscale.__doc__ = cbook.dedent(set_xscale.__doc__) % {
'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]),
'scale_docs': mscale.get_scale_docs().strip()}
def get_xticks(self, minor=False):
'Return the x ticks as a list of locations'
return self.xaxis.get_ticklocs(minor=minor)
def set_xticks(self, ticks, minor=False):
"""
Set the x ticks with list of *ticks*
ACCEPTS: sequence of floats
"""
return self.xaxis.set_ticks(ticks, minor=minor)
def get_xmajorticklabels(self):
'Get the xtick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_majorticklabels())
def get_xminorticklabels(self):
'Get the xtick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_minorticklabels())
def get_xticklabels(self, minor=False):
'Get the xtick labels as a list of Text instances'
return cbook.silent_list('Text xticklabel',
self.xaxis.get_ticklabels(minor=minor))
def set_xticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
call signature::
set_xticklabels(labels, fontdict=None, minor=False, **kwargs)
Set the xtick labels with list of strings *labels*. Return a
list of axis text instances.
*kwargs* set the :class:`~matplotlib.text.Text` properties.
Valid properties are
%(Text)s
ACCEPTS: sequence of strings
"""
return self.xaxis.set_ticklabels(labels, fontdict,
minor=minor, **kwargs)
set_xticklabels.__doc__ = cbook.dedent(
set_xticklabels.__doc__) % martist.kwdocd
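# Usage sketch (tick locations and labels are usually set together):
#
#     >>> ax.set_xticks([0, 0.5, 1.0])
#     >>> ax.set_xticklabels(['low', 'mid', 'high'], rotation=45)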
def invert_yaxis(self):
"Invert the y-axis."
left, right = self.get_ylim()
self.set_ylim(right, left)
def yaxis_inverted(self):
'Returns True if the y-axis is inverted.'
left, right = self.get_ylim()
return right < left
def get_ybound(self):
"Return y-axis numerical bounds in the form of lowerBound < upperBound"
left, right = self.get_ylim()
if left < right:
return left, right
else:
return right, left
def set_ybound(self, lower=None, upper=None):
"""Set the lower and upper numerical bounds of the y-axis.
This method will honor axes inversion regardless of parameter order.
"""
if upper is None and iterable(lower):
lower,upper = lower
old_lower,old_upper = self.get_ybound()
if lower is None: lower = old_lower
if upper is None: upper = old_upper
if self.yaxis_inverted():
if lower < upper:
self.set_ylim(upper, lower)
else:
self.set_ylim(lower, upper)
else:
if lower < upper:
self.set_ylim(lower, upper)
else:
self.set_ylim(upper, lower)
def get_ylim(self):
"""
Get the y-axis range [*ymin*, *ymax*]
"""
return tuple(self.viewLim.intervaly)
def set_ylim(self, ymin=None, ymax=None, emit=True, **kwargs):
"""
call signature::
set_ylim(self, *args, **kwargs):
Set the limits for the yaxis; v = [ymin, ymax]::
set_ylim((valmin, valmax))
set_ylim(valmin, valmax)
set_ylim(ymin=1) # ymax unchanged
set_ylim(ymax=1) # ymin unchanged
Keyword arguments:
*ymin*: scalar
the min of the ylim
*ymax*: scalar
the max of the ylim
*emit*: [ True | False ]
notify observers of lim change
Returns the current ylimits as a length 2 tuple
ACCEPTS: len(2) sequence of floats
"""
if ymax is None and iterable(ymin):
ymin,ymax = ymin
if ymin is not None:
ymin = self.convert_yunits(ymin)
if ymax is not None:
ymax = self.convert_yunits(ymax)
old_ymin,old_ymax = self.get_ylim()
if ymin is None: ymin = old_ymin
if ymax is None: ymax = old_ymax
ymin, ymax = mtransforms.nonsingular(ymin, ymax, increasing=False)
ymin, ymax = self.yaxis.limit_range_for_scale(ymin, ymax)
self.viewLim.intervaly = (ymin, ymax)
if emit:
self.callbacks.process('ylim_changed', self)
# Call all of the other y-axes that are shared with this one
for other in self._shared_y_axes.get_siblings(self):
if other is not self:
other.set_ylim(self.viewLim.intervaly, emit=False)
if (other.figure != self.figure and
other.figure.canvas is not None):
other.figure.canvas.draw_idle()
return ymin, ymax
def get_yscale(self):
'return the yaxis scale string: %s' % (
", ".join(mscale.get_scale_names()))
return self.yaxis.get_scale()
def set_yscale(self, value, **kwargs):
"""
call signature::
set_yscale(value)
Set the scaling of the y-axis: %(scale)s
ACCEPTS: [%(scale)s]
Different kwargs are accepted, depending on the scale:
%(scale_docs)s
"""
self.yaxis.set_scale(value, **kwargs)
self.autoscale_view()
self._update_transScale()
set_yscale.__doc__ = cbook.dedent(set_yscale.__doc__) % {
'scale': ' | '.join([repr(x) for x in mscale.get_scale_names()]),
'scale_docs': mscale.get_scale_docs().strip()}
def get_yticks(self, minor=False):
'Return the y ticks as a list of locations'
return self.yaxis.get_ticklocs(minor=minor)
def set_yticks(self, ticks, minor=False):
"""
Set the y ticks with list of *ticks*
ACCEPTS: sequence of floats
Keyword arguments:
*minor*: [ False | True ]
Sets the minor ticks if True
"""
return self.yaxis.set_ticks(ticks, minor=minor)
def get_ymajorticklabels(self):
'Get the ytick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_majorticklabels())
def get_yminorticklabels(self):
'Get the ytick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_minorticklabels())
def get_yticklabels(self, minor=False):
'Get the ytick labels as a list of Text instances'
return cbook.silent_list('Text yticklabel',
self.yaxis.get_ticklabels(minor=minor))
def set_yticklabels(self, labels, fontdict=None, minor=False, **kwargs):
"""
call signature::
set_yticklabels(labels, fontdict=None, minor=False, **kwargs)
Set the ytick labels with list of strings *labels*. Return a list of
:class:`~matplotlib.text.Text` instances.
*kwargs* set :class:`~matplotlib.text.Text` properties for the labels.
Valid properties are
%(Text)s
ACCEPTS: sequence of strings
"""
return self.yaxis.set_ticklabels(labels, fontdict,
minor=minor, **kwargs)
set_yticklabels.__doc__ = cbook.dedent(
set_yticklabels.__doc__) % martist.kwdocd
def xaxis_date(self, tz=None):
"""Sets up x-axis ticks and labels that treat the x data as dates.
*tz* is the time zone to use in labeling dates. Defaults to rc value.
"""
xmin, xmax = self.dataLim.intervalx
if xmin==0.:
# no data has been added - let's set the default datalim.
# We should probably use a better proxy for whether the datalim
# has been updated than the ignore setting
dmax = today = datetime.date.today()
dmin = today-datetime.timedelta(days=10)
self._process_unit_info(xdata=(dmin, dmax))
dmin, dmax = self.convert_xunits([dmin, dmax])
self.viewLim.intervalx = dmin, dmax
self.dataLim.intervalx = dmin, dmax
locator = self.xaxis.get_major_locator()
if not isinstance(locator, mdates.DateLocator):
locator = mdates.AutoDateLocator(tz)
self.xaxis.set_major_locator(locator)
# the autolocator uses the viewlim to pick the right date
# locator, but it may not have correct viewlim before an
# autoscale. If the viewlim is still zero..1, set it to the
# datalim and the autoscaler will update it on request
if self.viewLim.intervalx[0]==0.:
self.viewLim.intervalx = tuple(self.dataLim.intervalx)
locator.refresh()
formatter = self.xaxis.get_major_formatter()
if not isinstance(formatter, mdates.DateFormatter):
formatter = mdates.AutoDateFormatter(locator, tz)
self.xaxis.set_major_formatter(formatter)
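# Usage sketch (assumes date values, e.g. ``datetime.date`` instances,
# have already been plotted on the x-axis):
#
#     >>> ax.xaxis_date()     # install a date locator/formatter on the x-axis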
def yaxis_date(self, tz=None):
"""Sets up y-axis ticks and labels that treat the y data as dates.
*tz* is the time zone to use in labeling dates. Defaults to rc value.
"""
ymin, ymax = self.dataLim.intervaly
if ymin==0.:
# no data has been added - let's set the default datalim.
# We should probably use a better proxy for whether the datalim
# has been updated than the ignore setting
dmax = today = datetime.date.today()
dmin = today-datetime.timedelta(days=10)
self._process_unit_info(ydata=(dmin, dmax))
dmin, dmax = self.convert_yunits([dmin, dmax])
self.viewLim.intervaly = dmin, dmax
self.dataLim.intervaly = dmin, dmax
locator = self.yaxis.get_major_locator()
if not isinstance(locator, mdates.DateLocator):
locator = mdates.AutoDateLocator(tz)
self.yaxis.set_major_locator(locator)
# the autolocator uses the viewlim to pick the right date
# locator, but it may not have correct viewlim before an
# autoscale. If the viewlim is still zero..1, set it to the
# datalim and the autoscaler will update it on request
if self.viewLim.intervaly[0]==0.:
self.viewLim.intervaly = tuple(self.dataLim.intervaly)
locator.refresh()
formatter = self.yaxis.get_major_formatter()
if not isinstance(formatter, mdates.DateFormatter):
formatter = mdates.AutoDateFormatter(locator, tz)
self.yaxis.set_major_formatter(formatter)
def format_xdata(self, x):
"""
Return *x* formatted as a string. This function will use the
:attr:`fmt_xdata` attribute if it is callable, else will fall back
on the xaxis major formatter.
"""
try: return self.fmt_xdata(x)
except TypeError:
func = self.xaxis.get_major_formatter().format_data_short
val = func(x)
return val
def format_ydata(self, y):
"""
Return *y* formatted as a string. This function will use the
:attr:`fmt_ydata` attribute if it is callable, else will fall
back on the yaxis major formatter.
"""
try: return self.fmt_ydata(y)
except TypeError:
func = self.yaxis.get_major_formatter().format_data_short
val = func(y)
return val
def format_coord(self, x, y):
'return a format string formatting the *x*, *y* coord'
if x is None:
x = '???'
if y is None:
y = '???'
xs = self.format_xdata(x)
ys = self.format_ydata(y)
return 'x=%s, y=%s'%(xs,ys)
#### Interactive manipulation
def can_zoom(self):
"""
Return *True* if this axes supports the zoom box
"""
return True
def get_navigate(self):
"""
Get whether the axes responds to navigation commands
"""
return self._navigate
def set_navigate(self, b):
"""
Set whether the axes responds to navigation toolbar commands
ACCEPTS: [ True | False ]
"""
self._navigate = b
def get_navigate_mode(self):
"""
Get the navigation toolbar button status: 'PAN', 'ZOOM', or None
"""
return self._navigate_mode
def set_navigate_mode(self, b):
"""
Set the navigation toolbar button status;
.. warning::
this is not a user-API function.
"""
self._navigate_mode = b
def start_pan(self, x, y, button):
"""
Called when a pan operation has started.
*x*, *y* are the mouse coordinates in display coords.
button is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
.. note::
Intended to be overridden by new projection types.
"""
self._pan_start = cbook.Bunch(
lim = self.viewLim.frozen(),
trans = self.transData.frozen(),
trans_inverse = self.transData.inverted().frozen(),
bbox = self.bbox.frozen(),
x = x,
y = y
)
def end_pan(self):
"""
Called when a pan operation completes (when the mouse button
is up.)
.. note::
Intended to be overridden by new projection types.
"""
del self._pan_start
def drag_pan(self, button, key, x, y):
"""
Called when the mouse moves during a pan operation.
*button* is the mouse button number:
* 1: LEFT
* 2: MIDDLE
* 3: RIGHT
*key* is a "shift" key
*x*, *y* are the mouse coordinates in display coords.
.. note::
Intended to be overridden by new projection types.
"""
def format_deltas(key, dx, dy):
if key=='control':
if(abs(dx)>abs(dy)):
dy = dx
else:
dx = dy
elif key=='x':
dy = 0
elif key=='y':
dx = 0
elif key=='shift':
if 2*abs(dx) < abs(dy):
dx=0
elif 2*abs(dy) < abs(dx):
dy=0
elif(abs(dx)>abs(dy)):
dy=dy/abs(dy)*abs(dx)
else:
dx=dx/abs(dx)*abs(dy)
return (dx,dy)
p = self._pan_start
dx = x - p.x
dy = y - p.y
if dx == 0 and dy == 0:
return
if button == 1:
dx, dy = format_deltas(key, dx, dy)
result = p.bbox.translated(-dx, -dy) \
.transformed(p.trans_inverse)
elif button == 3:
try:
dx = -dx / float(self.bbox.width)
dy = -dy / float(self.bbox.height)
dx, dy = format_deltas(key, dx, dy)
if self.get_aspect() != 'auto':
dx = 0.5 * (dx + dy)
dy = dx
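# dx, dy are now signed fractions of the axes size; 10**fraction below
# gives a smooth multiplicative zoom factor per axis, applied about the
# data-space point where the drag started.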
alpha = np.power(10.0, (dx, dy))
start = p.trans_inverse.transform_point((p.x, p.y))
lim_points = p.lim.get_points()
result = start + alpha * (lim_points - start)
result = mtransforms.Bbox(result)
except OverflowError:
warnings.warn('Overflow while panning')
return
self.set_xlim(*result.intervalx)
self.set_ylim(*result.intervaly)
def get_cursor_props(self):
"""
return the cursor properties as a (*linewidth*, *color*)
tuple, where *linewidth* is a float and *color* is an RGBA
tuple
"""
return self._cursorProps
def set_cursor_props(self, *args):
"""
Set the cursor property as::
ax.set_cursor_props(linewidth, color)
or::
ax.set_cursor_props((linewidth, color))
ACCEPTS: a (*float*, *color*) tuple
"""
if len(args)==1:
lw, c = args[0]
elif len(args)==2:
lw, c = args
else:
raise ValueError('args must be a (linewidth, color) tuple')
c = mcolors.colorConverter.to_rgba(c)
self._cursorProps = lw, c
def connect(self, s, func):
"""
Register observers to be notified when certain events occur. The
callback function must have the following signature::
func(ax) # where ax is the instance making the callback.
The following events can be connected to:
'xlim_changed','ylim_changed'
The connection id is returned - you can use this with
disconnect to disconnect from the axes event
"""
raise DeprecationWarning('use the callbacks CallbackRegistry instance '
'instead')
def disconnect(self, cid):
'disconnect from the Axes event.'
raise DeprecationWarning('use the callbacks CallbackRegistry instance '
'instead')
def get_children(self):
'return a list of child artists'
children = []
children.append(self.xaxis)
children.append(self.yaxis)
children.extend(self.lines)
children.extend(self.patches)
children.extend(self.texts)
children.extend(self.tables)
children.extend(self.artists)
children.extend(self.images)
if self.legend_ is not None:
children.append(self.legend_)
children.extend(self.collections)
children.append(self.title)
children.append(self.patch)
children.append(self.frame)
return children
def contains(self,mouseevent):
"""Test whether the mouse event occured in the axes.
Returns T/F, {}
"""
if callable(self._contains): return self._contains(self,mouseevent)
return self.patch.contains(mouseevent)
def pick(self, *args):
"""
call signature::
pick(mouseevent)
each child artist will fire a pick event if mouseevent is over
the artist and the artist has picker set
"""
if len(args)>1:
raise DeprecationWarning('New pick API implemented -- '
'see API_CHANGES in the src distribution')
martist.Artist.pick(self,args[0])
def __pick(self, x, y, trans=None, among=None):
"""
Return the artist under point that is closest to the *x*, *y*.
If *trans* is *None*, *x*, and *y* are in window coords,
(0,0 = lower left). Otherwise, *trans* is a
:class:`~matplotlib.transforms.Transform` that specifies the
coordinate system of *x*, *y*.
The selection of artists from amongst which the pick function
finds an artist can be narrowed using the optional keyword
argument *among*. If provided, this should be either a sequence
of permitted artists or a function taking an artist as its
argument and returning a true value if and only if that artist
can be selected.
Note this algorithm calculates distance to the vertices of the
polygon, so if you want to pick a patch, click on the edge!
"""
# MGDTODO: Needs updating
if trans is not None:
xywin = trans.transform_point((x,y))
else:
xywin = x,y
def dist_points(p1, p2):
'return the distance between two points'
x1, y1 = p1
x2, y2 = p2
return math.sqrt((x1-x2)**2+(y1-y2)**2)
def dist_x_y(p1, x, y):
'*x* and *y* are arrays; return the distance to the closest point'
x1, y1 = p1
return min(np.sqrt((x-x1)**2+(y-y1)**2))
def dist(a):
if isinstance(a, Text):
bbox = a.get_window_extent()
l,b,w,h = bbox.bounds
verts = (l,b), (l,b+h), (l+w,b+h), (l+w, b)
xt, yt = zip(*verts)
elif isinstance(a, Patch):
path = a.get_path()
tverts = a.get_transform().transform_path(path)
xt, yt = zip(*tverts)
elif isinstance(a, mlines.Line2D):
xdata = a.get_xdata(orig=False)
ydata = a.get_ydata(orig=False)
xt, yt = a.get_transform().numerix_x_y(xdata, ydata)
return dist_x_y(xywin, np.asarray(xt), np.asarray(yt))
artists = self.lines + self.patches + self.texts
if callable(among):
artists = filter(among, artists)
elif iterable(among):
amongd = dict([(k,1) for k in among])
artists = [a for a in artists if a in amongd]
elif among is None:
pass
else:
raise ValueError('among must be callable or iterable')
if not len(artists): return None
ds = [ (dist(a),a) for a in artists]
ds.sort()
return ds[0][1]
#### Labelling
def get_title(self):
"""
Get the title text string.
"""
return self.title.get_text()
def set_title(self, label, fontdict=None, **kwargs):
"""
call signature::
set_title(label, fontdict=None, **kwargs):
Set the title for the axes.
kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
for information on how overriding works and on the optional args
"""
default = {
'fontsize':rcParams['axes.titlesize'],
'verticalalignment' : 'bottom',
'horizontalalignment' : 'center'
}
self.title.set_text(label)
self.title.update(default)
if fontdict is not None: self.title.update(fontdict)
self.title.update(kwargs)
return self.title
set_title.__doc__ = cbook.dedent(set_title.__doc__) % martist.kwdocd
def get_xlabel(self):
"""
Get the xlabel text string.
"""
label = self.xaxis.get_label()
return label.get_text()
def set_xlabel(self, xlabel, fontdict=None, **kwargs):
"""
call signature::
set_xlabel(xlabel, fontdict=None, **kwargs)
Set the label for the xaxis.
Valid kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
for information on how overriding works and on the optional args
"""
label = self.xaxis.get_label()
label.set_text(xlabel)
if fontdict is not None: label.update(fontdict)
label.update(kwargs)
return label
set_xlabel.__doc__ = cbook.dedent(set_xlabel.__doc__) % martist.kwdocd
def get_ylabel(self):
"""
Get the ylabel text string.
"""
label = self.yaxis.get_label()
return label.get_text()
def set_ylabel(self, ylabel, fontdict=None, **kwargs):
"""
call signature::
set_ylabel(ylabel, fontdict=None, **kwargs)
Set the label for the yaxis
Valid kwargs are Text properties:
%(Text)s
ACCEPTS: str
.. seealso::
:meth:`text`:
for information on how overriding works and on the optional args
"""
label = self.yaxis.get_label()
label.set_text(ylabel)
if fontdict is not None: label.update(fontdict)
label.update(kwargs)
return label
set_ylabel.__doc__ = cbook.dedent(set_ylabel.__doc__) % martist.kwdocd
def text(self, x, y, s, fontdict=None,
withdash=False, **kwargs):
"""
call signature::
text(x, y, s, fontdict=None, **kwargs)
Add text in string *s* to axis at location *x*, *y*, data
coordinates.
Keyword arguments:
*fontdict*:
A dictionary to override the default text properties.
If *fontdict* is *None*, the defaults are determined by your rc
parameters.
*withdash*: [ False | True ]
Creates a :class:`~matplotlib.text.TextWithDash` instance
instead of a :class:`~matplotlib.text.Text` instance.
Individual keyword arguments can be used to override any given
parameter::
text(x, y, s, fontsize=12)
The default transform specifies that text is in data coords,
alternatively, you can specify text in axis coords (0,0 is
lower-left and 1,1 is upper-right). The example below places
text in the center of the axes::
text(0.5, 0.5,'matplotlib',
horizontalalignment='center',
verticalalignment='center',
transform = ax.transAxes)
You can put a rectangular box around the text instance (eg. to
set a background color) by using the keyword *bbox*. *bbox* is
a dictionary of :class:`matplotlib.patches.Rectangle`
properties. For example::
text(x, y, s, bbox=dict(facecolor='red', alpha=0.5))
Valid kwargs are :class:`matplotlib.text.Text` properties:
%(Text)s
"""
default = {
'verticalalignment' : 'bottom',
'horizontalalignment' : 'left',
#'verticalalignment' : 'top',
'transform' : self.transData,
}
# At some point if we feel confident that TextWithDash
# is robust as a drop-in replacement for Text and that
# the performance impact of the heavier-weight class
# isn't too significant, it may make sense to eliminate
# the withdash kwarg and simply delegate whether there's
# a dash to TextWithDash and dashlength.
if withdash:
t = mtext.TextWithDash(
x=x, y=y, text=s,
)
else:
t = mtext.Text(
x=x, y=y, text=s,
)
self._set_artist_props(t)
t.update(default)
if fontdict is not None: t.update(fontdict)
t.update(kwargs)
self.texts.append(t)
t._remove_method = lambda h: self.texts.remove(h)
#if t.get_clip_on(): t.set_clip_box(self.bbox)
if 'clip_on' in kwargs: t.set_clip_box(self.bbox)
return t
text.__doc__ = cbook.dedent(text.__doc__) % martist.kwdocd
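# Usage sketch (illustrative):
#
#     >>> ax.text(2.0, 3.0, 'data coords')
#     >>> ax.text(0.5, 0.95, 'axes coords', transform=ax.transAxes,
#     ...         horizontalalignment='center')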
def annotate(self, *args, **kwargs):
"""
call signature::
annotate(s, xy, xytext=None, xycoords='data',
textcoords='data', arrowprops=None, **kwargs)
Keyword arguments:
%(Annotation)s
.. plot:: mpl_examples/pylab_examples/annotation_demo2.py
"""
a = mtext.Annotation(*args, **kwargs)
a.set_transform(mtransforms.IdentityTransform())
self._set_artist_props(a)
if kwargs.has_key('clip_on'): a.set_clip_path(self.patch)
self.texts.append(a)
return a
annotate.__doc__ = cbook.dedent(annotate.__doc__) % martist.kwdocd
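# Usage sketch (illustrative arguments):
#
#     >>> ax.annotate('local max', xy=(3, 1), xytext=(4, 1.5),
#     ...             arrowprops=dict(facecolor='black', shrink=0.05))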
#### Lines and spans
def axhline(self, y=0, xmin=0, xmax=1, **kwargs):
"""
call signature::
axhline(y=0, xmin=0, xmax=1, **kwargs)
Axis Horizontal Line
Draw a horizontal line at *y* from *xmin* to *xmax*. With the
default values of *xmin* = 0 and *xmax* = 1, this line will
always span the horizontal extent of the axes, regardless of
the xlim settings, even if you change them, eg. with the
:meth:`set_xlim` command. That is, the horizontal extent is
in axes coords: 0=left, 0.5=middle, 1.0=right but the *y*
location is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red hline at *y* = 0 that spans the xrange
>>> axhline(linewidth=4, color='r')
* draw a default hline at *y* = 1 that spans the xrange
>>> axhline(y=1)
* draw a default hline at *y* = .5 that spans the middle half of
the xrange
>>> axhline(y=.5, xmin=0.25, xmax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
ymin, ymax = self.get_ybound()
# We need to strip away the units for comparison with
# non-unitized bounds
yy = self.convert_yunits( y )
scaley = (yy<ymin) or (yy>ymax)
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
l = mlines.Line2D([xmin,xmax], [y,y], transform=trans, **kwargs)
l.x_isdata = False
self.add_line(l)
self.autoscale_view(scalex=False, scaley=scaley)
return l
axhline.__doc__ = cbook.dedent(axhline.__doc__) % martist.kwdocd
def axvline(self, x=0, ymin=0, ymax=1, **kwargs):
"""
call signature::
axvline(x=0, ymin=0, ymax=1, **kwargs)
Axis Vertical Line
Draw a vertical line at *x* from *ymin* to *ymax*. With the
default values of *ymin* = 0 and *ymax* = 1, this line will
always span the vertical extent of the axes, regardless of the
ylim settings, even if you change them, eg. with the
:meth:`set_ylim` command. That is, the vertical extent is in
axes coords: 0=bottom, 0.5=middle, 1.0=top but the *x* location
is in data coordinates.
Return value is the :class:`~matplotlib.lines.Line2D`
instance. kwargs are the same as kwargs to plot, and can be
used to control the line properties. Eg.,
* draw a thick red vline at *x* = 0 that spans the yrange
>>> axvline(linewidth=4, color='r')
* draw a default vline at *x* = 1 that spans the yrange
>>> axvline(x=1)
* draw a default vline at *x* = .5 that spans the middle half of
the yrange
>>> axvline(x=.5, ymin=0.25, ymax=0.75)
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
xmin, xmax = self.get_xbound()
# We need to strip away the units for comparison with
# non-unitized bounds
xx = self.convert_xunits( x )
scalex = (xx<xmin) or (xx>xmax)
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
l = mlines.Line2D([x,x], [ymin,ymax] , transform=trans, **kwargs)
l.y_isdata = False
self.add_line(l)
self.autoscale_view(scalex=scalex, scaley=False)
return l
axvline.__doc__ = cbook.dedent(axvline.__doc__) % martist.kwdocd
def axhspan(self, ymin, ymax, xmin=0, xmax=1, **kwargs):
"""
call signature::
axhspan(ymin, ymax, xmin=0, xmax=1, **kwargs)
Axis Horizontal Span.
*y* coords are in data units and *x* coords are in axes (relative
0-1) units.
Draw a horizontal span (rectangle) from *ymin* to *ymax*.
With the default values of *xmin* = 0 and *xmax* = 1, this
always spans the xrange, regardless of the xlim settings, even
if you change them, eg. with the :meth:`set_xlim` command.
That is, the horizontal extent is in axes coords: 0=left,
0.5=middle, 1.0=right but the *y* location is in data
coordinates.
Return value is a :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a gray rectangle from *y* = 0.25-0.75 that spans the
horizontal extent of the axes
>>> axhspan(0.25, 0.75, facecolor='0.5', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon` properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/axhspan_demo.py
"""
trans = mtransforms.blended_transform_factory(
self.transAxes, self.transData)
# process the unit information
self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )
# first we need to strip away the units
xmin, xmax = self.convert_xunits( [xmin, xmax] )
ymin, ymax = self.convert_yunits( [ymin, ymax] )
verts = (xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
p.x_isdata = False
self.add_patch(p)
return p
axhspan.__doc__ = cbook.dedent(axhspan.__doc__) % martist.kwdocd
def axvspan(self, xmin, xmax, ymin=0, ymax=1, **kwargs):
"""
call signature::
axvspan(xmin, xmax, ymin=0, ymax=1, **kwargs)
Axis Vertical Span.
*x* coords are in data units and *y* coords are in axes (relative
0-1) units.
Draw a vertical span (rectangle) from *xmin* to *xmax*. With
the default values of *ymin* = 0 and *ymax* = 1, this always
spans the yrange, regardless of the ylim settings, even if you
change them, eg. with the :meth:`set_ylim` command. That is,
the vertical extent is in axes coords: 0=bottom, 0.5=middle,
1.0=top, but the *x* location is in data coordinates.
Return value is the :class:`matplotlib.patches.Polygon`
instance.
Examples:
* draw a vertical green translucent rectangle from x=1.25 to 1.55 that
spans the yrange of the axes
>>> axvspan(1.25, 1.55, facecolor='g', alpha=0.5)
Valid kwargs are :class:`~matplotlib.patches.Polygon`
properties:
%(Polygon)s
.. seealso::
:meth:`axhspan`:
for example plot and source code
"""
trans = mtransforms.blended_transform_factory(
self.transData, self.transAxes)
# process the unit information
self._process_unit_info( [xmin, xmax], [ymin, ymax], kwargs=kwargs )
# first we need to strip away the units
xmin, xmax = self.convert_xunits( [xmin, xmax] )
ymin, ymax = self.convert_yunits( [ymin, ymax] )
verts = [(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]
p = mpatches.Polygon(verts, **kwargs)
p.set_transform(trans)
p.y_isdata = False
self.add_patch(p)
return p
axvspan.__doc__ = cbook.dedent(axvspan.__doc__) % martist.kwdocd
def hlines(self, y, xmin, xmax, colors='k', linestyles='solid',
label='', **kwargs):
"""
call signature::
hlines(y, xmin, xmax, colors='k', linestyles='solid', **kwargs)
Plot horizontal lines at each *y* from *xmin* to *xmax*.
Returns the :class:`~matplotlib.collections.LineCollection`
that was added.
Required arguments:
*y*:
a 1-D numpy array or iterable.
*xmin* and *xmax*:
can be scalars or ``len(y)`` numpy arrays. If they are
scalars, then the respective values are constant, else the
widths of the lines are determined by *xmin* and *xmax*.
Optional keyword arguments:
*colors*:
a line collections color argument, either a single color
or a ``len(y)`` list of colors
*linestyles*:
[ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
**Example:**
.. plot:: mpl_examples/pylab_examples/hline_demo.py
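A minimal usage sketch (illustrative values only, not taken from the
demo script above):
>>> hlines([1, 2, 3], 0, 10, colors='r', linestyles='dashed')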
"""
if kwargs.get('fmt') is not None:
raise DeprecationWarning('hlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
# We do the conversion first since not all unitized data is uniform
y = self.convert_yunits( y )
xmin = self.convert_xunits( xmin )
xmax = self.convert_xunits( xmax )
if not iterable(y): y = [y]
if not iterable(xmin): xmin = [xmin]
if not iterable(xmax): xmax = [xmax]
y = np.asarray(y)
xmin = np.asarray(xmin)
xmax = np.asarray(xmax)
if len(xmin)==1:
xmin = np.resize( xmin, y.shape )
if len(xmax)==1:
xmax = np.resize( xmax, y.shape )
if len(xmin)!=len(y):
raise ValueError('xmin and y are unequal sized sequences')
if len(xmax)!=len(y):
raise ValueError('xmax and y are unequal sized sequences')
verts = [ ((thisxmin, thisy), (thisxmax, thisy))
for thisxmin, thisxmax, thisy in zip(xmin, xmax, y)]
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll)
coll.update(kwargs)
minx = min(xmin.min(), xmax.min())
maxx = max(xmin.max(), xmax.max())
miny = y.min()
maxy = y.max()
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
hlines.__doc__ = cbook.dedent(hlines.__doc__)
def vlines(self, x, ymin, ymax, colors='k', linestyles='solid',
label='', **kwargs):
"""
call signature::
vlines(x, ymin, ymax, colors='k', linestyles='solid', label='', **kwargs)
Plot vertical lines at each *x* from *ymin* to *ymax*. *ymin*
or *ymax* can be scalars or len(*x*) numpy arrays. If they are
scalars, then the respective values are constant, else the
heights of the lines are determined by *ymin* and *ymax*.
*colors*
a line collection color argument, either a single color
or a len(*x*) list of colors
*linestyles*
one of [ 'solid' | 'dashed' | 'dashdot' | 'dotted' ]
Returns the :class:`matplotlib.collections.LineCollection`
that was added.
kwargs are :class:`~matplotlib.collections.LineCollection` properties:
%(LineCollection)s
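A minimal usage sketch (illustrative values only):
>>> vlines([1, 2, 3], 0, 5, colors='b', linestyles='dotted')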
"""
if kwargs.get('fmt') is not None:
raise DeprecationWarning('vlines now uses a '
'collections.LineCollection and not a '
'list of Line2D to draw; see API_CHANGES')
self._process_unit_info(xdata=x, ydata=ymin, kwargs=kwargs)
# We do the conversion first since not all unitized data is uniform
x = self.convert_xunits( x )
ymin = self.convert_yunits( ymin )
ymax = self.convert_yunits( ymax )
if not iterable(x): x = [x]
if not iterable(ymin): ymin = [ymin]
if not iterable(ymax): ymax = [ymax]
x = np.asarray(x)
ymin = np.asarray(ymin)
ymax = np.asarray(ymax)
if len(ymin)==1:
ymin = np.resize( ymin, x.shape )
if len(ymax)==1:
ymax = np.resize( ymax, x.shape )
if len(ymin)!=len(x):
raise ValueError('ymin and x are unequal sized sequences')
if len(ymax)!=len(x):
raise ValueError('ymax and x are unequal sized sequences')
Y = np.array([ymin, ymax]).T
verts = [ ((thisx, thisymin), (thisx, thisymax))
for thisx, (thisymin, thisymax) in zip(x,Y)]
#print 'creating line collection'
coll = mcoll.LineCollection(verts, colors=colors,
linestyles=linestyles, label=label)
self.add_collection(coll)
coll.update(kwargs)
minx = min( x )
maxx = max( x )
miny = min( min(ymin), min(ymax) )
maxy = max( max(ymin), max(ymax) )
corners = (minx, miny), (maxx, maxy)
self.update_datalim(corners)
self.autoscale_view()
return coll
vlines.__doc__ = cbook.dedent(vlines.__doc__) % martist.kwdocd
#### Basic plotting
def plot(self, *args, **kwargs):
"""
Plot lines and/or markers to the
:class:`~matplotlib.axes.Axes`. *args* is a variable length
argument, allowing for multiple *x*, *y* pairs with an
optional format string. For example, each of the following is
legal::
plot(x, y) # plot x and y using default line style and color
plot(x, y, 'bo') # plot x and y using blue circle markers
plot(y) # plot y using x as index array 0..N-1
plot(y, 'r+') # ditto, but with red plusses
If *x* and/or *y* is 2-dimensional, then the corresponding columns
will be plotted.
An arbitrary number of *x*, *y*, *fmt* groups can be
specified, as in::
a.plot(x1, y1, 'g^', x2, y2, 'g-')
Return value is a list of lines that were added.
The following format string characters are accepted to control
the line style or marker:
================ ===============================
character description
================ ===============================
'-' solid line style
'--' dashed line style
'-.' dash-dot line style
':' dotted line style
'.' point marker
',' pixel marker
'o' circle marker
'v' triangle_down marker
'^' triangle_up marker
'<' triangle_left marker
'>' triangle_right marker
'1' tri_down marker
'2' tri_up marker
'3' tri_left marker
'4' tri_right marker
's' square marker
'p' pentagon marker
'*' star marker
'h' hexagon1 marker
'H' hexagon2 marker
'+' plus marker
'x' x marker
'D' diamond marker
'd' thin_diamond marker
'|' vline marker
'_' hline marker
================ ===============================
The following color abbreviations are supported:
========== ========
character color
========== ========
'b' blue
'g' green
'r' red
'c' cyan
'm' magenta
'y' yellow
'k' black
'w' white
========== ========
In addition, you can specify colors in many weird and
wonderful ways, including full names (``'green'``), hex
strings (``'#008000'``), RGB or RGBA tuples (``(0,1,0,1)``) or
grayscale intensities as a string (``'0.8'``). Of these, the
string specifications can be used in place of a ``fmt`` group,
but the tuple forms can be used only as ``kwargs``.
Line styles and colors are combined in a single format string, as in
``'bo'`` for blue circles.
The *kwargs* can be used to set line properties (any property that has
a ``set_*`` method). You can use this to set a line label (for auto
legends), linewidth, antialiasing, marker face color, etc. Here is an
example::
plot([1,2,3], [1,2,3], 'go-', label='line 1', linewidth=2)
plot([1,2,3], [1,4,9], 'rs', label='line 2')
axis([0, 4, 0, 10])
legend()
If you make multiple lines with one plot command, the kwargs
apply to all those lines, e.g.::
plot(x1, y1, x2, y2, antialiased=False)
Neither line will be antialiased.
You do not need to use format strings, which are just
abbreviations. All of the line properties can be controlled
by keyword arguments. For example, you can set the color,
marker, linestyle, and markercolor with::
plot(x, y, color='green', linestyle='dashed', marker='o',
markerfacecolor='blue', markersize=12). See
:class:`~matplotlib.lines.Line2D` for details.
The kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
kwargs *scalex* and *scaley*, if defined, are passed on to
:meth:`~matplotlib.axes.Axes.autoscale_view` to determine
whether the *x* and *y* axes are autoscaled; the default is
*True*.
"""
scalex = kwargs.pop( 'scalex', True)
scaley = kwargs.pop( 'scaley', True)
if not self._hold: self.cla()
lines = []
for line in self._get_lines(*args, **kwargs):
self.add_line(line)
lines.append(line)
self.autoscale_view(scalex=scalex, scaley=scaley)
return lines
plot.__doc__ = cbook.dedent(plot.__doc__) % martist.kwdocd
def plot_date(self, x, y, fmt='bo', tz=None, xdate=True, ydate=False,
**kwargs):
"""
call signature::
plot_date(x, y, fmt='bo', tz=None, xdate=True, ydate=False, **kwargs)
Similar to the :func:`~matplotlib.pyplot.plot` command, except
the *x* or *y* (or both) data is considered to be dates, and the
axis is labeled accordingly.
*x* and/or *y* can be a sequence of dates represented as float
days since 0001-01-01 UTC.
Keyword arguments:
*fmt*: string
The plot format string.
*tz*: [ None | timezone string ]
The time zone to use in labeling dates. If *None*, defaults to rc
value.
*xdate*: [ True | False ]
If *True*, the *x*-axis will be labeled with dates.
*ydate*: [ False | True ]
If *True*, the *y*-axis will be labeled with dates.
Note if you are using custom date tickers and formatters, it
may be necessary to set the formatters/locators after the call
to :meth:`plot_date` since :meth:`plot_date` will set the
default tick locator to
:class:`matplotlib.ticker.AutoDateLocator` (if the tick
locator is not already set to a
:class:`matplotlib.ticker.DateLocator` instance) and the
default tick formatter to
:class:`matplotlib.ticker.AutoDateFormatter` (if the tick
formatter is not already set to a
:class:`matplotlib.ticker.DateFormatter` instance).
Valid kwargs are :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:mod:`~matplotlib.dates`:
for helper functions
:func:`~matplotlib.dates.date2num`,
:func:`~matplotlib.dates.num2date` and
:func:`~matplotlib.dates.drange`:
for help on creating the required floating point
dates.
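A minimal usage sketch, assuming the pylab namespace (where
:func:`~matplotlib.dates.date2num` is available) and hypothetical
sequences *dates* (datetime objects) and *values* of equal length:
>>> plot_date(date2num(dates), values, 'b-')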
"""
if not self._hold: self.cla()
ret = self.plot(x, y, fmt, **kwargs)
if xdate:
self.xaxis_date(tz)
if ydate:
self.yaxis_date(tz)
self.autoscale_view()
return ret
plot_date.__doc__ = cbook.dedent(plot_date.__doc__) % martist.kwdocd
def loglog(self, *args, **kwargs):
"""
call signature::
loglog(*args, **kwargs)
Make a plot with log scaling on the *x* and *y* axis.
:func:`~matplotlib.pyplot.loglog` supports all the keyword
arguments of :func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basex*/*basey*: scalar > 1
base of the *x*/*y* logarithm
*subsx*/*subsy*: [ None | sequence ]
the location of the minor *x*/*y* ticks; *None* defaults
to autosubs, which depend on the number of decades in the
plot; see :meth:`matplotlib.axes.Axes.set_xscale` /
:meth:`matplotlib.axes.Axes.set_yscale` for details
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/log_demo.py
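A minimal usage sketch (illustrative values, assuming the pylab
namespace for ``arange`` and ``exp``):
>>> t = arange(0.01, 20.0, 0.01)
>>> loglog(t, 20*exp(-t/10.0), basex=2)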
"""
if not self._hold: self.cla()
dx = {'basex': kwargs.pop('basex', 10),
'subsx': kwargs.pop('subsx', None),
}
dy = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
}
self.set_xscale('log', **dx)
self.set_yscale('log', **dy)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
loglog.__doc__ = cbook.dedent(loglog.__doc__) % martist.kwdocd
def semilogx(self, *args, **kwargs):
"""
call signature::
semilogx(*args, **kwargs)
Make a plot with log scaling on the *x* axis.
:func:`semilogx` supports all the keyword arguments of
:func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_xscale`.
Notable keyword arguments:
*basex*: scalar > 1
base of the *x* logarithm
*subsx*: [ None | sequence ]
The location of the minor xticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_xscale` for
details.
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`:
For example code and figure
"""
if not self._hold: self.cla()
d = {'basex': kwargs.pop( 'basex', 10),
'subsx': kwargs.pop( 'subsx', None),
}
self.set_xscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
semilogx.__doc__ = cbook.dedent(semilogx.__doc__) % martist.kwdocd
def semilogy(self, *args, **kwargs):
"""
call signature::
semilogy(*args, **kwargs)
Make a plot with log scaling on the *y* axis.
:func:`semilogy` supports all the keyword arguments of
:func:`~matplotlib.pyplot.plot` and
:meth:`matplotlib.axes.Axes.set_yscale`.
Notable keyword arguments:
*basey*: scalar > 1
Base of the *y* logarithm
*subsy*: [ None | sequence ]
The location of the minor yticks; *None* defaults to
autosubs, which depend on the number of decades in the
plot; see :meth:`~matplotlib.axes.Axes.set_yscale` for
details.
The remaining valid kwargs are
:class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
.. seealso::
:meth:`loglog`:
For example code and figure
"""
if not self._hold: self.cla()
d = {'basey': kwargs.pop('basey', 10),
'subsy': kwargs.pop('subsy', None),
}
self.set_yscale('log', **d)
b = self._hold
self._hold = True # we've already processed the hold
l = self.plot(*args, **kwargs)
self._hold = b # restore the hold
return l
semilogy.__doc__ = cbook.dedent(semilogy.__doc__) % martist.kwdocd
def acorr(self, x, **kwargs):
"""
call signature::
acorr(x, normed=False, detrend=mlab.detrend_none, usevlines=False,
maxlags=None, **kwargs)
Plot the autocorrelation of *x*. If *normed* = *True*,
normalize the data by the autocorrelation at 0-th lag. *x* is
detrended by the *detrend* callable (default: no detrending).
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length 2*maxlags+1 lag vector
- *c* is the 2*maxlags+1 auto correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :meth:`plot`
The default *linestyle* is None and the default *marker* is
``'o'``, though these can be overridden with keyword args.
The cross correlation is performed with
:func:`numpy.correlate` with *mode* = 2.
If *usevlines* is *True*, :meth:`~matplotlib.axes.Axes.vlines`
rather than :meth:`~matplotlib.axes.Axes.plot` is used to draw
vertical lines from the origin to the acorr. Otherwise, the
plot style is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
*maxlags* is a positive integer detailing the number of lags
to show. The default value of *None* will return all
:math:`2 \mathrm{len}(x) - 1` lags.
If *usevlines* is *True*, the return value is instead a tuple
(*lags*, *c*, *linecol*, *b*)
where
- *linecol* is the
:class:`~matplotlib.collections.LineCollection`
- *b* is the *x*-axis.
.. seealso::
:meth:`~matplotlib.axes.Axes.plot` or
:meth:`~matplotlib.axes.Axes.vlines`: For documentation on
valid kwargs.
**Example:**
:func:`~matplotlib.pyplot.xcorr` above, and
:func:`~matplotlib.pyplot.acorr` below.
**Example:**
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
"""
return self.xcorr(x, x, **kwargs)
acorr.__doc__ = cbook.dedent(acorr.__doc__) % martist.kwdocd
def xcorr(self, x, y, normed=False, detrend=mlab.detrend_none,
usevlines=False, maxlags=None, **kwargs):
"""
call signature::
xcorr(x, y, normed=False, detrend=mlab.detrend_none,
usevlines=False, **kwargs):
Plot the cross correlation between *x* and *y*. If *normed* =
*True*, normalize the data by the cross correlation at 0-th
lag. *x* and *y* are detrended by the *detrend* callable
(default: no detrending). *x* and *y* must be of equal length.
Data are plotted as ``plot(lags, c, **kwargs)``
Return value is a tuple (*lags*, *c*, *line*) where:
- *lags* are a length ``2*maxlags+1`` lag vector
- *c* is the ``2*maxlags+1`` cross correlation vector
- *line* is a :class:`~matplotlib.lines.Line2D` instance
returned by :func:`~matplotlib.pyplot.plot`.
The default *linestyle* is *None* and the default *marker* is
'o', though these can be overridden with keyword args. The
cross correlation is performed with :func:`numpy.correlate`
with *mode* = 2.
If *usevlines* is *True*:
:func:`~matplotlib.pyplot.vlines`
rather than :func:`~matplotlib.pyplot.plot` is used to draw
vertical lines from the origin to the xcorr. Otherwise the
plotstyle is determined by the kwargs, which are
:class:`~matplotlib.lines.Line2D` properties.
In that case, the return value is instead a tuple (*lags*, *c*, *linecol*, *b*)
where *linecol* is the
:class:`matplotlib.collections.LineCollection` instance and
*b* is the *x*-axis.
*maxlags* is a positive integer detailing the number of lags to show.
The default value of *None* will return all ``(2*len(x)-1)`` lags.
**Example:**
:func:`~matplotlib.pyplot.xcorr` above, and
:func:`~matplotlib.pyplot.acorr` below.
**Example:**
.. plot:: mpl_examples/pylab_examples/xcorr_demo.py
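A minimal usage sketch, assuming hypothetical equal-length 1-D
sequences *x* and *y* with more than 50 entries:
>>> lags, c, linecol, b = xcorr(x, y, usevlines=True, maxlags=50, normed=True)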
"""
Nx = len(x)
if Nx!=len(y):
raise ValueError('x and y must be equal length')
x = detrend(np.asarray(x))
y = detrend(np.asarray(y))
c = np.correlate(x, y, mode=2)
if normed: c/= np.sqrt(np.dot(x,x) * np.dot(y,y))
if maxlags is None: maxlags = Nx - 1
if maxlags >= Nx or maxlags < 1:
raise ValueError('maxlags must be None or strictly '
'positive < %d'%Nx)
lags = np.arange(-maxlags,maxlags+1)
c = c[Nx-1-maxlags:Nx+maxlags]
if usevlines:
a = self.vlines(lags, [0], c, **kwargs)
b = self.axhline(**kwargs)
else:
kwargs.setdefault('marker', 'o')
kwargs.setdefault('linestyle', 'None')
a, = self.plot(lags, c, **kwargs)
b = None
return lags, c, a, b
xcorr.__doc__ = cbook.dedent(xcorr.__doc__) % martist.kwdocd
def legend(self, *args, **kwargs):
"""
call signature::
legend(*args, **kwargs)
Place a legend on the current axes at location *loc*. Labels are a
sequence of strings and *loc* can be a string or an integer specifying
the legend location.
To make a legend with existing lines::
legend()
:meth:`legend` by itself will try and build a legend using the label
property of the lines/patches/collections. You can set the label of
a line by doing::
plot(x, y, label='my data')
or::
line.set_label('my data').
If the label is set to '_nolegend_', the item will not be shown in
the legend.
To automatically generate the legend from labels::
legend( ('label1', 'label2', 'label3') )
To make a legend for a list of lines and labels::
legend( (line1, line2, line3), ('label1', 'label2', 'label3') )
To make a legend at a given location, using a location argument::
legend( ('label1', 'label2', 'label3'), loc='upper left')
or::
legend( (line1, line2, line3), ('label1', 'label2', 'label3'), loc=2)
The location codes are
=============== =============
Location String Location Code
=============== =============
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== =============
If none of these locations is suitable, *loc* can be a 2-tuple
giving x,y in axes coords, ie::
loc = 0, 1 # left top
loc = 0.5, 0.5 # center
Keyword arguments:
*isaxes*: [ True | False ]
Indicates that this is an axes legend
*numpoints*: integer
The number of points in the legend line, default is 4
*prop*: [ None | FontProperties ]
A :class:`matplotlib.font_manager.FontProperties`
instance, or *None* to use rc settings.
*pad*: [ None | scalar ]
The fractional whitespace inside the legend border, between 0 and 1.
If *None*, use rc settings.
*markerscale*: [ None | scalar ]
The relative size of legend markers vs. original. If *None*, use rc
settings.
*shadow*: [ None | False | True ]
If *True*, draw a shadow behind legend. If *None*, use rc settings.
*labelsep*: [ None | scalar ]
The vertical space between the legend entries. If *None*, use rc
settings.
*handlelen*: [ None | scalar ]
The length of the legend lines. If *None*, use rc settings.
*handletextsep*: [ None | scalar ]
The space between the legend line and legend text. If *None*, use rc
settings.
*axespad*: [ None | scalar ]
The border between the axes and legend edge. If *None*, use rc
settings.
**Example:**
.. plot:: mpl_examples/api/legend_demo.py
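A minimal usage sketch (illustrative values only):
>>> plot([1, 2, 3], label='rising')
>>> plot([3, 2, 1], label='falling')
>>> legend(loc='upper center', shadow=True)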
"""
def get_handles():
handles = self.lines[:]
handles.extend(self.patches)
handles.extend([c for c in self.collections
if isinstance(c, mcoll.LineCollection)])
handles.extend([c for c in self.collections
if isinstance(c, mcoll.RegularPolyCollection)])
return handles
if len(args)==0:
handles = []
labels = []
for handle in get_handles():
label = handle.get_label()
if (label is not None and
label != '' and not label.startswith('_')):
handles.append(handle)
labels.append(label)
if len(handles) == 0:
warnings.warn("No labeled objects found. "
"Use label='...' kwarg on individual plots.")
return None
elif len(args)==1:
# LABELS
labels = args[0]
handles = [h for h, label in zip(get_handles(), labels)]
elif len(args)==2:
if is_string_like(args[1]) or isinstance(args[1], int):
# LABELS, LOC
labels, loc = args
handles = [h for h, label in zip(get_handles(), labels)]
kwargs['loc'] = loc
else:
# LINES, LABELS
handles, labels = args
elif len(args)==3:
# LINES, LABELS, LOC
handles, labels, loc = args
kwargs['loc'] = loc
else:
raise TypeError('Invalid arguments to legend')
handles = cbook.flatten(handles)
self.legend_ = mlegend.Legend(self, handles, labels, **kwargs)
return self.legend_
#### Specialized plotting
def step(self, x, y, *args, **kwargs):
'''
call signature::
step(x, y, *args, **kwargs)
Make a step plot. Additional keyword args to :func:`step` are the same
as those for :func:`~matplotlib.pyplot.plot`.
*x* and *y* must be 1-D sequences, and it is assumed, but not checked,
that *x* is uniformly increasing.
Keyword arguments:
*where*: [ 'pre' | 'post' | 'mid' ]
If 'pre', the interval from x[i] to x[i+1] has level y[i]
If 'post', that interval has level y[i+1]
If 'mid', the jumps in *y* occur half-way between the
*x*-values.
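A minimal usage sketch (illustrative values only):
>>> step([0, 1, 2, 3], [1, 2, 0, 3], where='post')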
'''
where = kwargs.pop('where', 'pre')
if where not in ('pre', 'post', 'mid'):
raise ValueError("'where' argument to step must be "
"'pre', 'post' or 'mid'")
kwargs['linestyle'] = 'steps-' + where
return self.plot(x, y, *args, **kwargs)
def bar(self, left, height, width=0.8, bottom=None,
color=None, edgecolor=None, linewidth=None,
yerr=None, xerr=None, ecolor=None, capsize=3,
align='edge', orientation='vertical', log=False,
**kwargs
):
"""
call signature::
bar(left, height, width=0.8, bottom=0,
color=None, edgecolor=None, linewidth=None,
yerr=None, xerr=None, ecolor=None, capsize=3,
align='edge', orientation='vertical', log=False)
Make a bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*left*, *height*, *width*, and *bottom* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ===============================================
Argument Description
======== ===============================================
*left* the x coordinates of the left sides of the bars
*height* the heights of the bars
======== ===============================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*width* the widths of the bars
*bottom* the y coordinates of the bottom edges of
the bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*align* 'edge' (default) | 'center'
*orientation* 'vertical' | 'horizontal'
*log* [False|True] False (default) leaves the
orientation axis as-is; True sets it to
log scale
=============== ==========================================
For vertical bars, *align* = 'edge' aligns bars by their left
edges in left, while *align* = 'center' interprets these
values as the *x* coordinates of the bar centers. For
horizontal bars, *align* = 'edge' aligns bars by their bottom
edges in bottom, while *align* = 'center' interprets these
values as the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
bar as the basis for stacked bar charts, or candlestick plots.
Other optional kwargs:
%(Rectangle)s
**Example:** A stacked bar chart.
.. plot:: mpl_examples/pylab_examples/bar_stacked.py
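A minimal usage sketch (illustrative values only):
>>> bar([0, 1, 2], [4, 7, 3], width=0.5, color='g', yerr=[0.5, 1.0, 0.3], align='center')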
"""
if not self._hold: self.cla()
label = kwargs.pop('label', '')
def make_iterable(x):
if not iterable(x):
return [x]
else:
return x
# make them safe to take len() of
_left = left
left = make_iterable(left)
height = make_iterable(height)
width = make_iterable(width)
_bottom = bottom
bottom = make_iterable(bottom)
linewidth = make_iterable(linewidth)
adjust_ylim = False
adjust_xlim = False
if orientation == 'vertical':
self._process_unit_info(xdata=left, ydata=height, kwargs=kwargs)
if log:
self.set_yscale('log')
# size width and bottom according to length of left
if _bottom is None:
if self.get_yscale() == 'log':
bottom = [1e-100]
adjust_ylim = True
else:
bottom = [0]
nbars = len(left)
if len(width) == 1:
width *= nbars
if len(bottom) == 1:
bottom *= nbars
elif orientation == 'horizontal':
self._process_unit_info(xdata=width, ydata=bottom, kwargs=kwargs)
if log:
self.set_xscale('log')
# size left and height according to length of bottom
if _left is None:
if self.get_xscale() == 'log':
left = [1e-100]
adjust_xlim = True
else:
left = [0]
nbars = len(bottom)
if len(left) == 1:
left *= nbars
if len(height) == 1:
height *= nbars
else:
raise ValueError('invalid orientation: %s' % orientation)
# do not convert to array here as unit info is lost
#left = np.asarray(left)
#height = np.asarray(height)
#width = np.asarray(width)
#bottom = np.asarray(bottom)
if len(linewidth) < nbars:
linewidth *= nbars
if color is None:
color = [None] * nbars
else:
color = list(mcolors.colorConverter.to_rgba_array(color))
if len(color) < nbars:
color *= nbars
if edgecolor is None:
edgecolor = [None] * nbars
else:
edgecolor = list(mcolors.colorConverter.to_rgba_array(edgecolor))
if len(edgecolor) < nbars:
edgecolor *= nbars
if yerr is not None:
if not iterable(yerr):
yerr = [yerr]*nbars
if xerr is not None:
if not iterable(xerr):
xerr = [xerr]*nbars
# FIXME: convert the following to proper input validation
# raising ValueError; don't use assert for this.
assert len(left)==nbars, "argument 'left' must be %d or scalar" % nbars
assert len(height)==nbars, ("argument 'height' must be %d or scalar" %
nbars)
assert len(width)==nbars, ("argument 'width' must be %d or scalar" %
nbars)
assert len(bottom)==nbars, ("argument 'bottom' must be %d or scalar" %
nbars)
if yerr is not None and len(yerr)!=nbars:
raise ValueError(
"bar() argument 'yerr' must be len(%s) or scalar" % nbars)
if xerr is not None and len(xerr)!=nbars:
raise ValueError(
"bar() argument 'xerr' must be len(%s) or scalar" % nbars)
patches = []
# lets do some conversions now since some types cannot be
# subtracted uniformly
if self.xaxis is not None:
xconv = self.xaxis.converter
if xconv is not None:
units = self.xaxis.get_units()
left = xconv.convert( left, units )
width = xconv.convert( width, units )
if self.yaxis is not None:
yconv = self.yaxis.converter
if yconv is not None :
units = self.yaxis.get_units()
bottom = yconv.convert( bottom, units )
height = yconv.convert( height, units )
if align == 'edge':
pass
elif align == 'center':
if orientation == 'vertical':
left = [left[i] - width[i]/2. for i in xrange(len(left))]
elif orientation == 'horizontal':
bottom = [bottom[i] - height[i]/2. for i in xrange(len(bottom))]
else:
raise ValueError('invalid alignment: %s' % align)
args = zip(left, bottom, width, height, color, edgecolor, linewidth)
for l, b, w, h, c, e, lw in args:
if h<0:
b += h
h = abs(h)
if w<0:
l += w
w = abs(w)
r = mpatches.Rectangle(
xy=(l, b), width=w, height=h,
facecolor=c,
edgecolor=e,
linewidth=lw,
label=label
)
label = '_nolegend_'
r.update(kwargs)
#print r.get_label(), label, 'label' in kwargs
self.add_patch(r)
patches.append(r)
holdstate = self._hold
self.hold(True) # ensure hold is on before plotting errorbars
if xerr is not None or yerr is not None:
if orientation == 'vertical':
# using list comps rather than arrays to preserve unit info
x = [l+0.5*w for l, w in zip(left, width)]
y = [b+h for b,h in zip(bottom, height)]
elif orientation == 'horizontal':
# using list comps rather than arrays to preserve unit info
x = [l+w for l,w in zip(left, width)]
y = [b+0.5*h for b,h in zip(bottom, height)]
self.errorbar(
x, y,
yerr=yerr, xerr=xerr,
fmt=None, ecolor=ecolor, capsize=capsize)
self.hold(holdstate) # restore previous hold state
if adjust_xlim:
xmin, xmax = self.dataLim.intervalx
xmin = np.amin(width[width!=0]) # filter out the 0 width rects
if xerr is not None:
xmin = xmin - np.amax(xerr)
xmin = max(xmin*0.9, 1e-100)
self.dataLim.intervalx = (xmin, xmax)
if adjust_ylim:
ymin, ymax = self.dataLim.intervaly
ymin = np.amin(height[height!=0]) # filter out the 0 height rects
if yerr is not None:
ymin = ymin - np.amax(yerr)
ymin = max(ymin*0.9, 1e-100)
self.dataLim.intervaly = (ymin, ymax)
self.autoscale_view()
return patches
bar.__doc__ = cbook.dedent(bar.__doc__) % martist.kwdocd
def barh(self, bottom, width, height=0.8, left=None, **kwargs):
"""
call signature::
barh(bottom, width, height=0.8, left=0, **kwargs)
Make a horizontal bar plot with rectangles bounded by:
*left*, *left* + *width*, *bottom*, *bottom* + *height*
(left, right, bottom and top edges)
*bottom*, *width*, *height*, and *left* can be either scalars
or sequences
Return value is a list of
:class:`matplotlib.patches.Rectangle` instances.
Required arguments:
======== ======================================================
Argument Description
======== ======================================================
*bottom* the vertical positions of the bottom edges of the bars
*width* the lengths of the bars
======== ======================================================
Optional keyword arguments:
=============== ==========================================
Keyword Description
=============== ==========================================
*height* the heights (thicknesses) of the bars
*left* the x coordinates of the left edges of the
bars
*color* the colors of the bars
*edgecolor* the colors of the bar edges
*linewidth* width of bar edges; None means use default
linewidth; 0 means don't draw edges.
*xerr* if not None, will be used to generate
errorbars on the bar chart
*yerr* if not None, will be used to generate
errorbars on the bar chart
*ecolor* specifies the color of any errorbar
*capsize* (default 3) determines the length in
points of the error bar caps
*align* 'edge' (default) | 'center'
*log* [False|True] False (default) leaves the
horizontal axis as-is; True sets it to log
scale
=============== ==========================================
Setting *align* = 'edge' aligns bars by their bottom edges in
bottom, while *align* = 'center' interprets these values as
the *y* coordinates of the bar centers.
The optional arguments *color*, *edgecolor*, *linewidth*,
*xerr*, and *yerr* can be either scalars or sequences of
length equal to the number of bars. This enables you to use
barh as the basis for stacked bar charts, or candlestick
plots.
other optional kwargs:
%(Rectangle)s
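A minimal usage sketch (illustrative values only):
>>> barh([0, 1, 2], [4, 7, 3], height=0.5, color='b', xerr=[0.5, 1.0, 0.3])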
"""
patches = self.bar(left=left, height=height, width=width, bottom=bottom,
orientation='horizontal', **kwargs)
return patches
barh.__doc__ = cbook.dedent(barh.__doc__) % martist.kwdocd
def broken_barh(self, xranges, yrange, **kwargs):
"""
call signature::
broken_barh(self, xranges, yrange, **kwargs)
A collection of horizontal bars spanning *yrange* with a sequence of
*xranges*.
Required arguments:
========= ==============================
Argument Description
========= ==============================
*xranges* sequence of (*xmin*, *xwidth*)
*yrange* sequence of (*ymin*, *ywidth*)
========= ==============================
kwargs are
:class:`matplotlib.collections.BrokenBarHCollection`
properties:
%(BrokenBarHCollection)s
these can either be a single argument, ie::
facecolors = 'black'
or a sequence of arguments for the various bars, ie::
facecolors = ('black', 'red', 'green')
**Example:**
.. plot:: mpl_examples/pylab_examples/broken_barh.py
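A minimal usage sketch (illustrative values only):
>>> broken_barh([(110, 30), (150, 10)], (10, 9), facecolors='blue')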
"""
col = mcoll.BrokenBarHCollection(xranges, yrange, **kwargs)
self.add_collection(col, autolim=True)
self.autoscale_view()
return col
broken_barh.__doc__ = cbook.dedent(broken_barh.__doc__) % martist.kwdocd
def stem(self, x, y, linefmt='b-', markerfmt='bo', basefmt='r-'):
"""
call signature::
stem(x, y, linefmt='b-', markerfmt='bo', basefmt='r-')
A stem plot plots vertical lines (using *linefmt*) at each *x*
location from the baseline to *y*, and places a marker there
using *markerfmt*. A horizontal line at 0 is plotted using
*basefmt*.
Return value is a tuple (*markerline*, *stemlines*,
*baseline*).
.. seealso::
`this document`__ for details
:file:`examples/pylab_examples/stem_plot.py`:
for a demo
__ http://www.mathworks.com/access/helpdesk/help/techdoc/ref/stem.html
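A minimal usage sketch (illustrative values only):
>>> markerline, stemlines, baseline = stem([1, 2, 3, 4], [3, 1, 4, 1])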
"""
remember_hold=self._hold
if not self._hold: self.cla()
self.hold(True)
markerline, = self.plot(x, y, markerfmt)
stemlines = []
for thisx, thisy in zip(x, y):
l, = self.plot([thisx,thisx], [0, thisy], linefmt)
stemlines.append(l)
baseline, = self.plot([np.amin(x), np.amax(x)], [0,0], basefmt)
self.hold(remember_hold)
return markerline, stemlines, baseline
def pie(self, x, explode=None, labels=None, colors=None,
autopct=None, pctdistance=0.6, shadow=False,
labeldistance=1.1):
r"""
call signature::
pie(x, explode=None, labels=None,
colors=('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'),
autopct=None, pctdistance=0.6, labeldistance=1.1, shadow=False)
Make a pie chart of array *x*. The fractional area of each
wedge is given by x/sum(x). If sum(x) <= 1, then the values
of x give the fractional area directly and the array will not
be normalized.
Keyword arguments:
*explode*: [ None | len(x) sequence ]
If not *None*, is a len(*x*) array which specifies the
fraction of the radius with which to offset each wedge.
*colors*: [ None | color sequence ]
A sequence of matplotlib color args through which the pie chart
will cycle.
*labels*: [ None | len(x) sequence of strings ]
A sequence of strings providing the labels for each wedge
*autopct*: [ None | format string | format function ]
If not *None*, is a string or function used to label the
wedges with their numeric value. The label will be placed inside
the wedge. If it is a format string, the label will be ``fmt%pct``.
If it is a function, it will be called.
*pctdistance*: scalar
The ratio between the center of each pie slice and the
start of the text generated by *autopct*. Ignored if
*autopct* is *None*; default is 0.6.
*labeldistance*: scalar
The radial distance at which the pie labels are drawn
*shadow*: [ False | True ]
Draw a shadow beneath the pie.
The pie chart will probably look best if the figure and axes are
square. Eg.::
figure(figsize=(8,8))
ax = axes([0.1, 0.1, 0.8, 0.8])
Return value:
If *autopct* is None, return the tuple (*patches*, *texts*):
- *patches* is a sequence of
:class:`matplotlib.patches.Wedge` instances
- *texts* is a list of the label
:class:`matplotlib.text.Text` instances.
If *autopct* is not *None*, return the tuple (*patches*,
*texts*, *autotexts*), where *patches* and *texts* are as
above, and *autotexts* is a list of
:class:`~matplotlib.text.Text` instances for the numeric
labels.
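A minimal usage sketch (illustrative values only):
>>> pie([15, 30, 45, 10], labels=['a', 'b', 'c', 'd'], explode=[0, 0.1, 0, 0], autopct='%1.1f%%', shadow=True)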
"""
self.set_frame_on(False)
x = np.asarray(x).astype(np.float32)
sx = float(x.sum())
if sx>1: x = np.divide(x,sx)
if labels is None: labels = ['']*len(x)
if explode is None: explode = [0]*len(x)
assert(len(x)==len(labels))
assert(len(x)==len(explode))
if colors is None: colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k', 'w')
center = 0,0
radius = 1
theta1 = 0
i = 0
texts = []
slices = []
autotexts = []
for frac, label, expl in cbook.safezip(x,labels, explode):
x, y = center
theta2 = theta1 + frac
thetam = 2*math.pi*0.5*(theta1+theta2)
x += expl*math.cos(thetam)
y += expl*math.sin(thetam)
w = mpatches.Wedge((x,y), radius, 360.*theta1, 360.*theta2,
facecolor=colors[i%len(colors)])
slices.append(w)
self.add_patch(w)
w.set_label(label)
if shadow:
# make sure to add a shadow after the call to
# add_patch so the figure and transform props will be
# set
shad = mpatches.Shadow(w, -0.02, -0.02,
#props={'facecolor':w.get_facecolor()}
)
shad.set_zorder(0.9*w.get_zorder())
self.add_patch(shad)
xt = x + labeldistance*radius*math.cos(thetam)
yt = y + labeldistance*radius*math.sin(thetam)
label_alignment = xt > 0 and 'left' or 'right'
t = self.text(xt, yt, label,
size=rcParams['xtick.labelsize'],
horizontalalignment=label_alignment,
verticalalignment='center')
texts.append(t)
if autopct is not None:
xt = x + pctdistance*radius*math.cos(thetam)
yt = y + pctdistance*radius*math.sin(thetam)
if is_string_like(autopct):
s = autopct%(100.*frac)
elif callable(autopct):
s = autopct(100.*frac)
else:
raise TypeError(
'autopct must be callable or a format string')
t = self.text(xt, yt, s,
horizontalalignment='center',
verticalalignment='center')
autotexts.append(t)
theta1 = theta2
i += 1
self.set_xlim((-1.25, 1.25))
self.set_ylim((-1.25, 1.25))
self.set_xticks([])
self.set_yticks([])
if autopct is None: return slices, texts
else: return slices, texts, autotexts
def errorbar(self, x, y, yerr=None, xerr=None,
fmt='-', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False, **kwargs):
"""
call signature::
errorbar(x, y, yerr=None, xerr=None,
fmt='-', ecolor=None, elinewidth=None, capsize=3,
barsabove=False, lolims=False, uplims=False,
xlolims=False, xuplims=False)
Plot *x* versus *y* with error deltas in *yerr* and *xerr*.
Vertical errorbars are plotted if *yerr* is not *None*.
Horizontal errorbars are plotted if *xerr* is not *None*.
*x*, *y*, *xerr*, and *yerr* can all be scalars, which plots a
single error bar at *x*, *y*.
Optional keyword arguments:
*xerr*/*yerr*: [ scalar | N array-like | 2xN array-like ]
If a scalar number or a length-N array-like object, symmetric
errorbars are drawn at +/- value.
If a 2xN array-like, the first row gives the lower (-) errors and
the second row gives the upper (+) errors.
*fmt*: '-'
The plot format symbol for *y*. If *fmt* is *None*, just plot the
errorbars with no line symbols. This can be useful for creating a
bar plot with errorbars.
*ecolor*: [ None | mpl color ]
a matplotlib color arg which gives the color the errorbar lines; if
*None*, use the marker color.
*elinewidth*: scalar
the linewidth of the errorbar lines. If *None*, use the linewidth.
*capsize*: scalar
the size of the error bar caps in points
*barsabove*: [ True | False ]
if *True*, will plot the errorbars above the plot
symbols. Default is below.
*lolims*/*uplims*/*xlolims*/*xuplims*: [ False | True ]
These arguments can be used to indicate that a value gives
only upper/lower limits. In that case a caret symbol is
used to indicate this. lims-arguments may be of the same
type as *xerr* and *yerr*.
All other keyword arguments are passed on to the plot command for the
markers, so you can add additional key=value pairs to control the
errorbar markers. For example, this code makes big red squares with
thick green edges::
x,y,yerr = rand(3,10)
errorbar(x, y, yerr, marker='s',
mfc='red', mec='green', ms=20, mew=4)
where *mfc*, *mec*, *ms* and *mew* are aliases for the longer
property names, *markerfacecolor*, *markeredgecolor*, *markersize*
and *markeredgewidth*.
valid kwargs for the marker properties are
%(Line2D)s
Return value is a length 3 tuple. The first element is the
:class:`~matplotlib.lines.Line2D` instance for the *y* symbol
lines. The second element is a list of error bar cap lines,
the third element is a list of
:class:`~matplotlib.collections.LineCollection` instances for
the horizontal and vertical error ranges.
**Example:**
.. plot:: mpl_examples/pylab_examples/errorbar_demo.py
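A minimal usage sketch (illustrative values, assuming the pylab
namespace for ``arange`` and ``exp``):
>>> x = arange(0.1, 4, 0.5)
>>> y = exp(-x)
>>> errorbar(x, y, yerr=0.2, xerr=0.1, fmt='o', ecolor='g', capsize=5)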
"""
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
if not self._hold: self.cla()
# make sure all the args are iterable; use lists not arrays to
# preserve units
if not iterable(x):
x = [x]
if not iterable(y):
y = [y]
if xerr is not None:
if not iterable(xerr):
xerr = [xerr]*len(x)
if yerr is not None:
if not iterable(yerr):
yerr = [yerr]*len(y)
l0 = None
if barsabove and fmt is not None:
l0, = self.plot(x,y,fmt,**kwargs)
barcols = []
caplines = []
lines_kw = {'label':'_nolegend_'}
if elinewidth:
lines_kw['linewidth'] = elinewidth
else:
if 'linewidth' in kwargs:
lines_kw['linewidth']=kwargs['linewidth']
if 'lw' in kwargs:
lines_kw['lw']=kwargs['lw']
if 'transform' in kwargs:
lines_kw['transform'] = kwargs['transform']
# arrays fine here, they are booleans and hence not units
if not iterable(lolims):
lolims = np.asarray([lolims]*len(x), bool)
else: lolims = np.asarray(lolims, bool)
if not iterable(uplims): uplims = np.array([uplims]*len(x), bool)
else: uplims = np.asarray(uplims, bool)
if not iterable(xlolims): xlolims = np.array([xlolims]*len(x), bool)
else: xlolims = np.asarray(xlolims, bool)
if not iterable(xuplims): xuplims = np.array([xuplims]*len(x), bool)
else: xuplims = np.asarray(xuplims, bool)
def xywhere(xs, ys, mask):
"""
return xs[mask], ys[mask] where mask is True but xs and
ys are not arrays
"""
assert len(xs)==len(ys)
assert len(xs)==len(mask)
xs = [thisx for thisx, b in zip(xs, mask) if b]
ys = [thisy for thisy, b in zip(ys, mask) if b]
return xs, ys
if capsize > 0:
plot_kw = {
'ms':2*capsize,
'label':'_nolegend_'}
if 'markeredgewidth' in kwargs:
plot_kw['markeredgewidth']=kwargs['markeredgewidth']
if 'mew' in kwargs:
plot_kw['mew']=kwargs['mew']
if 'transform' in kwargs:
plot_kw['transform'] = kwargs['transform']
if xerr is not None:
if (iterable(xerr) and len(xerr)==2 and
iterable(xerr[0]) and iterable(xerr[1])):
# using list comps rather than arrays to preserve units
left = [thisx-thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr[0])]
right = [thisx+thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr[1])]
else:
# using list comps rather than arrays to preserve units
left = [thisx-thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr)]
right = [thisx+thiserr for (thisx, thiserr)
in cbook.safezip(x,xerr)]
barcols.append( self.hlines(y, left, right, **lines_kw ) )
if capsize > 0:
if xlolims.any():
# can't use numpy logical indexing since left and
# y are lists
leftlo, ylo = xywhere(left, y, xlolims)
caplines.extend(
self.plot(leftlo, ylo, ls='None',
marker=mlines.CARETLEFT, **plot_kw) )
xlolims = ~xlolims
leftlo, ylo = xywhere(left, y, xlolims)
caplines.extend( self.plot(leftlo, ylo, 'k|', **plot_kw) )
else:
caplines.extend( self.plot(left, y, 'k|', **plot_kw) )
if xuplims.any():
rightup, yup = xywhere(right, y, xuplims)
caplines.extend(
self.plot(rightup, yup, ls='None',
marker=mlines.CARETRIGHT, **plot_kw) )
xuplims = ~xuplims
rightup, yup = xywhere(right, y, xuplims)
caplines.extend( self.plot(rightup, yup, 'k|', **plot_kw) )
else:
caplines.extend( self.plot(right, y, 'k|', **plot_kw) )
if yerr is not None:
if (iterable(yerr) and len(yerr)==2 and
iterable(yerr[0]) and iterable(yerr[1])):
# using list comps rather than arrays to preserve units
lower = [thisy-thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr[0])]
upper = [thisy+thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr[1])]
else:
# using list comps rather than arrays to preserve units
lower = [thisy-thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr)]
upper = [thisy+thiserr for (thisy, thiserr)
in cbook.safezip(y,yerr)]
barcols.append( self.vlines(x, lower, upper, **lines_kw) )
if capsize > 0:
if lolims.any():
xlo, lowerlo = xywhere(x, lower, lolims)
caplines.extend(
self.plot(xlo, lowerlo, ls='None',
marker=mlines.CARETDOWN, **plot_kw) )
lolims = ~lolims
xlo, lowerlo = xywhere(x, lower, lolims)
caplines.extend( self.plot(xlo, lowerlo, 'k_', **plot_kw) )
else:
caplines.extend( self.plot(x, lower, 'k_', **plot_kw) )
if uplims.any():
xup, upperup = xywhere(x, upper, uplims)
caplines.extend(
self.plot(xup, upperup, ls='None',
marker=mlines.CARETUP, **plot_kw) )
uplims = ~uplims
xup, upperup = xywhere(x, upper, uplims)
caplines.extend( self.plot(xup, upperup, 'k_', **plot_kw) )
else:
caplines.extend( self.plot(x, upper, 'k_', **plot_kw) )
if not barsabove and fmt is not None:
l0, = self.plot(x,y,fmt,**kwargs)
if ecolor is None:
if l0 is None:
ecolor = self._get_lines._get_next_cycle_color()
else:
ecolor = l0.get_color()
for l in barcols:
l.set_color(ecolor)
for l in caplines:
l.set_color(ecolor)
self.autoscale_view()
return (l0, caplines, barcols)
errorbar.__doc__ = cbook.dedent(errorbar.__doc__) % martist.kwdocd
def boxplot(self, x, notch=0, sym='b+', vert=1, whis=1.5,
positions=None, widths=None):
"""
call signature::
boxplot(x, notch=0, sym='+', vert=1, whis=1.5,
positions=None, widths=None)
Make a box and whisker plot for each column of *x* or each
vector in sequence *x*. The box extends from the lower to
upper quartile values of the data, with a line at the median.
The whiskers extend from the box to show the range of the
data. Flier points are those past the end of the whiskers.
- *notch* = 0 (default) produces a rectangular box plot.
- *notch* = 1 will produce a notched box plot
*sym* (default 'b+') is the default symbol for flier points.
Enter an empty string ('') if you don't want to show fliers.
- *vert* = 1 (default) makes the boxes vertical.
- *vert* = 0 makes horizontal boxes. This seems goofy, but
that's how Matlab did it.
*whis* (default 1.5) defines the length of the whiskers as
a function of the interquartile range. They extend to the
most extreme data point within ``whis*(75%-25%)`` beyond the quartiles.
*positions* (default 1,2,...,n) sets the horizontal positions of
the boxes. The ticks and limits are automatically set to match
the positions.
*widths* is either a scalar or a vector and sets the width of
each box. The default is 0.5, or ``0.15*(distance between extreme
positions)`` if that is smaller.
*x* is an array or a sequence of vectors.
Returns a dictionary mapping each component of the boxplot
to a list of the :class:`matplotlib.lines.Line2D`
instances created.
**Example:**
.. plot:: pyplots/boxplot_demo.py
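A minimal usage sketch, assuming hypothetical 1-D data sequences
*d1*, *d2* and *d3*:
>>> boxplot([d1, d2, d3], notch=1, sym='g+', positions=[1, 2, 3], widths=0.6)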
"""
if not self._hold: self.cla()
holdStatus = self._hold
whiskers, caps, boxes, medians, fliers = [], [], [], [], []
# convert x to a list of vectors
if hasattr(x, 'shape'):
if len(x.shape) == 1:
if hasattr(x[0], 'shape'):
x = list(x)
else:
x = [x,]
elif len(x.shape) == 2:
nr, nc = x.shape
if nr == 1:
x = [x]
elif nc == 1:
x = [x.ravel()]
else:
x = [x[:,i] for i in xrange(nc)]
else:
raise ValueError("input x can have no more than 2 dimensions")
if not hasattr(x[0], '__len__'):
x = [x]
col = len(x)
# get some plot info
if positions is None:
positions = range(1, col + 1)
if widths is None:
distance = max(positions) - min(positions)
widths = min(0.15*max(distance,1.0), 0.5)
if isinstance(widths, float) or isinstance(widths, int):
widths = np.ones((col,), float) * widths
# loop through columns, adding each to plot
self.hold(True)
for i,pos in enumerate(positions):
d = np.ravel(x[i])
row = len(d)
# get median and quartiles
q1, med, q3 = mlab.prctile(d,[25,50,75])
# get high extreme
iq = q3 - q1
hi_val = q3 + whis*iq
wisk_hi = np.compress( d <= hi_val , d )
if len(wisk_hi) == 0:
wisk_hi = q3
else:
wisk_hi = max(wisk_hi)
# get low extreme
lo_val = q1 - whis*iq
wisk_lo = np.compress( d >= lo_val, d )
if len(wisk_lo) == 0:
wisk_lo = q1
else:
wisk_lo = min(wisk_lo)
# get fliers - if we are showing them
flier_hi = []
flier_lo = []
flier_hi_x = []
flier_lo_x = []
if len(sym) != 0:
flier_hi = np.compress( d > wisk_hi, d )
flier_lo = np.compress( d < wisk_lo, d )
flier_hi_x = np.ones(flier_hi.shape[0]) * pos
flier_lo_x = np.ones(flier_lo.shape[0]) * pos
# get x locations for fliers, whisker, whisker cap and box sides
box_x_min = pos - widths[i] * 0.5
box_x_max = pos + widths[i] * 0.5
wisk_x = np.ones(2) * pos
cap_x_min = pos - widths[i] * 0.25
cap_x_max = pos + widths[i] * 0.25
cap_x = [cap_x_min, cap_x_max]
# get y location for median
med_y = [med, med]
# calculate 'regular' plot
if notch == 0:
# make our box vectors
box_x = [box_x_min, box_x_max, box_x_max, box_x_min, box_x_min ]
box_y = [q1, q1, q3, q3, q1 ]
# make our median line vectors
med_x = [box_x_min, box_x_max]
# calculate 'notch' plot
else:
notch_max = med + 1.57*iq/np.sqrt(row)
notch_min = med - 1.57*iq/np.sqrt(row)
if notch_max > q3:
notch_max = q3
if notch_min < q1:
notch_min = q1
# make our notched box vectors
box_x = [box_x_min, box_x_max, box_x_max, cap_x_max, box_x_max,
box_x_max, box_x_min, box_x_min, cap_x_min, box_x_min,
box_x_min ]
box_y = [q1, q1, notch_min, med, notch_max, q3, q3, notch_max,
med, notch_min, q1]
# make our median line vectors
med_x = [cap_x_min, cap_x_max]
med_y = [med, med]
# vertical or horizontal plot?
if vert:
def doplot(*args):
return self.plot(*args)
else:
def doplot(*args):
shuffled = []
for i in xrange(0, len(args), 3):
shuffled.extend([args[i+1], args[i], args[i+2]])
return self.plot(*shuffled)
whiskers.extend(doplot(wisk_x, [q1, wisk_lo], 'b--',
wisk_x, [q3, wisk_hi], 'b--'))
caps.extend(doplot(cap_x, [wisk_hi, wisk_hi], 'k-',
cap_x, [wisk_lo, wisk_lo], 'k-'))
boxes.extend(doplot(box_x, box_y, 'b-'))
medians.extend(doplot(med_x, med_y, 'r-'))
fliers.extend(doplot(flier_hi_x, flier_hi, sym,
flier_lo_x, flier_lo, sym))
# fix our axes/ticks up a little
if 1 == vert:
setticks, setlim = self.set_xticks, self.set_xlim
else:
setticks, setlim = self.set_yticks, self.set_ylim
newlimits = min(positions)-0.5, max(positions)+0.5
setlim(newlimits)
setticks(positions)
# reset hold status
self.hold(holdStatus)
return dict(whiskers=whiskers, caps=caps, boxes=boxes,
medians=medians, fliers=fliers)
def scatter(self, x, y, s=20, c='b', marker='o', cmap=None, norm=None,
vmin=None, vmax=None, alpha=1.0, linewidths=None,
faceted=True, verts=None,
**kwargs):
"""
call signatures::
scatter(x, y, s=20, c='b', marker='o', cmap=None, norm=None,
vmin=None, vmax=None, alpha=1.0, linewidths=None,
verts=None, **kwargs)
Make a scatter plot of *x* versus *y*, where *x*, *y* are 1-D
sequences of the same length, *N*.
Keyword arguments:
*s*:
size in points^2. It is a scalar or an array of the same
length as *x* and *y*.
*c*:
a color. *c* can be a single color format string, or a
sequence of color specifications of length *N*, or a
sequence of *N* numbers to be mapped to colors using the
*cmap* and *norm* specified via kwargs (see below). Note
that *c* should not be a single numeric RGB or RGBA
sequence because that is indistinguishable from an array
of values to be colormapped. *c* can be a 2-D array in
which the rows are RGB or RGBA, however.
*marker*:
can be one of:
===== ==============
Value Description
===== ==============
's' square
'o' circle
'^' triangle up
'>' triangle right
'v' triangle down
'<' triangle left
'd' diamond
'p' pentagram
'h' hexagon
'8' octagon
'+' plus
'x' cross
===== ==============
The marker can also be a tuple (*numsides*, *style*,
*angle*), which will create a custom, regular symbol.
*numsides*:
the number of sides
*style*:
the style of the regular symbol:
===== =============================================
Value Description
===== =============================================
0 a regular polygon
1 a star-like symbol
2 an asterisk
3 a circle (*numsides* and *angle* is ignored)
===== =============================================
*angle*:
the angle of rotation of the symbol
Finally, *marker* can be (*verts*, 0): *verts* is a
sequence of (*x*, *y*) vertices for a custom scatter
symbol. Alternatively, use the kwarg combination
*marker* = *None*, *verts* = *verts*.
Any or all of *x*, *y*, *s*, and *c* may be masked arrays, in
which case all masks will be combined and only unmasked points
will be plotted.
Other keyword arguments: the color mapping and normalization
arguments will be used only if *c* is an array of floats.
*cmap*: [ None | Colormap ]
A :class:`matplotlib.colors.Colormap` instance. If *None*,
defaults to rc ``image.cmap``. *cmap* is only used if *c*
is an array of floats.
*norm*: [ None | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0, 1. If *None*, use the default
:func:`normalize`. *norm* is only used if *c* is an array
of floats.
*vmin*/*vmax*:
*vmin* and *vmax* are used in conjunction with norm to
normalize luminance data. If either are None, the min and
max of the color array *c* is used. Note if you pass a
*norm* instance, your settings for *vmin* and *vmax* will
be ignored.
*alpha*: 0 <= scalar <= 1
The alpha value for the patches
*linewidths*: [ None | scalar | sequence ]
If *None*, defaults to (lines.linewidth,). Note that this
is a tuple, and if you set the linewidths argument you
must set it as a sequence of floats, as required by
:class:`~matplotlib.collections.RegularPolyCollection`.
Optional kwargs control the
:class:`~matplotlib.collections.Collection` properties; in
particular:
*edgecolors*:
'none' to plot faces with no outlines
*facecolors*:
'none' to plot unfilled outlines
Here are the standard descriptions of all the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
A :class:`~matplotlib.collections.Collection` instance is
returned.
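A minimal usage sketch, assuming hypothetical equal-length 1-D
sequences *x*, *y* and *z*; passing *z* as *c* triggers colormapping:
>>> scatter(x, y, s=80, c=z, marker='^', alpha=0.75)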
"""
if not self._hold: self.cla()
syms = { # a dict from symbol to (numsides, angle)
's' : (4,math.pi/4.0,0), # square
'o' : (20,3,0), # circle
'^' : (3,0,0), # triangle up
'>' : (3,math.pi/2.0,0), # triangle right
'v' : (3,math.pi,0), # triangle down
'<' : (3,3*math.pi/2.0,0), # triangle left
'd' : (4,0,0), # diamond
'p' : (5,0,0), # pentagram
'h' : (6,0,0), # hexagon
'8' : (8,0,0), # octagon
'+' : (4,0,2), # plus
'x' : (4,math.pi/4.0,2) # cross
}
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x, y, s, c = cbook.delete_masked_points(x, y, s, c)
if is_string_like(c) or cbook.is_sequence_of_strings(c):
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
else:
sh = np.shape(c)
# The inherent ambiguity is resolved in favor of color
# mapping, not interpretation as rgb or rgba:
if len(sh) == 1 and sh[0] == len(x):
colors = None # use cmap, norm after collection is created
else:
colors = mcolors.colorConverter.to_rgba_array(c, alpha)
if not iterable(s):
scales = (s,)
else:
scales = s
if faceted:
edgecolors = None
else:
edgecolors = 'none'
warnings.warn(
'''replace "faceted=False" with "edgecolors='none'"''',
DeprecationWarning) #2008/04/18
sym = None
symstyle = 0
# to be API compatible
if marker is None and not (verts is None):
marker = (verts, 0)
verts = None
if is_string_like(marker):
# the standard way to define symbols using a string character
sym = syms.get(marker)
if sym is None and verts is None:
raise ValueError('Unknown marker symbol to scatter')
numsides, rotation, symstyle = syms[marker]
elif iterable(marker):
# accept marker to be:
# (numsides, style, [angle])
# or
# (verts[], style, [angle])
if len(marker)<2 or len(marker)>3:
raise ValueError('Cannot create markersymbol from marker')
if cbook.is_numlike(marker[0]):
# (numsides, style, [angle])
if len(marker)==2:
numsides, rotation = marker[0], 0.
elif len(marker)==3:
numsides, rotation = marker[0], marker[2]
sym = True
if marker[1] in (1,2):
symstyle = marker[1]
else:
verts = np.asarray(marker[0])
if sym is not None:
if symstyle==0:
collection = mcoll.RegularPolyCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==1:
collection = mcoll.StarPolygonCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==2:
collection = mcoll.AsteriskPolygonCollection(
numsides, rotation, scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
elif symstyle==3:
collection = mcoll.CircleCollection(
scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
else:
rescale = np.sqrt(max(verts[:,0]**2+verts[:,1]**2))
verts /= rescale
collection = mcoll.PolyCollection(
(verts,), scales,
facecolors = colors,
edgecolors = edgecolors,
linewidths = linewidths,
offsets = zip(x,y),
transOffset = self.transData,
)
collection.set_transform(mtransforms.IdentityTransform())
collection.set_alpha(alpha)
collection.update(kwargs)
if colors is None:
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_array(np.asarray(c))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
temp_x = x
temp_y = y
minx = np.amin(temp_x)
maxx = np.amax(temp_x)
miny = np.amin(temp_y)
maxy = np.amax(temp_y)
w = maxx-minx
h = maxy-miny
# the pad is a little hack to deal with the fact that we don't
# want to transform all the symbols whose scales are in points
# to data coords to get the exact bounding box for efficiency
# reasons. It can be done right if this is deemed important
padx, pady = 0.05*w, 0.05*h
corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)
self.update_datalim( corners)
self.autoscale_view()
# add the collection last
self.add_collection(collection)
return collection
scatter.__doc__ = cbook.dedent(scatter.__doc__) % martist.kwdocd
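# A minimal usage sketch (illustrative only, not part of the module; assumes
# numpy and pyplot are importable and the data below is invented).  When *c*
# is a 1-D float array it is mapped through *cmap*/*norm* rather than being
# read as rgb(a):
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     fig = plt.figure()
#     ax = fig.add_subplot(111)
#     x, y = np.random.rand(2, 200)
#     coll = ax.scatter(x, y, s=30, c=np.hypot(x, y), marker='o', alpha=0.75)
#     fig.colorbar(coll)   # the returned collection carries the mapped array
#     plt.show()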
def hexbin(self, x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear',
cmap=None, norm=None, vmin=None, vmax=None,
alpha=1.0, linewidths=None, edgecolors='none',
reduce_C_function = np.mean,
**kwargs):
"""
call signature::
hexbin(x, y, C = None, gridsize = 100, bins = None,
xscale = 'linear', yscale = 'linear',
cmap=None, norm=None, vmin=None, vmax=None,
alpha=1.0, linewidths=None, edgecolors='none',
reduce_C_function = np.mean,
**kwargs)
Make a hexagonal binning plot of *x* versus *y*, where *x*,
*y* are 1-D sequences of the same length, *N*. If *C* is None
(the default), this is a histogram of the number of occurrences
of the observations at (x[i],y[i]).
If *C* is specified, it specifies values at the coordinate
(x[i],y[i]). These values are accumulated for each hexagonal
bin and then reduced according to *reduce_C_function*, which
defaults to numpy's mean function (np.mean). (If *C* is
specified, it must also be a 1-D sequence of the same length
as *x* and *y*.)
*x*, *y* and/or *C* may be masked arrays, in which case only
unmasked points will be plotted.
Optional keyword arguments:
*gridsize*: [ 100 | integer ]
The number of hexagons in the *x*-direction, default is
100. The corresponding number of hexagons in the
*y*-direction is chosen such that the hexagons are
approximately regular. Alternatively, gridsize can be a
tuple with two elements specifying the number of hexagons
in the *x*-direction and the *y*-direction.
*bins*: [ None | 'log' | integer | sequence ]
If *None*, no binning is applied; the color of each hexagon
directly corresponds to its count value.
If 'log', use a logarithmic scale for the color
map. Internally, :math:`\log_{10}(i+1)` is used to
determine the hexagon color.
If an integer, divide the counts in the specified number
of bins, and color the hexagons accordingly.
If a sequence of values, the values of the lower bound of
the bins to be used.
*xscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the horizontal axis.
*yscale*: [ 'linear' | 'log' ]
Use a linear or log10 scale on the vertical axis.
Other keyword arguments controlling color mapping and normalization
arguments:
*cmap*: [ None | Colormap ]
a :class:`matplotlib.cm.Colormap` instance. If *None*,
defaults to rc ``image.cmap``.
*norm*: [ None | Normalize ]
:class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1.
*vmin*/*vmax*: scalar
*vmin* and *vmax* are used in conjunction with *norm* to normalize
luminance data. If either are *None*, the min and max of the color
array *C* is used. Note if you pass a norm instance, your settings
for *vmin* and *vmax* will be ignored.
*alpha*: scalar
the alpha value for the patches
*linewidths*: [ None | scalar ]
If *None*, defaults to rc lines.linewidth. Note that this
is a tuple, and if you set the linewidths argument you
must set it as a sequence of floats, as required by
:class:`~matplotlib.collections.RegularPolyCollection`.
Other keyword arguments controlling the Collection properties:
*edgecolors*: [ None | mpl color | color sequence ]
If 'none', draws the edges in the same color as the fill color.
This is the default, as it avoids unsightly unpainted pixels
between the hexagons.
If *None*, draws the outlines in the default color.
If a matplotlib color arg or sequence of rgba tuples, draws the
outlines in the specified color.
Here are the standard descriptions of all the
:class:`~matplotlib.collections.Collection` kwargs:
%(Collection)s
The return value is a
:class:`~matplotlib.collections.PolyCollection` instance; use
:meth:`~matplotlib.collections.PolyCollection.get_array` on
this :class:`~matplotlib.collections.PolyCollection` to get
the counts in each hexagon.
**Example:**
.. plot:: mpl_examples/pylab_examples/hexbin_demo.py
"""
if not self._hold: self.cla()
self._process_unit_info(xdata=x, ydata=y, kwargs=kwargs)
x, y, C = cbook.delete_masked_points(x, y, C)
# Set the size of the hexagon grid
if iterable(gridsize):
nx, ny = gridsize
else:
nx = gridsize
ny = int(nx/math.sqrt(3))
# Count the number of data in each hexagon
x = np.array(x, float)
y = np.array(y, float)
if xscale=='log':
x = np.log10(x)
if yscale=='log':
y = np.log10(y)
xmin = np.amin(x)
xmax = np.amax(x)
ymin = np.amin(y)
ymax = np.amax(y)
# In the x-direction, the hexagons exactly cover the region from
# xmin to xmax. Need some padding to avoid roundoff errors.
padding = 1.e-9 * (xmax - xmin)
xmin -= padding
xmax += padding
sx = (xmax-xmin) / nx
sy = (ymax-ymin) / ny
x = (x-xmin)/sx
y = (y-ymin)/sy
ix1 = np.round(x).astype(int)
iy1 = np.round(y).astype(int)
ix2 = np.floor(x).astype(int)
iy2 = np.floor(y).astype(int)
nx1 = nx + 1
ny1 = ny + 1
nx2 = nx
ny2 = ny
n = nx1*ny1+nx2*ny2
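# The points are binned onto two interleaved rectangular lattices (the second
# offset by half a cell in x and y), which together form the hexagonal grid:
# d1 and d2 below are weighted squared distances to the nearest center of each
# lattice, and bdist marks the points that lie closer to the first lattice.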
d1 = (x-ix1)**2 + 3.0 * (y-iy1)**2
d2 = (x-ix2-0.5)**2 + 3.0 * (y-iy2-0.5)**2
bdist = (d1<d2)
if C is None:
accum = np.zeros(n)
# Create appropriate views into "accum" array.
lattice1 = accum[:nx1*ny1]
lattice2 = accum[nx1*ny1:]
lattice1.shape = (nx1,ny1)
lattice2.shape = (nx2,ny2)
for i in xrange(len(x)):
if bdist[i]:
lattice1[ix1[i], iy1[i]]+=1
else:
lattice2[ix2[i], iy2[i]]+=1
else:
# create accumulation arrays
lattice1 = np.empty((nx1,ny1),dtype=object)
for i in xrange(nx1):
for j in xrange(ny1):
lattice1[i,j] = []
lattice2 = np.empty((nx2,ny2),dtype=object)
for i in xrange(nx2):
for j in xrange(ny2):
lattice2[i,j] = []
for i in xrange(len(x)):
if bdist[i]:
lattice1[ix1[i], iy1[i]].append( C[i] )
else:
lattice2[ix2[i], iy2[i]].append( C[i] )
for i in xrange(nx1):
for j in xrange(ny1):
vals = lattice1[i,j]
if len(vals):
lattice1[i,j] = reduce_C_function( vals )
else:
lattice1[i,j] = np.nan
for i in xrange(nx2):
for j in xrange(ny2):
vals = lattice2[i,j]
if len(vals):
lattice2[i,j] = reduce_C_function( vals )
else:
lattice2[i,j] = np.nan
accum = np.hstack((
lattice1.astype(float).ravel(), lattice2.astype(float).ravel()))
good_idxs = ~np.isnan(accum)
px = xmin + sx * np.array([ 0.5, 0.5, 0.0, -0.5, -0.5, 0.0])
py = ymin + sy * np.array([-0.5, 0.5, 1.0, 0.5, -0.5, -1.0]) / 3.0
polygons = np.zeros((6, n, 2), float)
polygons[:,:nx1*ny1,0] = np.repeat(np.arange(nx1), ny1)
polygons[:,:nx1*ny1,1] = np.tile(np.arange(ny1), nx1)
polygons[:,nx1*ny1:,0] = np.repeat(np.arange(nx2) + 0.5, ny2)
polygons[:,nx1*ny1:,1] = np.tile(np.arange(ny2), nx2) + 0.5
if C is not None:
# remove accumulation bins with no data
polygons = polygons[:,good_idxs,:]
accum = accum[good_idxs]
polygons = np.transpose(polygons, axes=[1,0,2])
polygons[:,:,0] *= sx
polygons[:,:,1] *= sy
polygons[:,:,0] += px
polygons[:,:,1] += py
if xscale=='log':
polygons[:,:,0] = 10**(polygons[:,:,0])
xmin = 10**xmin
xmax = 10**xmax
self.set_xscale('log')
if yscale=='log':
polygons[:,:,1] = 10**(polygons[:,:,1])
ymin = 10**ymin
ymax = 10**ymax
self.set_yscale('log')
if edgecolors=='none':
edgecolors = 'face'
collection = mcoll.PolyCollection(
polygons,
edgecolors = edgecolors,
linewidths = linewidths,
transOffset = self.transData,
)
# Transform accum if needed
if bins=='log':
accum = np.log10(accum+1)
elif bins!=None:
if not iterable(bins):
minimum, maximum = min(accum), max(accum)
bins-=1 # one less edge than bins
bins = minimum + (maximum-minimum)*np.arange(bins)/bins
bins = np.sort(bins)
accum = bins.searchsorted(accum)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_array(accum)
collection.set_cmap(cmap)
collection.set_norm(norm)
collection.set_alpha(alpha)
collection.update(kwargs)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
corners = ((xmin, ymin), (xmax, ymax))
self.update_datalim( corners)
self.autoscale_view()
# add the collection last
self.add_collection(collection)
return collection
hexbin.__doc__ = cbook.dedent(hexbin.__doc__) % martist.kwdocd
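# A minimal usage sketch (illustrative only, not part of the module; assumes
# numpy/pyplot and invented data).  The returned PolyCollection carries the
# per-hexagon values, so they can be read back after plotting:
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     fig = plt.figure()
#     ax = fig.add_subplot(111)
#     x = np.random.standard_normal(10000)
#     y = np.random.standard_normal(10000)
#     pc = ax.hexbin(x, y, gridsize=40, bins='log')
#     counts = pc.get_array()   # log10(count + 1) per hexagon, per bins='log'
#     fig.colorbar(pc)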
def arrow(self, x, y, dx, dy, **kwargs):
"""
call signature::
arrow(x, y, dx, dy, **kwargs)
Draws arrow on specified axis from (*x*, *y*) to (*x* + *dx*,
*y* + *dy*).
Optional kwargs control the arrow properties:
%(FancyArrow)s
**Example:**
.. plot:: mpl_examples/pylab_examples/arrow_demo.py
"""
a = mpatches.FancyArrow(x, y, dx, dy, **kwargs)
self.add_artist(a)
return a
arrow.__doc__ = cbook.dedent(arrow.__doc__) % martist.kwdocd
def quiverkey(self, *args, **kw):
qk = mquiver.QuiverKey(*args, **kw)
self.add_artist(qk)
return qk
quiverkey.__doc__ = mquiver.QuiverKey.quiverkey_doc
def quiver(self, *args, **kw):
if not self._hold: self.cla()
q = mquiver.Quiver(self, *args, **kw)
self.add_collection(q, False)
self.update_datalim(q.XY)
self.autoscale_view()
return q
quiver.__doc__ = mquiver.Quiver.quiver_doc
def barbs(self, *args, **kw):
"""
%(barbs_doc)s
**Example:**
.. plot:: mpl_examples/pylab_examples/barb_demo.py
"""
if not self._hold: self.cla()
b = mquiver.Barbs(self, *args, **kw)
self.add_collection(b)
self.update_datalim(b.get_offsets())
self.autoscale_view()
return b
barbs.__doc__ = cbook.dedent(barbs.__doc__) % {
'barbs_doc': mquiver.Barbs.barbs_doc}
def fill(self, *args, **kwargs):
"""
call signature::
fill(*args, **kwargs)
Plot filled polygons. *args* is a variable length argument,
allowing for multiple *x*, *y* pairs with an optional color
format string; see :func:`~matplotlib.pyplot.plot` for details
on the argument parsing. For example, to plot a polygon with
vertices at *x*, *y* in blue.::
ax.fill(x,y, 'b' )
An arbitrary number of *x*, *y*, *color* groups can be specified::
ax.fill(x1, y1, 'g', x2, y2, 'r')
Return value is a list of :class:`~matplotlib.patches.Patch`
instances that were added.
The same color strings that :func:`~matplotlib.pyplot.plot`
supports are supported by the fill format string.
If you would like to fill below a curve, eg. shade a region
between 0 and *y* along *x*, use :meth:`fill_between`
The *closed* kwarg will close the polygon when *True* (default).
kwargs control the Polygon properties:
%(Polygon)s
**Example:**
.. plot:: mpl_examples/pylab_examples/fill_demo.py
"""
if not self._hold: self.cla()
patches = []
for poly in self._get_patches_for_fill(*args, **kwargs):
self.add_patch( poly )
patches.append( poly )
self.autoscale_view()
return patches
fill.__doc__ = cbook.dedent(fill.__doc__) % martist.kwdocd
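# A minimal usage sketch (illustrative only, not part of the module; assumes
# numpy/pyplot).  Several x, y, color groups can be filled in one call, and
# Polygon kwargs such as alpha apply to all of them:
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     fig = plt.figure()
#     ax = fig.add_subplot(111)
#     t = np.linspace(0, 2 * np.pi, 100)
#     ax.fill(t, np.sin(t), 'b', t, np.cos(t), 'r', alpha=0.3)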
def fill_between(self, x, y1, y2=0, where=None, **kwargs):
"""
call signature::
fill_between(x, y1, y2=0, where=None, **kwargs)
Create a :class:`~matplotlib.collections.PolyCollection`
filling the regions between *y1* and *y2* where
``where==True``
*x*
an N length np array of the x data
*y1*
an N length scalar or np array of the y data
*y2*
an N length scalar or np array of the y data
*where*
if None, default to fill between everywhere. If not None,
it is an N length numpy boolean array and the fill will
only happen over the regions where ``where==True``
*kwargs*
keyword args passed on to the :class:`PolyCollection`
kwargs control the Polygon properties:
%(PolyCollection)s
.. plot:: mpl_examples/pylab_examples/fill_between.py
"""
# Handle united data, such as dates
self._process_unit_info(xdata=x, ydata=y1, kwargs=kwargs)
self._process_unit_info(ydata=y2)
# Convert the arrays so we can work with them
x = np.asarray(self.convert_xunits(x))
y1 = np.asarray(self.convert_yunits(y1))
y2 = np.asarray(self.convert_yunits(y2))
if not cbook.iterable(y1):
y1 = np.ones_like(x)*y1
if not cbook.iterable(y2):
y2 = np.ones_like(x)*y2
if where is None:
where = np.ones(len(x), np.bool)
where = np.asarray(where)
assert( (len(x)==len(y1)) and (len(x)==len(y2)) and len(x)==len(where))
polys = []
for ind0, ind1 in mlab.contiguous_regions(where):
theseverts = []
xslice = x[ind0:ind1]
y1slice = y1[ind0:ind1]
y2slice = y2[ind0:ind1]
if not len(xslice):
continue
N = len(xslice)
X = np.zeros((2*N+2, 2), np.float)
# the purpose of the next two lines is for when y2 is a
# scalar like 0 and we want the fill to go all the way
# down to 0 even if none of the y1 sample points do
X[0] = xslice[0], y2slice[0]
X[N+1] = xslice[-1], y2slice[-1]
X[1:N+1,0] = xslice
X[1:N+1,1] = y1slice
X[N+2:,0] = xslice[::-1]
X[N+2:,1] = y2slice[::-1]
polys.append(X)
collection = mcoll.PolyCollection(polys, **kwargs)
# now update the datalim and autoscale
XY1 = np.array([x[where], y1[where]]).T
XY2 = np.array([x[where], y2[where]]).T
self.dataLim.update_from_data_xy(XY1, self.ignore_existing_data_limits,
updatex=True, updatey=True)
self.dataLim.update_from_data_xy(XY2, self.ignore_existing_data_limits,
updatex=False, updatey=True)
self.add_collection(collection)
self.autoscale_view()
return collection
fill_between.__doc__ = cbook.dedent(fill_between.__doc__) % martist.kwdocd
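# A minimal usage sketch (illustrative only, not part of the module; assumes
# numpy/pyplot).  A scalar *y2* acts as a baseline and *where* limits the fill
# to the contiguous True regions:
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     fig = plt.figure()
#     ax = fig.add_subplot(111)
#     x = np.linspace(0, 2 * np.pi, 200)
#     y = np.sin(x)
#     ax.plot(x, y, 'k')
#     ax.fill_between(x, y, 0, where=y >= 0, facecolor='green', alpha=0.5)
#     ax.fill_between(x, y, 0, where=y <= 0, facecolor='red', alpha=0.5)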
#### plotting z(x,y): imshow, pcolor and relatives, contour
def imshow(self, X, cmap=None, norm=None, aspect=None,
interpolation=None, alpha=1.0, vmin=None, vmax=None,
origin=None, extent=None, shape=None, filternorm=1,
filterrad=4.0, imlim=None, resample=None, url=None, **kwargs):
"""
call signature::
imshow(X, cmap=None, norm=None, aspect=None, interpolation=None,
alpha=1.0, vmin=None, vmax=None, origin=None, extent=None,
**kwargs)
Display the image in *X* to current axes. *X* may be a float
array, a uint8 array or a PIL image. If *X* is an array, *X*
can have the following shapes:
* MxN -- luminance (grayscale, float array only)
* MxNx3 -- RGB (float or uint8 array)
* MxNx4 -- RGBA (float or uint8 array)
The value for each component of MxNx3 and MxNx4 float arrays should be
in the range 0.0 to 1.0; MxN float arrays may be normalised.
An :class:`matplotlib.image.AxesImage` instance is returned.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance, eg. cm.jet.
If *None*, default to rc ``image.cmap`` value.
*cmap* is ignored when *X* has RGB(A) information
*aspect*: [ None | 'auto' | 'equal' | scalar ]
If 'auto', changes the image aspect ratio to match that of the axes
If 'equal', and *extent* is *None*, changes the axes
aspect ratio to match that of the image. If *extent* is
not *None*, the axes aspect ratio is changed to match that
of the extent.
If *None*, default to rc ``image.aspect`` value.
*interpolation*:
Acceptable values are *None*, 'nearest', 'bilinear',
'bicubic', 'spline16', 'spline36', 'hanning', 'hamming',
'hermite', 'kaiser', 'quadric', 'catrom', 'gaussian',
'bessel', 'mitchell', 'sinc', 'lanczos',
If *interpolation* is *None*, default to rc
``image.interpolation``. See also the *filternorm* and
*filterrad* parameters
*norm*: [ None | Normalize ]
An :class:`matplotlib.colors.Normalize` instance; if
*None*, defaults to :func:`normalize`. This scales
luminance -> 0-1
*norm* is only used for an MxN float array.
*vmin*/*vmax*: [ None | scalar ]
Used to scale a luminance image to 0-1. If either is
*None*, the min and max of the luminance values will be
used. Note if *norm* is not *None*, the settings for
*vmin* and *vmax* will be ignored.
*alpha*: scalar
The alpha blending value, between 0 (transparent) and 1 (opaque)
*origin*: [ None | 'upper' | 'lower' ]
Place the [0,0] index of the array in the upper left or lower left
corner of the axes. If *None*, default to rc ``image.origin``.
*extent*: [ None | scalars (left, right, bottom, top) ]
Data values of the axes. The default assigns zero-based row,
column indices to the *x*, *y* centers of the pixels.
*shape*: [ None | scalars (columns, rows) ]
For raw buffer images
*filternorm*:
A parameter for the antigrain image resize filter. From the
antigrain documentation, if *filternorm* = 1, the filter normalizes
integer values and corrects the rounding errors. It doesn't do
anything with the source floating point values, it corrects only
integers according to the rule of 1.0 which means that any sum of
pixel weights must be equal to 1.0. So, the filter function must
produce a graph of the proper shape.
*filterrad*:
The filter radius for filters that have a radius
parameter, i.e. when interpolation is one of: 'sinc',
'lanczos' or 'blackman'
Additional kwargs are :class:`~matplotlib.artist.Artist` properties:
%(Artist)s
**Example:**
.. plot:: mpl_examples/pylab_examples/image_demo.py
"""
if not self._hold: self.cla()
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
if aspect is None: aspect = rcParams['image.aspect']
self.set_aspect(aspect)
im = mimage.AxesImage(self, cmap, norm, interpolation, origin, extent,
filternorm=filternorm,
filterrad=filterrad, resample=resample, **kwargs)
im.set_data(X)
im.set_alpha(alpha)
self._set_artist_props(im)
im.set_clip_path(self.patch)
#if norm is None and shape is None:
# im.set_clim(vmin, vmax)
if vmin is not None or vmax is not None:
im.set_clim(vmin, vmax)
else:
im.autoscale_None()
im.set_url(url)
xmin, xmax, ymin, ymax = im.get_extent()
corners = (xmin, ymin), (xmax, ymax)
self.update_datalim(corners)
if self._autoscaleon:
self.set_xlim((xmin, xmax))
self.set_ylim((ymin, ymax))
self.images.append(im)
return im
imshow.__doc__ = cbook.dedent(imshow.__doc__) % martist.kwdocd
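# A minimal usage sketch (illustrative only, not part of the module; assumes
# numpy/pyplot).  An MxN float array is mapped through *cmap*/*norm*, while
# *extent* places the pixel grid in data coordinates:
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     fig = plt.figure()
#     ax = fig.add_subplot(111)
#     Z = np.random.rand(20, 30)
#     im = ax.imshow(Z, interpolation='nearest', origin='lower',
#                    extent=(0.0, 3.0, 0.0, 2.0), vmin=0.0, vmax=1.0)
#     fig.colorbar(im)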
def _pcolorargs(self, funcname, *args):
if len(args)==1:
C = args[0]
numRows, numCols = C.shape
X, Y = np.meshgrid(np.arange(numCols+1), np.arange(numRows+1) )
elif len(args)==3:
X, Y, C = args
else:
raise TypeError(
'Illegal arguments to %s; see help(%s)' % (funcname, funcname))
Nx = X.shape[-1]
Ny = Y.shape[0]
if len(X.shape) != 2 or X.shape[0] == 1:
x = X.reshape(1,Nx)
X = x.repeat(Ny, axis=0)
if len(Y.shape) != 2 or Y.shape[1] == 1:
y = Y.reshape(Ny, 1)
Y = y.repeat(Nx, axis=1)
if X.shape != Y.shape:
raise TypeError(
'Incompatible X, Y inputs to %s; see help(%s)' % (
funcname, funcname))
return X, Y, C
def pcolor(self, *args, **kwargs):
"""
call signatures::
pcolor(C, **kwargs)
pcolor(X, Y, C, **kwargs)
Create a pseudocolor plot of a 2-D array.
*C* is the array of color values.
*X* and *Y*, if given, specify the (*x*, *y*) coordinates of
the colored quadrilaterals; the quadrilateral for C[i,j] has
corners at::
(X[i, j], Y[i, j]),
(X[i, j+1], Y[i, j+1]),
(X[i+1, j], Y[i+1, j]),
(X[i+1, j+1], Y[i+1, j+1]).
Ideally the dimensions of *X* and *Y* should be one greater
than those of *C*; if the dimensions are the same, then the
last row and column of *C* will be ignored.
Note that the column index corresponds to the
*x*-coordinate, and the row index corresponds to *y*; for
details, see the :ref:`Grid Orientation
<axes-pcolor-grid-orientation>` section below.
If either or both of *X* and *Y* are 1-D arrays or column vectors,
they will be expanded as needed into the appropriate 2-D arrays,
making a rectangular grid.
*X*, *Y* and *C* may be masked arrays. If either C[i, j], or one
of the vertices surrounding C[i,j] (*X* or *Y* at [i, j], [i+1, j],
[i, j+1],[i+1, j+1]) is masked, nothing is plotted.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance. If *None*, use
rc settings.
*norm*: [ None | Normalize ]
An :class:`matplotlib.colors.Normalize` instance is used
to scale luminance data to 0,1. If *None*, defaults to
:func:`normalize`.
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either are *None*, the min
and max of the color array *C* is used. If you pass a
*norm* instance, *vmin* and *vmax* will be ignored.
*shading*: [ 'flat' | 'faceted' ]
If 'faceted', a black grid is drawn around each rectangle; if
'flat', edges are not drawn. Default is 'flat', contrary to
Matlab(TM).
This kwarg is deprecated; please use 'edgecolors' instead:
* shading='flat' -- edgecolors='None'
* shading='faceted' -- edgecolors='k'
*edgecolors*: [ None | 'None' | color | color sequence]
If *None*, the rc setting is used by default.
If 'None', edges will not be visible.
An mpl color or sequence of colors will set the edge color
*alpha*: 0 <= scalar <= 1
the alpha blending value
Return value is a :class:`matplotlib.collections.Collection`
instance.
.. _axes-pcolor-grid-orientation:
The grid orientation follows the Matlab(TM) convention: an
array *C* with shape (*nrows*, *ncolumns*) is plotted with
the column number as *X* and the row number as *Y*, increasing
up; hence it is plotted the way the array would be printed,
except that the *Y* axis is reversed. That is, *C* is taken
as *C*(*y*, *x*).
Similarly for :func:`~matplotlib.pyplot.meshgrid`::
x = np.arange(5)
y = np.arange(3)
X, Y = meshgrid(x,y)
is equivalent to:
X = array([[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]])
Y = array([[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2]])
so if you have::
C = rand( len(x), len(y))
then you need::
pcolor(X, Y, C.T)
or::
pcolor(C.T)
Matlab :func:`pcolor` always discards the last row and column
of *C*, but matplotlib displays the last row and column if *X* and
*Y* are not specified, or if *X* and *Y* have one more row and
column than *C*.
kwargs can be used to control the
:class:`~matplotlib.collections.PolyCollection` properties:
%(PolyCollection)s
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat')
X, Y, C = self._pcolorargs('pcolor', *args)
Ny, Nx = X.shape
# convert to MA, if necessary.
C = ma.asarray(C)
X = ma.asarray(X)
Y = ma.asarray(Y)
mask = ma.getmaskarray(X)+ma.getmaskarray(Y)
xymask = mask[0:-1,0:-1]+mask[1:,1:]+mask[0:-1,1:]+mask[1:,0:-1]
# don't plot if C or any of the surrounding vertices are masked.
mask = ma.getmaskarray(C)[0:Ny-1,0:Nx-1]+xymask
newaxis = np.newaxis
compress = np.compress
ravelmask = (mask==0).ravel()
X1 = compress(ravelmask, ma.filled(X[0:-1,0:-1]).ravel())
Y1 = compress(ravelmask, ma.filled(Y[0:-1,0:-1]).ravel())
X2 = compress(ravelmask, ma.filled(X[1:,0:-1]).ravel())
Y2 = compress(ravelmask, ma.filled(Y[1:,0:-1]).ravel())
X3 = compress(ravelmask, ma.filled(X[1:,1:]).ravel())
Y3 = compress(ravelmask, ma.filled(Y[1:,1:]).ravel())
X4 = compress(ravelmask, ma.filled(X[0:-1,1:]).ravel())
Y4 = compress(ravelmask, ma.filled(Y[0:-1,1:]).ravel())
npoly = len(X1)
xy = np.concatenate((X1[:,newaxis], Y1[:,newaxis],
X2[:,newaxis], Y2[:,newaxis],
X3[:,newaxis], Y3[:,newaxis],
X4[:,newaxis], Y4[:,newaxis],
X1[:,newaxis], Y1[:,newaxis]),
axis=1)
verts = xy.reshape((npoly, 5, 2))
#verts = zip(zip(X1,Y1),zip(X2,Y2),zip(X3,Y3),zip(X4,Y4))
C = compress(ravelmask, ma.filled(C[0:Ny-1,0:Nx-1]).ravel())
if shading == 'faceted':
edgecolors = (0,0,0,1),
linewidths = (0.25,)
else:
edgecolors = 'face'
linewidths = (1.0,)
kwargs.setdefault('edgecolors', edgecolors)
kwargs.setdefault('antialiaseds', (0,))
kwargs.setdefault('linewidths', linewidths)
collection = mcoll.PolyCollection(verts, **kwargs)
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
self.grid(False)
x = X.compressed()
y = Y.compressed()
minx = np.amin(x)
maxx = np.amax(x)
miny = np.amin(y)
maxy = np.amax(y)
corners = (minx, miny), (maxx, maxy)
self.update_datalim( corners)
self.autoscale_view()
self.add_collection(collection)
return collection
pcolor.__doc__ = cbook.dedent(pcolor.__doc__) % martist.kwdocd
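# A minimal usage sketch (illustrative only, not part of the module; assumes
# numpy/pyplot).  X and Y are one larger than C in each direction so that
# C[i, j] colours the quadrilateral bounded by the surrounding grid points;
# masked entries of C are simply not drawn:
#
#     import numpy as np
#     import numpy.ma as ma
#     import matplotlib.pyplot as plt
#     fig = plt.figure()
#     ax = fig.add_subplot(111)
#     X, Y = np.meshgrid(np.linspace(0, 3, 7), np.linspace(0, 2, 5))  # 5 x 7
#     C = ma.masked_greater(np.random.rand(4, 6), 0.9)  # one smaller each way
#     coll = ax.pcolor(X, Y, C, edgecolors='k')
#     fig.colorbar(coll)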
def pcolormesh(self, *args, **kwargs):
"""
call signatures::
pcolormesh(C)
pcolormesh(X, Y, C)
pcolormesh(C, **kwargs)
*C* may be a masked array, but *X* and *Y* may not. Masked
array support is implemented via *cmap* and *norm*; in
contrast, :func:`~matplotlib.pyplot.pcolor` simply does not
draw quadrilaterals with masked colors or vertices.
Keyword arguments:
*cmap*: [ None | Colormap ]
A :class:`matplotlib.cm.Colormap` instance. If None, use
rc settings.
*norm*: [ None | Normalize ]
A :class:`matplotlib.colors.Normalize` instance is used to
scale luminance data to 0,1. If None, defaults to
:func:`normalize`.
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with *norm* to
normalize luminance data. If either are *None*, the min
and max of the color array *C* is used. If you pass a
*norm* instance, *vmin* and *vmax* will be ignored.
*shading*: [ 'flat' | 'faceted' ]
If 'faceted', a black grid is drawn around each rectangle; if
'flat', edges are not drawn. Default is 'flat', contrary to
Matlab(TM).
This kwarg is deprecated; please use 'edgecolors' instead:
* shading='flat' -- edgecolors='None'
* shading='faceted' -- edgecolors='k'
*edgecolors*: [ None | 'None' | color | color sequence]
If None, the rc setting is used by default.
If 'None', edges will not be visible.
An mpl color or sequence of colors will set the edge color
*alpha*: 0 <= scalar <= 1
the alpha blending value
Return value is a :class:`matplotlib.collections.QuadMesh`
object.
kwargs can be used to control the
:class:`matplotlib.collections.QuadMesh`
properties:
%(QuadMesh)s
.. seealso::
:func:`~matplotlib.pyplot.pcolor`:
For an explanation of the grid orientation and the
expansion of 1-D *X* and/or *Y* to 2-D arrays.
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
shading = kwargs.pop('shading', 'flat')
edgecolors = kwargs.pop('edgecolors', 'None')
antialiased = kwargs.pop('antialiased', False)
X, Y, C = self._pcolorargs('pcolormesh', *args)
Ny, Nx = X.shape
# convert to one dimensional arrays
C = ma.ravel(C[0:Ny-1, 0:Nx-1]) # data point in each cell is value at
# lower left corner
X = X.ravel()
Y = Y.ravel()
coords = np.zeros(((Nx * Ny), 2), dtype=float)
coords[:, 0] = X
coords[:, 1] = Y
if shading == 'faceted' or edgecolors != 'None':
showedges = 1
else:
showedges = 0
collection = mcoll.QuadMesh(
Nx - 1, Ny - 1, coords, showedges,
antialiased=antialiased) # kwargs are not used
collection.set_alpha(alpha)
collection.set_array(C)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
collection.set_cmap(cmap)
collection.set_norm(norm)
if vmin is not None or vmax is not None:
collection.set_clim(vmin, vmax)
else:
collection.autoscale_None()
self.grid(False)
minx = np.amin(X)
maxx = np.amax(X)
miny = np.amin(Y)
maxy = np.amax(Y)
corners = (minx, miny), (maxx, maxy)
self.update_datalim( corners)
self.autoscale_view()
self.add_collection(collection)
return collection
pcolormesh.__doc__ = cbook.dedent(pcolormesh.__doc__) % martist.kwdocd
def pcolorfast(self, *args, **kwargs):
"""
pseudocolor plot of a 2-D array
Experimental; this is a version of pcolor that
does not draw lines, that provides the fastest
possible rendering with the Agg backend, and that
can handle any quadrilateral grid.
Call signatures::
pcolor(C, **kwargs)
pcolor(xr, yr, C, **kwargs)
pcolor(x, y, C, **kwargs)
pcolor(X, Y, C, **kwargs)
C is the 2D array of color values corresponding to quadrilateral
cells. Let (nr, nc) be its shape. C may be a masked array.
``pcolor(C, **kwargs)`` is equivalent to
``pcolor([0,nc], [0,nr], C, **kwargs)``
*xr*, *yr* specify the ranges of *x* and *y* corresponding to the
rectangular region bounding *C*. If::
xr = [x0, x1]
and::
yr = [y0,y1]
then *x* goes from *x0* to *x1* as the second index of *C* goes
from 0 to *nc*, etc. (*x0*, *y0*) is the outermost corner of
cell (0,0), and (*x1*, *y1*) is the outermost corner of cell
(*nr*-1, *nc*-1). All cells are rectangles of the same size.
This is the fastest version.
*x*, *y* are 1D arrays of length *nc* +1 and *nr* +1, respectively,
giving the x and y boundaries of the cells. Hence the cells are
rectangular but the grid may be nonuniform. The speed is
intermediate. (The grid is checked, and if found to be
uniform the fast version is used.)
*X* and *Y* are 2D arrays with shape (*nr* +1, *nc* +1) that specify
the (x,y) coordinates of the corners of the colored
quadrilaterals; the quadrilateral for C[i,j] has corners at
(X[i,j],Y[i,j]), (X[i,j+1],Y[i,j+1]), (X[i+1,j],Y[i+1,j]),
(X[i+1,j+1],Y[i+1,j+1]). The cells need not be rectangular.
This is the most general, but the slowest to render. It may
produce faster and more compact output using ps, pdf, and
svg backends, however.
Note that the column index corresponds to the x-coordinate,
and the row index corresponds to y; for details, see
the "Grid Orientation" section below.
Optional keyword arguments:
*cmap*: [ None | Colormap ]
A cm Colormap instance from cm. If None, use rc settings.
*norm*: [ None | Normalize ]
An mcolors.Normalize instance is used to scale luminance data to
0,1. If None, defaults to normalize()
*vmin*/*vmax*: [ None | scalar ]
*vmin* and *vmax* are used in conjunction with norm to normalize
luminance data. If either are *None*, the min and max of the color
array *C* is used. If you pass a norm instance, *vmin* and *vmax*
will be ignored.
*alpha*: 0 <= scalar <= 1
the alpha blending value
Return value is an image if a regular or rectangular grid
is specified, and a QuadMesh collection in the general
quadrilateral case.
"""
if not self._hold: self.cla()
alpha = kwargs.pop('alpha', 1.0)
norm = kwargs.pop('norm', None)
cmap = kwargs.pop('cmap', None)
vmin = kwargs.pop('vmin', None)
vmax = kwargs.pop('vmax', None)
if norm is not None: assert(isinstance(norm, mcolors.Normalize))
if cmap is not None: assert(isinstance(cmap, mcolors.Colormap))
C = args[-1]
nr, nc = C.shape
if len(args) == 1:
style = "image"
x = [0, nc]
y = [0, nr]
elif len(args) == 3:
x, y = args[:2]
x = np.asarray(x)
y = np.asarray(y)
if x.ndim == 1 and y.ndim == 1:
if x.size == 2 and y.size == 2:
style = "image"
else:
dx = np.diff(x)
dy = np.diff(y)
if (np.ptp(dx) < 0.01*np.abs(dx.mean()) and
np.ptp(dy) < 0.01*np.abs(dy.mean())):
style = "image"
else:
style = "pcolorimage"
elif x.ndim == 2 and y.ndim == 2:
style = "quadmesh"
else:
raise TypeError("arguments do not match valid signatures")
else:
raise TypeError("need 1 argument or 3 arguments")
if style == "quadmesh":
# convert to one dimensional arrays
# This should also be moved to the QuadMesh class
C = ma.ravel(C) # data point in each cell is value
# at lower left corner
X = x.ravel()
Y = y.ravel()
Nx = nc+1
Ny = nr+1
# The following needs to be cleaned up; the renderer
# requires separate contiguous arrays for X and Y,
# but the QuadMesh class requires the 2D array.
coords = np.empty(((Nx * Ny), 2), np.float64)
coords[:, 0] = X
coords[:, 1] = Y
# The QuadMesh class can also be changed to
# handle relevant superclass kwargs; the initializer
# should do much more than it does now.
collection = mcoll.QuadMesh(nc, nr, coords, 0)
collection.set_alpha(alpha)
collection.set_array(C)
collection.set_cmap(cmap)
collection.set_norm(norm)
self.add_collection(collection)
xl, xr, yb, yt = X.min(), X.max(), Y.min(), Y.max()
ret = collection
else:
# One of the image styles:
xl, xr, yb, yt = x[0], x[-1], y[0], y[-1]
if style == "image":
im = mimage.AxesImage(self, cmap, norm,
interpolation='nearest',
origin='lower',
extent=(xl, xr, yb, yt),
**kwargs)
im.set_data(C)
im.set_alpha(alpha)
self.images.append(im)
ret = im
if style == "pcolorimage":
im = mimage.PcolorImage(self, x, y, C,
cmap=cmap,
norm=norm,
alpha=alpha,
**kwargs)
self.images.append(im)
ret = im
self._set_artist_props(ret)
if vmin is not None or vmax is not None:
ret.set_clim(vmin, vmax)
else:
ret.autoscale_None()
self.update_datalim(np.array([[xl, yb], [xr, yt]]))
self.autoscale_view(tight=True)
return ret
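# A minimal usage sketch (illustrative only, not part of the module; assumes
# numpy/pyplot).  The three signatures trade generality for speed: a 2-element
# xr/yr (or no x/y at all) yields an image, 1-D cell boundaries yield an image
# or a PcolorImage depending on uniformity, and 2-D X/Y yield a QuadMesh:
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     fig = plt.figure()
#     ax = fig.add_subplot(111)
#     C = np.random.rand(50, 80)
#     ret = ax.pcolorfast(C)                        # fastest: regular image
#     # ret = ax.pcolorfast([0, 8.0], [0, 5.0], C)  # image with a data extent
#     # ret = ax.pcolorfast(np.linspace(0, 8, 81), np.linspace(0, 5, 51), C)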
def contour(self, *args, **kwargs):
if not self._hold: self.cla()
kwargs['filled'] = False
return mcontour.ContourSet(self, *args, **kwargs)
contour.__doc__ = mcontour.ContourSet.contour_doc
def contourf(self, *args, **kwargs):
if not self._hold: self.cla()
kwargs['filled'] = True
return mcontour.ContourSet(self, *args, **kwargs)
contourf.__doc__ = mcontour.ContourSet.contour_doc
def clabel(self, CS, *args, **kwargs):
return CS.clabel(*args, **kwargs)
clabel.__doc__ = mcontour.ContourSet.clabel.__doc__
def table(self, **kwargs):
"""
call signature::
table(cellText=None, cellColours=None,
cellLoc='right', colWidths=None,
rowLabels=None, rowColours=None, rowLoc='left',
colLabels=None, colColours=None, colLoc='center',
loc='bottom', bbox=None):
Add a table to the current axes. Returns a
:class:`matplotlib.table.Table` instance. For finer grained
control over tables, use the :class:`~matplotlib.table.Table`
class and add it to the axes with
:meth:`~matplotlib.axes.Axes.add_table`.
Thanks to John Gill for providing the class and table.
kwargs control the :class:`~matplotlib.table.Table`
properties:
%(Table)s
"""
return mtable.table(self, **kwargs)
table.__doc__ = cbook.dedent(table.__doc__) % martist.kwdocd
def twinx(self):
"""
call signature::
ax = twinx()
create a twin of Axes for generating a plot with a shared
x-axis but independent y-axis. The y-axis of self will have
ticks on the left and the returned axes will have ticks on
the right.
"""
ax2 = self.figure.add_axes(self.get_position(True), sharex=self,
frameon=False)
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position('right')
self.yaxis.tick_left()
return ax2
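# A minimal usage sketch (illustrative only, not part of the module; assumes
# numpy/pyplot).  The twin shares the x-axis but has its own y scale, so two
# quantities with different units can share one plot:
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     fig = plt.figure()
#     ax1 = fig.add_subplot(111)
#     t = np.linspace(0, 10, 200)
#     ax1.plot(t, np.exp(-t / 3.0), 'b-')
#     ax1.set_ylabel('decay')
#     ax2 = ax1.twinx()   # new y-axis on the right, same x-axis
#     ax2.plot(t, np.sin(2 * np.pi * t), 'r-')
#     ax2.set_ylabel('oscillation')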
def twiny(self):
"""
call signature::
ax = twiny()
create a twin of Axes for generating a plot with a shared
y-axis but independent x axis. The x-axis of self will have
ticks on the bottom and the returned axes will have ticks on the
top
"""
ax2 = self.figure.add_axes(self.get_position(True), sharey=self,
frameon=False)
ax2.xaxis.tick_top()
ax2.xaxis.set_label_position('top')
self.xaxis.tick_bottom()
return ax2
def get_shared_x_axes(self):
'Return a copy of the shared axes Grouper object for x axes'
return self._shared_x_axes
def get_shared_y_axes(self):
'Return a copy of the shared axes Grouper object for y axes'
return self._shared_y_axes
#### Data analysis
def hist(self, x, bins=10, range=None, normed=False, cumulative=False,
bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False, **kwargs):
"""
call signature::
hist(x, bins=10, range=None, normed=False, cumulative=False,
bottom=None, histtype='bar', align='mid',
orientation='vertical', rwidth=None, log=False, **kwargs)
Compute and draw the histogram of *x*. The return value is a
tuple (*n*, *bins*, *patches*) or ([*n0*, *n1*, ...], *bins*,
[*patches0*, *patches1*,...]) if the input contains multiple
data.
Keyword arguments:
*bins*:
Either an integer number of bins or a sequence giving the
bins. *x* are the data to be binned. *x* can be an array,
a 2D array with multiple data in its columns, or a list of
arrays with data of different length. Note, if *bins*
is an integer, *bins* + 1 bin edges
will be returned, compatible with the semantics of
:func:`numpy.histogram` with the *new* = True argument.
Unequally spaced bins are supported if *bins* is a sequence.
*range*:
The lower and upper range of the bins. Lower and upper outliers
are ignored. If not provided, *range* is (x.min(), x.max()).
Range has no effect if *bins* is a sequence.
If *bins* is a sequence or *range* is specified, autoscaling is
set off (*autoscale_on* is set to *False*) and the xaxis limits
are set to encompass the full specified bin range.
*normed*:
If *True*, the first element of the return tuple will
be the counts normalized to form a probability density, i.e.,
``n/(len(x)*dbin)``. In a probability density, the integral of
the histogram should be 1; you can verify that with a
trapezoidal integration of the probability density function::
pdf, bins, patches = ax.hist(...)
print np.sum(pdf * np.diff(bins))
*cumulative*:
If *True*, then a histogram is computed where each bin
gives the counts in that bin plus all bins for smaller values.
The last bin gives the total number of datapoints. If *normed*
is also *True* then the histogram is normalized such that the
last bin equals 1. If *cumulative* evaluates to less than 0
(e.g. -1), the direction of accumulation is reversed. In this
case, if *normed* is also *True*, then the histogram is normalized
such that the first bin equals 1.
*histtype*: [ 'bar' | 'barstacked' | 'step' | 'stepfilled' ]
The type of histogram to draw.
- 'bar' is a traditional bar-type histogram. If multiple data
are given the bars are arranged side by side.
- 'barstacked' is a bar-type histogram where multiple
data are stacked on top of each other.
- 'step' generates a lineplot that is by default
unfilled.
- 'stepfilled' generates a lineplot that is by default
filled.
*align*: ['left' | 'mid' | 'right' ]
Controls how the histogram is plotted.
- 'left': bars are centered on the left bin edges.
- 'mid': bars are centered between the bin edges.
- 'right': bars are centered on the right bin edges.
*orientation*: [ 'horizontal' | 'vertical' ]
If 'horizontal', :func:`~matplotlib.pyplot.barh` will be
used for bar-type histograms and the *bottom* kwarg will be
the left edges.
*rwidth*:
The relative width of the bars as a fraction of the bin
width. If *None*, automatically compute the width. Ignored
if *histtype* = 'step' or 'stepfilled'.
*log*:
If *True*, the histogram axis will be set to a log scale.
If *log* is *True* and *x* is a 1D array, empty bins will
be filtered out and only the non-empty (*n*, *bins*,
*patches*) will be returned.
kwargs are used to update the properties of the hist
:class:`~matplotlib.patches.Rectangle` instances:
%(Rectangle)s
You can use labels for your histogram, and only the first
:class:`~matplotlib.patches.Rectangle` gets the label (the
others get the magic string '_nolegend_'). This will make the
histograms work in the intuitive way for bar charts::
ax.hist(10+2*np.random.randn(1000), label='men')
ax.hist(12+3*np.random.randn(1000), label='women', alpha=0.5)
ax.legend()
**Example:**
.. plot:: mpl_examples/pylab_examples/histogram_demo.py
"""
if not self._hold: self.cla()
# NOTE: the range keyword overwrites the built-in func range !!!
# needs to be fixed in with numpy !!!
if kwargs.get('width') is not None:
raise DeprecationWarning(
'hist now uses the rwidth to give relative width '
'and not absolute width')
try:
# make sure a copy is created: don't use asarray
x = np.transpose(np.array(x))
if len(x.shape)==1:
x.shape = (1,x.shape[0])
elif len(x.shape)==2 and x.shape[1]<x.shape[0]:
warnings.warn('2D hist should be nsamples x nvariables; '
'this looks transposed')
except ValueError:
# multiple hist with data of different length
if iterable(x[0]) and not is_string_like(x[0]):
tx = []
for i in xrange(len(x)):
tx.append( np.array(x[i]) )
x = tx
else:
raise ValueError, 'Cannot use the provided data to create a histogram'
# Check whether bins or range are given explicitly. In that
# case do not autoscale axes.
binsgiven = (cbook.iterable(bins) or range != None)
# check the version of the numpy
if np.__version__ < "1.3": # version 1.1 and 1.2
hist_kwargs = dict(range=range,
normed=bool(normed), new=True)
else: # version 1.3 and later, drop new=True
hist_kwargs = dict(range=range,
normed=bool(normed))
n = []
for i in xrange(len(x)):
# this will automatically overwrite bins,
# so that each histogram uses the same bins
m, bins = np.histogram(x[i], bins, **hist_kwargs)
n.append(m)
if cumulative:
slc = slice(None)
if cbook.is_numlike(cumulative) and cumulative < 0:
slc = slice(None,None,-1)
if normed:
n = [(m * np.diff(bins))[slc].cumsum()[slc] for m in n]
else:
n = [m[slc].cumsum()[slc] for m in n]
patches = []
if histtype.startswith('bar'):
totwidth = np.diff(bins)
stacked = False
if rwidth is not None: dr = min(1., max(0., rwidth))
elif len(n)>1: dr = 0.8
else: dr = 1.0
if histtype=='bar':
width = dr*totwidth/len(n)
dw = width
if len(n)>1:
boffset = -0.5*dr*totwidth*(1.-1./len(n))
else:
boffset = 0.0
elif histtype=='barstacked':
width = dr*totwidth
boffset, dw = 0.0, 0.0
stacked = True
else:
raise ValueError, 'invalid histtype: %s' % histtype
if align == 'mid' or align == 'edge':
boffset += 0.5*totwidth
elif align == 'right':
boffset += totwidth
elif align != 'left' and align != 'center':
raise ValueError, 'invalid align: %s' % align
if orientation == 'horizontal':
for m in n:
color = self._get_lines._get_next_cycle_color()
patch = self.barh(bins[:-1]+boffset, m, height=width,
left=bottom, align='center', log=log,
color=color)
patches.append(patch)
if stacked:
if bottom is None: bottom = 0.0
bottom += m
boffset += dw
elif orientation == 'vertical':
for m in n:
color = self._get_lines._get_next_cycle_color()
patch = self.bar(bins[:-1]+boffset, m, width=width,
bottom=bottom, align='center', log=log,
color=color)
patches.append(patch)
if stacked:
if bottom is None: bottom = 0.0
bottom += m
boffset += dw
else:
raise ValueError, 'invalid orientation: %s' % orientation
elif histtype.startswith('step'):
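# Build the staircase outline by repeating every bin edge in x and filling
# y so that each count appears twice (once at each edge of its bin); the
# first and last y values stay at the baseline so the polygon closes down
# to the axis (or to 1e-100 on a log scale, see below).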
x = np.zeros( 2*len(bins), np.float )
y = np.zeros( 2*len(bins), np.float )
x[0::2], x[1::2] = bins, bins
if align == 'left' or align == 'center':
x -= 0.5*(bins[1]-bins[0])
elif align == 'right':
x += 0.5*(bins[1]-bins[0])
elif align != 'mid' and align != 'edge':
raise ValueError, 'invalid align: %s' % align
if log:
y[0],y[-1] = 1e-100, 1e-100
if orientation == 'horizontal':
self.set_xscale('log')
elif orientation == 'vertical':
self.set_yscale('log')
fill = False
if histtype == 'stepfilled':
fill = True
elif histtype != 'step':
raise ValueError, 'invalid histtype: %s' % histtype
for m in n:
y[1:-1:2], y[2::2] = m, m
if orientation == 'horizontal':
x,y = y,x
elif orientation != 'vertical':
raise ValueError, 'invalid orientation: %s' % orientation
color = self._get_lines._get_next_cycle_color()
if fill:
patches.append( self.fill(x, y,
closed=False, facecolor=color) )
else:
patches.append( self.fill(x, y,
closed=False, edgecolor=color, fill=False) )
# adopted from adjust_x/ylim part of the bar method
if orientation == 'horizontal':
xmin, xmax = 0, self.dataLim.intervalx[1]
for m in n:
xmin = np.amin(m[m!=0]) # filter out the 0 height bins
xmin = max(xmin*0.9, 1e-100)
self.dataLim.intervalx = (xmin, xmax)
elif orientation == 'vertical':
ymin, ymax = 0, self.dataLim.intervaly[1]
for m in n:
ymin = np.amin(m[m!=0]) # filter out the 0 height bins
ymin = max(ymin*0.9, 1e-100)
self.dataLim.intervaly = (ymin, ymax)
self.autoscale_view()
else:
raise ValueError, 'invalid histtype: %s' % histtype
label = kwargs.pop('label', '')
for patch in patches:
for p in patch:
p.update(kwargs)
p.set_label(label)
label = '_nolegend_'
if binsgiven:
self.set_autoscale_on(False)
if orientation == 'vertical':
self.autoscale_view(scalex=False, scaley=True)
XL = self.xaxis.get_major_locator().view_limits(bins[0], bins[-1])
self.set_xbound(XL)
else:
self.autoscale_view(scalex=True, scaley=False)
YL = self.yaxis.get_major_locator().view_limits(bins[0], bins[-1])
self.set_ybound(YL)
if len(n)==1:
return n[0], bins, cbook.silent_list('Patch', patches[0])
else:
return n, bins, cbook.silent_list('Lists of Patches', patches)
hist.__doc__ = cbook.dedent(hist.__doc__) % martist.kwdocd
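# A minimal usage sketch (illustrative only, not part of the module; assumes
# numpy/pyplot).  With normed=True the returned heights form a probability
# density, which can be checked by summing height * bin width:
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     fig = plt.figure()
#     ax = fig.add_subplot(111)
#     data = 10 + 2 * np.random.randn(1000)
#     pdf, bins, patches = ax.hist(data, bins=50, normed=True, alpha=0.75)
#     print np.sum(pdf * np.diff(bins))   # ~1.0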
def psd(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
psd(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
The power spectral density by Welch's average periodogram
method. The vector *x* is divided into *NFFT* length
segments. Each segment is detrended by function *detrend* and
windowed by function *window*. *noverlap* gives the length of
the overlap between segments. The :math:`|\mathrm{fft}(i)|^2`
of each segment :math:`i` are averaged to compute *Pxx*, with a
scaling to correct for power loss due to windowing. *Fs* is the
sampling frequency.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
Returns the tuple (*Pxx*, *freqs*).
For plotting, the power is plotted as
:math:`10\log_{10}(P_{xx})` for decibels, though *Pxx* itself
is returned.
References:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D` properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/psd_demo.py
"""
if not self._hold: self.cla()
pxx, freqs = mlab.psd(x, NFFT, Fs, detrend, window, noverlap, pad_to,
sides, scale_by_freq)
pxx.shape = len(freqs),
freqs += Fc
if scale_by_freq in (None, True):
psd_units = 'dB/Hz'
else:
psd_units = 'dB'
self.plot(freqs, 10*np.log10(pxx), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Power Spectral Density (%s)' % psd_units)
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax-vmin
logi = int(np.log10(intv))
if logi==0: logi=.1
step = 10*logi
#print vmin, vmax, step, intv, math.floor(vmin), math.ceil(vmax)+1
ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step)
self.set_yticks(ticks)
return pxx, freqs
psd_doc_dict = dict()
psd_doc_dict.update(martist.kwdocd)
psd_doc_dict.update(mlab.kwdocd)
psd_doc_dict['PSD'] = cbook.dedent(psd_doc_dict['PSD'])
psd.__doc__ = cbook.dedent(psd.__doc__) % psd_doc_dict
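# A minimal usage sketch (illustrative only, not part of the module; assumes
# numpy/pyplot and an invented 5 Hz test signal).  The curve is drawn in dB,
# but the raw (linear) Pxx and the frequency grid are returned:
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     fig = plt.figure()
#     ax = fig.add_subplot(111)
#     dt = 0.01
#     t = np.arange(0, 10, dt)
#     x = np.sin(2 * np.pi * 5 * t) + 0.5 * np.random.randn(len(t))
#     Pxx, freqs = ax.psd(x, NFFT=256, Fs=1.0 / dt, noverlap=128)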
def csd(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
csd(x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
The cross spectral density :math:`P_{xy}` by Welch's average
periodogram method. The vectors *x* and *y* are divided into
*NFFT* length segments. Each segment is detrended by function
*detrend* and windowed by function *window*. The product of
the direct FFTs of *x* and *y* are averaged over each segment
to compute :math:`P_{xy}`, with a scaling to correct for power
loss due to windowing.
Returns the tuple (*Pxy*, *freqs*). *Pxy* is the cross spectrum
(complex valued), and :math:`10\log_{10}|P_{xy}|` is
plotted.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
References:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the Line2D properties:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/csd_demo.py
.. seealso::
:meth:`psd`
For a description of the optional parameters.
"""
if not self._hold: self.cla()
pxy, freqs = mlab.csd(x, y, NFFT, Fs, detrend, window, noverlap,
pad_to, sides, scale_by_freq)
pxy.shape = len(freqs),
# pxy is complex
freqs += Fc
self.plot(freqs, 10*np.log10(np.absolute(pxy)), **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Cross Spectrum Magnitude (dB)')
self.grid(True)
vmin, vmax = self.viewLim.intervaly
intv = vmax-vmin
step = 10*int(np.log10(intv))
ticks = np.arange(math.floor(vmin), math.ceil(vmax)+1, step)
self.set_yticks(ticks)
return pxy, freqs
csd.__doc__ = cbook.dedent(csd.__doc__) % psd_doc_dict
def cohere(self, x, y, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs):
"""
call signature::
cohere(x, y, NFFT=256, Fs=2, Fc=0, detrend = mlab.detrend_none,
window = mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None, **kwargs)
Plot the coherence between *x* and *y*. Coherence is the normalized
cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the x extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
The return value is a tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector.
kwargs are applied to the lines.
References:
* Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
kwargs control the :class:`~matplotlib.lines.Line2D`
properties of the coherence plot:
%(Line2D)s
**Example:**
.. plot:: mpl_examples/pylab_examples/cohere_demo.py
"""
if not self._hold: self.cla()
cxy, freqs = mlab.cohere(x, y, NFFT, Fs, detrend, window, noverlap,
pad_to, sides, scale_by_freq)
freqs += Fc
self.plot(freqs, cxy, **kwargs)
self.set_xlabel('Frequency')
self.set_ylabel('Coherence')
self.grid(True)
return cxy, freqs
cohere.__doc__ = cbook.dedent(cohere.__doc__) % psd_doc_dict
def specgram(self, x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None):
"""
call signature::
specgram(x, NFFT=256, Fs=2, Fc=0, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None)
Compute a spectrogram of data in *x*. Data are split into
*NFFT* length segments and the PSD of each section is
computed. The windowing function *window* is applied to each
segment, and the amount of overlap of each segment is
specified with *noverlap*.
%(PSD)s
*Fc*: integer
The center frequency of *x* (defaults to 0), which offsets
the y extents of the plot to reflect the frequency range used
when a signal is acquired and then filtered and downsampled to
baseband.
*cmap*:
A :class:`matplotlib.cm.Colormap` instance; if *None* use
default determined by rc
*xextent*:
The image extent along the x-axis. xextent = (xmin,xmax)
The default is (0,max(bins)), where bins is the return
value from :func:`mlab.specgram`
Return value is (*Pxx*, *freqs*, *bins*, *im*):
- *bins* are the time points the spectrogram is calculated over
- *freqs* is an array of frequencies
- *Pxx* is a len(freqs) x len(bins) array of power
- *im* is a :class:`matplotlib.image.AxesImage` instance
Note: If *x* is real (i.e. non-complex), only the positive
spectrum is shown. If *x* is complex, both positive and
negative parts of the spectrum are shown. This can be
overridden using the *sides* keyword argument.
**Example:**
.. plot:: mpl_examples/pylab_examples/specgram_demo.py
"""
if not self._hold: self.cla()
Pxx, freqs, bins = mlab.specgram(x, NFFT, Fs, detrend,
window, noverlap, pad_to, sides, scale_by_freq)
Z = 10. * np.log10(Pxx)
Z = np.flipud(Z)
if xextent is None: xextent = 0, np.amax(bins)
xmin, xmax = xextent
freqs += Fc
extent = xmin, xmax, freqs[0], freqs[-1]
im = self.imshow(Z, cmap, extent=extent)
self.axis('auto')
return Pxx, freqs, bins, im
specgram.__doc__ = cbook.dedent(specgram.__doc__) % psd_doc_dict
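# A minimal usage sketch (illustrative only, not part of the module; assumes
# numpy/pyplot and an invented noisy tone).  The returned AxesImage can be
# passed to colorbar like any other mappable:
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     fig = plt.figure()
#     ax = fig.add_subplot(111)
#     dt = 0.001
#     t = np.arange(0, 5, dt)
#     x = np.sin(2 * np.pi * 100 * t) + np.random.randn(len(t))
#     Pxx, freqs, bins, im = ax.specgram(x, NFFT=256, Fs=1.0 / dt, noverlap=128)
#     fig.colorbar(im)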
del psd_doc_dict #So that this does not become an Axes attribute
def spy(self, Z, precision=0, marker=None, markersize=None,
aspect='equal', **kwargs):
"""
call signature::
spy(Z, precision=0, marker=None, markersize=None,
aspect='equal', **kwargs)
``spy(Z)`` plots the sparsity pattern of the 2-D array *Z*.
If *precision* is 0, any non-zero value will be plotted;
else, values of :math:`|Z| > precision` will be plotted.
For :class:`scipy.sparse.spmatrix` instances, there is a
special case: if *precision* is 'present', any value present in
the array will be plotted, even if it is identically zero.
The array will be plotted as it would be printed, with
the first index (row) increasing down and the second
index (column) increasing to the right.
By default aspect is 'equal', so that each array element
occupies a square space; set the aspect kwarg to 'auto'
to allow the plot to fill the plot box, or to any scalar
number to specify the aspect ratio of an array element
directly.
Two plotting styles are available: image or marker. Both
are available for full arrays, but only the marker style
works for :class:`scipy.sparse.spmatrix` instances.
If *marker* and *markersize* are *None*, an image will be
returned and any remaining kwargs are passed to
:func:`~matplotlib.pyplot.imshow`; else, a
:class:`~matplotlib.lines.Line2D` object will be returned with
the value of marker determining the marker type, and any
remaining kwargs passed to the
:meth:`~matplotlib.axes.Axes.plot` method.
If *marker* and *markersize* are *None*, useful kwargs include:
* *cmap*
* *alpha*
.. seealso::
:func:`~matplotlib.pyplot.imshow`
For controlling colors, e.g. cyan background and red marks,
use::
cmap = mcolors.ListedColormap(['c','r'])
If *marker* or *markersize* is not *None*, useful kwargs include:
* *marker*
* *markersize*
* *color*
Useful values for *marker* include:
* 's' square (default)
* 'o' circle
* '.' point
* ',' pixel
.. seealso::
:func:`~matplotlib.pyplot.plot`
"""
if precision is None:
precision = 0
warnings.warn("Use precision=0 instead of None", DeprecationWarning)
# 2008/10/03
if marker is None and markersize is None and hasattr(Z, 'tocoo'):
marker = 's'
if marker is None and markersize is None:
Z = np.asarray(Z)
mask = np.absolute(Z)>precision
if 'cmap' not in kwargs:
kwargs['cmap'] = mcolors.ListedColormap(['w', 'k'],
name='binary')
nr, nc = Z.shape
extent = [-0.5, nc-0.5, nr-0.5, -0.5]
ret = self.imshow(mask, interpolation='nearest', aspect=aspect,
extent=extent, origin='upper', **kwargs)
else:
if hasattr(Z, 'tocoo'):
c = Z.tocoo()
if precision == 'present':
y = c.row
x = c.col
else:
nonzero = np.absolute(c.data) > precision
y = c.row[nonzero]
x = c.col[nonzero]
else:
Z = np.asarray(Z)
nonzero = np.absolute(Z)>precision
y, x = np.nonzero(nonzero)
if marker is None: marker = 's'
if markersize is None: markersize = 10
marks = mlines.Line2D(x, y, linestyle='None',
marker=marker, markersize=markersize, **kwargs)
self.add_line(marks)
nr, nc = Z.shape
self.set_xlim(xmin=-0.5, xmax=nc-0.5)
self.set_ylim(ymin=nr-0.5, ymax=-0.5)
self.set_aspect(aspect)
ret = marks
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return ret
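# A minimal usage sketch (illustrative only, not part of the module; assumes
# numpy/pyplot).  With no marker an image is drawn; requesting a marker
# switches to the Line2D style, which is the only style available for sparse
# matrices:
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     fig = plt.figure()
#     ax = fig.add_subplot(111)
#     Z = np.random.rand(30, 30)
#     Z[Z < 0.9] = 0.0                      # mostly zeros
#     ax.spy(Z)                             # image style
#     # ax.spy(Z, marker='.', markersize=5) # marker (Line2D) style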
def matshow(self, Z, **kwargs):
'''
Plot a matrix or array as an image.
The matrix will be shown the way it would be printed,
with the first row at the top. Row and column numbering
is zero-based.
Argument:
*Z* anything that can be interpreted as a 2-D array
kwargs all are passed to :meth:`~matplotlib.axes.Axes.imshow`.
:meth:`matshow` sets defaults for *extent*, *origin*,
*interpolation*, and *aspect*; use care in overriding the
*extent* and *origin* kwargs, because they interact. (Also,
if you want to change them, you probably should be using
imshow directly in your own version of matshow.)
Returns: an :class:`matplotlib.image.AxesImage` instance.
'''
Z = np.asarray(Z)
nr, nc = Z.shape
extent = [-0.5, nc-0.5, nr-0.5, -0.5]
kw = {'extent': extent,
'origin': 'upper',
'interpolation': 'nearest',
'aspect': 'equal'} # (already the imshow default)
kw.update(kwargs)
im = self.imshow(Z, **kw)
self.title.set_y(1.05)
self.xaxis.tick_top()
self.xaxis.set_ticks_position('both')
self.xaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
self.yaxis.set_major_locator(mticker.MaxNLocator(nbins=9,
steps=[1, 2, 5, 10],
integer=True))
return im
class SubplotBase:
"""
Base class for subplots, which are :class:`Axes` instances with
additional methods to facilitate generating and manipulating a set
of :class:`Axes` within a figure.
"""
def __init__(self, fig, *args, **kwargs):
"""
*fig* is a :class:`matplotlib.figure.Figure` instance.
*args* is the tuple (*numRows*, *numCols*, *plotNum*), where
the array of subplots in the figure has dimensions *numRows*,
*numCols*, and where *plotNum* is the number of the subplot
being created. *plotNum* starts at 1 in the upper left
corner and increases to the right.
If *numRows* <= *numCols* <= *plotNum* < 10, *args* can be the
decimal integer *numRows* * 100 + *numCols* * 10 + *plotNum*.
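        For example, ``(2, 2, 3)`` and ``223`` both refer to the third
        subplot of a 2x2 grid; in practice such subplots are usually
        created with ``fig.add_subplot(2, 2, 3)`` or
        ``fig.add_subplot(223)``.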
"""
self.figure = fig
if len(args)==1:
s = str(args[0])
if len(s) != 3:
                raise ValueError('Argument to subplot must be a 3-digit integer')
rows, cols, num = map(int, s)
elif len(args)==3:
rows, cols, num = args
else:
raise ValueError( 'Illegal argument to subplot')
total = rows*cols
num -= 1 # convert from matlab to python indexing
# ie num in range(0,total)
if num >= total:
raise ValueError( 'Subplot number exceeds total subplots')
self._rows = rows
self._cols = cols
self._num = num
self.update_params()
# _axes_class is set in the subplot_class_factory
self._axes_class.__init__(self, fig, self.figbox, **kwargs)
def get_geometry(self):
'get the subplot geometry, eg 2,2,3'
return self._rows, self._cols, self._num+1
# COVERAGE NOTE: Never used internally or from examples
def change_geometry(self, numrows, numcols, num):
'change subplot geometry, eg. from 1,1,1 to 2,2,3'
self._rows = numrows
self._cols = numcols
self._num = num-1
self.update_params()
self.set_position(self.figbox)
def update_params(self):
'update the subplot position from fig.subplotpars'
rows = self._rows
cols = self._cols
num = self._num
pars = self.figure.subplotpars
left = pars.left
right = pars.right
bottom = pars.bottom
top = pars.top
wspace = pars.wspace
hspace = pars.hspace
totWidth = right-left
totHeight = top-bottom
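        # The available height satisfies rows*figH + (rows-1)*sepH = totHeight
        # with sepH = hspace*figH (and similarly for the widths), which gives
        # the expressions below.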
figH = totHeight/(rows + hspace*(rows-1))
sepH = hspace*figH
figW = totWidth/(cols + wspace*(cols-1))
sepW = wspace*figW
rowNum, colNum = divmod(num, cols)
figBottom = top - (rowNum+1)*figH - rowNum*sepH
figLeft = left + colNum*(figW + sepW)
self.figbox = mtransforms.Bbox.from_bounds(figLeft, figBottom,
figW, figH)
self.rowNum = rowNum
self.colNum = colNum
self.numRows = rows
self.numCols = cols
        if 0:
            print 'rcn', rows, cols, num
            print 'lbrt', left, bottom, right, top
            print 'figBottom', figBottom
            print 'figLeft', figLeft
            print 'figW', figW
            print 'figH', figH
            print 'self.rowNum', self.rowNum
            print 'self.colNum', self.colNum
            print 'self.numRows', self.numRows
            print 'self.numCols', self.numCols
def is_first_col(self):
return self.colNum==0
def is_first_row(self):
return self.rowNum==0
def is_last_row(self):
return self.rowNum==self.numRows-1
def is_last_col(self):
return self.colNum==self.numCols-1
# COVERAGE NOTE: Never used internally or from examples
def label_outer(self):
"""
set the visible property on ticklabels so xticklabels are
visible only if the subplot is in the last row and yticklabels
are visible only if the subplot is in the first column
"""
lastrow = self.is_last_row()
firstcol = self.is_first_col()
for label in self.get_xticklabels():
label.set_visible(lastrow)
for label in self.get_yticklabels():
label.set_visible(firstcol)
_subplot_classes = {}
def subplot_class_factory(axes_class=None):
# This makes a new class that inherits from SubclassBase and the
# given axes_class (which is assumed to be a subclass of Axes).
# This is perhaps a little bit roundabout to make a new class on
# the fly like this, but it means that a new Subplot class does
# not have to be created for every type of Axes.
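    # For example, subplot_class_factory(Axes) returns (and caches) a class
    # named "AxesSubplot" that inherits from both SubplotBase and Axes.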
if axes_class is None:
axes_class = Axes
new_class = _subplot_classes.get(axes_class)
if new_class is None:
new_class = new.classobj("%sSubplot" % (axes_class.__name__),
(SubplotBase, axes_class),
{'_axes_class': axes_class})
_subplot_classes[axes_class] = new_class
return new_class
# This is provided for backward compatibility
Subplot = subplot_class_factory()
martist.kwdocd['Axes'] = martist.kwdocd['Subplot'] = martist.kwdoc(Axes)
"""
# this is some discarded code I was using to find the minimum positive
# data point for some log scaling fixes. I realized there was a
# cleaner way to do it, but am keeping this around as an example for
# how to get the data out of the axes. Might want to make something
# like this a method one day, or better yet make get_verts an Artist
# method
minx, maxx = self.get_xlim()
if minx<=0 or maxx<=0:
# find the min pos value in the data
xs = []
for line in self.lines:
xs.extend(line.get_xdata(orig=False))
for patch in self.patches:
xs.extend([x for x,y in patch.get_verts()])
for collection in self.collections:
xs.extend([x for x,y in collection.get_verts()])
posx = [x for x in xs if x>0]
if len(posx):
minx = min(posx)
maxx = max(posx)
# warning, probably breaks inverted axis
self.set_xlim((0.1*minx, maxx))
"""
| gpl-3.0 |
gmsanchez/nmpc_comparison | cstr_startup_colloc.py | 1 | 9778 | # Linear and nonlinear control of startup of a CSTR.
import mpctools as mpc
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import gridspec
import time
# Define some parameters and then the CSTR model.
Nx = 3
Nu = 2
Nd = 1
# Ny = Nx
Delta = .25
# eps = 1e-6 # Use this as a small number.
T0 = 350
c0 = 1
r = .219
k0 = 7.2e10
E = 8750
U = 54.94
rho = 1000
Cp = .239
dH = -5e4
def ode(x,u,d):
# Grab the states, controls, and disturbance. We would like to write
#
# [c, T, h] = x[0:Nx]
# [Tc, F] = u[0:Nu]
# [F0] = d[0:Nd]
#
# but this doesn't work in Casadi 3.0. So, we're stuck with the following:
c = x[0]
T = x[1]
h = x[2]
Tc = u[0]
F = u[1]
F0 = d[0]
# Now create the ODE.
rate = k0*c*np.exp(-E/T)
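    # The entries of dxdt below are, in order: a component balance for the
    # concentration c, an energy balance for the temperature T (inflow,
    # heat of reaction, and cooling-jacket heat transfer), and a level
    # balance for the liquid height h.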
dxdt = np.array([
F0*(c0 - c)/(np.pi*r**2*h) - rate,
F0*(T0 - T)/(np.pi*r**2*h)
- dH/(rho*Cp)*rate
+ 2*U/(r*rho*Cp)*(Tc - T),
(F0 - F)/(np.pi*r**2)
])
return dxdt
# Turn into casadi function and simulator.
ode_casadi = mpc.getCasadiFunc(ode,[Nx,Nu,Nd],["x","u","d"],funcname="ode")
ode_rk4_casadi = mpc.getCasadiFunc(ode,[Nx,Nu,Nd],["x","u","d"],
funcname="ode_rk4",rk4=False,Delta=Delta)
cstr = mpc.DiscreteSimulator(ode, Delta, [Nx,Nu,Nd], ["x","u","d"])
# Steady-state values.
cs = .878
Ts = 324.5
hs = .659
Fs = .1
Tcs = 300
F0s = .1
# Update the steady-state values a few times to make sure they don't move.
for i in range(10):
[cs,Ts,hs] = cstr.sim([cs,Ts,hs],[Tcs,Fs],[F0s]).tolist()
xs = np.array([cs,Ts,hs])
us = np.array([Tcs,Fs])
ds = np.array([F0s])
# Now get a linearization at this steady state.
#ss = mpc.util.getLinearizedModel(ode_casadi, [xs,us,ds], ["A","B","Bp"], Delta)
#A = ss["A"]
#B = ss["B"]
#Bp = ss["Bp"]
#C = np.eye(Nx)
# Weighting matrices for controller.
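# Scaling by the inverse squared steady-state values makes the quadratic
# penalties roughly dimensionless, so states and inputs of very different
# magnitudes are weighted comparably.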
Q = .5*np.diag(xs**-2)
R = 2*np.diag(us**-2)
# model_casadi = mpc.getCasadiFunc(ode,[Nx,Nu,Nd],["x","u","d"],funcname="cstr")
#[K, Pi] = mpc.util.dlqr(A,B,Q,R)
# Define casadi functions.
Fnonlinear = ode_rk4_casadi
# def measurement(x,d):
# return x
# h = mpc.getCasadiFunc(measurement,[Nx,Nd],["x","d"],funcname="h")
#def linmodel(x,u,d):
# Ax = mpc.mtimes(A,x-xs) + xs
# Bu = mpc.mtimes(B,u-us)
# Bpd = mpc.mtimes(Bp,d-ds)
# return Ax + Bu + Bpd
#Flinear = mpc.getCasadiFunc(linmodel,[Nx,Nu,Nd],["x","u","d"],funcname="F")
def stagecost(x,u,xsp,usp,Q,R):
# Return deviation variables.
dx = x - xsp
du = u - usp
# Calculate stage cost.
return mpc.mtimes(dx.T,Q,dx) + mpc.mtimes(du.T,R,du)
largs = ["x","u","x_sp","u_sp","Q","R"]
l = mpc.getCasadiFunc(stagecost,[Nx,Nu,Nx,Nu,(Nx,Nx),(Nu,Nu)],largs,
funcname="l")
def costtogo(x,xsp):
# Deviation variables.
dx = x - xsp
# Calculate cost to go.
return mpc.mtimes(dx.T,10*Q,dx)
Pf = mpc.getCasadiFunc(costtogo,[Nx,Nx],["x","x_sp"],funcname="Pf")
# First see what happens if we try to start up the reactor under no control.
Nsim = 100
x0 = np.array([.05*cs,.75*Ts,.5*hs])
xcl = {}
ucl = {}
xcl["uncont"] = np.zeros((Nsim+1,Nx))
xcl["uncont"][0,:] = x0
ucl["uncont"] = np.tile(us,(Nsim,1))
for t in range(Nsim):
xcl["uncont"][t+1,:] = cstr.sim(xcl["uncont"][t,:],ucl["uncont"][t,:],ds)
# Build a solver for the linear and nonlinear models.
Nt = 15
sp = {"x" : np.tile(xs, (Nt+1,1)), "u" : np.tile(us, (Nt,1))}
#xguesslin = np.zeros((Nt+1,Nx))
#xguesslin[0,:] = x0
#for t in range(Nt):
# xguesslin[t+1,:] = A.dot(xguesslin[t,:] - xs) + xs
#guesslin = {"x" : xguesslin, "u" : np.tile(us,(Nt,1))}
guessnonlin = sp.copy()
# Control bounds.
umax = np.array([.05*Tcs,.15*Fs])
dumax = .2*umax # Maximum for rate-of-change.
bounds = dict(uub=[us + umax],ulb=[us - umax])
ub = {"u" : np.tile(us + umax, (Nt,1)), "Du" : np.tile(dumax, (Nt,1))}
lb = {"u" : np.tile(us - umax, (Nt,1)), "Du" : np.tile(-dumax, (Nt,1))}
N = {"x":Nx, "u":Nu, "p":Nd, "t":Nt, "c":3}
p = np.tile(ds, (Nt,1)) # Parameters for system.
nmpc_commonargs = {
"N" : N,
"Delta": Delta,
"x0" : x0,
"lb" : lb,
"ub" : ub,
"p" : p,
"verbosity" : 0,
"Pf" : Pf,
"l" : l,
"sp" : sp,
"uprev" : us,
"funcargs" : {"l" : largs},
"extrapar" : {"Q" : Q, "R" : R}, # In case we want to tune online.
}
solvers = {}
# solvers["lmpc"] = mpc.nmpc(f=Flinear,guess=guesslin,**nmpc_commonargs)
solvers["nmpc"] = mpc.nmpc(f=Fnonlinear,guess=guessnonlin,**nmpc_commonargs)
# Also build steady-state target finders.
contVars = [0,2]
#sstarg_commonargs = {
# "N" : N,
# "lb" : {"u" : np.tile(us - umax, (1,1))},
# "ub" : {"u" : np.tile(us + umax, (1,1))},
# "verbosity" : 0,
## "h" : h,
# "p" : np.array([ds]),
#}
#sstargs = {}
# sstargs["lmpc"] = mpc.sstarg(f=Flinear,**sstarg_commonargs)
# sstargs["nmpc"] = mpc.sstarg(f=Fnonlinear,**sstarg_commonargs)
# Now simulate the process under control.
tcl = {}
for method in solvers.keys():
xcl[method] = np.zeros((Nsim+1,Nx))
xcl[method][0,:] = x0
tcl[method] = np.zeros((Nsim+1,1))
thisx = x0
ucl[method] = np.zeros((Nsim,Nu))
# ysp = np.tile(xs,(Nsim+1,1))
xsp = np.zeros((Nsim+1,Nx))
usp = np.zeros((Nsim,Nu))
# ysp[int(Nsim/3):int(2*Nsim/3),:] = xs*np.array([.85,.75,1.15])
for t in range(Nsim):
# Figure out setpoints.
# if t == 0 or not np.all(ysp[t,:] == ysp[t-1,:]):
# thisysp = ysp[t,:]
# sstargs[method].fixvar("y",0,thisysp[contVars],contVars)
# sstargs[method].guess["u",0] = us
# sstargs[method].guess["x",0] = thisysp
# sstargs[method].guess["y",0] = thisysp
# sstargs[method].solve()
#
# print "%10s %3d: %s" % ("sstarg",t,sstargs[method].stats["status"])
# if sstargs[method].stats["status"] != "Solve_Succeeded":
# print "***Target finder failed!"
# break
#
# xsp[t,:] = np.squeeze(sstargs[method].var["x",0])
# usp[t,:] = np.squeeze(sstargs[method].var["u",0])
#
# solvers[method].par["x_sp"] = [xsp[t,:]]*(Nt + 1)
# solvers[method].par["u_sp"] = [usp[t,:]]*Nt
# Fix initial condition and solve.
t0 = time.time()
solvers[method].fixvar("x",0,thisx)
solvers[method].solve()
print "%10s %3d: %s" % (method,t,solvers[method].stats["status"])
if solvers[method].stats["status"] != "Solve_Succeeded":
print "***Solver failed!"
break
else:
solvers[method].saveguess()
thisu = np.squeeze(solvers[method].var["u"][0])
ucl[method][t,:] = thisu
t1 = time.time()
tcl[method][t] = t1-t0
thisx = cstr.sim(thisx,thisu,ds)
xcl[method][t+1,:] = thisx
# Update previous u.
solvers[method].par["u_prev",0] = ucl[method][t,:]
# Define plotting function.
def cstrplot(x,u,xsp=None,contVars=[],title=None,colors={},labels={},
markers={},keys=None,bounds=None,ilegend=0):
if keys is None:
keys = x.keys()
for k in keys:
u[k] = np.concatenate((u[k],u[k][-1:,:]))
ylabelsx = ["$c$ (mol/L)", "$T$ (K)", "$h$ (m)"]
ylabelsu = ["$T_c$ (K)", "$F$ (kL/min)"]
gs = gridspec.GridSpec(Nx*Nu,2)
fig = plt.figure(figsize=(10,6),facecolor="none")
leglines = []
leglabels = []
for i in range(Nx):
ax = fig.add_subplot(gs[i*Nu:(i+1)*Nu,0])
for k in keys:
t = np.arange(0,x[k].shape[0])*Delta
args = {"color":colors.get(k,"black"), "label":labels.get(k,k),
"marker":markers.get(k,"")}
[line] = ax.plot(t,x[k][:,i],markeredgecolor="none",**args)
if i == ilegend:
leglines.append(line)
leglabels.append(args["label"])
if i in contVars and xsp is not None:
ax.step(t,xsp[:,i],linestyle="--",color="black",where="post")
ax.set_ylabel(ylabelsx[i])
mpc.plots.zoomaxis(ax,yscale=1.1)
mpc.plots.prettyaxesbox(ax)
mpc.plots.prettyaxesbox(ax,
facecolor="white",front=False)
ax.set_xlabel("Time (min)")
for i in range(Nu):
ax = fig.add_subplot(gs[i*Nx:(i+1)*Nx,1])
for k in keys:
t = np.arange(0,u[k].shape[0])*Delta
args = {"color":colors.get(k,"black"), "label":labels.get(k,k)}
ax.step(t,u[k][:,i],where="post",**args)
if bounds is not None:
for b in set(["uub", "ulb"]).intersection(bounds.keys()):
ax.plot(np.array([t[0],t[-1]]),np.ones((2,))*bounds[b][i],
'--k')
ax.set_ylabel(ylabelsu[i])
mpc.plots.zoomaxis(ax,yscale=1.25)
mpc.plots.prettyaxesbox(ax)
mpc.plots.prettyaxesbox(ax,
facecolor="white",front=False)
ax.set_xlabel("Time (min)")
fig.legend(leglines,leglabels,loc="lower center",ncol=len(keys))
fig.tight_layout(pad=.5,rect=(0,.075,1,1))
if title is not None:
fig.canvas.set_window_title(title)
return fig
x = xcl['nmpc']
u = ucl['nmpc']
ptimes = tcl['nmpc']
# Make plots.
keys = ["uncont", "nmpc"]
colors = {"lmpc":"blue", "nmpc":"green", "uncont":"red"}
labels = {"lmpc":"LMPC", "nmpc":"NMPC", "uncont":"Uncontrolled"}
markers = {"lmpc":"s", "nmpc":"o", "uncont":"^"}
plotbounds = dict([(k,bounds[k][0]) for k in ["ulb","uub"]])
fig = cstrplot(xcl, ucl, colors=colors, contVars=contVars, labels=labels,
keys=keys, markers={}, bounds=plotbounds, ilegend=2)
fig.show()
# mpc.plots.showandsave(fig,"cstr_startup.pdf",facecolor="none")
| gpl-3.0 |
fzalkow/scikit-learn | examples/exercises/plot_cv_diabetes.py | 231 | 2527 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation, datasets, linear_model
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = linear_model.Lasso()
alphas = np.logspace(-4, -.5, 30)
scores = list()
scores_std = list()
for alpha in alphas:
lasso.alpha = alpha
this_scores = cross_validation.cross_val_score(lasso, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
plt.figure(figsize=(4, 3))
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
plt.semilogx(alphas, np.array(scores) + np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.semilogx(alphas, np.array(scores) - np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.ylabel('CV score')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = linear_model.LassoCV(alphas=alphas)
k_fold = cross_validation.KFold(len(X), 3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
| bsd-3-clause |
xyguo/scikit-learn | examples/decomposition/plot_image_denoising.py | 70 | 6249 | """
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of a raccoon face image, using online :ref:`DictionaryLearning` first and
then various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the result of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). It is in addition closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d
from sklearn.utils.testing import SkipTest
from sklearn.utils.fixes import sp_version
if sp_version < (0, 12):
raise SkipTest("Skipping because SciPy version earlier than 0.12.0 and "
"thus does not include the scipy.misc.face() image.")
###############################################################################
try:
from scipy import misc
face = misc.face(gray=True)
except AttributeError:
# Old versions of scipy have face in the top level package
face = sp.face(gray=True)
# Convert from uint8 representation with values between 0 and 255 to
# a floating point representation with values between 0 and 1.
face = face / 255
# downsample for higher speed
face = face[::2, ::2] + face[1::2, ::2] + face[::2, 1::2] + face[1::2, 1::2]
face /= 4.0
height, width = face.shape
# Distort the right half of the image
print('Distorting image...')
distorted = face.copy()
distorted[:, width // 2:] += 0.075 * np.random.randn(height, width // 2)
# Extract all reference patches from the left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :width // 2], patch_size)
data = data.reshape(data.shape[0], -1)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))
###############################################################################
# Learn the dictionary from reference patches
print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
plt.subplot(10, 10, i + 1)
plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Dictionary learned from face patches\n' +
'Train time %.1fs on %d patches' % (dt, len(data)),
fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
"""Helper function to display denoising"""
plt.figure(figsize=(5, 3.3))
plt.subplot(1, 2, 1)
plt.title('Image')
plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.subplot(1, 2, 2)
difference = image - reference
plt.title('Difference (norm: %.2f)' % np.sqrt(np.sum(difference ** 2)))
plt.imshow(difference, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle(title, size=16)
plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, face, 'Distorted image')
###############################################################################
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, width // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))
transform_algorithms = [
('Orthogonal Matching Pursuit\n1 atom', 'omp',
{'transform_n_nonzero_coefs': 1}),
('Orthogonal Matching Pursuit\n2 atoms', 'omp',
{'transform_n_nonzero_coefs': 2}),
('Least-angle regression\n5 atoms', 'lars',
{'transform_n_nonzero_coefs': 5}),
('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]
reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
print(title + '...')
reconstructions[title] = face.copy()
t0 = time()
dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
code = dico.transform(data)
patches = np.dot(code, V)
patches += intercept
patches = patches.reshape(len(data), *patch_size)
if transform_algorithm == 'threshold':
patches -= patches.min()
patches /= patches.max()
reconstructions[title][:, width // 2:] = reconstruct_from_patches_2d(
patches, (height, width // 2))
dt = time() - t0
print('done in %.2fs.' % dt)
show_with_diff(reconstructions[title], face,
title + ' (time: %.1fs)' % dt)
plt.show()
| bsd-3-clause |
hstau/manifold-cryo | fit_1D_open_manifold_3D.py | 1 | 5015 | import numpy as np
import get_fit_1D_open_manifold_3D_param
import solve_d_R_d_tau_p_3D
import a
from scipy.io import loadmat
import matplotlib.pyplot as plt
#import matplotlib.pyplot as plt
'''
function [a,b,tau] = fit_1D_open_manifold_3D(psi)
%
% fit_1D_open_manifold_3D
%
% fit the eigenvectors for a 1D open manifold to the model
% x_ij = a_j cos(j*pi*tau_i) + b_j.
%
% j goes from 1 to 3 (this is only for 3D systems).
%
% i goes from 1 to nS where nS is the number of data points to be fitted.
%
% For a fixed set of a_j and b_j, j=1:3, tau_i for i=1:nS are
% obtained by putting dR/d(tau_i) to zero.
%
% For a fixed set of tau_i, i=1:nS, a_j and b_j for j=1:3 are
% obtained by solving 3 sets of 2x2 linear equations.
%
% Fit parameters and initial set of {\tau} are specified in
%
% get_fit_1D_open_manifold_3D_param.m
%
% copyright (c) Russell Fung 2014
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
global p nDim a b x x_fit
'''
'''
def plot_fitted_curve(hFig):
global x x_fit
h = plt.figure(hFig)
hsp = plt.subplot(2,2,1)
plot3(x(:,1),x(:,2),x(:,3),'b.','lineWidth',1);
hold on
plot3(x_fit(:,1),x_fit(:,2),x_fit(:,3),'g.','lineWidth',1);
hold off
set(hsp,'lineWidth',2,'fontSize',15);
hsp = subplot(2,2,2);
plotRF(hsp,x(:,1),x(:,2),'','','','b.');
addplotRF(hsp,x_fit(:,1),x_fit(:,2),'g.');
hsp = subplot(2,2,3);
plotRF(hsp,x(:,1),x(:,3),'','','','b.');
addplotRF(hsp,x_fit(:,1),x_fit(:,3),'g.');
hsp = subplot(2,2,4);
plotRF(hsp,x(:,2),x(:,3),'','','','b.');
addplotRF(hsp,x_fit(:,2),x_fit(:,3),'g.');
drawnow
%end
'''
eps = 1e-4
#global maxIter,delta_a_max, delta_b_max,delta_tau_max,a_b_tau_result
def op(psi):
a.init()
#global p, nDim, a, b, x, x_fit
a.nDim = 3
#tau = get_fit_1D_open_manifold_3D_param
tau = get_fit_1D_open_manifold_3D_param.op(psi)
aux = np.zeros((tau.shape[0],5)) #added
nS = a.x.shape[0]
for iter in xrange(1,a.maxIter+1):
string ='iteration ' + str(iter)
print string
'''
#%%%%%%%%%%%%%%%%%%%%%
#% solve for a and b %
#%%%%%%%%%%%%%%%%%%%%%
'''
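        # For fixed tau, each pair (a_j, b_j) minimizes
        #     sum_i (x_ij - a_j*cos(j*pi*tau_i) - b_j)**2,
        # which gives one 2x2 normal-equation system per coordinate j:
        #     [sum_i cos^2   sum_i cos] [a_j]   [sum_i x_ij*cos]
        #     [sum_i cos     nS       ] [b_j] = [sum_i x_ij    ]
        # That is exactly the (A11, A12, A21, A22) system assembled below.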
a_old = a.a
b_old = a.b
j_pi_tau = np.dot(tau,np.pi*np.array([[1,2,3]]))
cos_j_pi_tau = np.cos(j_pi_tau)
A11 = np.sum(cos_j_pi_tau**2, axis=0)
A12 = np.sum(cos_j_pi_tau, axis=0)
A21 = A12
A22 = nS
x_cos_j_pi_tau = a.x*cos_j_pi_tau
b1 = np.sum(x_cos_j_pi_tau, axis=0)
b2 = np.sum(a.x, axis=0)
coeff = np.zeros((2,3))
for qq in xrange(3):
A = np.array([[A11[qq],A12[qq]],[A21[qq], A22]])
b = np.array([b1[qq], b2[qq]])
coeff[:,qq] = np.linalg.solve(A,b)
a.a = coeff[0,:]
a.b = coeff[1,:]
'''
%%%%%%%%%%%%%%%%%%%%%%%%%
#% plot the fitted curve %
%%%%%%%%%%%%%%%%%%%%%%%%%
'''
j_pi_tau = np.dot(np.linspace(0,1,1000).reshape(-1,1),np.array([[1,2,3]]))*np.pi
cos_j_pi_tau = np.cos(j_pi_tau)
tmp = a.a*cos_j_pi_tau
a.x_fit = tmp + a.b
#%plot_fitted_curve(iter)
'''
%%%%%%%%%%%%%%%%%
#% solve for tau %
%%%%%%%%%%%%%%%%%
'''
tau_old = tau
for a.p in xrange(nS):
tau[a.p],beta = solve_d_R_d_tau_p_3D.op() #added
for kk in xrange(beta.shape[0]):
aux[a.p,kk] = beta[kk]
'''
if iter == 0:
data = loadmat('aux0.mat') # (this is for < v7.3
elif iter == 1:
data = loadmat('aux1.mat') # (this is for < v7.3
else:
data = loadmat('aux2.mat') # (this is for < v7.3
imaux = data['aux']
plt.subplot(2, 2, 1)
plt.imshow(aux, cmap=plt.get_cmap('gray'),aspect=0.1)
plt.title('aux')
plt.subplot(2, 2, 2)
plt.imshow(imaux, cmap=plt.get_cmap('gray'), aspect=0.1)
plt.title('imaux')
plt.show()
'''
'''
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
#% calculate the changes in fitting parameters %
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
'''
delta_a = np.fabs(a.a-a_old)/(np.fabs(a.a)+eps)
delta_b = np.fabs(a.b-b_old)/(np.fabs(a.b)+eps)
delta_tau = np.fabs(tau-tau_old)
delta_a = max(delta_a)*100
delta_b = max(delta_b)*100
delta_tau = max(delta_tau)
print ' changes in fitting parameters: \n'
string = ' amplitudes: '+ str(delta_a) + '\n' + \
' offsets: ' + str(delta_b) + ' \n' +\
' values of tau: ' + str(delta_tau) + ' \n'
print string
if (delta_a<a.delta_a_max) and (delta_b < a.delta_b_max) and (delta_tau < a.delta_tau_max):
break
return (a.a,a.b,tau)
| gpl-2.0 |
jmcarp/osf.io | scripts/analytics/addons.py | 21 | 2200 | # -*- coding: utf-8 -*-
import os
import re
import matplotlib.pyplot as plt
from framework.mongo import database
from website import settings
from website.app import init_app
from .utils import plot_dates, oid_to_datetime, mkdirp
log_collection = database['nodelog']
FIG_PATH = os.path.join(settings.ANALYTICS_PATH, 'figs', 'addons')
mkdirp(FIG_PATH)
ADDONS = [
'box',
'dataverse',
'dropbox',
'figshare',
'github',
'googledrive',
'mendeley',
's3',
'zotero',
]
def get_collection_datetimes(collection, _id='_id', query=None):
query = query or {}
return [
oid_to_datetime(record[_id])
        for record in collection.find(query, {_id: True})
]
def analyze_model(model):
dates = get_collection_datetimes(model._storage[0].store)
return {
'dates': dates,
'count': len(dates),
}
def analyze_addon_installs(name):
config = settings.ADDONS_AVAILABLE_DICT[name]
results = {
key: analyze_model(model)
for key, model in config.settings_models.iteritems()
}
return results
def analyze_addon_logs(name):
pattern = re.compile('^{0}'.format(name), re.I)
logs = log_collection.find({'action': {'$regex': pattern}}, {'date': True})
return [
record['date']
for record in logs
]
def analyze_addon(name):
installs = analyze_addon_installs(name)
for model, result in installs.iteritems():
if not result['dates']:
continue
fig = plot_dates(result['dates'])
plt.title('{} configurations: {} ({} total)'.format(name, model, len(result['dates'])))
plt.savefig(os.path.join(FIG_PATH, '{}-installs-{}.png'.format(name, model)))
plt.close()
log_dates = analyze_addon_logs(name)
if not log_dates:
return
fig = plot_dates(log_dates)
plt.title('{} actions ({} total)'.format(name, len(log_dates)))
plt.savefig(os.path.join(FIG_PATH, '{}-actions.png'.format(name)))
plt.close()
def main():
init_app(routes=False)
for addon in ADDONS:
if addon in settings.ADDONS_AVAILABLE_DICT:
analyze_addon(addon)
if __name__ == '__main__':
main()
| apache-2.0 |
wanglei828/apollo | modules/tools/plot_planning/speed_dsteering_data.py | 1 | 3396 | #!/usr/bin/env python
###############################################################################
# Copyright 2019 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import sys
from record_reader import RecordItemReader
import matplotlib.pyplot as plt
from cyber_py.record import RecordReader
from modules.canbus.proto import chassis_pb2
class SpeedDsteeringData:
def __init__(self):
self.last_steering_percentage = None
self.last_speed_mps = None
self.last_timestamp_sec = None
self.speed_data = []
self.d_steering_data = []
def add(self, chassis):
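        # Append one (speed, steering-rate) sample; the steering rate is
        # approximated by a finite difference of steering_percentage over a
        # time gap of more than 0.02 s between chassis messages.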
steering_percentage = chassis.steering_percentage
speed_mps = chassis.speed_mps
timestamp_sec = chassis.header.timestamp_sec
if self.last_timestamp_sec is None:
self.last_steering_percentage = steering_percentage
self.last_speed_mps = speed_mps
self.last_timestamp_sec = timestamp_sec
return
if (timestamp_sec - self.last_timestamp_sec) > 0.02:
d_steering = (steering_percentage - self.last_steering_percentage) \
/ (timestamp_sec - self.last_timestamp_sec)
self.speed_data.append(speed_mps)
self.d_steering_data.append(d_steering)
self.last_steering_percentage = steering_percentage
self.last_speed_mps = speed_mps
self.last_timestamp_sec = timestamp_sec
def get_speed_dsteering(self):
return self.speed_data, self.d_steering_data
if __name__ == "__main__":
import sys
import matplotlib.pyplot as plt
from os import listdir
from os.path import isfile, join
folders = sys.argv[1:]
fig, ax = plt.subplots()
colors = ["g", "b", "r", "m", "y"]
markers = ["o", "o", "o", "o"]
for i in range(len(folders)):
folder = folders[i]
color = colors[i % len(colors)]
marker = markers[i % len(markers)]
fns = [f for f in listdir(folder) if isfile(join(folder, f))]
for fn in fns:
reader = RecordItemReader(folder+"/"+fn)
processor = SpeedDsteeringData()
last_pose_data = None
last_chassis_data = None
topics = ["/apollo/localization/pose", "/apollo/canbus/chassis"]
for data in reader.read(topics):
if "chassis" in data:
last_chassis_data = data["chassis"]
if last_chassis_data is not None:
processor.add(last_chassis_data)
#last_pose_data = None
#last_chassis_data = None
data_x, data_y = processor.get_speed_dsteering()
ax.scatter(data_x, data_y, c=color, marker=marker, alpha=0.2)
plt.show()
| apache-2.0 |
rmcgibbo/scipy | scipy/stats/_discrete_distns.py | 15 | 20781 | #
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
from __future__ import division, print_function, absolute_import
from scipy import special
from scipy.special import entr, gammaln as gamln
from numpy import floor, ceil, log, exp, sqrt, log1p, expm1, tanh, cosh, sinh
import numpy as np
from ._distn_infrastructure import (
rv_discrete, _lazywhere, _ncx2_pdf, _ncx2_cdf, get_distribution_names)
class binom_gen(rv_discrete):
"""A binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `binom` is::
binom.pmf(k) = choose(n, k) * p**k * (1-p)**(n-k)
for ``k`` in ``{0, 1,..., n}``.
`binom` takes ``n`` and ``p`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.binomial(n, p, self._size)
def _argcheck(self, n, p):
self.b = n
return (n >= 0) & (p >= 0) & (p <= 1)
def _logpmf(self, x, n, p):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) + gamln(n-k+1)))
return combiln + special.xlogy(k, p) + special.xlog1py(n-k, -p)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _cdf(self, x, n, p):
k = floor(x)
vals = special.bdtr(k, n, p)
return vals
def _sf(self, x, n, p):
k = floor(x)
return special.bdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.bdtrik(q, n, p))
vals1 = np.maximum(vals - 1, 0)
temp = special.bdtr(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p, moments='mv'):
q = 1.0 - p
mu = n * p
var = n * p * q
g1, g2 = None, None
if 's' in moments:
g1 = (q - p) / sqrt(var)
if 'k' in moments:
g2 = (1.0 - 6*p*q) / var
return mu, var, g1, g2
def _entropy(self, n, p):
k = np.r_[0:n + 1]
vals = self._pmf(k, n, p)
return np.sum(entr(vals), axis=0)
binom = binom_gen(name='binom')
class bernoulli_gen(binom_gen):
"""A Bernoulli discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `bernoulli` is::
bernoulli.pmf(k) = 1-p if k = 0
= p if k = 1
for ``k`` in ``{0, 1}``.
`bernoulli` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return binom_gen._rvs(self, 1, p)
def _argcheck(self, p):
return (p >= 0) & (p <= 1)
def _logpmf(self, x, p):
return binom._logpmf(x, 1, p)
def _pmf(self, x, p):
return binom._pmf(x, 1, p)
def _cdf(self, x, p):
return binom._cdf(x, 1, p)
def _sf(self, x, p):
return binom._sf(x, 1, p)
def _ppf(self, q, p):
return binom._ppf(q, 1, p)
def _stats(self, p):
return binom._stats(1, p)
def _entropy(self, p):
return entr(p) + entr(1-p)
bernoulli = bernoulli_gen(b=1, name='bernoulli')
class nbinom_gen(rv_discrete):
"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `nbinom` is::
nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
for ``k >= 0``.
`nbinom` takes ``n`` and ``p`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, n, p):
return self._random_state.negative_binomial(n, p, self._size)
def _argcheck(self, n, p):
return (n > 0) & (p >= 0) & (p <= 1)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _logpmf(self, x, n, p):
coeff = gamln(n+x) - gamln(x+1) - gamln(n)
return coeff + n*log(p) + special.xlog1py(x, -p)
def _cdf(self, x, n, p):
k = floor(x)
return special.betainc(n, k+1, p)
def _sf_skip(self, x, n, p):
# skip because special.nbdtrc doesn't work for 0<n<1
k = floor(x)
return special.nbdtrc(k, n, p)
def _ppf(self, q, n, p):
vals = ceil(special.nbdtrik(q, n, p))
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, n, p)
return np.where(temp >= q, vals1, vals)
def _stats(self, n, p):
Q = 1.0 / p
P = Q - 1.0
mu = n*P
var = n*P*Q
g1 = (Q+P)/sqrt(n*P*Q)
g2 = (1.0 + 6*P*Q) / (n*P*Q)
return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom')
class geom_gen(rv_discrete):
"""A geometric discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `geom` is::
geom.pmf(k) = (1-p)**(k-1)*p
for ``k >= 1``.
`geom` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
return self._random_state.geometric(p, size=self._size)
def _argcheck(self, p):
return (p <= 1) & (p >= 0)
def _pmf(self, k, p):
return np.power(1-p, k-1) * p
def _logpmf(self, k, p):
return special.xlog1py(k - 1, -p) + log(p)
def _cdf(self, x, p):
k = floor(x)
return -expm1(log1p(-p)*k)
def _sf(self, x, p):
return np.exp(self._logsf(x, p))
def _logsf(self, x, p):
k = floor(x)
return k*log1p(-p)
def _ppf(self, q, p):
vals = ceil(log(1.0-q)/log(1-p))
temp = self._cdf(vals-1, p)
return np.where((temp >= q) & (vals > 0), vals-1, vals)
def _stats(self, p):
mu = 1.0/p
qr = 1.0-p
var = qr / p / p
g1 = (2.0-p) / sqrt(qr)
g2 = np.polyval([1, -6, 6], p)/(1.0-p)
return mu, var, g1, g2
geom = geom_gen(a=1, name='geom', longname="A geometric")
class hypergeom_gen(rv_discrete):
"""A hypergeometric discrete random variable.
The hypergeometric distribution models drawing objects from a bin.
M is the total number of objects, n is total number of Type I objects.
The random variate represents the number of Type I objects in N drawn
without replacement from the total population.
%(before_notes)s
Notes
-----
The probability mass function is defined as::
pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N),
for max(0, N - (M-n)) <= k <= min(n, N)
%(after_notes)s
Examples
--------
>>> from scipy.stats import hypergeom
>>> import matplotlib.pyplot as plt
Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
we want to know the probability of finding a given number of dogs if we
choose at random 12 of the 20 animals, we can initialize a frozen
distribution and plot the probability mass function:
>>> [M, n, N] = [20, 7, 12]
>>> rv = hypergeom(M, n, N)
>>> x = np.arange(0, n+1)
>>> pmf_dogs = rv.pmf(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, pmf_dogs, 'bo')
>>> ax.vlines(x, 0, pmf_dogs, lw=2)
>>> ax.set_xlabel('# of dogs in our group of chosen animals')
>>> ax.set_ylabel('hypergeom PMF')
>>> plt.show()
Instead of using a frozen distribution we can also use `hypergeom`
methods directly. To for example obtain the cumulative distribution
function, use:
>>> prb = hypergeom.cdf(x, M, n, N)
And to generate random numbers:
>>> R = hypergeom.rvs(M, n, N, size=10)
"""
def _rvs(self, M, n, N):
return self._random_state.hypergeometric(n, M-n, N, size=self._size)
def _argcheck(self, M, n, N):
cond = (M > 0) & (n >= 0) & (N >= 0)
cond &= (n <= M) & (N <= M)
self.a = max(N-(M-n), 0)
self.b = min(n, N)
return cond
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
- gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
+ gamln(N+1)
def _pmf(self, k, M, n, N):
# same as the following but numerically more precise
# return comb(good, k) * comb(bad, N-k) / comb(tot, N)
return exp(self._logpmf(k, M, n, N))
def _stats(self, M, n, N):
# tot, good, sample_size = M, n, N
# "wikipedia".replace('N', 'M').replace('n', 'N').replace('K', 'n')
M, n, N = 1.*M, 1.*n, 1.*N
m = M - n
p = n/M
mu = N*p
var = m*n*N*(M - N)*1.0/(M*M*(M-1))
g1 = (m - n)*(M-2*N) / (M-2.0) * sqrt((M-1.0) / (m*n*N*(M-N)))
g2 = M*(M+1) - 6.*N*(M-N) - 6.*n*m
g2 *= (M-1)*M*M
g2 += 6.*n*N*(M-N)*m*(5.*M-6)
g2 /= n * N * (M-N) * m * (M-2.) * (M-3.)
return mu, var, g1, g2
def _entropy(self, M, n, N):
k = np.r_[N - (M - n):min(n, N) + 1]
vals = self.pmf(k, M, n, N)
return np.sum(entr(vals), axis=0)
def _sf(self, k, M, n, N):
"""More precise calculation, 1 - cdf doesn't cut it."""
# This for loop is needed because `k` can be an array. If that's the
# case, the sf() method makes M, n and N arrays of the same shape. We
# therefore unpack all inputs args, so we can do the manual
# integration.
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Manual integration over probability mass function. More accurate
# than integrate.quad.
k2 = np.arange(quant + 1, draw + 1)
res.append(np.sum(self._pmf(k2, tot, good, draw)))
return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom')
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
"""A Logarithmic (Log-Series, Series) discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `logser` is::
logser.pmf(k) = - p**k / (k*log(1-p))
for ``k >= 1``.
`logser` takes ``p`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, p):
# looks wrong for p>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
return self._random_state.logseries(p, size=self._size)
def _argcheck(self, p):
return (p > 0) & (p < 1)
def _pmf(self, k, p):
return -np.power(p, k) * 1.0 / k / log(1 - p)
def _stats(self, p):
r = log(1 - p)
mu = p / (p - 1.0) / r
mu2p = -p / r / (p - 1.0)**2
var = mu2p - mu*mu
mu3p = -p / r * (1.0+p) / (1.0 - p)**3
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / np.power(var, 1.5)
mu4p = -p / r * (
1.0 / (p-1)**2 - 6*p / (p - 1)**3 + 6*p*p / (p-1)**4)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / var**2 - 3.0
return mu, var, g1, g2
logser = logser_gen(a=1, name='logser', longname='A logarithmic')
class poisson_gen(rv_discrete):
"""A Poisson discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `poisson` is::
poisson.pmf(k) = exp(-mu) * mu**k / k!
for ``k >= 0``.
`poisson` takes ``mu`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, mu):
return self._random_state.poisson(mu, self._size)
def _logpmf(self, k, mu):
Pk = k*log(mu)-gamln(k+1) - mu
return Pk
def _pmf(self, k, mu):
return exp(self._logpmf(k, mu))
def _cdf(self, x, mu):
k = floor(x)
return special.pdtr(k, mu)
def _sf(self, x, mu):
k = floor(x)
return special.pdtrc(k, mu)
def _ppf(self, q, mu):
vals = ceil(special.pdtrik(q, mu))
vals1 = np.maximum(vals - 1, 0)
temp = special.pdtr(vals1, mu)
return np.where(temp >= q, vals1, vals)
def _stats(self, mu):
var = mu
tmp = np.asarray(mu)
g1 = sqrt(1.0 / tmp)
g2 = 1.0 / tmp
return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson')
class planck_gen(rv_discrete):
"""A Planck discrete exponential random variable.
%(before_notes)s
Notes
-----
The probability mass function for `planck` is::
planck.pmf(k) = (1-exp(-lambda_))*exp(-lambda_*k)
for ``k*lambda_ >= 0``.
`planck` takes ``lambda_`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _argcheck(self, lambda_):
if (lambda_ > 0):
self.a = 0
self.b = np.inf
return 1
elif (lambda_ < 0):
self.a = -np.inf
self.b = 0
return 1
else:
return 0
def _pmf(self, k, lambda_):
fact = (1-exp(-lambda_))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_):
k = floor(x)
return 1-exp(-lambda_*(k+1))
def _ppf(self, q, lambda_):
vals = ceil(-1.0/lambda_ * log1p(-q)-1)
vals1 = (vals-1).clip(self.a, np.inf)
temp = self._cdf(vals1, lambda_)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_):
mu = 1/(exp(lambda_)-1)
var = exp(-lambda_)/(expm1(-lambda_))**2
g1 = 2*cosh(lambda_/2.0)
g2 = 4+2*cosh(lambda_)
return mu, var, g1, g2
def _entropy(self, lambda_):
l = lambda_
C = (1-exp(-l))
return l*exp(-l)/C - log(C)
planck = planck_gen(name='planck', longname='A discrete exponential ')
class boltzmann_gen(rv_discrete):
"""A Boltzmann (Truncated Discrete Exponential) random variable.
%(before_notes)s
Notes
-----
The probability mass function for `boltzmann` is::
boltzmann.pmf(k) = (1-exp(-lambda_)*exp(-lambda_*k)/(1-exp(-lambda_*N))
for ``k = 0,..., N-1``.
`boltzmann` takes ``lambda_`` and ``N`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, lambda_, N):
fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_, N):
k = floor(x)
return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
def _ppf(self, q, lambda_, N):
qnew = q*(1-exp(-lambda_*N))
vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, lambda_, N)
return np.where(temp >= q, vals1, vals)
def _stats(self, lambda_, N):
z = exp(-lambda_)
zN = exp(-lambda_*N)
mu = z/(1.0-z)-N*zN/(1-zN)
var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
trm = (1-zN)/(1-z)
trm2 = (z*trm**2 - N*N*zN)
g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
g1 = g1 / trm2**(1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',
longname='A truncated discrete exponential ')
class randint_gen(rv_discrete):
"""A uniform discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `randint` is::
randint.pmf(k) = 1./(high - low)
for ``k = low, ..., high - 1``.
`randint` takes ``low`` and ``high`` as shape parameters.
    Note the difference from numpy's ``random_integers``, which
returns integers on a *closed* interval ``[low, high]``.
%(after_notes)s
%(example)s
"""
def _argcheck(self, low, high):
self.a = low
self.b = high - 1
return (high > low)
def _pmf(self, k, low, high):
p = np.ones_like(k) / (high - low)
return np.where((k >= low) & (k < high), p, 0.)
def _cdf(self, x, low, high):
k = floor(x)
return (k - low + 1.) / (high - low)
def _ppf(self, q, low, high):
vals = ceil(q * (high - low) + low) - 1
vals1 = (vals - 1).clip(low, high)
temp = self._cdf(vals1, low, high)
return np.where(temp >= q, vals1, vals)
def _stats(self, low, high):
m2, m1 = np.asarray(high), np.asarray(low)
mu = (m2 + m1 - 1.0) / 2
d = m2 - m1
var = (d*d - 1) / 12.0
g1 = 0.0
g2 = -6.0/5.0 * (d*d + 1.0) / (d*d - 1.0)
return mu, var, g1, g2
def _rvs(self, low, high=None):
"""An array of *size* random integers >= ``low`` and < ``high``.
If ``high`` is ``None``, then range is >=0 and < low
"""
return self._random_state.randint(low, high, self._size)
def _entropy(self, low, high):
return log(high - low)
randint = randint_gen(name='randint', longname='A discrete uniform '
'(random integer)')
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
"""A Zipf discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `zipf` is::
zipf.pmf(k, a) = 1/(zeta(a) * k**a)
for ``k >= 1``.
`zipf` takes ``a`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _rvs(self, a):
return self._random_state.zipf(a, size=self._size)
def _argcheck(self, a):
return a > 1
def _pmf(self, k, a):
Pk = 1.0 / special.zeta(a, 1) / k**a
return Pk
def _munp(self, n, a):
return _lazywhere(
a > n + 1, (a, n),
lambda a, n: special.zeta(a - n, 1) / special.zeta(a, 1),
np.inf)
zipf = zipf_gen(a=1, name='zipf', longname='A Zipf')
class dlaplace_gen(rv_discrete):
"""A Laplacian discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `dlaplace` is::
dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
for ``a > 0``.
`dlaplace` takes ``a`` as shape parameter.
%(after_notes)s
%(example)s
"""
def _pmf(self, k, a):
return tanh(a/2.0) * exp(-a * abs(k))
def _cdf(self, x, a):
k = floor(x)
f = lambda k, a: 1.0 - exp(-a * k) / (exp(a) + 1)
f2 = lambda k, a: exp(a * (k+1)) / (exp(a) + 1)
return _lazywhere(k >= 0, (k, a), f=f, f2=f2)
def _ppf(self, q, a):
const = 1 + exp(a)
vals = ceil(np.where(q < 1.0 / (1 + exp(-a)), log(q*const) / a - 1,
-log((1-q) * const) / a))
vals1 = vals - 1
return np.where(self._cdf(vals1, a) >= q, vals1, vals)
def _stats(self, a):
ea = exp(a)
mu2 = 2.*ea/(ea-1.)**2
mu4 = 2.*ea*(ea**2+10.*ea+1.) / (ea-1.)**4
return 0., mu2, 0., mu4/mu2**2 - 3.
def _entropy(self, a):
return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-np.inf,
name='dlaplace', longname='A discrete Laplacian')
class skellam_gen(rv_discrete):
"""A Skellam discrete random variable.
%(before_notes)s
Notes
-----
Probability distribution of the difference of two correlated or
uncorrelated Poisson random variables.
Let k1 and k2 be two Poisson-distributed r.v. with expected values
lam1 and lam2. Then, ``k1 - k2`` follows a Skellam distribution with
parameters ``mu1 = lam1 - rho*sqrt(lam1*lam2)`` and
``mu2 = lam2 - rho*sqrt(lam1*lam2)``, where rho is the correlation
coefficient between k1 and k2. If the two Poisson-distributed r.v.
are independent then ``rho = 0``.
Parameters mu1 and mu2 must be strictly positive.
For details see: http://en.wikipedia.org/wiki/Skellam_distribution
`skellam` takes ``mu1`` and ``mu2`` as shape parameters.
%(after_notes)s
%(example)s
"""
def _rvs(self, mu1, mu2):
n = self._size
return (self._random_state.poisson(mu1, n) -
self._random_state.poisson(mu2, n))
def _pmf(self, x, mu1, mu2):
px = np.where(x < 0,
_ncx2_pdf(2*mu2, 2*(1-x), 2*mu1)*2,
_ncx2_pdf(2*mu1, 2*(1+x), 2*mu2)*2)
# ncx2.pdf() returns nan's for extremely low probabilities
return px
def _cdf(self, x, mu1, mu2):
x = floor(x)
px = np.where(x < 0,
_ncx2_cdf(2*mu2, -2*x, 2*mu1),
1-_ncx2_cdf(2*mu1, 2*(x+1), 2*mu2))
return px
def _stats(self, mu1, mu2):
mean = mu1 - mu2
var = mu1 + mu2
g1 = mean / sqrt((var)**3)
g2 = 1 / var
return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam')
# Collect names of classes and objects in this module.
pairs = list(globals().items())
_distn_names, _distn_gen_names = get_distribution_names(pairs, rv_discrete)
__all__ = _distn_names + _distn_gen_names
| bsd-3-clause |
Arn-O/kadenze-deep-creative-apps | session-5/libs/inception.py | 13 | 4890 | """
Creative Applications of Deep Learning w/ Tensorflow.
Kadenze, Inc.
Copyright Parag K. Mital, June 2016.
"""
import os
import numpy as np
from tensorflow.python.platform import gfile
import tensorflow as tf
import matplotlib.pyplot as plt
from skimage.transform import resize as imresize
from .utils import download_and_extract_tar, download_and_extract_zip
def inception_download(data_dir='inception', version='v5'):
"""Download a pretrained inception network.
Parameters
----------
data_dir : str, optional
Location of the pretrained inception network download.
version : str, optional
Version of the model: ['v3'] or 'v5'.
"""
if version == 'v3':
download_and_extract_tar(
'https://s3.amazonaws.com/cadl/models/inception-2015-12-05.tgz',
data_dir)
return (os.path.join(data_dir, 'classify_image_graph_def.pb'),
os.path.join(data_dir, 'imagenet_synset_to_human_label_map.txt'))
else:
download_and_extract_zip(
'https://s3.amazonaws.com/cadl/models/inception5h.zip', data_dir)
return (os.path.join(data_dir, 'tensorflow_inception_graph.pb'),
os.path.join(data_dir, 'imagenet_comp_graph_label_strings.txt'))
def get_inception_model(data_dir='inception', version='v5'):
"""Get a pretrained inception network.
Parameters
----------
data_dir : str, optional
Location of the pretrained inception network download.
version : str, optional
Version of the model: ['v3'] or 'v5'.
Returns
-------
net : dict
{'graph_def': graph_def, 'labels': synsets}
where the graph_def is a tf.GraphDef and the synsets
map an integer label from 0-1000 to a list of names
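    Examples
    --------
    A minimal sketch, mirroring ``test_inception`` below::
        net = get_inception_model()
        tf.import_graph_def(net['graph_def'], name='inception')
        label_id, label_name = net['labels'][0]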
"""
# Download the trained net
model, labels = inception_download(data_dir, version)
# Parse the ids and synsets
txt = open(labels).readlines()
synsets = [(key, val.strip()) for key, val in enumerate(txt)]
# Load the saved graph
with gfile.GFile(model, 'rb') as f:
graph_def = tf.GraphDef()
try:
graph_def.ParseFromString(f.read())
except:
            print('try adding PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python ' +
                  'to the environment, e.g.:\n' +
'PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python ipython\n' +
'See here for info: ' +
'https://github.com/tensorflow/tensorflow/issues/582')
return {
'graph_def': graph_def,
'labels': synsets,
'preprocess': preprocess,
'deprocess': deprocess
}
def preprocess(img, crop=True, resize=True, dsize=(299, 299)):
if img.dtype != np.uint8:
img *= 255.0
if crop:
crop = np.min(img.shape[:2])
r = (img.shape[0] - crop) // 2
c = (img.shape[1] - crop) // 2
cropped = img[r: r + crop, c: c + crop]
else:
cropped = img
if resize:
rsz = imresize(cropped, dsize, preserve_range=True)
else:
rsz = cropped
if rsz.ndim == 2:
rsz = rsz[..., np.newaxis]
rsz = rsz.astype(np.float32)
# subtract imagenet mean
return (rsz - 117)
def deprocess(img):
return np.clip(img + 117, 0, 255).astype(np.uint8)
def test_inception():
"""Loads the inception network and applies it to a test image.
"""
with tf.Session() as sess:
net = get_inception_model()
tf.import_graph_def(net['graph_def'], name='inception')
g = tf.get_default_graph()
names = [op.name for op in g.get_operations()]
x = g.get_tensor_by_name(names[0] + ':0')
softmax = g.get_tensor_by_name(names[-3] + ':0')
from skimage import data
img = preprocess(data.coffee())[np.newaxis]
res = np.squeeze(softmax.eval(feed_dict={x: img}))
print([(res[idx], net['labels'][idx])
for idx in res.argsort()[-5:][::-1]])
"""Let's visualize the network's gradient activation
when backpropagated to the original input image. This
is effectively telling us which pixels contribute to the
predicted class or given neuron"""
pools = [name for name in names if 'pool' in name.split('/')[-1]]
fig, axs = plt.subplots(1, len(pools))
for pool_i, poolname in enumerate(pools):
pool = g.get_tensor_by_name(poolname + ':0')
pool.get_shape()
neuron = tf.reduce_max(pool, 1)
saliency = tf.gradients(neuron, x)
neuron_idx = tf.arg_max(pool, 1)
this_res = sess.run([saliency[0], neuron_idx],
feed_dict={x: img})
grad = this_res[0][0] / np.max(np.abs(this_res[0]))
axs[pool_i].imshow((grad * 128 + 128).astype(np.uint8))
axs[pool_i].set_title(poolname)
| apache-2.0 |
DailyActie/Surrogate-Model | 01-codes/deap-master/examples/coev/coop_evol.py | 1 | 6195 | # This file is part of DEAP.
#
# DEAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# DEAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with DEAP. If not, see <http://www.gnu.org/licenses/>.
"""This example contains the evolving test from *Potter, M. and De Jong, K.,
2001, Cooperative Coevolution: An Architecture for Evolving Co-adapted
Subcomponents.* section 4.2.4. The number of species is evolved by adding and
removing species as stagnation occurs.
"""
import random
try:
import matplotlib.pyplot as plt
plt.figure()
except:
plt = False
import numpy
from deap import algorithms
from deap import tools
import coop_base
IND_SIZE = coop_base.IND_SIZE
SPECIES_SIZE = coop_base.SPECIES_SIZE
NUM_SPECIES = 1
TARGET_SIZE = 30
IMPROVMENT_TRESHOLD = 0.5
IMPROVMENT_LENGTH = 5
EXTINCTION_TRESHOLD = 5.0
noise = "*##*###*###*****##*##****#*##*###*#****##******##*#**#*#**######"
schematas = ("1##1###1###11111##1##1111#1##1###1#1111##111111##1#11#1#11######",
"1##1###1###11111##1##1000#0##0###0#0000##000000##0#00#0#00######",
"0##0###0###00000##0##0000#0##0###0#0000##001111##1#11#1#11######")
toolbox = coop_base.toolbox
toolbox.register("evaluateContribution", coop_base.matchSetContribution)
def main(extended=True, verbose=True):
target_set = []
species = []
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", numpy.mean)
stats.register("std", numpy.std)
stats.register("min", numpy.min)
stats.register("max", numpy.max)
logbook = tools.Logbook()
logbook.header = "gen", "species", "evals", "std", "min", "avg", "max"
ngen = 300
g = 0
for i in range(len(schematas)):
size = int(TARGET_SIZE / len(schematas))
target_set.extend(toolbox.target_set(schematas[i], size))
species = [toolbox.species() for _ in range(NUM_SPECIES)]
species_index = list(range(NUM_SPECIES))
last_index_added = species_index[-1]
# Init with random a representative for each species
representatives = [random.choice(species[i]) for i in range(NUM_SPECIES)]
best_fitness_history = [None] * IMPROVMENT_LENGTH
if plt and extended:
contribs = [[]]
stag_gen = []
collab = []
while g < ngen:
# Initialize a container for the next generation representatives
next_repr = [None] * len(species)
for (i, s), j in zip(enumerate(species), species_index):
# Vary the species individuals
s = algorithms.varAnd(s, toolbox, 0.6, 1.0)
# Get the representatives excluding the current species
r = representatives[:i] + representatives[i + 1:]
for ind in s:
# Evaluate and set the individual fitness
ind.fitness.values = toolbox.evaluate([ind] + r, target_set)
record = stats.compile(s)
logbook.record(gen=g, species=j, evals=len(s), **record)
if verbose:
print(logbook.stream)
# Select the individuals
species[i] = toolbox.select(s, len(s)) # Tournament selection
next_repr[i] = toolbox.get_best(s)[0] # Best selection
if plt and extended:
# Book keeping of the collaborative fitness
collab.append(next_repr[i].fitness.values[0])
g += 1
representatives = next_repr
# Keep representatives fitness for stagnation detection
best_fitness_history.pop(0)
best_fitness_history.append(representatives[0].fitness.values[0])
try:
diff = best_fitness_history[-1] - best_fitness_history[0]
except TypeError:
diff = float("inf")
if plt and extended:
for (i, rep), j in zip(enumerate(representatives), species_index):
contribs[j].append((toolbox.evaluateContribution(representatives,
target_set, i)[0], g - 1))
if diff < IMPROVMENT_TRESHOLD:
if len(species) > 1:
contributions = []
for i in range(len(species)):
contributions.append(toolbox.evaluateContribution(representatives, target_set, i)[0])
for i in reversed(range(len(species))):
if contributions[i] < EXTINCTION_TRESHOLD:
species.pop(i)
species_index.pop(i)
representatives.pop(i)
last_index_added += 1
best_fitness_history = [None] * IMPROVMENT_LENGTH
species.append(toolbox.species())
species_index.append(last_index_added)
representatives.append(random.choice(species[-1]))
if extended and plt:
stag_gen.append(g - 1)
contribs.append([])
if extended:
for r in representatives:
# print final representatives without noise
print("".join(str(x) for x, y in zip(r, noise) if y == "*"))
    if extended and plt: # Plotting of the evolution
line1, = plt.plot(collab, "--", color="k")
for con in contribs:
try:
con, g = zip(*con)
line2, = plt.plot(g, con, "-", color="k")
except ValueError:
pass
axis = plt.axis("tight")
for s in stag_gen:
plt.plot([s, s], [0, axis[-1]], "--", color="k")
plt.legend((line1, line2), ("Collaboration", "Contribution"), loc="center right")
plt.xlabel("Generations")
plt.ylabel("Fitness")
plt.show()
if __name__ == "__main__":
main()
| mit |
karstenw/nodebox-pyobjc | examples/Extended Application/sklearn/examples/calibration/plot_calibration_multiclass.py | 1 | 7780 | """
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
"""
print(__doc__)
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
# nodebox section
if __name__ == '__builtin__':
    # we're in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
np.random.seed(0)
# Generate data
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
# Plot changes in predicted probabilities via arrows
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
sig_clf_probs[i, 0] - clf_probs[i, 0],
sig_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
xy=(.5, .0), xytext=(.5, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
xy=(.0, .5), xytext=(.1, .5), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
xy=(.5, .5), xytext=(.6, .6), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
xy=(0, 0), xytext=(.1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
xy=(1, 0), xytext=(1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
xy=(0, 1), xytext=(.1, 1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid(False)
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
% score)
print(" * classifier trained on 600 datapoints and calibrated on "
"200 datapoint: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
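# (comment added) apply each per-class sigmoid calibrator to its own column of
# the simplex grid, then renormalize each row so the calibrated probabilities
# sum to one again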
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
prediction = np.vstack([calibrator.predict(this_p)
for calibrator, this_p in
zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
# Plot modifications of calibrator
for i in range(prediction.shape[0]):
plt.arrow(p[i, 0], p[i, 1],
prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid(False)
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
# plt.show()
pltshow(plt)
| mit |
alfonsokim/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/quiver.py | 69 | 36790 | """
Support for plotting vector fields.
Presently this contains Quiver and Barb. Quiver plots an arrow in the
direction of the vector, with the size of the arrow related to the
magnitude of the vector.
Barbs are like quiver in that they point along a vector, but
the magnitude of the vector is given schematically by the presence of barbs
or flags on the barb.
This will also become a home for things such as standard
deviation ellipses, which can and will be derived very easily from
the Quiver code.
"""
import numpy as np
from numpy import ma
import matplotlib.collections as collections
import matplotlib.transforms as transforms
import matplotlib.text as mtext
import matplotlib.artist as martist
import matplotlib.font_manager as font_manager
from matplotlib.cbook import delete_masked_points
from matplotlib.patches import CirclePolygon
import math
_quiver_doc = """
Plot a 2-D field of arrows.
call signatures::
quiver(U, V, **kw)
quiver(U, V, C, **kw)
quiver(X, Y, U, V, **kw)
quiver(X, Y, U, V, C, **kw)
Arguments:
*X*, *Y*:
The x and y coordinates of the arrow locations (default is tail of
arrow; see *pivot* kwarg)
*U*, *V*:
give the *x* and *y* components of the arrow vectors
*C*:
an optional array used to map colors to the arrows
All arguments may be 1-D or 2-D arrays or sequences. If *X* and *Y*
are absent, they will be generated as a uniform grid. If *U* and *V*
are 2-D arrays but *X* and *Y* are 1-D, and if len(*X*) and len(*Y*)
match the column and row dimensions of *U*, then *X* and *Y* will be
expanded with :func:`numpy.meshgrid`.
*U*, *V*, *C* may be masked arrays, but masked *X*, *Y* are not
supported at present.
Keyword arguments:
*units*: ['width' | 'height' | 'dots' | 'inches' | 'x' | 'y' ]
arrow units; the arrow dimensions *except for length* are in
multiples of this unit.
* 'width' or 'height': the width or height of the axes
* 'dots' or 'inches': pixels or inches, based on the figure dpi
* 'x' or 'y': *X* or *Y* data units
The arrows scale differently depending on the units. For
'x' or 'y', the arrows get larger as one zooms in; for other
units, the arrow size is independent of the zoom state. For
    'width' or 'height', the arrow size increases with the width and
    height of the axes, respectively, when the window is resized;
for 'dots' or 'inches', resizing does not change the arrows.
*angles*: ['uv' | 'xy' | array]
With the default 'uv', the arrow aspect ratio is 1, so that
if *U*==*V* the angle of the arrow on the plot is 45 degrees
CCW from the *x*-axis.
With 'xy', the arrow points from (x,y) to (x+u, y+v).
Alternatively, arbitrary angles may be specified as an array
of values in degrees, CCW from the *x*-axis.
*scale*: [ None | float ]
data units per arrow unit, e.g. m/s per plot width; a smaller
scale parameter makes the arrow longer. If *None*, a simple
autoscaling algorithm is used, based on the average vector length
and the number of vectors.
*width*:
shaft width in arrow units; default depends on choice of units,
above, and number of vectors; a typical starting value is about
0.005 times the width of the plot.
*headwidth*: scalar
head width as multiple of shaft width, default is 3
*headlength*: scalar
head length as multiple of shaft width, default is 5
*headaxislength*: scalar
head length at shaft intersection, default is 4.5
*minshaft*: scalar
length below which arrow scales, in units of head length. Do not
set this to less than 1, or small arrows will look terrible!
Default is 1
*minlength*: scalar
minimum length as a multiple of shaft width; if an arrow length
is less than this, plot a dot (hexagon) of this diameter instead.
Default is 1.
*pivot*: [ 'tail' | 'middle' | 'tip' ]
The part of the arrow that is at the grid point; the arrow rotates
about this point, hence the name *pivot*.
*color*: [ color | color sequence ]
This is a synonym for the
:class:`~matplotlib.collections.PolyCollection` facecolor kwarg.
If *C* has been set, *color* has no effect.
The defaults give a slightly swept-back arrow; to make the head a
triangle, make *headaxislength* the same as *headlength*. To make the
arrow more pointed, reduce *headwidth* or increase *headlength* and
*headaxislength*. To make the head smaller relative to the shaft,
scale down all the head parameters. You will probably do best to leave
minshaft alone.
linewidths and edgecolors can be used to customize the arrow
outlines. Additional :class:`~matplotlib.collections.PolyCollection`
keyword arguments:
%(PolyCollection)s
""" % martist.kwdocd
_quiverkey_doc = """
Add a key to a quiver plot.
call signature::
quiverkey(Q, X, Y, U, label, **kw)
Arguments:
*Q*:
The Quiver instance returned by a call to quiver.
*X*, *Y*:
The location of the key; additional explanation follows.
*U*:
The length of the key
*label*:
a string with the length and units of the key
Keyword arguments:
*coordinates* = [ 'axes' | 'figure' | 'data' | 'inches' ]
Coordinate system and units for *X*, *Y*: 'axes' and 'figure' are
normalized coordinate systems with 0,0 in the lower left and 1,1
in the upper right; 'data' are the axes data coordinates (used for
the locations of the vectors in the quiver plot itself); 'inches'
is position in the figure in inches, with 0,0 at the lower left
corner.
*color*:
overrides face and edge colors from *Q*.
*labelpos* = [ 'N' | 'S' | 'E' | 'W' ]
Position the label above, below, to the right, to the left of the
arrow, respectively.
*labelsep*:
Distance in inches between the arrow and the label. Default is
0.1
*labelcolor*:
defaults to default :class:`~matplotlib.text.Text` color.
*fontproperties*:
A dictionary with keyword arguments accepted by the
:class:`~matplotlib.font_manager.FontProperties` initializer:
*family*, *style*, *variant*, *size*, *weight*
Any additional keyword arguments are used to override vector
properties taken from *Q*.
The positioning of the key depends on *X*, *Y*, *coordinates*, and
*labelpos*. If *labelpos* is 'N' or 'S', *X*, *Y* give the position
of the middle of the key arrow. If *labelpos* is 'E', *X*, *Y*
positions the head, and if *labelpos* is 'W', *X*, *Y* positions the
tail; in either of these two cases, *X*, *Y* is somewhere in the
middle of the arrow+label key object.
"""
class QuiverKey(martist.Artist):
""" Labelled arrow for use as a quiver plot scale key.
"""
halign = {'N': 'center', 'S': 'center', 'E': 'left', 'W': 'right'}
valign = {'N': 'bottom', 'S': 'top', 'E': 'center', 'W': 'center'}
pivot = {'N': 'mid', 'S': 'mid', 'E': 'tip', 'W': 'tail'}
def __init__(self, Q, X, Y, U, label, **kw):
martist.Artist.__init__(self)
self.Q = Q
self.X = X
self.Y = Y
self.U = U
self.coord = kw.pop('coordinates', 'axes')
self.color = kw.pop('color', None)
self.label = label
self._labelsep_inches = kw.pop('labelsep', 0.1)
self.labelsep = (self._labelsep_inches * Q.ax.figure.dpi)
def on_dpi_change(fig):
self.labelsep = (self._labelsep_inches * fig.dpi)
self._initialized = False # simple brute force update
# works because _init is called
# at the start of draw.
Q.ax.figure.callbacks.connect('dpi_changed', on_dpi_change)
self.labelpos = kw.pop('labelpos', 'N')
self.labelcolor = kw.pop('labelcolor', None)
self.fontproperties = kw.pop('fontproperties', dict())
self.kw = kw
_fp = self.fontproperties
#boxprops = dict(facecolor='red')
self.text = mtext.Text(text=label, # bbox=boxprops,
horizontalalignment=self.halign[self.labelpos],
verticalalignment=self.valign[self.labelpos],
fontproperties=font_manager.FontProperties(**_fp))
if self.labelcolor is not None:
self.text.set_color(self.labelcolor)
self._initialized = False
self.zorder = Q.zorder + 0.1
__init__.__doc__ = _quiverkey_doc
def _init(self):
if True: ##not self._initialized:
self._set_transform()
_pivot = self.Q.pivot
self.Q.pivot = self.pivot[self.labelpos]
self.verts = self.Q._make_verts(np.array([self.U]),
np.zeros((1,)))
self.Q.pivot = _pivot
kw = self.Q.polykw
kw.update(self.kw)
self.vector = collections.PolyCollection(self.verts,
offsets=[(self.X,self.Y)],
transOffset=self.get_transform(),
**kw)
if self.color is not None:
self.vector.set_color(self.color)
self.vector.set_transform(self.Q.get_transform())
self._initialized = True
def _text_x(self, x):
if self.labelpos == 'E':
return x + self.labelsep
elif self.labelpos == 'W':
return x - self.labelsep
else:
return x
def _text_y(self, y):
if self.labelpos == 'N':
return y + self.labelsep
elif self.labelpos == 'S':
return y - self.labelsep
else:
return y
def draw(self, renderer):
self._init()
self.vector.draw(renderer)
x, y = self.get_transform().transform_point((self.X, self.Y))
self.text.set_x(self._text_x(x))
self.text.set_y(self._text_y(y))
self.text.draw(renderer)
def _set_transform(self):
if self.coord == 'data':
self.set_transform(self.Q.ax.transData)
elif self.coord == 'axes':
self.set_transform(self.Q.ax.transAxes)
elif self.coord == 'figure':
self.set_transform(self.Q.ax.figure.transFigure)
elif self.coord == 'inches':
self.set_transform(self.Q.ax.figure.dpi_scale_trans)
else:
raise ValueError('unrecognized coordinates')
def set_figure(self, fig):
martist.Artist.set_figure(self, fig)
self.text.set_figure(fig)
def contains(self, mouseevent):
# Maybe the dictionary should allow one to
# distinguish between a text hit and a vector hit.
if (self.text.contains(mouseevent)[0]
or self.vector.contains(mouseevent)[0]):
return True, {}
return False, {}
quiverkey_doc = _quiverkey_doc
class Quiver(collections.PolyCollection):
"""
Specialized PolyCollection for arrows.
The only API method is set_UVC(), which can be used
to change the size, orientation, and color of the
arrows; their locations are fixed when the class is
instantiated. Possibly this method will be useful
in animations.
Much of the work in this class is done in the draw()
method so that as much information as possible is available
about the plot. In subsequent draw() calls, recalculation
is limited to things that might have changed, so there
should be no performance penalty from putting the calculations
in the draw() method.
"""
def __init__(self, ax, *args, **kw):
self.ax = ax
X, Y, U, V, C = self._parse_args(*args)
self.X = X
self.Y = Y
self.XY = np.hstack((X[:,np.newaxis], Y[:,np.newaxis]))
self.N = len(X)
self.scale = kw.pop('scale', None)
self.headwidth = kw.pop('headwidth', 3)
self.headlength = float(kw.pop('headlength', 5))
self.headaxislength = kw.pop('headaxislength', 4.5)
self.minshaft = kw.pop('minshaft', 1)
self.minlength = kw.pop('minlength', 1)
self.units = kw.pop('units', 'width')
self.angles = kw.pop('angles', 'uv')
self.width = kw.pop('width', None)
self.color = kw.pop('color', 'k')
self.pivot = kw.pop('pivot', 'tail')
kw.setdefault('facecolors', self.color)
kw.setdefault('linewidths', (0,))
collections.PolyCollection.__init__(self, [], offsets=self.XY,
transOffset=ax.transData,
closed=False,
**kw)
self.polykw = kw
self.set_UVC(U, V, C)
self._initialized = False
self.keyvec = None
self.keytext = None
def on_dpi_change(fig):
self._new_UV = True # vertices depend on width, span
# which in turn depend on dpi
self._initialized = False # simple brute force update
# works because _init is called
# at the start of draw.
self.ax.figure.callbacks.connect('dpi_changed', on_dpi_change)
__init__.__doc__ = """
The constructor takes one required argument, an Axes
instance, followed by the args and kwargs described
by the following pylab interface documentation:
%s""" % _quiver_doc
def _parse_args(self, *args):
X, Y, U, V, C = [None]*5
args = list(args)
if len(args) == 3 or len(args) == 5:
C = ma.asarray(args.pop(-1)).ravel()
V = ma.asarray(args.pop(-1))
U = ma.asarray(args.pop(-1))
nn = np.shape(U)
nc = nn[0]
nr = 1
if len(nn) > 1:
nr = nn[1]
if len(args) == 2: # remaining after removing U,V,C
X, Y = [np.array(a).ravel() for a in args]
if len(X) == nc and len(Y) == nr:
X, Y = [a.ravel() for a in np.meshgrid(X, Y)]
else:
indexgrid = np.meshgrid(np.arange(nc), np.arange(nr))
X, Y = [np.ravel(a) for a in indexgrid]
return X, Y, U, V, C
def _init(self):
"""initialization delayed until first draw;
allow time for axes setup.
"""
# It seems that there are not enough event notifications
# available to have this work on an as-needed basis at present.
if True: ##not self._initialized:
trans = self._set_transform()
ax = self.ax
sx, sy = trans.inverted().transform_point(
(ax.bbox.width, ax.bbox.height))
self.span = sx
sn = max(8, min(25, math.sqrt(self.N)))
if self.width is None:
self.width = 0.06 * self.span / sn
def draw(self, renderer):
self._init()
if self._new_UV or self.angles == 'xy':
verts = self._make_verts(self.U, self.V)
self.set_verts(verts, closed=False)
self._new_UV = False
collections.PolyCollection.draw(self, renderer)
def set_UVC(self, U, V, C=None):
self.U = U.ravel()
self.V = V.ravel()
if C is not None:
self.set_array(C.ravel())
self._new_UV = True
def _set_transform(self):
ax = self.ax
if self.units in ('x', 'y'):
if self.units == 'x':
dx0 = ax.viewLim.width
dx1 = ax.bbox.width
else:
dx0 = ax.viewLim.height
dx1 = ax.bbox.height
dx = dx1/dx0
else:
if self.units == 'width':
dx = ax.bbox.width
elif self.units == 'height':
dx = ax.bbox.height
elif self.units == 'dots':
dx = 1.0
elif self.units == 'inches':
dx = ax.figure.dpi
else:
raise ValueError('unrecognized units')
trans = transforms.Affine2D().scale(dx)
self.set_transform(trans)
return trans
def _angles(self, U, V, eps=0.001):
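        # (comment added) Used when angles='xy': push each point and a copy
        # nudged by eps*(u, v) through the data transform, then take the angle
        # of the resulting display-space displacement, so arrows point from
        # (x, y) toward (x+u, y+v) even on scaled axes.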
xy = self.ax.transData.transform(self.XY)
uv = ma.hstack((U[:,np.newaxis], V[:,np.newaxis])).filled(0)
xyp = self.ax.transData.transform(self.XY + eps * uv)
dxy = xyp - xy
ang = ma.arctan2(dxy[:,1], dxy[:,0])
return ang
def _make_verts(self, U, V):
uv = ma.asarray(U+V*1j)
a = ma.absolute(uv)
if self.scale is None:
sn = max(10, math.sqrt(self.N))
scale = 1.8 * a.mean() * sn / self.span # crude auto-scaling
self.scale = scale
length = a/(self.scale*self.width)
X, Y = self._h_arrows(length)
if self.angles == 'xy':
theta = self._angles(U, V).filled(0)[:,np.newaxis]
elif self.angles == 'uv':
theta = np.angle(ma.asarray(uv[..., np.newaxis]).filled(0))
else:
theta = ma.asarray(self.angles*np.pi/180.0).filled(0)
xy = (X+Y*1j) * np.exp(1j*theta)*self.width
xy = xy[:,:,np.newaxis]
XY = ma.concatenate((xy.real, xy.imag), axis=2)
return XY
def _h_arrows(self, length):
""" length is in arrow width units """
# It might be possible to streamline the code
# and speed it up a bit by using complex (x,y)
# instead of separate arrays; but any gain would be slight.
minsh = self.minshaft * self.headlength
N = len(length)
length = length.reshape(N, 1)
# x, y: normal horizontal arrow
x = np.array([0, -self.headaxislength,
-self.headlength, 0], np.float64)
x = x + np.array([0,1,1,1]) * length
y = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
y = np.repeat(y[np.newaxis,:], N, axis=0)
# x0, y0: arrow without shaft, for short vectors
x0 = np.array([0, minsh-self.headaxislength,
minsh-self.headlength, minsh], np.float64)
y0 = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
ii = [0,1,2,3,2,1,0]
X = x.take(ii, 1)
Y = y.take(ii, 1)
Y[:, 3:] *= -1
X0 = x0.take(ii)
Y0 = y0.take(ii)
Y0[3:] *= -1
shrink = length/minsh
X0 = shrink * X0[np.newaxis,:]
Y0 = shrink * Y0[np.newaxis,:]
short = np.repeat(length < minsh, 7, axis=1)
#print 'short', length < minsh
# Now select X0, Y0 if short, otherwise X, Y
X = ma.where(short, X0, X)
Y = ma.where(short, Y0, Y)
if self.pivot[:3] == 'mid':
X -= 0.5 * X[:,3, np.newaxis]
elif self.pivot[:3] == 'tip':
X = X - X[:,3, np.newaxis] #numpy bug? using -= does not
# work here unless we multiply
# by a float first, as with 'mid'.
tooshort = length < self.minlength
if tooshort.any():
# Use a heptagonal dot:
th = np.arange(0,7,1, np.float64) * (np.pi/3.0)
x1 = np.cos(th) * self.minlength * 0.5
y1 = np.sin(th) * self.minlength * 0.5
X1 = np.repeat(x1[np.newaxis, :], N, axis=0)
Y1 = np.repeat(y1[np.newaxis, :], N, axis=0)
tooshort = ma.repeat(tooshort, 7, 1)
X = ma.where(tooshort, X1, X)
Y = ma.where(tooshort, Y1, Y)
return X, Y
quiver_doc = _quiver_doc
_barbs_doc = """
Plot a 2-D field of barbs.
call signatures::
barb(U, V, **kw)
barb(U, V, C, **kw)
barb(X, Y, U, V, **kw)
barb(X, Y, U, V, C, **kw)
Arguments:
*X*, *Y*:
The x and y coordinates of the barb locations
(default is head of barb; see *pivot* kwarg)
*U*, *V*:
give the *x* and *y* components of the barb shaft
*C*:
an optional array used to map colors to the barbs
All arguments may be 1-D or 2-D arrays or sequences. If *X* and *Y*
are absent, they will be generated as a uniform grid. If *U* and *V*
are 2-D arrays but *X* and *Y* are 1-D, and if len(*X*) and len(*Y*)
match the column and row dimensions of *U*, then *X* and *Y* will be
expanded with :func:`numpy.meshgrid`.
*U*, *V*, *C* may be masked arrays, but masked *X*, *Y* are not
supported at present.
Keyword arguments:
*length*:
Length of the barb in points; the other parts of the barb
are scaled against this.
Default is 9
*pivot*: [ 'tip' | 'middle' ]
The part of the arrow that is at the grid point; the arrow rotates
about this point, hence the name *pivot*. Default is 'tip'
*barbcolor*: [ color | color sequence ]
    Specifies the color of all parts of the barb except any flags. This
    parameter is analogous to the *edgecolor* parameter for polygons,
which can be used instead. However this parameter will override
facecolor.
*flagcolor*: [ color | color sequence ]
Specifies the color of any flags on the barb. This parameter is
    analogous to the *facecolor* parameter for polygons, which can be
used instead. However this parameter will override facecolor. If
this is not set (and *C* has not either) then *flagcolor* will be
set to match *barbcolor* so that the barb has a uniform color. If
*C* has been set, *flagcolor* has no effect.
*sizes*:
A dictionary of coefficients specifying the ratio of a given
feature to the length of the barb. Only those values one wishes to
override need to be included. These features include:
- 'spacing' - space between features (flags, full/half barbs)
- 'height' - height (distance from shaft to top) of a flag or
full barb
- 'width' - width of a flag, twice the width of a full barb
- 'emptybarb' - radius of the circle used for low magnitudes
*fill_empty*:
A flag on whether the empty barbs (circles) that are drawn should
be filled with the flag color. If they are not filled, they will
be drawn such that no color is applied to the center. Default is
False
*rounding*:
A flag to indicate whether the vector magnitude should be rounded
when allocating barb components. If True, the magnitude is
rounded to the nearest multiple of the half-barb increment. If
False, the magnitude is simply truncated to the next lowest
multiple. Default is True
*barb_increments*:
A dictionary of increments specifying values to associate with
different parts of the barb. Only those values one wishes to
override need to be included.
- 'half' - half barbs (Default is 5)
- 'full' - full barbs (Default is 10)
- 'flag' - flags (default is 50)
*flip_barb*:
Either a single boolean flag or an array of booleans. Single
boolean indicates whether the lines and flags should point
opposite to normal for all barbs. An array (which should be the
same size as the other data arrays) indicates whether to flip for
each individual barb. Normal behavior is for the barbs and lines
to point right (comes from wind barbs having these features point
towards low pressure in the Northern Hemisphere.) Default is
False
Barbs are traditionally used in meteorology as a way to plot the speed
and direction of wind observations, but can technically be used to
plot any two dimensional vector quantity. As opposed to arrows, which
give vector magnitude by the length of the arrow, the barbs give more
quantitative information about the vector magnitude by putting slanted
lines or a triangle for various increments in magnitude, as shown
schematically below::
: /\ \\
: / \ \\
: / \ \ \\
: / \ \ \\
: ------------------------------
.. note the double \\ at the end of each line to make the figure
.. render correctly
The largest increment is given by a triangle (or "flag"). After those
come full lines (barbs). The smallest increment is a half line. There
is only, of course, ever at most 1 half line. If the magnitude is
small and only needs a single half-line and no full lines or
triangles, the half-line is offset from the end of the barb so that it
can be easily distinguished from barbs with a single full line. The
magnitude for the barb shown above would nominally be 65, using the
standard increments of 50, 10, and 5.
linewidths and edgecolors can be used to customize the barb.
Additional :class:`~matplotlib.collections.PolyCollection` keyword
arguments:
%(PolyCollection)s
""" % martist.kwdocd
class Barbs(collections.PolyCollection):
'''
Specialized PolyCollection for barbs.
The only API method is :meth:`set_UVC`, which can be used to
change the size, orientation, and color of the arrows. Locations
are changed using the :meth:`set_offsets` collection method.
Possibly this method will be useful in animations.
There is one internal function :meth:`_find_tails` which finds
exactly what should be put on the barb given the vector magnitude.
From there :meth:`_make_barbs` is used to find the vertices of the
polygon to represent the barb based on this information.
'''
#This may be an abuse of polygons here to render what is essentially maybe
#1 triangle and a series of lines. It works fine as far as I can tell
#however.
def __init__(self, ax, *args, **kw):
self._pivot = kw.pop('pivot', 'tip')
self._length = kw.pop('length', 7)
barbcolor = kw.pop('barbcolor', None)
flagcolor = kw.pop('flagcolor', None)
self.sizes = kw.pop('sizes', dict())
self.fill_empty = kw.pop('fill_empty', False)
self.barb_increments = kw.pop('barb_increments', dict())
self.rounding = kw.pop('rounding', True)
self.flip = kw.pop('flip_barb', False)
        #Flagcolor and barbcolor provide convenience parameters for setting
#the facecolor and edgecolor, respectively, of the barb polygon. We
#also work here to make the flag the same color as the rest of the barb
#by default
if None in (barbcolor, flagcolor):
kw['edgecolors'] = 'face'
if flagcolor:
kw['facecolors'] = flagcolor
elif barbcolor:
kw['facecolors'] = barbcolor
else:
#Set to facecolor passed in or default to black
kw.setdefault('facecolors', 'k')
else:
kw['edgecolors'] = barbcolor
kw['facecolors'] = flagcolor
#Parse out the data arrays from the various configurations supported
x, y, u, v, c = self._parse_args(*args)
self.x = x
self.y = y
xy = np.hstack((x[:,np.newaxis], y[:,np.newaxis]))
#Make a collection
barb_size = self._length**2 / 4 #Empirically determined
collections.PolyCollection.__init__(self, [], (barb_size,), offsets=xy,
transOffset=ax.transData, **kw)
self.set_transform(transforms.IdentityTransform())
self.set_UVC(u, v, c)
__init__.__doc__ = """
The constructor takes one required argument, an Axes
instance, followed by the args and kwargs described
by the following pylab interface documentation:
%s""" % _barbs_doc
def _find_tails(self, mag, rounding=True, half=5, full=10, flag=50):
'''
Find how many of each of the tail pieces is necessary. Flag
specifies the increment for a flag, barb for a full barb, and half for
half a barb. Mag should be the magnitude of a vector (ie. >= 0).
This returns a tuple of:
(*number of flags*, *number of barbs*, *half_flag*, *empty_flag*)
*half_flag* is a boolean whether half of a barb is needed,
since there should only ever be one half on a given
        barb. *empty_flag* is an array of flags to easily tell if
        a barb is empty (too low to plot any barbs/flags).
'''
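        # Worked example (comment added): with the default increments half=5,
        # full=10, flag=50, a magnitude of 65 decomposes into 1 flag (50) +
        # 1 full barb (10) + 1 half barb (5), matching the schematic barb
        # shown in _barbs_doc above.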
#If rounding, round to the nearest multiple of half, the smallest
#increment
if rounding:
mag = half * (mag / half + 0.5).astype(np.int)
num_flags = np.floor(mag / flag).astype(np.int)
mag = np.mod(mag, flag)
num_barb = np.floor(mag / full).astype(np.int)
mag = np.mod(mag, full)
half_flag = mag >= half
empty_flag = ~(half_flag | (num_flags > 0) | (num_barb > 0))
return num_flags, num_barb, half_flag, empty_flag
def _make_barbs(self, u, v, nflags, nbarbs, half_barb, empty_flag, length,
pivot, sizes, fill_empty, flip):
'''
This function actually creates the wind barbs. *u* and *v*
are components of the vector in the *x* and *y* directions,
respectively.
        *nflags*, *nbarbs*, *half_barb*, and *empty_flag* are,
        respectively, the number of flags, number of barbs, flag for
        half a barb, and flag for empty barb, ostensibly obtained
        from :meth:`_find_tails`.
*length* is the length of the barb staff in points.
*pivot* specifies the point on the barb around which the
entire barb should be rotated. Right now, valid options are
'head' and 'middle'.
*sizes* is a dictionary of coefficients specifying the ratio
of a given feature to the length of the barb. These features
include:
- *spacing*: space between features (flags, full/half
barbs)
- *height*: distance from shaft of top of a flag or full
barb
- *width* - width of a flag, twice the width of a full barb
- *emptybarb* - radius of the circle used for low
magnitudes
*fill_empty* specifies whether the circle representing an
empty barb should be filled or not (this changes the drawing
of the polygon).
*flip* is a flag indicating whether the features should be flipped to
the other side of the barb (useful for winds in the southern
        hemisphere).
        This function returns a list of arrays of vertices, defining a polygon for
each of the wind barbs. These polygons have been rotated to properly
align with the vector direction.
'''
#These control the spacing and size of barb elements relative to the
#length of the shaft
spacing = length * sizes.get('spacing', 0.125)
full_height = length * sizes.get('height', 0.4)
full_width = length * sizes.get('width', 0.25)
empty_rad = length * sizes.get('emptybarb', 0.15)
#Controls y point where to pivot the barb.
pivot_points = dict(tip=0.0, middle=-length/2.)
#Check for flip
if flip: full_height = -full_height
endx = 0.0
endy = pivot_points[pivot.lower()]
#Get the appropriate angle for the vector components. The offset is due
#to the way the barb is initially drawn, going down the y-axis. This
#makes sense in a meteorological mode of thinking since there 0 degrees
#corresponds to north (the y-axis traditionally)
angles = -(ma.arctan2(v, u) + np.pi/2)
#Used for low magnitude. We just get the vertices, so if we make it
#out here, it can be reused. The center set here should put the
#center of the circle at the location(offset), rather than at the
#same point as the barb pivot; this seems more sensible.
circ = CirclePolygon((0,0), radius=empty_rad).get_verts()
if fill_empty:
empty_barb = circ
else:
#If we don't want the empty one filled, we make a degenerate polygon
#that wraps back over itself
empty_barb = np.concatenate((circ, circ[::-1]))
barb_list = []
for index, angle in np.ndenumerate(angles):
#If the vector magnitude is too weak to draw anything, plot an
#empty circle instead
if empty_flag[index]:
#We can skip the transform since the circle has no preferred
#orientation
barb_list.append(empty_barb)
continue
poly_verts = [(endx, endy)]
offset = length
#Add vertices for each flag
for i in range(nflags[index]):
                #The spacing that works for the barbs is a little too much for
#the flags, but this only occurs when we have more than 1 flag.
if offset != length: offset += spacing / 2.
poly_verts.extend([[endx, endy + offset],
[endx + full_height, endy - full_width/2 + offset],
[endx, endy - full_width + offset]])
offset -= full_width + spacing
            #Add vertices for each barb. These really are lines, but it works
            #well to add 3 vertices that basically pull the polygon out and
            #back down the line
for i in range(nbarbs[index]):
poly_verts.extend([(endx, endy + offset),
(endx + full_height, endy + offset + full_width/2),
(endx, endy + offset)])
offset -= spacing
#Add the vertices for half a barb, if needed
if half_barb[index]:
#If the half barb is the first on the staff, traditionally it is
#offset from the end to make it easy to distinguish from a barb
#with a full one
if offset == length:
poly_verts.append((endx, endy + offset))
offset -= 1.5 * spacing
poly_verts.extend([(endx, endy + offset),
(endx + full_height/2, endy + offset + full_width/4),
(endx, endy + offset)])
            #Rotate the barb according to the angle. Making the barb first and then
#rotating it made the math for drawing the barb really easy. Also,
#the transform framework makes doing the rotation simple.
poly_verts = transforms.Affine2D().rotate(-angle).transform(
poly_verts)
barb_list.append(poly_verts)
return barb_list
#Taken shamelessly from Quiver
def _parse_args(self, *args):
X, Y, U, V, C = [None]*5
args = list(args)
if len(args) == 3 or len(args) == 5:
C = ma.asarray(args.pop(-1)).ravel()
V = ma.asarray(args.pop(-1))
U = ma.asarray(args.pop(-1))
nn = np.shape(U)
nc = nn[0]
nr = 1
if len(nn) > 1:
nr = nn[1]
if len(args) == 2: # remaining after removing U,V,C
X, Y = [np.array(a).ravel() for a in args]
if len(X) == nc and len(Y) == nr:
X, Y = [a.ravel() for a in np.meshgrid(X, Y)]
else:
indexgrid = np.meshgrid(np.arange(nc), np.arange(nr))
X, Y = [np.ravel(a) for a in indexgrid]
return X, Y, U, V, C
def set_UVC(self, U, V, C=None):
self.u = ma.asarray(U).ravel()
self.v = ma.asarray(V).ravel()
if C is not None:
c = ma.asarray(C).ravel()
x,y,u,v,c = delete_masked_points(self.x.ravel(), self.y.ravel(),
self.u, self.v, c)
else:
x,y,u,v = delete_masked_points(self.x.ravel(), self.y.ravel(),
self.u, self.v)
magnitude = np.sqrt(u*u + v*v)
flags, barbs, halves, empty = self._find_tails(magnitude,
self.rounding, **self.barb_increments)
#Get the vertices for each of the barbs
plot_barbs = self._make_barbs(u, v, flags, barbs, halves, empty,
self._length, self._pivot, self.sizes, self.fill_empty, self.flip)
self.set_verts(plot_barbs)
#Set the color array
if C is not None:
self.set_array(c)
#Update the offsets in case the masked data changed
xy = np.hstack((x[:,np.newaxis], y[:,np.newaxis]))
self._offsets = xy
def set_offsets(self, xy):
'''
        Set the offsets for the barb polygons. This saves the offsets passed in
        and sets a version masked as appropriate for the existing U/V
data. *offsets* should be a sequence.
ACCEPTS: sequence of pairs of floats
'''
self.x = xy[:,0]
self.y = xy[:,1]
x,y,u,v = delete_masked_points(self.x.ravel(), self.y.ravel(), self.u,
self.v)
xy = np.hstack((x[:,np.newaxis], y[:,np.newaxis]))
collections.PolyCollection.set_offsets(self, xy)
set_offsets.__doc__ = collections.PolyCollection.set_offsets.__doc__
barbs_doc = _barbs_doc
| agpl-3.0 |
AnthonyHullDiamond/scanning | org.eclipse.scanning.points/scripts/scanpointgenerator/plotgenerator.py | 2 | 4453 | ###
# Copyright (c) 2016, 2017 Diamond Light Source Ltd.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v1.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v10.html
#
# Contributors:
# Tom Cobb - initial API and implementation and/or initial documentation
# Gary Yendell - initial API and implementation and/or initial documentation
# Charles Mita - initial API and implementation and/or initial documentation
#
###
from scanpointgenerator import CompoundGenerator, RectangularROI, CircularROI
MARKER_SIZE = 10
def plot_generator(gen, excluder=None, show_indexes=True):
from matplotlib.patches import Rectangle, Circle
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
if excluder is not None:
for roi in excluder.rois:
overlay = plt.subplot(111, aspect='equal')
if isinstance(roi, RectangularROI):
overlay.add_patch(Rectangle(roi.start, roi.width, roi.height, fill=False))
if isinstance(roi, CircularROI):
overlay.add_patch(Circle(roi.centre, roi.radius, fill=False))
if not isinstance(gen, CompoundGenerator):
excluders = [] if excluder is None else [excluder]
gen = CompoundGenerator([gen], excluders, [])
gen.prepare()
# points for spline generation
x, y = [], []
# capture points and indexes
capx, capy, capi = [], [], []
# segment start for colour changing
starts = []
for point in gen.iterator():
# If lower is different from last then include it
xlower = point.lower["x"]
ylower = point.lower.get("y", 0)
if len(x) == 0 or x[-1] != xlower or y[-1] != ylower:
if len(x) != 0:
# add in a tiny fractional distance
xneg = x[-1] - xlower > 0
yneg = y[-1] - ylower > 0
xdiff = (x[-1] - x[-2]) * 0.01
ydiff = (y[-1] - y[-2]) * 0.01
for i in range(3):
x.append(x[-1] + xdiff)
y.append(y[-1] + ydiff)
# add the padding on the input
if xneg:
xdiff *= -1
if yneg:
ydiff *= -1
for i in reversed(range(3)):
x.append(xlower + xdiff * (i + 1))
y.append(ylower + ydiff * (i + 1))
starts.append(len(x))
x.append(xlower)
y.append(ylower)
# Add in capture points
xpos = point.positions["x"]
ypos = point.positions.get("y", 0)
x.append(xpos)
y.append(ypos)
capx.append(xpos)
capy.append(ypos)
capi.append(point.indexes)
# And upper point
starts.append(len(x))
x.append(point.upper["x"])
y.append(point.upper.get("y", 0))
# # Plot labels
plt.xlabel("X (%s)" % gen.units["x"])
if "y" in gen.units:
plt.ylabel("Y (%s)" % gen.units["y"])
else:
plt.tick_params(left='off', labelleft='off')
# Define curves parametrically
x = np.array(x)
y = np.array(y)
t = np.zeros(len(x))
t[1:] = np.sqrt((x[1:] - x[:-1])**2 + (y[1:] - y[:-1])**2)
t = np.cumsum(t)
t /= t[-1]
tck, _ = interpolate.splprep([x, y], s=0)
# Plot each line
for i, start in enumerate(starts):
if i + 1 < len(starts):
end = starts[i+1]
else:
end = len(x) - 1
tnew = np.linspace(t[start], t[end], num=1001, endpoint=True)
sx, sy = interpolate.splev(tnew, tck)
plt.plot(sx, sy, linewidth=2)
# And the capture points
plt.plot(capx, capy, linestyle="", marker="x", color="k",
markersize=MARKER_SIZE)
# And a start position
plt.plot([x[0]], [y[0]], 'bo')
plt.annotate("Start", (x[0], y[0]), xytext=(MARKER_SIZE/2, MARKER_SIZE/2),
textcoords='offset points')
# And the indexes
if show_indexes:
for i, x, y in zip(capi, capx, capy):
plt.annotate(i, (x, y), xytext=(MARKER_SIZE/2, MARKER_SIZE/2),
textcoords='offset points')
#indexes = ["%s (size %d)" % z for z in zip(gen.index_names, gen.index_dims)]
#plt.title("Dataset: [%s]" % (", ".join(indexes)))
plt.show()
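if __name__ == "__main__":
    # Illustrative sketch (added by the editor): drive plot_generator with a
    # small snake scan.  The LineGenerator signature used here
    # (axes, units, start, stop, size, alternate) is an assumption based on
    # typical scanpointgenerator usage and may differ between versions.
    from scanpointgenerator import LineGenerator
    xs = LineGenerator("x", "mm", 0.0, 1.0, 5, alternate=True)
    ys = LineGenerator("y", "mm", 0.0, 1.0, 4)
    plot_generator(CompoundGenerator([ys, xs], [], []))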
| epl-1.0 |
anshumang/picongpu-evpath | examples/ThermalTest/tools/dispersion.py | 11 | 2689 | #!/usr/bin/env python
#
# Copyright 2013 Heiko Burau, Axel Huebl
#
# This file is part of PIConGPU.
#
# PIConGPU is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PIConGPU is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PIConGPU.
# If not, see <http://www.gnu.org/licenses/>.
#
#___________P A R A M E T E R S___________
omega_plasma = 6.718e13 # SI unit: 1/s
v_th = 1.0e8 # SI unit: m/s
c = 2.9979e8 # SI unit: m/s
delta_t = 2.5e-15 # SI unit: s
delta_z = c * delta_t # SI unit: m
#_________________________________________
from numpy import *
from matplotlib import pyplot as plt
from matplotlib.ticker import FormatStrFormatter
data_trans = loadtxt("eField_zt_trans.dat")
data_long = loadtxt("eField_zt_long.dat")
N_z = len(data_trans[:,0])
N_t = len(data_trans[0,:])
omega_max = pi*(N_t-1)/(N_t*delta_t)/omega_plasma
k_max = pi * (N_z-1)/(N_z*delta_z)
# __________________transversal plot______________________
ax = plt.subplot(211, autoscale_on=False, xlim=(-k_max, k_max), ylim=(-1, 10))
ax.xaxis.set_major_formatter(FormatStrFormatter('%2.2e'))
ax.yaxis.set_major_formatter(FormatStrFormatter('%0.0f'))
plt.xlabel(r"$k [1/m]$")
plt.ylabel(r"$\omega / \omega_{pe} $")
data_trans = fft.fftshift(fft.fft2(data_trans))
plt.imshow(abs(data_trans), extent=(-k_max, k_max, -omega_max, omega_max), aspect='auto', interpolation='nearest')
plt.colorbar()
# plot analytical dispersion relation
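# (note added) transverse branch: electromagnetic dispersion w^2 = w_pe^2 + c^2 k^2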
x = linspace(-k_max, k_max, 200)
y = sqrt(c**2 * x**2 + omega_plasma**2)/omega_plasma
plt.plot(x, y, 'r--', linewidth=1)
# ___________________longitudinal plot_____________________
ax = plt.subplot(212, autoscale_on=False, xlim=(-k_max, k_max), ylim=(-1, 10))
ax.xaxis.set_major_formatter(FormatStrFormatter('%2.2e'))
ax.yaxis.set_major_formatter(FormatStrFormatter('%0.0f'))
plt.xlabel(r"$k [1/m]$")
plt.ylabel(r"$\omega / \omega_{pe} $")
data_long = fft.fftshift(fft.fft2(data_long))
plt.imshow(abs(data_long), extent=(-k_max, k_max, -omega_max, omega_max), aspect='auto', interpolation='nearest')
plt.colorbar()
# plot analytical dispersion relation
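# (note added) longitudinal branch: Bohm-Gross dispersion w^2 = w_pe^2 + 3 v_th^2 k^2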
x = linspace(-k_max, k_max, 200)
y = sqrt(3 * v_th**2 * x**2 + omega_plasma**2)/omega_plasma
plt.plot(x, y, 'r--', linewidth=1)
plt.show()
| gpl-3.0 |
ovgu-FINken/paparazzi_pre_merge | sw/airborne/test/math/compare_utm_enu.py | 77 | 2714 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import sys
import os
PPRZ_SRC = os.getenv("PAPARAZZI_SRC", "../../../..")
sys.path.append(PPRZ_SRC + "/sw/lib/python")
from pprz_math.geodetic import *
from pprz_math.algebra import DoubleRMat, DoubleEulers, DoubleVect3
from math import radians, degrees, tan
import matplotlib.pyplot as plt
import numpy as np
# Origin at ENAC
UTM_EAST0 = 377349 # in m
UTM_NORTH0 = 4824583 # in m
UTM_ZONE0 = 31
ALT0 = 147.000 # in m
utm_origin = UtmCoor_d(north=UTM_NORTH0, east=UTM_EAST0, alt=ALT0, zone=UTM_ZONE0)
print("origin %s" % utm_origin)
lla_origin = utm_origin.to_lla()
ecef_origin = lla_origin.to_ecef()
ltp_origin = ecef_origin.to_ltp_def()
print(ltp_origin)
# convergence angle to "true north" is approx 1 deg here
earth_radius = 6378137.0
n = 0.9996 * earth_radius
UTM_DELTA_EAST = 500000.
dist_to_meridian = utm_origin.east - UTM_DELTA_EAST
conv = dist_to_meridian / n * tan(lla_origin.lat)
# or (middle meridian of UTM zone 31 is at 3deg)
#conv = atan(tan(lla_origin.lon - radians(3))*sin(lla_origin.lat))
print("approx. convergence angle (north error compared to meridian): %f deg" % degrees(conv))
# Rotation matrix to correct for "true north"
R = DoubleEulers(psi=-conv).to_rmat()
# calculate ENU coordinates for 100 points in 100m distance
nb_points = 100
dist_points = 100
enu_res = np.zeros((nb_points, 2))
enu_res_c = np.zeros((nb_points, 2))
utm_res = np.zeros((nb_points, 2))
for i in range(0, nb_points):
utm = UtmCoor_d()
utm.north = i * dist_points + utm_origin.north
utm.east = i * dist_points+ utm_origin.east
utm.alt = utm_origin.alt
utm.zone = utm_origin.zone
#print(utm)
utm_res[i, 0] = utm.east - utm_origin.east
utm_res[i, 1] = utm.north - utm_origin.north
lla = utm.to_lla()
#print(lla)
ecef = lla.to_ecef()
enu = ecef.to_enu(ltp_origin)
enu_res[i, 0] = enu.x
enu_res[i, 1] = enu.y
enu_c = R * DoubleVect3(enu.x, enu.y, enu.z)
enu_res_c[i, 0] = enu_c.x
enu_res_c[i, 1] = enu_c.y
#print(enu)
dist = np.linalg.norm(utm_res, axis=1)
error = np.linalg.norm(utm_res - enu_res, axis=1)
error_c = np.linalg.norm(utm_res - enu_res_c, axis=1)
plt.figure(1)
plt.subplot(311)
plt.title("utm vs. enu")
plt.plot(enu_res[:, 0], enu_res[:, 1], 'g', label="ENU")
plt.plot(utm_res[:, 0], utm_res[:, 1], 'r', label="UTM")
plt.ylabel("y/north [m]")
plt.xlabel("x/east [m]")
plt.legend(loc='upper left')
plt.subplot(312)
plt.plot(dist, error, 'r')
plt.xlabel("dist from origin [m]")
plt.ylabel("error [m]")
plt.subplot(313)
plt.plot(dist, error_c, 'r')
plt.xlabel("dist from origin [m]")
plt.ylabel("error with north fix [m]")
plt.show()
| gpl-2.0 |
prabhamatta/Analyzing-Open-Data | notebooks/Day_06_B_Generators.py | 3 | 10025 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=1>
# Goals
# <markdowncell>
# **To practice using generators to yield geographical entities of various types.**
#
# Generators are a bit complicated, and I won't try to explain all the intricacies here. I will show you how to use `yield` in a function definition to return a generator. From [Definition of a generator](http://docs.python.org/2/glossary.html#term-generator):
#
# <blockquote>A function which returns an iterator. It looks like a normal function except that it contains yield statements for producing a series a values usable in a for-loop or that can be retrieved one at a time with the next() function. Each yield temporarily suspends processing, remembering the location execution state (including local variables and pending try-statements). When the generator resumes, it picks-up where it left-off (in contrast to functions which start fresh on every invocation)</blockquote>
#
# For some background on Python generators:
#
# * [iterator - The Python yield keyword explained - Stack Overflow](http://stackoverflow.com/questions/231767/the-python-yield-keyword-explained/231855#231855)
# * [Improve Your Python: 'yield' and Generators Explained](http://www.jeffknupp.com/blog/2013/04/07/improve-your-python-yield-and-generators-explained/)
#
# Why use generators: http://stackoverflow.com/a/102632/7782
#
# <blockquote>Generators are good for calculating large sets of results (in particular calculations involving loops themselves) where you don't know if you are going to need all results, or where you don't want to allocate the memory for all results at the same time. </blockquote>
#
# Let's also practice using [itertools.islice](http://www.python.org/doc//current/library/itertools.html#itertools.islice) and [enumerate](http://docs.python.org/2/library/functions.html#enumerate) -- two of my favorite constructions in Python
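# <codecell>
# Illustrative aside (added; not in the original notebook): the memory argument
# for generators in miniature -- the generator expression below yields values
# one at a time, while the list comprehension materializes the full list first.
sum_lazy = sum(x * x for x in xrange(10 ** 6))
sum_eager = sum([x * x for x in xrange(10 ** 6)])
assert sum_lazy == sum_eager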
# <markdowncell>
# From http://api.census.gov/data/2010/sf1/geo.html, geographic entities we are specifically interested in this exercise:
#
# * state-county
# * state-county-tract
#
# * state-place
# * state-metropolitan statistical area/micropolitan statistical area
# * state-metropolitan statistical area/micropolitan statistical area-metropolitan division
# * state-combined statistical area
# <codecell>
# usual imports for numpy, pandas, matplotlib
import numpy as np
import matplotlib.pyplot as plt
from pandas import DataFrame, Series, Index
import pandas as pd
# <codecell>
# check that CENSUS_KEY is defined
import census
import us
import settings
assert settings.CENSUS_KEY is not None
# <codecell>
# instantiate our Census object
c = census.Census(key=settings.CENSUS_KEY)
# <headingcell level=1>
# A bit of warmup with Generators
# <codecell>
import string
print list(string.lowercase)
# <codecell>
def abcs():
    """a generator that yields the letters of the alphabet one at a time"""
    alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k',
                'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
                'w', 'x', 'y', 'z']
for letter in alphabet:
yield letter
# a generator that gives you the letters of the alphabet a letter at a time
say_abcs = abcs()
# <codecell>
# run this line over and over again to see the letters one at a time
say_abcs.next()
# <codecell>
# you can use list to grab all the items in an iterator. But be careful if the number
# of items is large or even infinite! In this case, we're ok
list(abcs())
# <markdowncell>
# Demonstration of how to use [enumerate](http://docs.python.org/2/library/functions.html#enumerate):
#
# <blockquote>Return an enumerate object. sequence must be a sequence, an iterator, or some other object which supports iteration. The next() method of the iterator returned by enumerate() returns a tuple containing a count (from start which defaults to 0) and the values obtained from iterating over sequence</blockquote>
#
# <codecell>
for (i, letter) in enumerate(abcs()):
print i, letter
# <markdowncell>
# You can use itertools.islice [itertools.islice](http://www.python.org/doc//current/library/itertools.html#itertools.islice) to return parts of the iterator.
#
# <blockquote>Make an iterator that returns selected elements from the iterable. If start is non-zero, then elements from the iterable are skipped until start is reached. Afterward, elements are returned consecutively unless step is set higher than one which results in items being skipped. If stop is None, then iteration continues until the iterator is exhausted, if at all; otherwise, it stops at the specified position. Unlike regular slicing, islice() does not support negative values for start, stop, or step. Can be used to extract related fields from data where the internal structure has been flattened (for example, a multi-line report may list a name field on every third line).</blockquote>
# <codecell>
# let's get the first 10 letters of the alphabet
from itertools import islice
list(islice(abcs(), 10))
# <codecell>
# you can use None to get all items in islice
# from docs: "If stop is None, then iteration continues until the iterator is exhausted,"
list(islice(abcs(), None))
# <codecell>
# itertools.count can in principle generate an infinite sequence
# http://www.python.org/doc//current/library/itertools.html#itertools.count
from itertools import count
# count starting zero
my_counter = count(0)
# <codecell>
# try it out
my_counter.next()
# <codecell>
# DON'T do list(count(0)) -> you'll be trying to generate an infinite list
# but use an upper limit
list(islice(count(0),10))
# <codecell>
# start, stop
list(islice(count(),1,3))
# <headingcell level=1>
# Generator for US Counties
# <codecell>
# get the syntax down for getting counties from CA -- so that then we can use it later
r = c.sf1.get('NAME,P0010001', geo={'for':'county:*',
'in':'state:{fips}'.format(fips=us.states.CA.fips)})
r[:5]
# <markdowncell>
# With the census API, you can get the counties with a single call to the census API or state-by-state. The `counties` generator below takes the first approach while `counties2` takes the second approach. Although `counties` is more efficient in most cases I can think of, it will be useful to know how to do calls on a state-by-state basis. For example, when we want to query at the census tract level or below, we will need to work on a state-by-state basis.
# <codecell>
def counties(variables='NAME'):
"""ask for all the states"""
# tabulate a set of fips codes for the states
states_fips = set([s.fips for s in us.states.STATES])
geo={'for':'county:*',
'in':'state:*'}
for county in c.sf1.get(variables, geo=geo):
# eliminate counties whose states aren't in a state or DC
if county['state'] in states_fips:
yield county
def counties2(variables='NAME'):
"""generator for all counties"""
# since we can get all the counties in one call,
# this function is for demonstrating the use of walking through
# the states to get at the counties
for state in us.states.STATES:
geo={'for':'county:*',
'in':'state:{fips}'.format(fips=state.fips)}
for county in c.sf1.get(variables, geo=geo):
yield county
# <codecell>
counties_list = list(counties('NAME,P0010001'))
# <codecell>
# add up the population to make sure we have the total right
counties_df = DataFrame(counties_list)
counties_df.P0010001 = counties_df.P0010001.astype('int')
counties_df.P0010001.sum()
# <markdowncell>
# One reason for writing all the counties in the form of a Python generator is that you can easily control the number of counties we work with at any given time -- and then easily scale out to get all of them.
# <codecell>
# make a list of the first ten counties
from itertools import islice
list(islice(counties2(),10))
# <headingcell level=1>
# Generator for Census Tracts
# <markdowncell>
# The following generator loops through all the states to get at the individual counties to then get at the census tracts.
# <codecell>
def tracts(variables='NAME'):
for state in us.states.STATES:
# handy to print out state to monitor progress
print state.fips, state
counties_in_state={'for':'county:*',
'in':'state:{fips}'.format(fips=state.fips)}
for county in c.sf1.get('NAME', geo=counties_in_state):
# print county['state'], county['NAME']
tracts_in_county = {'for':'tract:*',
'in': 'state:{s_fips} county:{c_fips}'.format(s_fips=state.fips,
c_fips=county['county'])}
for tract in c.sf1.get(variables,geo=tracts_in_county):
yield tract
# <codecell>
r = list(islice(tracts('NAME,P0010001'),10))
tracts_df = DataFrame(r)
tracts_df.P0010001 = tracts_df.P0010001.astype('int')
tracts_df['FIPS'] = tracts_df.apply(lambda s: s['state']+s['county']+s['tract'], axis=1)
print "number of tracts", len(tracts_df)
print "total pop", tracts_df.P0010001.sum()
tracts_df.head()
# <markdowncell>
# Good to save the DataFrame so we can load up the census tracts without having to call the census API again.
#
# I/O: http://pandas.pydata.org/pandas-docs/dev/io.html
#
# Today, we'll use [pickle format](http://docs.python.org/2/library/pickle.html) and look at other formats.
# <codecell>
TRACT_FILE_PICKLE = "tracts.pickle"
# UNCOMMENT THIS LINE TO SAVE YOUR FILE
# tracts_df.to_pickle(TRACT_FILE_PICKLE)
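# <markdowncell>
# As a minimal sketch of one of the "other formats" mentioned above (the file name and the dtype choices here are illustrative, not prescribed), the same DataFrame could be round-tripped through CSV; reading the codes back as strings preserves any leading zeros in the FIPS fields.
# <codecell>
# UNCOMMENT THESE LINES TO TRY THE CSV ROUND TRIP
# TRACT_FILE_CSV = "tracts.csv"
# tracts_df.to_csv(TRACT_FILE_CSV, index=False)
# pd.read_csv(TRACT_FILE_CSV, dtype={'state': str, 'county': str, 'tract': str, 'FIPS': str}).head()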
# <markdowncell>
# Let's read the DataFrame from disk to confirm that we were able to save the file properly.
# <codecell>
df = pd.read_pickle(TRACT_FILE_PICKLE)
df.head()
# <codecell>
# UNCOMMENT TO DO COMPARISON
# you can compare the saved file to the file from disk
# np.all(tracts_df == df)
| apache-2.0 |
mjvakili/ccppabc | ccppabc/code/archive/wp_covariance.py | 1 | 1717 | from halotools.empirical_models import Zheng07 , model_defaults
from halotools.mock_observables import wp
from halotools.mock_observables.clustering import tpcf
from halotools.empirical_models.mock_helpers import (three_dim_pos_bundle,
infer_mask_from_kwargs)
from halotools.mock_observables.clustering import wp
from halotools.sim_manager import supported_sims
import matplotlib.pyplot as plt
plt.switch_backend("Agg")
import time
import numpy as np
model = Zheng07()
xir = []
# populate 500 mock galaxy catalogs, measure the clustering of each,
# and estimate the covariance matrix of the clustering measurements
for i in range(500):
    model.populate_mock()
    xir.append(model.mock.compute_galaxy_clustering()[1])
covar = np.cov(np.array(xir).T)
np.savetxt("clustering_covariance_Mr20.dat", covar)
"""
a = time.time()
model.mock.compute_galaxy_clustering()
print time.time() - a
rbins = model_defaults.default_rbins
rbin_centers = (rbins[1:] + rbins[:-1])/2.
cat = supported_sims.HaloCatalog()
l = cat.Lbox
print l
p_bins = np.linspace(0,l/2,200)
mask = infer_mask_from_kwargs(model.mock.galaxy_table)
pos = three_dim_pos_bundle(table=model.mock.galaxy_table,
key1='x', key2='y', key3='z', mask=mask,
return_complement=False)
figure = plt.figure(figsize=(10,10))
cl = wp(pos , rbins, p_bins , period = l , estimator = 'Landy-Szalay')
for n_pbins in np.array([2,8,16]):
p_bins = np.linspace(0 , l/2 , n_pbins)
a = time.time()
clustering = wp(pos, rbins, p_bins , period = l , estimator = 'Landy-Szalay')
print time.time() - a
plt.plot(rbin_centers , (clustering)/cl , label = "$N\pi_{bin}$="+str(n_pbins) , lw = 2)
plt.xscale("Log")
plt.yscale("Log")
plt.legend()
plt.savefig("/home/mj/public_html/wpex.png")"""
| mit |
ywang037/delta-ntu-slerp4 | Training/train_mobilenet_casia_1771.py | 1 | 7420 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 4 14:47:47 2017
@author: slerp4
Compared with _debug version, this version excludes RMSprop optimizer
"""
#import tensorflow as tf
from keras import backend as K
from keras.applications.mobilenet import MobileNet
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import SGD, Adam
from keras.callbacks import LearningRateScheduler, CSVLogger
import os, importlib
from timeit import default_timer as timer
import datetime
import math
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import tensorflow as tf
# check and set tensorflow as backend
if K.backend() != 'tensorflow':
os.environ['KERAS_BACKEND'] = 'tensorflow'
importlib.reload(K)
assert K.backend() == 'tensorflow'
    print('{} backend is successfully set'.format(K.backend()))
elif K.backend() == 'tensorflow':
print('{} backend has already been set'.format(K.backend()))
# force to use gpu:0 tesla k20c
# Creates a graph.
with tf.device('/device:GPU:0'):
a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a')
b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b')
c = tf.matmul(a, b)
# Creates a session with log_device_placement set to True.
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
# Runs the op.
print(sess.run(c))
# training hyper parameters
train_data_dir = r'.\Datasets\casia-1771'  # raw string so the Windows backslashes stay literal
numclass = 1771
num_train_samples = 233505
batch_size = 64
#epochs = 100
alpha = 0.5 # choices=[0.25, 0.5, 0.75, 1.0]
inputsize = 224 # choices=[128, 160, 192, 224], >=32 is ok
'''
# define step decay function - used to visualize learning rate change
class LossHistory(Callback):
def on_train_begin(self, logs={}):
self.losses = []
self.lr = []
def on_epoch_end(self, batch, logs={}):
self.losses.append(logs.get('loss'))
self.lr.append(step_decay(len(self.losses)))
print('Current learning rate:', step_decay(len(self.losses)))
'''
# learning rate schedule
def step_decay(epoch):
# initial_lrate = 0.01
drop = 0.5
epochs_drop = 20.0
lrate = init_lr * math.pow(drop, math.floor((1+epoch)/epochs_drop))
return lrate
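# A quick illustration of the schedule above: with, say, init_lr = 0.01
# (the value itself is just an example), step_decay yields
#   epochs  0-18 -> 0.01, epochs 19-38 -> 0.005, epochs 39-58 -> 0.0025
# i.e. the learning rate halves once every 20 epochs:
#   [0.01 * 0.5 ** math.floor((1 + e) / 20.0) for e in (0, 19, 39)]
#   == [0.01, 0.005, 0.0025]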
# Setup the model
# using a 1771-identity subset of the CASIA-WebFace dataset for training (the full set has 10575 identities)
model = MobileNet(alpha=alpha, depth_multiplier=1, dropout=1e-3,
include_top=True, weights=None, input_tensor=None, pooling=None, classes=numclass)
model.summary()
print('\nPrepare to train cnn model {}-MobileNet-224 with top layer included'.format(alpha))
#print('Total classes: {}'.format(numclass))
#print('Training samples: {}'.format(num_train_samples))
optimizer_chosen = input('Optimizer (A: SGD/B: Adam)? ')
while optimizer_chosen not in ['A', 'B']:
optimizer_chosen = input('Optimizer (A: SGD/B: Adam)? ')
epochs = int(input('Number of epochs? '))
while epochs <= 0:
epochs = int(input('Use a positive integer as the number of epochs: '))
init_lr = float(input('Initial learning rate? '))
while init_lr < 0 or init_lr>0.2:
init_lr = float(input('Use a learning rate in [0, 0.2]: '))
# preparing training data
print('\nDataset path: '+ train_data_dir)
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# load training and testing data
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(224, 224),
batch_size=batch_size)
# define the format of names of several files
stamp = str(alpha)+'-mobilenet-'+str(inputsize)+'-c{}-'.format(numclass)+'b{}-'.format(batch_size)+'e{}-'.format(epochs)
if optimizer_chosen == 'A':
# using step-decaying sgd
method = 'SGD'
print('\nUsing step-decaying stochastic gradient descent')
    print('learning rate halves every 20 epochs')
sgd = SGD(lr=0.0, momentum=0.9, decay=0.0, nesterov=False)
# compile the model
# loss = mse can be tried also
train_start = timer()
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
'''
# use following scripts to have learning rate displayed
# learning schedule callback
loss_history = LossHistory()
lrate = LearningRateScheduler(step_decay)
# training logger callback, log in csv file
record = stamp + method
csv_logger = CSVLogger(record+'.csv',append=True, separator=',')
callbacks_list = [loss_history, lrate, csv_logger]
# train the model
history = model.fit_generator(train_generator, steps_per_epoch=num_train_samples//batch_size,
epochs=epochs, validation_data=None, callbacks=callbacks_list, verbose=2)
'''
# learning schedule callback
lrate = LearningRateScheduler(step_decay)
# training logger callback, log in csv file
record = stamp + method + '-lr{}'.format(init_lr)
csv_logger = CSVLogger(record+'.csv',append=True, separator=',')
callbacks_list = [lrate, csv_logger]
# train the model
history = model.fit_generator(train_generator, steps_per_epoch=num_train_samples//batch_size,
epochs=epochs, validation_data=None, callbacks=callbacks_list, verbose=1)
elif optimizer_chosen == 'B':
# using adam update as adaptive learning rate method
method = 'Adam'
    print('\nUsing Adam update as the adaptive learning rate method')
adam = Adam(lr=init_lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0) # original lr=0.001
# compile the model
# loss = mse can be tried also
train_start = timer()
model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
# training logger callback, log in csv file
record = stamp + method + '-lr{}'.format(init_lr)
csv_logger = CSVLogger(record+'.csv',append=True, separator=',')
# train the model
history = model.fit_generator(train_generator, steps_per_epoch=num_train_samples // batch_size,
epochs=epochs, validation_data=None, callbacks=[csv_logger], verbose=1)
train_end = timer()
mins, secs = divmod(train_end-train_start,60)
hour, mins = divmod(mins,60)
print('Training process took %d:%02d:%02d' % (hour,mins,secs))
# set a stamp of file name for saving the record and weights
now = datetime.datetime.now() #current date and time
save_name = record +'-'+now.strftime("%Y%m%d-%H%M")
#print(history.history)
print(history.history.keys())
# print plots of acc and loss in one pdf
pp = PdfPages(save_name +'.pdf')
# summarize history for accuracy
plt.plot(history.history['acc']) # plt.plot(history.history['val_acc'])
plt_title = str(alpha)+'-mobilenet-'+str(inputsize)+' trained on small dataset'
plt_legend = method + ', {} classes'.format(numclass)+', batch size ={}'.format(batch_size)
plt.title(plt_title)
plt.ylabel('Model accuracy')
plt.xlabel('Epoch')
plt.legend([plt_legend], loc='lower right')
pp.savefig()
plt.show()
# summarize history for loss
plt.plot(history.history['loss']) #plt.plot(history.history['val_loss'])
plt.title(plt_title)
plt.ylabel('Model loss')
plt.xlabel('Epoch')
plt.legend([plt_legend], loc='upper left') #plt.legend(['train', 'test'], loc='upper left')
pp.savefig()
plt.show()
pp.close()
# save trained weights
model.save_weights(save_name +'.h5')
| mit |
endolith/scipy | scipy/stats/stats.py | 5 | 316939 | # Copyright 2002 Gary Strangman. All rights reserved
# Copyright 2002-2016 The SciPy Developers
#
# The original code from Gary Strangman was heavily adapted for
# use in SciPy by Travis Oliphant. The original code came with the
# following disclaimer:
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Gary Strangman be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
"""
A collection of basic statistical functions for Python.
References
----------
.. [CRCProbStat2000] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
"""
import warnings
import math
from math import gcd
from collections import namedtuple
import numpy as np
from numpy import array, asarray, ma
from scipy.spatial.distance import cdist
from scipy.ndimage import measurements
from scipy._lib._util import (check_random_state, MapWrapper,
rng_integers, float_factorial)
import scipy.special as special
from scipy import linalg
from . import distributions
from . import mstats_basic
from ._stats_mstats_common import (_find_repeats, linregress, theilslopes,
siegelslopes)
from ._stats import (_kendall_dis, _toint64, _weightedrankedtau,
_local_correlations)
from dataclasses import make_dataclass
from ._hypotests import _all_partitions
# Functions/classes in other files should be added in `__init__.py`, not here
__all__ = ['find_repeats', 'gmean', 'hmean', 'mode', 'tmean', 'tvar',
'tmin', 'tmax', 'tstd', 'tsem', 'moment', 'variation',
'skew', 'kurtosis', 'describe', 'skewtest', 'kurtosistest',
'normaltest', 'jarque_bera', 'itemfreq',
'scoreatpercentile', 'percentileofscore',
'cumfreq', 'relfreq', 'obrientransform',
'sem', 'zmap', 'zscore', 'iqr', 'gstd', 'median_absolute_deviation',
'median_abs_deviation',
'sigmaclip', 'trimboth', 'trim1', 'trim_mean',
'f_oneway', 'F_onewayConstantInputWarning',
'F_onewayBadInputSizesWarning',
'PearsonRConstantInputWarning', 'PearsonRNearConstantInputWarning',
'pearsonr', 'fisher_exact',
'SpearmanRConstantInputWarning', 'spearmanr', 'pointbiserialr',
'kendalltau', 'weightedtau', 'multiscale_graphcorr',
'linregress', 'siegelslopes', 'theilslopes', 'ttest_1samp',
'ttest_ind', 'ttest_ind_from_stats', 'ttest_rel',
'kstest', 'ks_1samp', 'ks_2samp',
'chisquare', 'power_divergence',
'tiecorrect', 'ranksums', 'kruskal', 'friedmanchisquare',
'rankdata',
'combine_pvalues', 'wasserstein_distance', 'energy_distance',
'brunnermunzel', 'alexandergovern']
def _contains_nan(a, nan_policy='propagate'):
policies = ['propagate', 'raise', 'omit']
if nan_policy not in policies:
raise ValueError("nan_policy must be one of {%s}" %
', '.join("'%s'" % s for s in policies))
try:
        # Calling np.sum to avoid creating a huge array in memory
# e.g. np.isnan(a).any()
with np.errstate(invalid='ignore'):
contains_nan = np.isnan(np.sum(a))
except TypeError:
# This can happen when attempting to sum things which are not
# numbers (e.g. as in the function `mode`). Try an alternative method:
try:
contains_nan = np.nan in set(a.ravel())
except TypeError:
# Don't know what to do. Fall back to omitting nan values and
# issue a warning.
contains_nan = False
nan_policy = 'omit'
warnings.warn("The input array could not be properly "
"checked for nan values. nan values "
"will be ignored.", RuntimeWarning)
if contains_nan and nan_policy == 'raise':
raise ValueError("The input contains nan values")
return contains_nan, nan_policy
def _chk_asarray(a, axis):
if axis is None:
a = np.ravel(a)
outaxis = 0
else:
a = np.asarray(a)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
return a, outaxis
def _chk2_asarray(a, b, axis):
if axis is None:
a = np.ravel(a)
b = np.ravel(b)
outaxis = 0
else:
a = np.asarray(a)
b = np.asarray(b)
outaxis = axis
if a.ndim == 0:
a = np.atleast_1d(a)
if b.ndim == 0:
b = np.atleast_1d(b)
return a, b, outaxis
def _shape_with_dropped_axis(a, axis):
"""
Given an array `a` and an integer `axis`, return the shape
of `a` with the `axis` dimension removed.
Examples
--------
>>> a = np.zeros((3, 5, 2))
>>> _shape_with_dropped_axis(a, 1)
(3, 2)
"""
shp = list(a.shape)
try:
del shp[axis]
except IndexError:
raise np.AxisError(axis, a.ndim) from None
return tuple(shp)
def _broadcast_shapes(shape1, shape2):
"""
Given two shapes (i.e. tuples of integers), return the shape
that would result from broadcasting two arrays with the given
shapes.
Examples
--------
>>> _broadcast_shapes((2, 1), (4, 1, 3))
(4, 2, 3)
"""
d = len(shape1) - len(shape2)
if d <= 0:
shp1 = (1,)*(-d) + shape1
shp2 = shape2
else:
shp1 = shape1
shp2 = (1,)*d + shape2
shape = []
for n1, n2 in zip(shp1, shp2):
if n1 == 1:
n = n2
elif n2 == 1 or n1 == n2:
n = n1
else:
raise ValueError(f'shapes {shape1} and {shape2} could not be '
'broadcast together')
shape.append(n)
return tuple(shape)
def _broadcast_shapes_with_dropped_axis(a, b, axis):
"""
Given two arrays `a` and `b` and an integer `axis`, find the
shape of the broadcast result after dropping `axis` from the
shapes of `a` and `b`.
Examples
--------
>>> a = np.zeros((5, 2, 1))
>>> b = np.zeros((1, 9, 3))
>>> _broadcast_shapes_with_dropped_axis(a, b, 1)
(5, 3)
"""
shp1 = _shape_with_dropped_axis(a, axis)
shp2 = _shape_with_dropped_axis(b, axis)
try:
shp = _broadcast_shapes(shp1, shp2)
except ValueError:
raise ValueError(f'non-axis shapes {shp1} and {shp2} could not be '
'broadcast together') from None
return shp
def gmean(a, axis=0, dtype=None, weights=None):
"""Compute the geometric mean along the specified axis.
Return the geometric average of the array elements.
That is: n-th root of (x1 * x2 * ... * xn)
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the geometric mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If dtype is not specified, it defaults to the
dtype of a, unless a has an integer dtype with a precision less than
that of the default platform integer. In that case, the default
platform integer is used.
weights : array_like, optional
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given `axis`) or of the same shape as `a`.
Default is None, which gives each value a weight of 1.0.
Returns
-------
gmean : ndarray
See `dtype` parameter above.
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
hmean : Harmonic mean
Notes
-----
The geometric average is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity because masked
arrays automatically mask any non-finite values.
References
----------
.. [1] "Weighted Geometric Mean", *Wikipedia*, https://en.wikipedia.org/wiki/Weighted_geometric_mean.
Examples
--------
>>> from scipy.stats import gmean
>>> gmean([1, 4])
2.0
>>> gmean([1, 2, 3, 4, 5, 6, 7])
3.3800151591412964
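    Passing ``weights`` gives a weighted geometric mean; the weights below
    are arbitrary and shown purely as an illustration:
    >>> w_gmean = gmean([1, 4], weights=[3, 1])
    >>> round(w_gmean, 4)
    1.4142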
"""
if not isinstance(a, np.ndarray):
# if not an ndarray object attempt to convert it
log_a = np.log(np.array(a, dtype=dtype))
elif dtype:
# Must change the default dtype allowing array type
if isinstance(a, np.ma.MaskedArray):
log_a = np.log(np.ma.asarray(a, dtype=dtype))
else:
log_a = np.log(np.asarray(a, dtype=dtype))
else:
log_a = np.log(a)
if weights is not None:
weights = np.asanyarray(weights, dtype=dtype)
return np.exp(np.average(log_a, axis=axis, weights=weights))
def hmean(a, axis=0, dtype=None):
"""Calculate the harmonic mean along the specified axis.
That is: n / (1/x1 + 1/x2 + ... + 1/xn)
Parameters
----------
a : array_like
Input array, masked array or object that can be converted to an array.
axis : int or None, optional
Axis along which the harmonic mean is computed. Default is 0.
If None, compute over the whole array `a`.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults to the
dtype of `a`, unless `a` has an integer `dtype` with a precision less
than that of the default platform integer. In that case, the default
platform integer is used.
Returns
-------
hmean : ndarray
See `dtype` parameter above.
See Also
--------
numpy.mean : Arithmetic average
numpy.average : Weighted average
gmean : Geometric mean
Notes
-----
The harmonic mean is computed over a single dimension of the input
array, axis=0 by default, or all values in the array if axis=None.
float64 intermediate and return values are used for integer inputs.
Use masked arrays to ignore any non-finite values in the input or that
arise in the calculations such as Not a Number and infinity.
Examples
--------
>>> from scipy.stats import hmean
>>> hmean([1, 4])
1.6000000000000001
>>> hmean([1, 2, 3, 4, 5, 6, 7])
2.6997245179063363
"""
if not isinstance(a, np.ndarray):
a = np.array(a, dtype=dtype)
if np.all(a >= 0):
        # Harmonic mean is only defined if all elements are greater than or equal to zero.
if isinstance(a, np.ma.MaskedArray):
size = a.count(axis)
else:
if axis is None:
a = a.ravel()
size = a.shape[0]
else:
size = a.shape[axis]
with np.errstate(divide='ignore'):
return size / np.sum(1.0 / a, axis=axis, dtype=dtype)
else:
raise ValueError("Harmonic mean only defined if all elements greater "
"than or equal to zero")
ModeResult = namedtuple('ModeResult', ('mode', 'count'))
def mode(a, axis=0, nan_policy='propagate'):
"""Return an array of the modal (most common) value in the passed array.
If there is more than one such value, only the smallest is returned.
The bin-count for the modal bins is also returned.
Parameters
----------
a : array_like
n-dimensional array of which to find mode(s).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
mode : ndarray
Array of modal values.
count : ndarray
Array of counts for each mode.
Examples
--------
>>> a = np.array([[6, 8, 3, 0],
... [3, 2, 1, 7],
... [8, 1, 8, 4],
... [5, 3, 0, 5],
... [4, 7, 5, 9]])
>>> from scipy import stats
>>> stats.mode(a)
ModeResult(mode=array([[3, 1, 0, 0]]), count=array([[1, 1, 1, 1]]))
To get mode of whole array, specify ``axis=None``:
>>> stats.mode(a, axis=None)
ModeResult(mode=array([3]), count=array([3]))
"""
a, axis = _chk_asarray(a, axis)
if a.size == 0:
return ModeResult(np.array([]), np.array([]))
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.mode(a, axis)
if a.dtype == object and np.nan in set(a.ravel()):
# Fall back to a slower method since np.unique does not work with NaN
scores = set(np.ravel(a)) # get ALL unique values
testshape = list(a.shape)
testshape[axis] = 1
oldmostfreq = np.zeros(testshape, dtype=a.dtype)
oldcounts = np.zeros(testshape, dtype=int)
for score in scores:
template = (a == score)
counts = np.sum(template, axis, keepdims=True)
mostfrequent = np.where(counts > oldcounts, score, oldmostfreq)
oldcounts = np.maximum(counts, oldcounts)
oldmostfreq = mostfrequent
return ModeResult(mostfrequent, oldcounts)
def _mode1D(a):
vals, cnts = np.unique(a, return_counts=True)
return vals[cnts.argmax()], cnts.max()
# np.apply_along_axis will convert the _mode1D tuples to a numpy array,
# casting types in the process.
# This recreates the results without that issue
# View of a, rotated so the requested axis is last
in_dims = list(range(a.ndim))
a_view = np.transpose(a, in_dims[:axis] + in_dims[axis+1:] + [axis])
inds = np.ndindex(a_view.shape[:-1])
modes = np.empty(a_view.shape[:-1], dtype=a.dtype)
counts = np.empty(a_view.shape[:-1], dtype=np.int_)
for ind in inds:
modes[ind], counts[ind] = _mode1D(a_view[ind])
newshape = list(a.shape)
newshape[axis] = 1
return ModeResult(modes.reshape(newshape), counts.reshape(newshape))
def _mask_to_limits(a, limits, inclusive):
"""Mask an array for values outside of given limits.
This is primarily a utility function.
Parameters
----------
a : array
limits : (float or None, float or None)
A tuple consisting of the (lower limit, upper limit). Values in the
input array less than the lower limit or greater than the upper limit
will be masked out. None implies no limit.
inclusive : (bool, bool)
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to lower or upper are allowed.
Returns
-------
A MaskedArray.
Raises
------
A ValueError if there are no values within the given limits.
"""
lower_limit, upper_limit = limits
lower_include, upper_include = inclusive
am = ma.MaskedArray(a)
if lower_limit is not None:
if lower_include:
am = ma.masked_less(am, lower_limit)
else:
am = ma.masked_less_equal(am, lower_limit)
if upper_limit is not None:
if upper_include:
am = ma.masked_greater(am, upper_limit)
else:
am = ma.masked_greater_equal(am, upper_limit)
if am.count() == 0:
raise ValueError("No array values within given limits")
return am
def tmean(a, limits=None, inclusive=(True, True), axis=None):
"""Compute the trimmed mean.
This function finds the arithmetic mean of given values, ignoring values
outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None (default), then all
values are used. Either of the limit values in the tuple can also be
None representing a half-open interval.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to compute test. Default is None.
Returns
-------
tmean : float
Trimmed mean.
See Also
--------
trim_mean : Returns mean after trimming a proportion from both tails.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmean(x)
9.5
>>> stats.tmean(x, (3,17))
10.0
"""
a = asarray(a)
if limits is None:
return np.mean(a, None)
am = _mask_to_limits(a.ravel(), limits, inclusive)
return am.mean(axis=axis)
def tvar(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""Compute the trimmed variance.
This function computes the sample variance of an array of values,
while ignoring values which are outside of given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tvar : float
Trimmed variance.
Notes
-----
`tvar` computes the unbiased sample variance, i.e. it uses a correction
factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tvar(x)
35.0
>>> stats.tvar(x, (3,17))
20.0
"""
a = asarray(a)
a = a.astype(float)
if limits is None:
return a.var(ddof=ddof, axis=axis)
am = _mask_to_limits(a, limits, inclusive)
amnan = am.filled(fill_value=np.nan)
return np.nanvar(amnan, ddof=ddof, axis=axis)
def tmin(a, lowerlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""Compute the trimmed minimum.
    This function finds the minimum value of an array `a` along the
specified axis, but only considering values greater than a specified
lower limit.
Parameters
----------
a : array_like
Array of values.
lowerlimit : None or float, optional
Values in the input array less than the given limit will be ignored.
When lowerlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the lower limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
tmin : float, int or ndarray
Trimmed minimum.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmin(x)
0
>>> stats.tmin(x, 13)
13
>>> stats.tmin(x, 13, inclusive=False)
14
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (lowerlimit, None), (inclusive, False))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.minimum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tmax(a, upperlimit=None, axis=0, inclusive=True, nan_policy='propagate'):
"""Compute the trimmed maximum.
This function computes the maximum value of an array along a given axis,
while ignoring values larger than a specified upper limit.
Parameters
----------
a : array_like
Array of values.
upperlimit : None or float, optional
Values in the input array greater than the given limit will be ignored.
When upperlimit is None, then all values are used. The default value
is None.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
inclusive : {True, False}, optional
This flag determines whether values exactly equal to the upper limit
are included. The default value is True.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
tmax : float, int or ndarray
Trimmed maximum.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tmax(x)
19
>>> stats.tmax(x, 13)
13
>>> stats.tmax(x, 13, inclusive=False)
12
"""
a, axis = _chk_asarray(a, axis)
am = _mask_to_limits(a, (None, upperlimit), (False, inclusive))
contains_nan, nan_policy = _contains_nan(am, nan_policy)
if contains_nan and nan_policy == 'omit':
am = ma.masked_invalid(am)
res = ma.maximum.reduce(am, axis).data
if res.ndim == 0:
return res[()]
return res
def tstd(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""Compute the trimmed sample standard deviation.
This function finds the sample standard deviation of given values,
ignoring values outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tstd : float
Trimmed sample standard deviation.
Notes
-----
`tstd` computes the unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tstd(x)
5.9160797830996161
>>> stats.tstd(x, (3,17))
4.4721359549995796
"""
return np.sqrt(tvar(a, limits, inclusive, axis, ddof))
def tsem(a, limits=None, inclusive=(True, True), axis=0, ddof=1):
"""Compute the trimmed standard error of the mean.
This function finds the standard error of the mean for given
values, ignoring values outside the given `limits`.
Parameters
----------
a : array_like
Array of values.
limits : None or (lower limit, upper limit), optional
Values in the input array less than the lower limit or greater than the
upper limit will be ignored. When limits is None, then all values are
used. Either of the limit values in the tuple can also be None
representing a half-open interval. The default value is None.
inclusive : (bool, bool), optional
A tuple consisting of the (lower flag, upper flag). These flags
determine whether values exactly equal to the lower or upper limits
are included. The default value is (True, True).
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over the
whole array `a`.
ddof : int, optional
Delta degrees of freedom. Default is 1.
Returns
-------
tsem : float
Trimmed standard error of the mean.
Notes
-----
`tsem` uses unbiased sample standard deviation, i.e. it uses a
correction factor ``n / (n - 1)``.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.tsem(x)
1.3228756555322954
>>> stats.tsem(x, (3,17))
1.1547005383792515
"""
a = np.asarray(a).ravel()
if limits is None:
return a.std(ddof=ddof) / np.sqrt(a.size)
am = _mask_to_limits(a, limits, inclusive)
sd = np.sqrt(np.ma.var(am, ddof=ddof, axis=axis))
return sd / np.sqrt(am.count())
#####################################
# MOMENTS #
#####################################
def moment(a, moment=1, axis=0, nan_policy='propagate'):
r"""Calculate the nth moment about the mean for a sample.
A moment is a specific quantitative measure of the shape of a set of
points. It is often used to calculate coefficients of skewness and kurtosis
due to its close relationship with them.
Parameters
----------
a : array_like
Input array.
moment : int or array_like of ints, optional
Order of central moment that is returned. Default is 1.
axis : int or None, optional
Axis along which the central moment is computed. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
n-th central moment : ndarray or float
The appropriate moment along the given axis or over all values if axis
is None. The denominator for the moment calculation is the number of
observations, no degrees of freedom correction is done.
See Also
--------
kurtosis, skew, describe
Notes
-----
The k-th central moment of a data sample is:
.. math::
m_k = \frac{1}{n} \sum_{i = 1}^n (x_i - \bar{x})^k
Where n is the number of samples and x-bar is the mean. This function uses
exponentiation by squares [1]_ for efficiency.
References
----------
.. [1] https://eli.thegreenplace.net/2009/03/21/efficient-integer-exponentiation-algorithms
Examples
--------
>>> from scipy.stats import moment
>>> moment([1, 2, 3, 4, 5], moment=1)
0.0
>>> moment([1, 2, 3, 4, 5], moment=2)
2.0
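    ``moment`` also accepts a sequence of orders, returning one central
    moment per requested order:
    >>> moment([1, 2, 3, 4, 5], moment=[1, 2])
    array([0., 2.])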
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.moment(a, moment, axis)
if a.size == 0:
moment_shape = list(a.shape)
del moment_shape[axis]
dtype = a.dtype.type if a.dtype.kind in 'fc' else np.float64
# empty array, return nan(s) with shape matching `moment`
out_shape = (moment_shape if np.isscalar(moment)
else [len(moment)] + moment_shape)
if len(out_shape) == 0:
return dtype(np.nan)
else:
return np.full(out_shape, np.nan, dtype=dtype)
# for array_like moment input, return a value for each.
if not np.isscalar(moment):
mean = a.mean(axis, keepdims=True)
mmnt = [_moment(a, i, axis, mean=mean) for i in moment]
return np.array(mmnt)
else:
return _moment(a, moment, axis)
# Moment with optional pre-computed mean, equal to a.mean(axis, keepdims=True)
def _moment(a, moment, axis, *, mean=None):
if np.abs(moment - np.round(moment)) > 0:
raise ValueError("All moment parameters must be integers")
if moment == 0 or moment == 1:
# By definition the zeroth moment about the mean is 1, and the first
# moment is 0.
shape = list(a.shape)
del shape[axis]
dtype = a.dtype.type if a.dtype.kind in 'fc' else np.float64
if len(shape) == 0:
return dtype(1.0 if moment == 0 else 0.0)
else:
return (np.ones(shape, dtype=dtype) if moment == 0
else np.zeros(shape, dtype=dtype))
else:
# Exponentiation by squares: form exponent sequence
n_list = [moment]
current_n = moment
while current_n > 2:
if current_n % 2:
current_n = (current_n - 1) / 2
else:
current_n /= 2
n_list.append(current_n)
# Starting point for exponentiation by squares
mean = a.mean(axis, keepdims=True) if mean is None else mean
a_zero_mean = a - mean
if n_list[-1] == 1:
s = a_zero_mean.copy()
else:
s = a_zero_mean**2
# Perform multiplications
for n in n_list[-2::-1]:
s = s**2
if n % 2:
s *= a_zero_mean
return np.mean(s, axis)
def variation(a, axis=0, nan_policy='propagate', ddof=0):
"""Compute the coefficient of variation.
The coefficient of variation is the standard deviation divided by the
mean. This function is equivalent to::
np.std(x, axis=axis, ddof=ddof) / np.mean(x)
The default for ``ddof`` is 0, but many definitions of the coefficient
of variation use the square root of the unbiased sample variance
for the sample standard deviation, which corresponds to ``ddof=1``.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate the coefficient of variation. Default
is 0. If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
ddof : int, optional
Delta degrees of freedom. Default is 0.
Returns
-------
variation : ndarray
The calculated variation along the requested axis.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Examples
--------
>>> from scipy.stats import variation
>>> variation([1, 2, 3, 4, 5])
0.47140452079103173
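    Setting ``ddof=1`` uses the square root of the unbiased sample variance
    instead; the rounding below is only to keep the illustration short:
    >>> round(variation([1, 2, 3, 4, 5], ddof=1), 4)
    0.527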
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.variation(a, axis, ddof)
return a.std(axis, ddof=ddof) / a.mean(axis)
def skew(a, axis=0, bias=True, nan_policy='propagate'):
r"""Compute the sample skewness of a data set.
For normally distributed data, the skewness should be about zero. For
unimodal continuous distributions, a skewness value greater than zero means
that there is more weight in the right tail of the distribution. The
function `skewtest` can be used to determine if the skewness value
is close enough to zero, statistically speaking.
Parameters
----------
a : ndarray
Input array.
axis : int or None, optional
Axis along which skewness is calculated. Default is 0.
If None, compute over the whole array `a`.
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
skewness : ndarray
The skewness of values along an axis, returning 0 where all values are
equal.
Notes
-----
The sample skewness is computed as the Fisher-Pearson coefficient
of skewness, i.e.
.. math::
g_1=\frac{m_3}{m_2^{3/2}}
where
.. math::
m_i=\frac{1}{N}\sum_{n=1}^N(x[n]-\bar{x})^i
is the biased sample :math:`i\texttt{th}` central moment, and
:math:`\bar{x}` is
the sample mean. If ``bias`` is False, the calculations are
corrected for bias and the value computed is the adjusted
Fisher-Pearson standardized moment coefficient, i.e.
.. math::
G_1=\frac{k_3}{k_2^{3/2}}=
\frac{\sqrt{N(N-1)}}{N-2}\frac{m_3}{m_2^{3/2}}.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 2.2.24.1
Examples
--------
>>> from scipy.stats import skew
>>> skew([1, 2, 3, 4, 5])
0.0
>>> skew([2, 8, 0, 4, 1, 9, 9, 0])
0.2650554122698573
"""
a, axis = _chk_asarray(a, axis)
n = a.shape[axis]
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skew(a, axis, bias)
mean = a.mean(axis, keepdims=True)
m2 = _moment(a, 2, axis, mean=mean)
m3 = _moment(a, 3, axis, mean=mean)
with np.errstate(all='ignore'):
zero = (m2 <= (np.finfo(m2.dtype).resolution * mean.squeeze(axis))**2)
vals = np.where(zero, 0, m3 / m2**1.5)
if not bias:
can_correct = ~zero & (n > 2)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m3 = np.extract(can_correct, m3)
nval = np.sqrt((n - 1.0) * n) / (n - 2.0) * m3 / m2**1.5
np.place(vals, can_correct, nval)
if vals.ndim == 0:
return vals.item()
return vals
def kurtosis(a, axis=0, fisher=True, bias=True, nan_policy='propagate'):
"""Compute the kurtosis (Fisher or Pearson) of a dataset.
Kurtosis is the fourth central moment divided by the square of the
variance. If Fisher's definition is used, then 3.0 is subtracted from
the result to give 0.0 for a normal distribution.
If bias is False then the kurtosis is calculated using k statistics to
eliminate bias coming from biased moment estimators
Use `kurtosistest` to see if result is close enough to normal.
Parameters
----------
a : array
Data for which the kurtosis is calculated.
axis : int or None, optional
Axis along which the kurtosis is calculated. Default is 0.
If None, compute over the whole array `a`.
fisher : bool, optional
If True, Fisher's definition is used (normal ==> 0.0). If False,
Pearson's definition is used (normal ==> 3.0).
bias : bool, optional
If False, then the calculations are corrected for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'.
Returns
-------
kurtosis : array
The kurtosis of values along an axis. If all values are equal,
return -3 for Fisher's definition and 0 for Pearson's definition.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Examples
--------
    In Fisher's definition, the kurtosis of the normal distribution is zero.
In the following example, the kurtosis is close to zero, because it was
calculated from the dataset, not from the continuous distribution.
>>> from scipy.stats import norm, kurtosis
>>> data = norm.rvs(size=1000, random_state=3)
>>> kurtosis(data)
-0.06928694200380558
The distribution with a higher kurtosis has a heavier tail.
The zero valued kurtosis of the normal distribution in Fisher's definition
can serve as a reference point.
>>> import matplotlib.pyplot as plt
>>> import scipy.stats as stats
>>> from scipy.stats import kurtosis
>>> x = np.linspace(-5, 5, 100)
>>> ax = plt.subplot()
>>> distnames = ['laplace', 'norm', 'uniform']
>>> for distname in distnames:
... if distname == 'uniform':
... dist = getattr(stats, distname)(loc=-2, scale=4)
... else:
... dist = getattr(stats, distname)
... data = dist.rvs(size=1000)
... kur = kurtosis(data, fisher=True)
... y = dist.pdf(x)
... ax.plot(x, y, label="{}, {}".format(distname, round(kur, 3)))
... ax.legend()
The Laplace distribution has a heavier tail than the normal distribution.
The uniform distribution (which has negative kurtosis) has the thinnest
tail.
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosis(a, axis, fisher, bias)
n = a.shape[axis]
mean = a.mean(axis, keepdims=True)
m2 = _moment(a, 2, axis, mean=mean)
m4 = _moment(a, 4, axis, mean=mean)
with np.errstate(all='ignore'):
zero = (m2 <= (np.finfo(m2.dtype).resolution * mean.squeeze(axis))**2)
vals = np.where(zero, 0, m4 / m2**2.0)
if not bias:
can_correct = ~zero & (n > 3)
if can_correct.any():
m2 = np.extract(can_correct, m2)
m4 = np.extract(can_correct, m4)
nval = 1.0/(n-2)/(n-3) * ((n**2-1.0)*m4/m2**2.0 - 3*(n-1)**2.0)
np.place(vals, can_correct, nval + 3.0)
if vals.ndim == 0:
vals = vals.item() # array scalar
return vals - 3 if fisher else vals
DescribeResult = namedtuple('DescribeResult',
('nobs', 'minmax', 'mean', 'variance', 'skewness',
'kurtosis'))
def describe(a, axis=0, ddof=1, bias=True, nan_policy='propagate'):
"""Compute several descriptive statistics of the passed array.
Parameters
----------
a : array_like
Input data.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
ddof : int, optional
Delta degrees of freedom (only for variance). Default is 1.
bias : bool, optional
If False, then the skewness and kurtosis calculations are corrected
for statistical bias.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
nobs : int or ndarray of ints
Number of observations (length of data along `axis`).
When 'omit' is chosen as nan_policy, the length along each axis
slice is counted separately.
minmax: tuple of ndarrays or floats
Minimum and maximum value of `a` along the given axis.
mean : ndarray or float
Arithmetic mean of `a` along the given axis.
variance : ndarray or float
Unbiased variance of `a` along the given axis; denominator is number
of observations minus one.
skewness : ndarray or float
Skewness of `a` along the given axis, based on moment calculations
with denominator equal to the number of observations, i.e. no degrees
of freedom correction.
kurtosis : ndarray or float
Kurtosis (Fisher) of `a` along the given axis. The kurtosis is
normalized so that it is zero for the normal distribution. No
degrees of freedom are used.
See Also
--------
skew, kurtosis
Examples
--------
>>> from scipy import stats
>>> a = np.arange(10)
>>> stats.describe(a)
DescribeResult(nobs=10, minmax=(0, 9), mean=4.5,
variance=9.166666666666666, skewness=0.0,
kurtosis=-1.2242424242424244)
>>> b = [[1, 2], [3, 4]]
>>> stats.describe(b)
DescribeResult(nobs=2, minmax=(array([1, 2]), array([3, 4])),
mean=array([2., 3.]), variance=array([2., 2.]),
skewness=array([0., 0.]), kurtosis=array([-2., -2.]))
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.describe(a, axis, ddof, bias)
if a.size == 0:
raise ValueError("The input must not be empty.")
n = a.shape[axis]
mm = (np.min(a, axis=axis), np.max(a, axis=axis))
m = np.mean(a, axis=axis)
v = np.var(a, axis=axis, ddof=ddof)
sk = skew(a, axis, bias=bias)
kurt = kurtosis(a, axis, bias=bias)
return DescribeResult(n, mm, m, v, sk, kurt)
#####################################
# NORMALITY TESTS #
#####################################
def _normtest_finish(z, alternative):
"""Common code between all the normality-test functions."""
if alternative == 'less':
prob = distributions.norm.cdf(z)
elif alternative == 'greater':
prob = distributions.norm.sf(z)
elif alternative == 'two-sided':
prob = 2 * distributions.norm.sf(np.abs(z))
else:
raise ValueError("alternative must be "
"'less', 'greater' or 'two-sided'")
if z.ndim == 0:
z = z[()]
return z, prob
SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
def skewtest(a, axis=0, nan_policy='propagate', alternative='two-sided'):
"""Test whether the skew is different from the normal distribution.
This function tests the null hypothesis that the skewness of
the population that the sample was drawn from is the same
as that of a corresponding normal distribution.
Parameters
----------
a : array
The data to be tested.
axis : int or None, optional
Axis along which statistics are calculated. Default is 0.
If None, compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': the skewness of the distribution underlying the sample
is different from that of the normal distribution (i.e. 0)
* 'less': the skewness of the distribution underlying the sample
is less than that of the normal distribution
* 'greater': the skewness of the distribution underlying the sample
is greater than that of the normal distribution
.. versionadded:: 1.7.0
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The p-value for the hypothesis test.
Notes
-----
The sample size must be at least 8.
References
----------
.. [1] R. B. D'Agostino, A. J. Belanger and R. B. D'Agostino Jr.,
"A suggestion for using powerful and informative tests of
normality", American Statistician 44, pp. 316-321, 1990.
Examples
--------
>>> from scipy.stats import skewtest
>>> skewtest([1, 2, 3, 4, 5, 6, 7, 8])
SkewtestResult(statistic=1.0108048609177787, pvalue=0.3121098361421897)
>>> skewtest([2, 8, 0, 4, 1, 9, 9, 0])
SkewtestResult(statistic=0.44626385374196975, pvalue=0.6554066631275459)
>>> skewtest([1, 2, 3, 4, 5, 6, 7, 8000])
SkewtestResult(statistic=3.571773510360407, pvalue=0.0003545719905823133)
>>> skewtest([100, 100, 100, 100, 100, 100, 100, 101])
SkewtestResult(statistic=3.5717766638478072, pvalue=0.000354567720281634)
>>> skewtest([1, 2, 3, 4, 5, 6, 7, 8], alternative='less')
SkewtestResult(statistic=1.0108048609177787, pvalue=0.8439450819289052)
>>> skewtest([1, 2, 3, 4, 5, 6, 7, 8], alternative='greater')
SkewtestResult(statistic=1.0108048609177787, pvalue=0.15605491807109484)
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.skewtest(a, axis, alternative)
if axis is None:
a = np.ravel(a)
axis = 0
b2 = skew(a, axis)
n = a.shape[axis]
if n < 8:
raise ValueError(
"skewtest is not valid with less than 8 samples; %i samples"
" were given." % int(n))
y = b2 * math.sqrt(((n + 1) * (n + 3)) / (6.0 * (n - 2)))
beta2 = (3.0 * (n**2 + 27*n - 70) * (n+1) * (n+3) /
((n-2.0) * (n+5) * (n+7) * (n+9)))
W2 = -1 + math.sqrt(2 * (beta2 - 1))
delta = 1 / math.sqrt(0.5 * math.log(W2))
alpha = math.sqrt(2.0 / (W2 - 1))
y = np.where(y == 0, 1, y)
Z = delta * np.log(y / alpha + np.sqrt((y / alpha)**2 + 1))
return SkewtestResult(*_normtest_finish(Z, alternative))
KurtosistestResult = namedtuple('KurtosistestResult', ('statistic', 'pvalue'))
def kurtosistest(a, axis=0, nan_policy='propagate', alternative='two-sided'):
"""Test whether a dataset has normal kurtosis.
This function tests the null hypothesis that the kurtosis
of the population from which the sample was drawn is that
of the normal distribution.
Parameters
----------
a : array
Array of the sample data.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the kurtosis of the distribution underlying the sample
is different from that of the normal distribution
* 'less': the kurtosis of the distribution underlying the sample
is less than that of the normal distribution
* 'greater': the kurtosis of the distribution underlying the sample
is greater than that of the normal distribution
.. versionadded:: 1.7.0
Returns
-------
statistic : float
The computed z-score for this test.
pvalue : float
The p-value for the hypothesis test.
Notes
-----
Valid only for n>20. This function uses the method described in [1]_.
References
----------
.. [1] see e.g. F. J. Anscombe, W. J. Glynn, "Distribution of the kurtosis
statistic b2 for normal samples", Biometrika, vol. 70, pp. 227-234, 1983.
Examples
--------
>>> from scipy.stats import kurtosistest
>>> kurtosistest(list(range(20)))
KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.08804338332528348)
>>> kurtosistest(list(range(20)), alternative='less')
KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.04402169166264174)
>>> kurtosistest(list(range(20)), alternative='greater')
KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.9559783083373583)
>>> rng = np.random.default_rng()
>>> s = rng.normal(0, 1, 1000)
>>> kurtosistest(s)
KurtosistestResult(statistic=-1.475047944490622, pvalue=0.14019965402996987)
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.kurtosistest(a, axis, alternative)
n = a.shape[axis]
if n < 5:
raise ValueError(
"kurtosistest requires at least 5 observations; %i observations"
" were given." % int(n))
if n < 20:
warnings.warn("kurtosistest only valid for n>=20 ... continuing "
"anyway, n=%i" % int(n))
b2 = kurtosis(a, axis, fisher=False)
E = 3.0*(n-1) / (n+1)
varb2 = 24.0*n*(n-2)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5)) # [1]_ Eq. 1
x = (b2-E) / np.sqrt(varb2) # [1]_ Eq. 4
# [1]_ Eq. 2:
sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
(n*(n-2)*(n-3)))
# [1]_ Eq. 3:
A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
term1 = 1 - 2/(9.0*A)
denom = 1 + x*np.sqrt(2/(A-4.0))
term2 = np.sign(denom) * np.where(denom == 0.0, np.nan,
np.power((1-2.0/A)/np.abs(denom), 1/3.0))
if np.any(denom == 0):
msg = "Test statistic not defined in some cases due to division by " \
"zero. Return nan in that case..."
warnings.warn(msg, RuntimeWarning)
Z = (term1 - term2) / np.sqrt(2/(9.0*A)) # [1]_ Eq. 5
# zprob uses upper tail, so Z needs to be positive
return KurtosistestResult(*_normtest_finish(Z, alternative))
NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
def normaltest(a, axis=0, nan_policy='propagate'):
"""Test whether a sample differs from a normal distribution.
This function tests the null hypothesis that a sample comes
from a normal distribution. It is based on D'Agostino and
Pearson's [1]_, [2]_ test that combines skew and kurtosis to
produce an omnibus test of normality.
Parameters
----------
a : array_like
The array containing the sample to be tested.
axis : int or None, optional
Axis along which to compute test. Default is 0. If None,
compute over the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float or array
``s^2 + k^2``, where ``s`` is the z-score returned by `skewtest` and
``k`` is the z-score returned by `kurtosistest`.
pvalue : float or array
A 2-sided chi squared probability for the hypothesis test.
References
----------
.. [1] D'Agostino, R. B. (1971), "An omnibus test of normality for
moderate and large sample size", Biometrika, 58, 341-348
.. [2] D'Agostino, R. and Pearson, E. S. (1973), "Tests for departure from
normality", Biometrika, 60, 613-622
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> pts = 1000
>>> a = rng.normal(0, 1, size=pts)
>>> b = rng.normal(2, 1, size=pts)
>>> x = np.concatenate((a, b))
>>> k2, p = stats.normaltest(x)
>>> alpha = 1e-3
>>> print("p = {:g}".format(p))
p = 8.4713e-19
>>> if p < alpha: # null hypothesis: x comes from a normal distribution
... print("The null hypothesis can be rejected")
... else:
... print("The null hypothesis cannot be rejected")
The null hypothesis can be rejected
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.normaltest(a, axis)
s, _ = skewtest(a, axis)
k, _ = kurtosistest(a, axis)
k2 = s*s + k*k
return NormaltestResult(k2, distributions.chi2.sf(k2, 2))
Jarque_beraResult = namedtuple('Jarque_beraResult', ('statistic', 'pvalue'))
def jarque_bera(x):
"""Perform the Jarque-Bera goodness of fit test on sample data.
The Jarque-Bera test tests whether the sample data has the skewness and
kurtosis matching a normal distribution.
Note that this test only works for a large enough number of data samples
(>2000) as the test statistic asymptotically has a Chi-squared distribution
with 2 degrees of freedom.
Parameters
----------
x : array_like
Observations of a random variable.
Returns
-------
jb_value : float
The test statistic.
p : float
The p-value for the hypothesis test.
References
----------
.. [1] Jarque, C. and Bera, A. (1980) "Efficient tests for normality,
homoscedasticity and serial independence of regression residuals",
           Economics Letters 6, 255-259.
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> x = rng.normal(0, 1, 100000)
>>> jarque_bera_test = stats.jarque_bera(x)
>>> jarque_bera_test
Jarque_beraResult(statistic=3.3415184718131554, pvalue=0.18810419594996775)
>>> jarque_bera_test.statistic
3.3415184718131554
>>> jarque_bera_test.pvalue
0.18810419594996775
"""
x = np.asarray(x)
n = x.size
if n == 0:
raise ValueError('At least one observation is required.')
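# Sample skewness and (Pearson, non-excess) kurtosis from the central moments;
# under normality these are approximately 0 and 3, and the Jarque-Bera
# statistic n/6 * (S**2 + (K - 3)**2 / 4) is asymptotically chi2(2).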
mu = x.mean()
diffx = x - mu
skewness = (1 / n * np.sum(diffx**3)) / (1 / n * np.sum(diffx**2))**(3 / 2.)
kurtosis = (1 / n * np.sum(diffx**4)) / (1 / n * np.sum(diffx**2))**2
jb_value = n / 6 * (skewness**2 + (kurtosis - 3)**2 / 4)
p = 1 - distributions.chi2.cdf(jb_value, 2)
return Jarque_beraResult(jb_value, p)
#####################################
# FREQUENCY FUNCTIONS #
#####################################
# deindent to work around numpy/gh-16202
@np.deprecate(
message="`itemfreq` is deprecated and will be removed in a "
"future version. Use instead `np.unique(..., return_counts=True)`")
def itemfreq(a):
"""
Return a 2-D array of item frequencies.
Parameters
----------
a : (N,) array_like
Input array.
Returns
-------
itemfreq : (K, 2) ndarray
A 2-D frequency table. Column 1 contains sorted, unique values from
`a`, column 2 contains their respective counts.
Examples
--------
>>> from scipy import stats
>>> a = np.array([1, 1, 5, 0, 1, 2, 2, 0, 1, 4])
>>> stats.itemfreq(a)
array([[ 0., 2.],
[ 1., 4.],
[ 2., 2.],
[ 4., 1.],
[ 5., 1.]])
>>> np.bincount(a)
array([2, 4, 2, 0, 1, 1])
>>> stats.itemfreq(a/10.)
array([[ 0. , 2. ],
[ 0.1, 4. ],
[ 0.2, 2. ],
[ 0.4, 1. ],
[ 0.5, 1. ]])
"""
items, inv = np.unique(a, return_inverse=True)
freq = np.bincount(inv)
return np.array([items, freq]).T
def scoreatpercentile(a, per, limit=(), interpolation_method='fraction',
axis=None):
"""Calculate the score at a given percentile of the input sequence.
For example, the score at `per=50` is the median. If the desired quantile
lies between two data points, we interpolate between them, according to
the value of `interpolation`. If the parameter `limit` is provided, it
should be a tuple (lower, upper) of two values.
Parameters
----------
a : array_like
A 1-D array of values from which to extract score.
per : array_like
Percentile(s) at which to extract score. Values should be in range
[0,100].
limit : tuple, optional
Tuple of two scalars, the lower and upper limits within which to
compute the percentile. Values of `a` outside
this (closed) interval will be ignored.
interpolation_method : {'fraction', 'lower', 'higher'}, optional
Specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`
The following options are available (default is 'fraction'):
* 'fraction': ``i + (j - i) * fraction`` where ``fraction`` is the
fractional part of the index surrounded by ``i`` and ``j``
* 'lower': ``i``
* 'higher': ``j``
axis : int, optional
Axis along which the percentiles are computed. Default is None. If
None, compute over the whole array `a`.
Returns
-------
score : float or ndarray
Score at percentile(s).
See Also
--------
percentileofscore, numpy.percentile
Notes
-----
This function will become obsolete in the future.
For NumPy 1.9 and higher, `numpy.percentile` provides all of the functionality
that `scoreatpercentile` provides, and it is significantly faster.
Users with numpy >= 1.9 are therefore encouraged to use `numpy.percentile`
instead.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(100)
>>> stats.scoreatpercentile(a, 50)
49.5
"""
# adapted from NumPy's percentile function. When we require numpy >= 1.8,
# the implementation of this function can be replaced by np.percentile.
a = np.asarray(a)
if a.size == 0:
# empty array, return nan(s) with shape matching `per`
if np.isscalar(per):
return np.nan
else:
return np.full(np.asarray(per).shape, np.nan, dtype=np.float64)
if limit:
a = a[(limit[0] <= a) & (a <= limit[1])]
sorted_ = np.sort(a, axis=axis)
if axis is None:
axis = 0
return _compute_qth_percentile(sorted_, per, interpolation_method, axis)
# handle sequence of per's without calling sort multiple times
def _compute_qth_percentile(sorted_, per, interpolation_method, axis):
if not np.isscalar(per):
score = [_compute_qth_percentile(sorted_, i,
interpolation_method, axis)
for i in per]
return np.array(score)
if not (0 <= per <= 100):
raise ValueError("percentile must be in the range [0, 100]")
indexer = [slice(None)] * sorted_.ndim
idx = per / 100. * (sorted_.shape[axis] - 1)
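# Fractional index into the sorted data; e.g. per=50 on four sorted values
# gives idx = 1.5, i.e. halfway between sorted_[1] and sorted_[2].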
if int(idx) != idx:
# round fractional indices according to interpolation method
if interpolation_method == 'lower':
idx = int(np.floor(idx))
elif interpolation_method == 'higher':
idx = int(np.ceil(idx))
elif interpolation_method == 'fraction':
pass # keep idx as fraction and interpolate
else:
raise ValueError("interpolation_method can only be 'fraction', "
"'lower' or 'higher'")
i = int(idx)
if i == idx:
indexer[axis] = slice(i, i + 1)
weights = array(1)
sumval = 1.0
else:
indexer[axis] = slice(i, i + 2)
j = i + 1
weights = array([(j - idx), (idx - i)], float)
wshape = [1] * sorted_.ndim
wshape[axis] = 2
weights.shape = wshape
sumval = weights.sum()
# Use np.add.reduce (== np.sum but a little faster) to coerce data type
return np.add.reduce(sorted_[tuple(indexer)] * weights, axis=axis) / sumval
def percentileofscore(a, score, kind='rank'):
"""Compute the percentile rank of a score relative to a list of scores.
A `percentileofscore` of, for example, 80% means that 80% of the
scores in `a` are below the given score. In the case of gaps or
ties, the exact definition depends on the optional keyword, `kind`.
Parameters
----------
a : array_like
Array of scores to which `score` is compared.
score : int or float
Score that is compared to the elements in `a`.
kind : {'rank', 'weak', 'strict', 'mean'}, optional
Specifies the interpretation of the resulting score.
The following options are available (default is 'rank'):
* 'rank': Average percentage ranking of score. In case of multiple
matches, average the percentage rankings of all matching scores.
* 'weak': This kind corresponds to the definition of a cumulative
distribution function. A percentileofscore of 80% means that 80%
of values are less than or equal to the provided score.
* 'strict': Similar to "weak", except that only values that are
strictly less than the given score are counted.
* 'mean': The average of the "weak" and "strict" scores, often used
in testing. See https://en.wikipedia.org/wiki/Percentile_rank
Returns
-------
pcos : float
Percentile-position of score (0-100) relative to `a`.
See Also
--------
numpy.percentile
Examples
--------
Three-quarters of the given values lie below a given score:
>>> from scipy import stats
>>> stats.percentileofscore([1, 2, 3, 4], 3)
75.0
With multiple matches, note how the scores of the two matches, 0.6
and 0.8 respectively, are averaged:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3)
70.0
Only 2/5 values are strictly less than 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='strict')
40.0
But 4/5 values are less than or equal to 3:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='weak')
80.0
The average between the weak and the strict scores is:
>>> stats.percentileofscore([1, 2, 3, 3, 4], 3, kind='mean')
60.0
"""
if np.isnan(score):
return np.nan
a = np.asarray(a)
n = len(a)
if n == 0:
return 100.0
if kind == 'rank':
left = np.count_nonzero(a < score)
right = np.count_nonzero(a <= score)
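# Average of the 'strict' and 'weak' counts with a +1 tie correction, e.g.
# for a = [1, 2, 3, 3, 4] and score = 3: left = 2, right = 4, so
# pct = (4 + 2 + 1) * 50 / 5 = 70.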
pct = (right + left + (1 if right > left else 0)) * 50.0/n
return pct
elif kind == 'strict':
return np.count_nonzero(a < score) / n * 100
elif kind == 'weak':
return np.count_nonzero(a <= score) / n * 100
elif kind == 'mean':
pct = (np.count_nonzero(a < score)
+ np.count_nonzero(a <= score)) / n * 50
return pct
else:
raise ValueError("kind can only be 'rank', 'strict', 'weak' or 'mean'")
HistogramResult = namedtuple('HistogramResult',
('count', 'lowerlimit', 'binsize', 'extrapoints'))
def _histogram(a, numbins=10, defaultlimits=None, weights=None,
printextras=False):
"""Create a histogram.
Separate the range into several bins and return the number of instances
in each bin.
Parameters
----------
a : array_like
Array of scores which will be put into bins.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultlimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
printextras : bool, optional
If True, if there are extra points (i.e. the points that fall outside
the bin limits) a warning is raised saying how many of those points
there are. Default is False.
Returns
-------
count : ndarray
Number of points (or sum of weights) in each bin.
lowerlimit : float
Lowest value of histogram, the lower limit of the first bin.
binsize : float
The size of the bins (all bins have the same size).
extrapoints : int
The number of points outside the range of the histogram.
See Also
--------
numpy.histogram
Notes
-----
This histogram is based on numpy's histogram but, if `defaultlimits` is not
set, uses a slightly larger range by default.
"""
a = np.ravel(a)
if defaultlimits is None:
if a.size == 0:
# handle empty arrays. Undetermined range, so use 0-1.
defaultlimits = (0, 1)
else:
# no range given, so use values in `a`
data_min = a.min()
data_max = a.max()
# Have bins extend past min and max values slightly
s = (data_max - data_min) / (2. * (numbins - 1.))
defaultlimits = (data_min - s, data_max + s)
# use numpy's histogram method to compute bins
hist, bin_edges = np.histogram(a, bins=numbins, range=defaultlimits,
weights=weights)
# hist is not always float; convert to stay consistent with the old output
hist = np.array(hist, dtype=float)
# fixed width for bins is assumed, as numpy's histogram gives
# fixed width bins for int values for 'bins'
binsize = bin_edges[1] - bin_edges[0]
# calculate number of extra points
extrapoints = len([v for v in a
if defaultlimits[0] > v or v > defaultlimits[1]])
if extrapoints > 0 and printextras:
warnings.warn("Points outside given histogram range = %s"
% extrapoints)
return HistogramResult(hist, defaultlimits[0], binsize, extrapoints)
CumfreqResult = namedtuple('CumfreqResult',
('cumcount', 'lowerlimit', 'binsize',
'extrapoints'))
def cumfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""Return a cumulative frequency histogram, using the histogram function.
A cumulative histogram is a mapping that counts the cumulative number of
observations in all of the bins up to the specified bin.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in `a` is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
cumcount : ndarray
Binned values of cumulative frequency.
lowerlimit : float
Lower real limit.
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy.random import default_rng
>>> from scipy import stats
>>> rng = default_rng()
>>> x = [1, 4, 2, 1, 3, 1]
>>> res = stats.cumfreq(x, numbins=4, defaultreallimits=(1.5, 5))
>>> res.cumcount
array([ 1., 2., 3., 3.])
>>> res.extrapoints
3
Create a normal distribution with 1000 random values
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate cumulative frequencies
>>> res = stats.cumfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.cumcount.size,
... res.cumcount.size)
Plot histogram and cumulative histogram
>>> fig = plt.figure(figsize=(10, 4))
>>> ax1 = fig.add_subplot(1, 2, 1)
>>> ax2 = fig.add_subplot(1, 2, 2)
>>> ax1.hist(samples, bins=25)
>>> ax1.set_title('Histogram')
>>> ax2.bar(x, res.cumcount, width=res.binsize)
>>> ax2.set_title('Cumulative histogram')
>>> ax2.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
cumhist = np.cumsum(h * 1, axis=0)
return CumfreqResult(cumhist, l, b, e)
RelfreqResult = namedtuple('RelfreqResult',
('frequency', 'lowerlimit', 'binsize',
'extrapoints'))
def relfreq(a, numbins=10, defaultreallimits=None, weights=None):
"""Return a relative frequency histogram, using the histogram function.
A relative frequency histogram is a mapping of the number of
observations in each of the bins relative to the total of observations.
Parameters
----------
a : array_like
Input array.
numbins : int, optional
The number of bins to use for the histogram. Default is 10.
defaultreallimits : tuple (lower, upper), optional
The lower and upper values for the range of the histogram.
If no value is given, a range slightly larger than the range of the
values in a is used. Specifically ``(a.min() - s, a.max() + s)``,
where ``s = (1/2)(a.max() - a.min()) / (numbins - 1)``.
weights : array_like, optional
The weights for each value in `a`. Default is None, which gives each
value a weight of 1.0
Returns
-------
frequency : ndarray
Binned values of relative frequency.
lowerlimit : float
Lower real limit.
binsize : float
Width of each bin.
extrapoints : int
Extra points.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from numpy.random import default_rng
>>> from scipy import stats
>>> rng = default_rng()
>>> a = np.array([2, 4, 1, 2, 3, 2])
>>> res = stats.relfreq(a, numbins=4)
>>> res.frequency
array([ 0.16666667, 0.5 , 0.16666667, 0.16666667])
>>> np.sum(res.frequency) # relative frequencies should add up to 1
1.0
Create a normal distribution with 1000 random values
>>> samples = stats.norm.rvs(size=1000, random_state=rng)
Calculate relative frequencies
>>> res = stats.relfreq(samples, numbins=25)
Calculate space of values for x
>>> x = res.lowerlimit + np.linspace(0, res.binsize*res.frequency.size,
... res.frequency.size)
Plot relative frequency histogram
>>> fig = plt.figure(figsize=(5, 4))
>>> ax = fig.add_subplot(1, 1, 1)
>>> ax.bar(x, res.frequency, width=res.binsize)
>>> ax.set_title('Relative frequency histogram')
>>> ax.set_xlim([x.min(), x.max()])
>>> plt.show()
"""
a = np.asanyarray(a)
h, l, b, e = _histogram(a, numbins, defaultreallimits, weights=weights)
h = h / a.shape[0]
return RelfreqResult(h, l, b, e)
#####################################
# VARIABILITY FUNCTIONS #
#####################################
def obrientransform(*args):
"""Compute the O'Brien transform on input data (any number of arrays).
Used to test for homogeneity of variance prior to running one-way stats.
Each array in ``*args`` is one level of a factor.
If `f_oneway` is run on the transformed data and found significant,
the variances are unequal. From Maxwell and Delaney [1]_, p.112.
Parameters
----------
args : tuple of array_like
Any number of arrays.
Returns
-------
obrientransform : ndarray
Transformed data for use in an ANOVA. The first dimension
of the result corresponds to the sequence of transformed
arrays. If the arrays given are all 1-D of the same length,
the return value is a 2-D array; otherwise it is a 1-D array
of type object, with each element being an ndarray.
References
----------
.. [1] S. E. Maxwell and H. D. Delaney, "Designing Experiments and
Analyzing Data: A Model Comparison Perspective", Wadsworth, 1990.
Examples
--------
We'll test the following data sets for differences in their variance.
>>> x = [10, 11, 13, 9, 7, 12, 12, 9, 10]
>>> y = [13, 21, 5, 10, 8, 14, 10, 12, 7, 15]
Apply the O'Brien transform to the data.
>>> from scipy.stats import obrientransform
>>> tx, ty = obrientransform(x, y)
Use `scipy.stats.f_oneway` to apply a one-way ANOVA test to the
transformed data.
>>> from scipy.stats import f_oneway
>>> F, p = f_oneway(tx, ty)
>>> p
0.1314139477040335
If we require that ``p < 0.05`` for significance, we cannot conclude
that the variances are different.
"""
TINY = np.sqrt(np.finfo(float).eps)
# `arrays` will hold the transformed arguments.
arrays = []
sLast = None
for arg in args:
a = np.asarray(arg)
n = len(a)
mu = np.mean(a)
sq = (a - mu)**2
sumsq = sq.sum()
# The O'Brien transform.
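# The mean of the transformed values equals the unbiased sample variance of
# the input, so a one-way ANOVA on the transformed groups is a test for
# homogeneity of variance (Maxwell & Delaney).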
t = ((n - 1.5) * n * sq - 0.5 * sumsq) / ((n - 1) * (n - 2))
# Check that the mean of the transformed data is equal to the
# original variance.
var = sumsq / (n - 1)
if abs(var - np.mean(t)) > TINY:
raise ValueError('Lack of convergence in obrientransform.')
arrays.append(t)
sLast = a.shape
if sLast:
for arr in arrays[:-1]:
if sLast != arr.shape:
return np.array(arrays, dtype=object)
return np.array(arrays)
def sem(a, axis=0, ddof=1, nan_policy='propagate'):
"""Compute standard error of the mean.
Calculate the standard error of the mean (or standard error of
measurement) of the values in the input array.
Parameters
----------
a : array_like
An array containing the values for which the standard error is
returned.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Delta degrees-of-freedom. How many degrees of freedom to adjust
for bias in limited samples relative to the population estimate
of variance. Defaults to 1.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
s : ndarray or float
The standard error of the mean in the sample(s), along the input axis.
Notes
-----
The default value for `ddof` differs from the default (0) used by other
routines that accept `ddof`, such as np.std and np.nanstd.
Examples
--------
Find standard error along the first axis:
>>> from scipy import stats
>>> a = np.arange(20).reshape(5,4)
>>> stats.sem(a)
array([ 2.8284, 2.8284, 2.8284, 2.8284])
Find standard error across the whole array, using n degrees of freedom:
>>> stats.sem(a, axis=None, ddof=0)
1.2893796958227628
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.sem(a, axis, ddof)
n = a.shape[axis]
s = np.std(a, axis=axis, ddof=ddof) / np.sqrt(n)
return s
def _isconst(x):
"""
Check if all values in x are the same. nans are ignored.
x must be a 1d array.
The return value is a 1d array with length 1, so it can be used
in np.apply_along_axis.
"""
y = x[~np.isnan(x)]
if y.size == 0:
return np.array([True])
else:
return (y[0] == y).all(keepdims=True)
def _quiet_nanmean(x):
"""
Compute nanmean for the 1d array x, but quietly return nan if x is all nan.
The return value is a 1d array with length 1, so it can be used
in np.apply_along_axis.
"""
y = x[~np.isnan(x)]
if y.size == 0:
return np.array([np.nan])
else:
return np.mean(y, keepdims=True)
def _quiet_nanstd(x, ddof=0):
"""
Compute nanstd for the 1d array x, but quietly return nan if x is all nan.
The return value is a 1d array with length 1, so it can be used
in np.apply_along_axis.
"""
y = x[~np.isnan(x)]
if y.size == 0:
return np.array([np.nan])
else:
return np.std(y, keepdims=True, ddof=ddof)
def zscore(a, axis=0, ddof=0, nan_policy='propagate'):
"""
Compute the z score.
Compute the z score of each value in the sample, relative to the
sample mean and standard deviation.
Parameters
----------
a : array_like
An array like object containing the sample data.
axis : int or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan. 'propagate' returns nan,
'raise' throws an error, 'omit' performs the calculations ignoring nan
values. Default is 'propagate'. Note that when the value is 'omit',
nans in the input also propagate to the output, but they do not affect
the z-scores computed for the non-nan values.
Returns
-------
zscore : array_like
The z-scores, standardized by mean and standard deviation of
input array `a`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> a = np.array([ 0.7972, 0.0767, 0.4383, 0.7866, 0.8091,
... 0.1954, 0.6307, 0.6599, 0.1065, 0.0508])
>>> from scipy import stats
>>> stats.zscore(a)
array([ 1.1273, -1.247 , -0.0552, 1.0923, 1.1664, -0.8559, 0.5786,
0.6748, -1.1488, -1.3324])
Computing along a specified axis, using n-1 degrees of freedom
(``ddof=1``) to calculate the standard deviation:
>>> b = np.array([[ 0.3148, 0.0478, 0.6243, 0.4608],
... [ 0.7149, 0.0775, 0.6072, 0.9656],
... [ 0.6341, 0.1403, 0.9759, 0.4064],
... [ 0.5918, 0.6948, 0.904 , 0.3721],
... [ 0.0921, 0.2481, 0.1188, 0.1366]])
>>> stats.zscore(b, axis=1, ddof=1)
array([[-0.19264823, -1.28415119, 1.07259584, 0.40420358],
[ 0.33048416, -1.37380874, 0.04251374, 1.00081084],
[ 0.26796377, -1.12598418, 1.23283094, -0.37481053],
[-0.22095197, 0.24468594, 1.19042819, -1.21416216],
[-0.82780366, 1.4457416 , -0.43867764, -0.1792603 ]])
An example with `nan_policy='omit'`:
>>> x = np.array([[25.11, 30.10, np.nan, 32.02, 43.15],
... [14.95, 16.06, 121.25, 94.35, 29.81]])
>>> stats.zscore(x, axis=1, nan_policy='omit')
array([[-1.13490897, -0.37830299, nan, -0.08718406, 1.60039602],
[-0.91611681, -0.89090508, 1.4983032 , 0.88731639, -0.5785977 ]])
"""
return zmap(a, a, axis=axis, ddof=ddof, nan_policy=nan_policy)
def zmap(scores, compare, axis=0, ddof=0, nan_policy='propagate'):
"""
Calculate the relative z-scores.
Return an array of z-scores, i.e., scores that are standardized to
zero mean and unit variance, where mean and variance are calculated
from the comparison array.
Parameters
----------
scores : array_like
The input for which z-scores are calculated.
compare : array_like
The input from which the mean and standard deviation of the
normalization are taken; assumed to have the same dimension as
`scores`.
axis : int or None, optional
Axis over which mean and variance of `compare` are calculated.
Default is 0. If None, compute over the whole array `scores`.
ddof : int, optional
Degrees of freedom correction in the calculation of the
standard deviation. Default is 0.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle the occurrence of nans in `compare`.
'propagate' returns nan, 'raise' raises an exception, 'omit'
performs the calculations ignoring nan values. Default is
'propagate'. Note that when the value is 'omit', nans in `scores`
also propagate to the output, but they do not affect the z-scores
computed for the non-nan values.
Returns
-------
zscore : array_like
Z-scores, in the same shape as `scores`.
Notes
-----
This function preserves ndarray subclasses, and works also with
matrices and masked arrays (it uses `asanyarray` instead of
`asarray` for parameters).
Examples
--------
>>> from scipy.stats import zmap
>>> a = [0.5, 2.0, 2.5, 3]
>>> b = [0, 1, 2, 3, 4]
>>> zmap(a, b)
array([-1.06066017, 0. , 0.35355339, 0.70710678])
"""
a = np.asanyarray(compare)
if a.size == 0:
return np.empty(a.shape)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
if axis is None:
mn = _quiet_nanmean(a.ravel())
std = _quiet_nanstd(a.ravel(), ddof=ddof)
isconst = _isconst(a.ravel())
else:
mn = np.apply_along_axis(_quiet_nanmean, axis, a)
std = np.apply_along_axis(_quiet_nanstd, axis, a, ddof=ddof)
isconst = np.apply_along_axis(_isconst, axis, a)
else:
mn = a.mean(axis=axis, keepdims=True)
std = a.std(axis=axis, ddof=ddof, keepdims=True)
if axis is None:
isconst = (a.item(0) == a).all()
else:
isconst = (_first(a, axis) == a).all(axis=axis, keepdims=True)
# Set std deviations that are 0 to 1 to avoid division by 0.
std[isconst] = 1.0
z = (scores - mn) / std
# Set the outputs associated with a constant input to nan.
z[np.broadcast_to(isconst, z.shape)] = np.nan
return z
def gstd(a, axis=0, ddof=1):
"""
Calculate the geometric standard deviation of an array.
The geometric standard deviation describes the spread of a set of numbers
where the geometric mean is preferred. It is a multiplicative factor, and
so a dimensionless quantity.
It is defined as the exponent of the standard deviation of ``log(a)``.
Mathematically the population geometric standard deviation can be
evaluated as::
gstd = exp(std(log(a)))
.. versionadded:: 1.3.0
Parameters
----------
a : array_like
An array like object containing the sample data.
axis : int, tuple or None, optional
Axis along which to operate. Default is 0. If None, compute over
the whole array `a`.
ddof : int, optional
Degree of freedom correction in the calculation of the
geometric standard deviation. Default is 1.
Returns
-------
ndarray or float
An array of the geometric standard deviation. If `axis` is None or `a`
is a 1d array a float is returned.
Notes
-----
As the calculation requires the use of logarithms the geometric standard
deviation only supports strictly positive values. Any non-positive or
infinite values will raise a `ValueError`.
The geometric standard deviation is sometimes confused with the exponent of
the standard deviation, ``exp(std(a))``. Instead the geometric standard
deviation is ``exp(std(log(a)))``.
The default value for `ddof` differs from the default value (0) used
by other functions that accept `ddof`, such as ``np.std`` and ``np.nanstd``.
Examples
--------
Find the geometric standard deviation of a log-normally distributed sample.
Note that the standard deviation of the underlying normal distribution is
one, so the geometric standard deviation evaluates to approximately
``exp(1)``.
>>> from scipy.stats import gstd
>>> rng = np.random.default_rng()
>>> sample = rng.lognormal(mean=0, sigma=1, size=1000)
>>> gstd(sample)
2.810010162475324
Compute the geometric standard deviation of a multidimensional array and
of a given axis.
>>> a = np.arange(1, 25).reshape(2, 3, 4)
>>> gstd(a, axis=None)
2.2944076136018947
>>> gstd(a, axis=2)
array([[1.82424757, 1.22436866, 1.13183117],
[1.09348306, 1.07244798, 1.05914985]])
>>> gstd(a, axis=(1,2))
array([2.12939215, 1.22120169])
The geometric standard deviation also supports masked arrays.
>>> a = np.arange(1, 25).reshape(2, 3, 4)
>>> ma = np.ma.masked_where(a > 16, a)
>>> ma
masked_array(
data=[[[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12]],
[[13, 14, 15, 16],
[--, --, --, --],
[--, --, --, --]]],
mask=[[[False, False, False, False],
[False, False, False, False],
[False, False, False, False]],
[[False, False, False, False],
[ True, True, True, True],
[ True, True, True, True]]],
fill_value=999999)
>>> gstd(ma, axis=2)
masked_array(
data=[[1.8242475707663655, 1.2243686572447428, 1.1318311657788478],
[1.0934830582350938, --, --]],
mask=[[False, False, False],
[False, True, True]],
fill_value=999999)
"""
a = np.asanyarray(a)
log = ma.log if isinstance(a, ma.MaskedArray) else np.log
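# Use the masked-array log for masked input so masked entries stay masked
# instead of raising or turning into nan.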
try:
with warnings.catch_warnings():
warnings.simplefilter("error", RuntimeWarning)
return np.exp(np.std(log(a), axis=axis, ddof=ddof))
except RuntimeWarning as w:
if np.isinf(a).any():
raise ValueError(
'Infinite value encountered. The geometric standard deviation '
'is defined for strictly positive values only.'
) from w
a_nan = np.isnan(a)
a_nan_any = a_nan.any()
# exclude NaN's from negativity check, but
# avoid expensive masking for arrays with no NaN
if ((a_nan_any and np.less_equal(np.nanmin(a), 0)) or
(not a_nan_any and np.less_equal(a, 0).any())):
raise ValueError(
'Non positive value encountered. The geometric standard '
'deviation is defined for strictly positive values only.'
) from w
elif 'Degrees of freedom <= 0 for slice' == str(w):
raise ValueError(w) from w
else:
# Remaining warnings don't need to be exceptions.
return np.exp(np.std(log(a, where=~a_nan), axis=axis, ddof=ddof))
except TypeError as e:
raise ValueError(
'Invalid array input. The inputs could not be '
'safely coerced to any supported types') from e
# Private dictionary initialized only once at module level
# See https://en.wikipedia.org/wiki/Robust_measures_of_scale
_scale_conversions = {'raw': 1.0,
'normal': special.erfinv(0.5) * 2.0 * math.sqrt(2.0)}
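# 'normal' maps to the IQR of the standard normal distribution,
# 2*sqrt(2)*erfinv(0.5) ~= 1.349; dividing a raw IQR by it makes the result
# comparable to the standard deviation for normally distributed data.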
def iqr(x, axis=None, rng=(25, 75), scale=1.0, nan_policy='propagate',
interpolation='linear', keepdims=False):
r"""
Compute the interquartile range of the data along the specified axis.
The interquartile range (IQR) is the difference between the 75th and
25th percentile of the data. It is a measure of the dispersion
similar to standard deviation or variance, but is much more robust
against outliers [2]_.
The ``rng`` parameter allows this function to compute other
percentile ranges than the actual IQR. For example, setting
``rng=(0, 100)`` is equivalent to `numpy.ptp`.
The IQR of an empty array is `np.nan`.
.. versionadded:: 0.18.0
Parameters
----------
x : array_like
Input array or object that can be converted to an array.
axis : int or sequence of int, optional
Axis along which the range is computed. The default is to
compute the IQR for the entire array.
rng : two-element sequence of floats in range [0, 100], optional
Percentiles over which to compute the range. Each must be
between 0 and 100, inclusive. The default is the true IQR:
`(25, 75)`. The order of the elements is not important.
scale : scalar or str, optional
The numerical value of scale will be divided out of the final
result. The following string values are recognized:
* 'raw' : No scaling, just return the raw IQR.
**Deprecated!** Use `scale=1` instead.
* 'normal' : Scale by
:math:`2 \sqrt{2} erf^{-1}(\frac{1}{2}) \approx 1.349`.
The default is 1.0. The use of scale='raw' is deprecated.
Array-like scale is also allowed, as long
as it broadcasts correctly to the output such that
``out / scale`` is a valid operation. The output dimensions
depend on the input array, `x`, the `axis` argument, and the
`keepdims` flag.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
interpolation : {'linear', 'lower', 'higher', 'midpoint',
'nearest'}, optional
Specifies the interpolation method to use when the percentile
boundaries lie between two data points `i` and `j`.
The following options are available (default is 'linear'):
* 'linear': `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* 'lower': `i`.
* 'higher': `j`.
* 'nearest': `i` or `j` whichever is nearest.
* 'midpoint': `(i + j) / 2`.
keepdims : bool, optional
If this is set to `True`, the reduced axes are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the original array `x`.
Returns
-------
iqr : scalar or ndarray
If ``axis=None``, a scalar is returned. If the input contains
integers or floats of smaller precision than ``np.float64``, then the
output data-type is ``np.float64``. Otherwise, the output data-type is
the same as that of the input.
See Also
--------
numpy.std, numpy.var
Notes
-----
This function is heavily dependent on the version of `numpy` that is
installed. Versions greater than 1.11.0b3 are highly recommended, as they
include a number of enhancements and fixes to `numpy.percentile` and
`numpy.nanpercentile` that affect the operation of this function. The
following modifications apply:
Below 1.10.0 : `nan_policy` is poorly defined.
The default behavior of `numpy.percentile` is used for 'propagate'. This
is a hybrid of 'omit' and 'propagate' that mostly yields a skewed
version of 'omit' since NaNs are sorted to the end of the data. A
warning is raised if there are NaNs in the data.
Below 1.9.0: `numpy.nanpercentile` does not exist.
This means that `numpy.percentile` is used regardless of `nan_policy`
and a warning is issued. See previous item for a description of the
behavior.
Below 1.9.0: `keepdims` and `interpolation` are not supported.
The keywords get ignored with a warning if supplied with non-default
values. However, multiple axes are still supported.
References
----------
.. [1] "Interquartile range" https://en.wikipedia.org/wiki/Interquartile_range
.. [2] "Robust measures of scale" https://en.wikipedia.org/wiki/Robust_measures_of_scale
.. [3] "Quantile" https://en.wikipedia.org/wiki/Quantile
Examples
--------
>>> from scipy.stats import iqr
>>> x = np.array([[10, 7, 4], [3, 2, 1]])
>>> x
array([[10, 7, 4],
[ 3, 2, 1]])
>>> iqr(x)
4.0
>>> iqr(x, axis=0)
array([ 3.5, 2.5, 1.5])
>>> iqr(x, axis=1)
array([ 3., 1.])
>>> iqr(x, axis=1, keepdims=True)
array([[ 3.],
[ 1.]])
"""
x = asarray(x)
# This check prevents percentile from raising an error later. Also, it is
# consistent with `np.var` and `np.std`.
if not x.size:
return np.nan
# An error may be raised here, so fail-fast, before doing lengthy
# computations, even though `scale` is not used until later
if isinstance(scale, str):
scale_key = scale.lower()
if scale_key not in _scale_conversions:
raise ValueError("{0} not a valid scale for `iqr`".format(scale))
if scale_key == 'raw':
warnings.warn(
"use of scale='raw' is deprecated, use scale=1.0 instead",
np.VisibleDeprecationWarning
)
scale = _scale_conversions[scale_key]
# Select the percentile function to use based on nans and policy
contains_nan, nan_policy = _contains_nan(x, nan_policy)
if contains_nan and nan_policy == 'omit':
percentile_func = np.nanpercentile
else:
percentile_func = np.percentile
if len(rng) != 2:
raise TypeError("quantile range must be two element sequence")
if np.isnan(rng).any():
raise ValueError("range must not contain NaNs")
rng = sorted(rng)
pct = percentile_func(x, rng, axis=axis, interpolation=interpolation,
keepdims=keepdims)
out = np.subtract(pct[1], pct[0])
if scale != 1.0:
out /= scale
return out
def _mad_1d(x, center, nan_policy):
# Median absolute deviation for 1-d array x.
# This is a helper function for `median_abs_deviation`; it assumes its
# arguments have been validated already. In particular, x must be a
# 1-d numpy array, center must be callable, and if nan_policy is not
# 'propagate', it is assumed to be 'omit', because 'raise' is handled
# in `median_abs_deviation`.
# No warning is generated if x is empty or all nan.
isnan = np.isnan(x)
if isnan.any():
if nan_policy == 'propagate':
return np.nan
x = x[~isnan]
if x.size == 0:
# MAD of an empty array is nan.
return np.nan
# Edge cases have been handled, so do the basic MAD calculation.
med = center(x)
mad = np.median(np.abs(x - med))
return mad
def median_abs_deviation(x, axis=0, center=np.median, scale=1.0,
nan_policy='propagate'):
r"""
Compute the median absolute deviation of the data along the given axis.
The median absolute deviation (MAD, [1]_) computes the median over the
absolute deviations from the median. It is a measure of dispersion
similar to the standard deviation but more robust to outliers [2]_.
The MAD of an empty array is ``np.nan``.
.. versionadded:: 1.5.0
Parameters
----------
x : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the range is computed. Default is 0. If None, compute
the MAD over the entire array.
center : callable, optional
A function that will return the central value. The default is to use
np.median. Any user defined function used will need to have the
function signature ``func(arr, axis)``.
scale : scalar or str, optional
The numerical value of scale will be divided out of the final
result. The default is 1.0. The string "normal" is also accepted,
and results in `scale` being the standard normal quantile function at 0.75
(approximately 0.67449), which makes the MAD comparable to the standard
deviation for normally distributed data.
Array-like scale is also allowed, as long as it broadcasts correctly
to the output such that ``out / scale`` is a valid operation. The
output dimensions depend on the input array, `x`, and the `axis`
argument.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
mad : scalar or ndarray
If ``axis=None``, a scalar is returned. If the input contains
integers or floats of smaller precision than ``np.float64``, then the
output data-type is ``np.float64``. Otherwise, the output data-type is
the same as that of the input.
See Also
--------
numpy.std, numpy.var, numpy.median, scipy.stats.iqr, scipy.stats.tmean,
scipy.stats.tstd, scipy.stats.tvar
Notes
-----
The `center` argument only affects the calculation of the central value
around which the MAD is calculated. That is, passing in ``center=np.mean``
will calculate the MAD around the mean - it will not calculate the *mean*
absolute deviation.
The input array may contain `inf`, but if `center` returns `inf`, the
corresponding MAD for that data will be `nan`.
References
----------
.. [1] "Median absolute deviation",
https://en.wikipedia.org/wiki/Median_absolute_deviation
.. [2] "Robust measures of scale",
https://en.wikipedia.org/wiki/Robust_measures_of_scale
Examples
--------
When comparing the behavior of `median_abs_deviation` with ``np.std``,
the latter is affected when we change a single value of an array to have an
outlier value while the MAD hardly changes:
>>> from scipy import stats
>>> x = stats.norm.rvs(size=100, scale=1, random_state=123456)
>>> x.std()
0.9973906394005013
>>> stats.median_abs_deviation(x)
0.82832610097857
>>> x[0] = 345.6
>>> x.std()
34.42304872314415
>>> stats.median_abs_deviation(x)
0.8323442311590675
Axis handling example:
>>> x = np.array([[10, 7, 4], [3, 2, 1]])
>>> x
array([[10, 7, 4],
[ 3, 2, 1]])
>>> stats.median_abs_deviation(x)
array([3.5, 2.5, 1.5])
>>> stats.median_abs_deviation(x, axis=None)
2.0
Scale normal example:
>>> x = stats.norm.rvs(size=1000000, scale=2, random_state=123456)
>>> stats.median_abs_deviation(x)
1.3487398527041636
>>> stats.median_abs_deviation(x, scale='normal')
1.9996446978061115
"""
if not callable(center):
raise TypeError("The argument 'center' must be callable. The given "
f"value {repr(center)} is not callable.")
# An error may be raised here, so fail-fast, before doing lengthy
# computations, even though `scale` is not used until later
if isinstance(scale, str):
if scale.lower() == 'normal':
scale = 0.6744897501960817 # special.ndtri(0.75)
else:
raise ValueError(f"{scale} is not a valid scale value.")
x = asarray(x)
# Consistent with `np.var` and `np.std`.
if not x.size:
if axis is None:
return np.nan
nan_shape = tuple(item for i, item in enumerate(x.shape) if i != axis)
if nan_shape == ():
# Return nan, not array(nan)
return np.nan
return np.full(nan_shape, np.nan)
contains_nan, nan_policy = _contains_nan(x, nan_policy)
if contains_nan:
if axis is None:
mad = _mad_1d(x.ravel(), center, nan_policy)
else:
mad = np.apply_along_axis(_mad_1d, axis, x, center, nan_policy)
else:
if axis is None:
med = center(x, axis=None)
mad = np.median(np.abs(x - med))
else:
# Wrap the call to center() in expand_dims() so it acts like
# keepdims=True was used.
med = np.expand_dims(center(x, axis=axis), axis)
mad = np.median(np.abs(x - med), axis=axis)
return mad / scale
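# Worked illustration (hypothetical data, for intuition only): for
# x = [1, 2, 3, 100] the median is 2.5 and the absolute deviations are
# [1.5, 0.5, 0.5, 97.5], so the MAD is 1.0; the single outlier barely moves
# it, whereas the (population) standard deviation is roughly 42.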
# Keep the top newline so that the message does not show up on the stats page
_median_absolute_deviation_deprec_msg = """
To preserve the existing default behavior, use
`scipy.stats.median_abs_deviation(..., scale=1/1.4826)`.
The value 1.4826 is not numerically precise for scaling
with a normal distribution. For a numerically precise value, use
`scipy.stats.median_abs_deviation(..., scale='normal')`.
"""
# Due to numpy/gh-16349 we need to unindent the entire docstring
@np.deprecate(old_name='median_absolute_deviation',
new_name='median_abs_deviation',
message=_median_absolute_deviation_deprec_msg)
def median_absolute_deviation(x, axis=0, center=np.median, scale=1.4826,
nan_policy='propagate'):
r"""
Compute the median absolute deviation of the data along the given axis.
The median absolute deviation (MAD, [1]_) computes the median over the
absolute deviations from the median. It is a measure of dispersion
similar to the standard deviation but more robust to outliers [2]_.
The MAD of an empty array is ``np.nan``.
.. versionadded:: 1.3.0
Parameters
----------
x : array_like
Input array or object that can be converted to an array.
axis : int or None, optional
Axis along which the range is computed. Default is 0. If None, compute
the MAD over the entire array.
center : callable, optional
A function that will return the central value. The default is to use
np.median. Any user defined function used will need to have the function
signature ``func(arr, axis)``.
scale : float, optional
The scaling factor applied to the MAD. The default scale (1.4826)
ensures consistency with the standard deviation for normally distributed
data.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
mad : scalar or ndarray
If ``axis=None``, a scalar is returned. If the input contains
integers or floats of smaller precision than ``np.float64``, then the
output data-type is ``np.float64``. Otherwise, the output data-type is
the same as that of the input.
See Also
--------
numpy.std, numpy.var, numpy.median, scipy.stats.iqr, scipy.stats.tmean,
scipy.stats.tstd, scipy.stats.tvar
Notes
-----
The `center` argument only affects the calculation of the central value
around which the MAD is calculated. That is, passing in ``center=np.mean``
will calculate the MAD around the mean - it will not calculate the *mean*
absolute deviation.
References
----------
.. [1] "Median absolute deviation",
https://en.wikipedia.org/wiki/Median_absolute_deviation
.. [2] "Robust measures of scale",
https://en.wikipedia.org/wiki/Robust_measures_of_scale
Examples
--------
When comparing the behavior of `median_absolute_deviation` with ``np.std``,
the latter is affected when we change a single value of an array to have an
outlier value while the MAD hardly changes:
>>> from scipy import stats
>>> x = stats.norm.rvs(size=100, scale=1, random_state=123456)
>>> x.std()
0.9973906394005013
>>> stats.median_absolute_deviation(x)
1.2280762773108278
>>> x[0] = 345.6
>>> x.std()
34.42304872314415
>>> stats.median_absolute_deviation(x)
1.2340335571164334
Axis handling example:
>>> x = np.array([[10, 7, 4], [3, 2, 1]])
>>> x
array([[10, 7, 4],
[ 3, 2, 1]])
>>> stats.median_absolute_deviation(x)
array([5.1891, 3.7065, 2.2239])
>>> stats.median_absolute_deviation(x, axis=None)
2.9652
"""
if isinstance(scale, str):
if scale.lower() == 'raw':
warnings.warn(
"use of scale='raw' is deprecated, use scale=1.0 instead",
np.VisibleDeprecationWarning
)
scale = 1.0
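# `median_abs_deviation` divides by `scale`, whereas this deprecated wrapper
# historically multiplied by it (default 1.4826), so numeric scales are
# inverted before delegating.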
if not isinstance(scale, str):
scale = 1 / scale
return median_abs_deviation(x, axis=axis, center=center, scale=scale,
nan_policy=nan_policy)
#####################################
# TRIMMING FUNCTIONS #
#####################################
SigmaclipResult = namedtuple('SigmaclipResult', ('clipped', 'lower', 'upper'))
def sigmaclip(a, low=4., high=4.):
"""Perform iterative sigma-clipping of array elements.
Starting from the full sample, all elements outside the critical range are
removed, i.e. all elements of the (raveled) input array ``c`` that satisfy
either of the following conditions::
c < mean(c) - std(c)*low
c > mean(c) + std(c)*high
The iteration continues with the updated sample until no
elements are outside the (updated) range.
Parameters
----------
a : array_like
Data array, will be raveled if not 1-D.
low : float, optional
Lower bound factor of sigma clipping. Default is 4.
high : float, optional
Upper bound factor of sigma clipping. Default is 4.
Returns
-------
clipped : ndarray
Input array with clipped elements removed.
lower : float
Lower threshold value used for clipping.
upper : float
Upper threshold value used for clipping.
Examples
--------
>>> from scipy.stats import sigmaclip
>>> a = np.concatenate((np.linspace(9.5, 10.5, 31),
... np.linspace(0, 20, 5)))
>>> fact = 1.5
>>> c, low, upp = sigmaclip(a, fact, fact)
>>> c
array([ 9.96666667, 10. , 10.03333333, 10. ])
>>> c.var(), c.std()
(0.00055555555555555165, 0.023570226039551501)
>>> low, c.mean() - fact*c.std(), c.min()
(9.9646446609406727, 9.9646446609406727, 9.9666666666666668)
>>> upp, c.mean() + fact*c.std(), c.max()
(10.035355339059327, 10.035355339059327, 10.033333333333333)
>>> a = np.concatenate((np.linspace(9.5, 10.5, 11),
... np.linspace(-100, -50, 3)))
>>> c, low, upp = sigmaclip(a, 1.8, 1.8)
>>> (c == np.linspace(9.5, 10.5, 11)).all()
True
"""
c = np.asarray(a).ravel()
delta = 1
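# Iterate until no elements are clipped: recompute the mean and std on the
# surviving sample, drop everything outside [mean - std*low, mean + std*high],
# and stop when the sample size no longer changes (delta == 0).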
while delta:
c_std = c.std()
c_mean = c.mean()
size = c.size
critlower = c_mean - c_std * low
critupper = c_mean + c_std * high
c = c[(c >= critlower) & (c <= critupper)]
delta = size - c.size
return SigmaclipResult(c, critlower, critupper)
def trimboth(a, proportiontocut, axis=0):
"""Slice off a proportion of items from both ends of an array.
Slice off the passed proportion of items from both ends of the passed
array (i.e., with `proportiontocut` = 0.1, slices leftmost 10% **and**
rightmost 10% of scores). The trimmed values are the lowest and
highest ones.
Slice off less if proportion results in a non-integer slice index (i.e.
conservatively slices off `proportiontocut`).
Parameters
----------
a : array_like
Data to trim.
proportiontocut : float
Proportion (in range 0-1) of total data set to trim of each end.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
out : ndarray
Trimmed version of array `a`. The order of the trimmed content
is undefined.
See Also
--------
trim_mean
Examples
--------
>>> from scipy import stats
>>> a = np.arange(20)
>>> b = stats.trimboth(a, 0.1)
>>> b.shape
(16,)
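Trimming with ``axis=None`` ravels the input first:
>>> c = stats.trimboth(a.reshape(4, 5), 0.1, axis=None)
>>> c.shape
(16,)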
"""
a = np.asarray(a)
if a.size == 0:
return a
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
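# int() truncates, so trimming is conservative; e.g. proportiontocut=0.1 with
# nobs=20 removes the 2 smallest and 2 largest values, leaving 16.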
if (lowercut >= uppercut):
raise ValueError("Proportion too big.")
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return atmp[tuple(sl)]
def trim1(a, proportiontocut, tail='right', axis=0):
"""Slice off a proportion from ONE end of the passed array distribution.
If `proportiontocut` = 0.1, slices off 'leftmost' or 'rightmost'
10% of scores. The lowest or highest values are trimmed (depending on
the tail).
Slice off less if proportion results in a non-integer slice index
(i.e. conservatively slices off `proportiontocut` ).
Parameters
----------
a : array_like
Input array.
proportiontocut : float
Fraction to cut off of 'left' or 'right' of distribution.
tail : {'left', 'right'}, optional
Defaults to 'right'.
axis : int or None, optional
Axis along which to trim data. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
trim1 : ndarray
Trimmed version of array `a`. The order of the trimmed content is
undefined.
Examples
--------
>>> from scipy import stats
>>> a = np.arange(20)
>>> b = stats.trim1(a, 0.5, 'left')
>>> b
array([10, 11, 12, 13, 14, 16, 15, 17, 18, 19])
"""
a = np.asarray(a)
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
# avoid possible corner case
if proportiontocut >= 1:
return []
if tail.lower() == 'right':
lowercut = 0
uppercut = nobs - int(proportiontocut * nobs)
elif tail.lower() == 'left':
lowercut = int(proportiontocut * nobs)
uppercut = nobs
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return atmp[tuple(sl)]
def trim_mean(a, proportiontocut, axis=0):
"""Return mean of array after trimming distribution from both tails.
If `proportiontocut` = 0.1, slices off 'leftmost' and 'rightmost' 10% of
scores. The input is sorted before slicing. Slices off less if proportion
results in a non-integer slice index (i.e., conservatively slices off
`proportiontocut` ).
Parameters
----------
a : array_like
Input array.
proportiontocut : float
Fraction to cut off of both tails of the distribution.
axis : int or None, optional
Axis along which the trimmed means are computed. Default is 0.
If None, compute over the whole array `a`.
Returns
-------
trim_mean : ndarray
Mean of trimmed array.
See Also
--------
trimboth
tmean : Compute the trimmed mean ignoring values outside given `limits`.
Examples
--------
>>> from scipy import stats
>>> x = np.arange(20)
>>> stats.trim_mean(x, 0.1)
9.5
>>> x2 = x.reshape(5, 4)
>>> x2
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15],
[16, 17, 18, 19]])
>>> stats.trim_mean(x2, 0.25)
array([ 8., 9., 10., 11.])
>>> stats.trim_mean(x2, 0.25, axis=1)
array([ 1.5, 5.5, 9.5, 13.5, 17.5])
"""
a = np.asarray(a)
if a.size == 0:
return np.nan
if axis is None:
a = a.ravel()
axis = 0
nobs = a.shape[axis]
lowercut = int(proportiontocut * nobs)
uppercut = nobs - lowercut
if (lowercut > uppercut):
raise ValueError("Proportion too big.")
atmp = np.partition(a, (lowercut, uppercut - 1), axis)
sl = [slice(None)] * atmp.ndim
sl[axis] = slice(lowercut, uppercut)
return np.mean(atmp[tuple(sl)], axis=axis)
F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
class F_onewayConstantInputWarning(RuntimeWarning):
"""
Warning generated by `f_oneway` when an input is constant, e.g.
each of the samples provided is a constant array.
"""
def __init__(self, msg=None):
if msg is None:
msg = ("Each of the input arrays is constant;"
"the F statistic is not defined or infinite")
self.args = (msg,)
class F_onewayBadInputSizesWarning(RuntimeWarning):
"""
Warning generated by `f_oneway` when an input has length 0,
or if all the inputs have length 1.
"""
pass
def _create_f_oneway_nan_result(shape, axis):
"""
This is a helper function for f_oneway for creating the return values
in certain degenerate conditions. It creates return values that are
all nan with the appropriate shape for the given `shape` and `axis`.
"""
axis = np.core.multiarray.normalize_axis_index(axis, len(shape))
shp = shape[:axis] + shape[axis+1:]
if shp == ():
f = np.nan
prob = np.nan
else:
f = np.full(shp, fill_value=np.nan)
prob = f.copy()
return F_onewayResult(f, prob)
def _first(arr, axis):
"""Return arr[..., 0:1, ...] where 0:1 is in the `axis` position."""
return np.take_along_axis(arr, np.array(0, ndmin=arr.ndim), axis)
def f_oneway(*args, axis=0):
"""Perform one-way ANOVA.
The one-way ANOVA tests the null hypothesis that two or more groups have
the same population mean. The test is applied to samples from two or
more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements for each group. There must be at least
two arguments. If the arrays are multidimensional, then all the
dimensions of the array must be the same except for `axis`.
axis : int, optional
Axis of the input arrays along which the test is applied.
Default is 0.
Returns
-------
statistic : float
The computed F statistic of the test.
pvalue : float
The associated p-value from the F distribution.
Warns
-----
F_onewayConstantInputWarning
Raised if each of the input arrays is a constant array.
In this case the F statistic is either infinite or isn't defined,
so ``np.inf`` or ``np.nan`` is returned.
F_onewayBadInputSizesWarning
Raised if the length of any input array is 0, or if all the input
arrays have length 1. ``np.nan`` is returned for the F statistic
and the p-value in these cases.
Notes
-----
The ANOVA test has important assumptions that must be satisfied in order
for the associated p-value to be valid.
1. The samples are independent.
2. Each sample is from a normally distributed population.
3. The population standard deviations of the groups are all equal. This
property is known as homoscedasticity.
If these assumptions are not true for a given set of data, it may still
be possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`) or
the Alexander-Govern test (`scipy.stats.alexandergovern`) although with
some loss of power.
The length of each group must be at least one, and there must be at
least one group with length greater than one. If these conditions
are not satisfied, a warning is generated and (``np.nan``, ``np.nan``)
is returned.
If each group contains constant values, and there exist at least two
groups with different values, the function generates a warning and
returns (``np.inf``, 0).
If all values in all groups are the same, the function generates a warning
and returns (``np.nan``, ``np.nan``).
The algorithm is from Heiman [2]_, pp.394-7.
References
----------
.. [1] R. Lowry, "Concepts and Applications of Inferential Statistics",
Chapter 14, 2014, http://vassarstats.net/textbook/
.. [2] G.W. Heiman, "Understanding research methods and statistics: An
integrated introduction for psychology", Houghton, Mifflin and
Company, 2001.
.. [3] G.H. McDonald, "Handbook of Biological Statistics", One-way ANOVA.
http://www.biostathandbook.com/onewayanova.html
Examples
--------
>>> from scipy.stats import f_oneway
Here are some data [3]_ on a shell measurement (the length of the anterior
adductor muscle scar, standardized by dividing by length) in the mussel
Mytilus trossulus from five locations: Tillamook, Oregon; Newport, Oregon;
Petersburg, Alaska; Magadan, Russia; and Tvarminne, Finland, taken from a
much larger data set used in McDonald et al. (1991).
>>> tillamook = [0.0571, 0.0813, 0.0831, 0.0976, 0.0817, 0.0859, 0.0735,
... 0.0659, 0.0923, 0.0836]
>>> newport = [0.0873, 0.0662, 0.0672, 0.0819, 0.0749, 0.0649, 0.0835,
... 0.0725]
>>> petersburg = [0.0974, 0.1352, 0.0817, 0.1016, 0.0968, 0.1064, 0.105]
>>> magadan = [0.1033, 0.0915, 0.0781, 0.0685, 0.0677, 0.0697, 0.0764,
... 0.0689]
>>> tvarminne = [0.0703, 0.1026, 0.0956, 0.0973, 0.1039, 0.1045]
>>> f_oneway(tillamook, newport, petersburg, magadan, tvarminne)
F_onewayResult(statistic=7.121019471642447, pvalue=0.0002812242314534544)
`f_oneway` accepts multidimensional input arrays. When the inputs
are multidimensional and `axis` is not given, the test is performed
along the first axis of the input arrays. For the following data, the
test is performed three times, once for each column.
>>> a = np.array([[9.87, 9.03, 6.81],
... [7.18, 8.35, 7.00],
... [8.39, 7.58, 7.68],
... [7.45, 6.33, 9.35],
... [6.41, 7.10, 9.33],
... [8.00, 8.24, 8.44]])
>>> b = np.array([[6.35, 7.30, 7.16],
... [6.65, 6.68, 7.63],
... [5.72, 7.73, 6.72],
... [7.01, 9.19, 7.41],
... [7.75, 7.87, 8.30],
... [6.90, 7.97, 6.97]])
>>> c = np.array([[3.31, 8.77, 1.01],
... [8.25, 3.24, 3.62],
... [6.32, 8.81, 5.19],
... [7.48, 8.83, 8.91],
... [8.59, 6.01, 6.07],
... [3.07, 9.72, 7.48]])
>>> F, p = f_oneway(a, b, c)
>>> F
array([1.75676344, 0.03701228, 3.76439349])
>>> p
array([0.20630784, 0.96375203, 0.04733157])
"""
if len(args) < 2:
raise TypeError(f'at least two inputs are required; got {len(args)}.')
args = [np.asarray(arg, dtype=float) for arg in args]
# ANOVA on N groups, each in its own array
num_groups = len(args)
# We haven't explicitly validated axis, but if it is bad, this call of
# np.concatenate will raise np.AxisError. The call will raise ValueError
# if the dimensions of all the arrays, except the axis dimension, are not
# the same.
alldata = np.concatenate(args, axis=axis)
bign = alldata.shape[axis]
# Check this after forming alldata, so shape errors are detected
# and reported before checking for 0 length inputs.
if any(arg.shape[axis] == 0 for arg in args):
warnings.warn(F_onewayBadInputSizesWarning('at least one input '
'has length 0'))
return _create_f_oneway_nan_result(alldata.shape, axis)
# Must have at least one group with length greater than 1.
if all(arg.shape[axis] == 1 for arg in args):
msg = ('all input arrays have length 1. f_oneway requires that at '
'least one input has length greater than 1.')
warnings.warn(F_onewayBadInputSizesWarning(msg))
return _create_f_oneway_nan_result(alldata.shape, axis)
# Check if the values within each group are constant, and if the common
# value in at least one group is different from that in another group.
# Based on https://github.com/scipy/scipy/issues/11669
# If axis=0, say, and the groups have shape (n0, ...), (n1, ...), ...,
# then is_const is a boolean array with shape (num_groups, ...).
# It is True if the groups along the axis slice are each constant.
# In the typical case where each input array is 1-d, is_const is a
# 1-d array with length num_groups.
is_const = np.concatenate([(_first(a, axis) == a).all(axis=axis,
keepdims=True)
for a in args], axis=axis)
# all_const is a boolean array with shape (...) (see previous comment).
# It is True if the values within each group along the axis slice are
# the same (e.g. [[3, 3, 3], [5, 5, 5, 5], [4, 4, 4]]).
all_const = is_const.all(axis=axis)
if all_const.any():
warnings.warn(F_onewayConstantInputWarning())
# all_same_const is True if all the values in the groups along the axis=0
# slice are the same (e.g. [[3, 3, 3], [3, 3, 3, 3], [3, 3, 3]]).
all_same_const = (_first(alldata, axis) == alldata).all(axis=axis)
# Determine the mean of the data, and subtract that from all inputs to a
# variance (via sum_of_sq / sq_of_sum) calculation. Variance is invariant
# to a shift in location, and centering all data around zero vastly
# improves numerical stability.
offset = alldata.mean(axis=axis, keepdims=True)
alldata -= offset
normalized_ss = _square_of_sums(alldata, axis=axis) / bign
sstot = _sum_of_squares(alldata, axis=axis) - normalized_ss
ssbn = 0
for a in args:
ssbn += _square_of_sums(a - offset, axis=axis) / a.shape[axis]
# Naming: variables ending in bn/b are for "between treatments", wn/w are
# for "within treatments"
ssbn -= normalized_ss
sswn = sstot - ssbn
dfbn = num_groups - 1
dfwn = bign - num_groups
msb = ssbn / dfbn
msw = sswn / dfwn
with np.errstate(divide='ignore', invalid='ignore'):
f = msb / msw
prob = special.fdtrc(dfbn, dfwn, f) # equivalent to stats.f.sf
# Fix any f values that should be inf or nan because the corresponding
# inputs were constant.
if np.isscalar(f):
if all_same_const:
f = np.nan
prob = np.nan
elif all_const:
f = np.inf
prob = 0.0
else:
f[all_const] = np.inf
prob[all_const] = 0.0
f[all_same_const] = np.nan
prob[all_same_const] = np.nan
return F_onewayResult(f, prob)
def alexandergovern(*args, nan_policy='propagate'):
"""Performs the Alexander Govern test.
The Alexander-Govern approximation tests the equality of k independent
means in the face of heterogeneity of variance. The test is applied to
samples from two or more groups, possibly with differing sizes.
Parameters
----------
sample1, sample2, ... : array_like
The sample measurements for each group. There must be at least
two samples.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float
The computed A statistic of the test.
pvalue : float
The associated p-value from the chi-squared distribution.
Warns
-----
AlexanderGovernConstantInputWarning
Raised if an input is a constant array. The statistic is not defined
in this case, so ``np.nan`` is returned.
See Also
--------
f_oneway : one-way ANOVA
Notes
-----
The use of this test relies on several assumptions.
1. The samples are independent.
2. Each sample is from a normally distributed population.
3. Unlike `f_oneway`, this test does not assume homoscedasticity; it
relaxes the assumption of equal variances.
Input samples must be finite, one dimensional, and with size greater than
one.
References
----------
.. [1] Alexander, Ralph A., and Diane M. Govern. "A New and Simpler
Approximation for ANOVA under Variance Heterogeneity." Journal
of Educational Statistics, vol. 19, no. 2, 1994, pp. 91-101.
JSTOR, www.jstor.org/stable/1165140. Accessed 12 Sept. 2020.
Examples
--------
>>> from scipy.stats import alexandergovern
Here are some data on annual percentage rate of interest charged on
new car loans at nine of the largest banks in four American cities
taken from the National Institute of Standards and Technology's
ANOVA dataset.
We use `alexandergovern` to test the null hypothesis that all cities
have the same mean APR against the alternative that the cities do not
all have the same mean APR. We decide that a significance level of 5%
is required to reject the null hypothesis in favor of the alternative.
>>> atlanta = [13.75, 13.75, 13.5, 13.5, 13.0, 13.0, 13.0, 12.75, 12.5]
>>> chicago = [14.25, 13.0, 12.75, 12.5, 12.5, 12.4, 12.3, 11.9, 11.9]
>>> houston = [14.0, 14.0, 13.51, 13.5, 13.5, 13.25, 13.0, 12.5, 12.5]
>>> memphis = [15.0, 14.0, 13.75, 13.59, 13.25, 12.97, 12.5, 12.25,
... 11.89]
>>> alexandergovern(atlanta, chicago, houston, memphis)
AlexanderGovernResult(statistic=4.65087071883494, pvalue=0.19922132490385214)
The p-value is 0.1992, indicating a nearly 20% chance of observing
such an extreme value of the test statistic under the null hypothesis.
This exceeds 5%, so we do not reject the null hypothesis in favor of
the alternative.
"""
args = _alexandergovern_input_validation(args, nan_policy)
if np.any([(arg == arg[0]).all() for arg in args]):
warnings.warn(AlexanderGovernConstantInputWarning())
return AlexanderGovernResult(np.nan, np.nan)
# The following formula numbers reference the equation described on
# page 92 by Alexander, Govern. Formulas 5, 6, and 7 describe other
# tests that serve as the basis for equation (8) but are not needed
# to perform the test.
# precalculate mean and length of each sample
lengths = np.array([ma.count(arg) if nan_policy == 'omit' else len(arg)
for arg in args])
means = np.array([np.mean(arg) for arg in args])
# (1) determine standard error of the mean for each sample
standard_errors = [np.std(arg, ddof=1) / np.sqrt(length)
for arg, length in zip(args, lengths)]
# (2) define a weight for each sample
inv_sq_se = 1 / np.square(standard_errors)
weights = inv_sq_se / np.sum(inv_sq_se)
# (3) determine variance-weighted estimate of the common mean
var_w = np.sum(weights * means)
# (4) determine one-sample t statistic for each group
t_stats = (means - var_w)/standard_errors
# calculate parameters to be used in transformation
v = lengths - 1
a = v - .5
b = 48 * a**2
c = (a * np.log(1 + (t_stats ** 2)/v))**.5
# (8) perform a normalizing transformation on t statistic
z = (c + ((c**3 + 3*c)/b) -
((4*c**7 + 33*c**5 + 240*c**3 + 855*c) /
(b**2*10 + 8*b*c**4 + 1000*b)))
# (9) calculate statistic
A = np.sum(np.square(z))
# "[the p value is determined from] central chi-square random deviates
# with k - 1 degrees of freedom". Alexander, Govern (94)
p = distributions.chi2.sf(A, len(args) - 1)
return AlexanderGovernResult(A, p)
def _alexandergovern_input_validation(args, nan_policy):
if len(args) < 2:
raise TypeError(f"2 or more inputs required, got {len(args)}")
# convert inputs to float arrays; they are validated in the loop below
args = [np.asarray(arg, dtype=float) for arg in args]
for i, arg in enumerate(args):
if np.size(arg) <= 1:
raise ValueError("Input sample size must be greater than one.")
if arg.ndim != 1:
raise ValueError("Input samples must be one-dimensional")
if np.isinf(arg).any():
raise ValueError("Input samples must be finite.")
contains_nan, nan_policy = _contains_nan(arg, nan_policy=nan_policy)
if contains_nan and nan_policy == 'omit':
args[i] = ma.masked_invalid(arg)
return args
AlexanderGovernResult = make_dataclass("AlexanderGovernResult", ("statistic",
"pvalue"))
class AlexanderGovernConstantInputWarning(RuntimeWarning):
"""Warning generated by `alexandergovern` when an input is constant."""
def __init__(self, msg=None):
if msg is None:
msg = ("An input array is constant; the statistic is not defined.")
self.args = (msg,)
class PearsonRConstantInputWarning(RuntimeWarning):
"""Warning generated by `pearsonr` when an input is constant."""
def __init__(self, msg=None):
if msg is None:
msg = ("An input array is constant; the correlation coefficient "
"is not defined.")
self.args = (msg,)
class PearsonRNearConstantInputWarning(RuntimeWarning):
"""Warning generated by `pearsonr` when an input is nearly constant."""
def __init__(self, msg=None):
if msg is None:
msg = ("An input array is nearly constant; the computed "
"correlation coefficient may be inaccurate.")
self.args = (msg,)
def pearsonr(x, y):
r"""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient [1]_ measures the linear relationship
between two datasets. The calculation of the p-value relies on the
assumption that each dataset is normally distributed. (See Kowalski [3]_
for a discussion of the effects of non-normality of the input on the
distribution of the correlation coefficient.) Like other correlation
coefficients, this one varies between -1 and +1 with 0 implying no
correlation. Correlations of -1 or +1 imply an exact linear relationship.
Parameters
----------
x : (N,) array_like
Input array.
y : (N,) array_like
Input array.
Returns
-------
r : float
Pearson's correlation coefficient.
p-value : float
Two-tailed p-value.
Warns
-----
PearsonRConstantInputWarning
Raised if an input is a constant array. The correlation coefficient
is not defined in this case, so ``np.nan`` is returned.
PearsonRNearConstantInputWarning
Raised if an input is "nearly" constant. The array ``x`` is considered
nearly constant if ``norm(x - mean(x)) < 1e-13 * abs(mean(x))``.
Numerical errors in the calculation ``x - mean(x)`` in this case might
result in an inaccurate calculation of r.
See Also
--------
spearmanr : Spearman rank-order correlation coefficient.
kendalltau : Kendall's tau, a correlation measure for ordinal data.
Notes
-----
The correlation coefficient is calculated as follows:
.. math::
r = \frac{\sum (x - m_x) (y - m_y)}
{\sqrt{\sum (x - m_x)^2 \sum (y - m_y)^2}}
where :math:`m_x` is the mean of the vector x and :math:`m_y` is
the mean of the vector y.
Under the assumption that x and y are drawn from
independent normal distributions (so the population correlation coefficient
is 0), the probability density function of the sample correlation
coefficient r is ([1]_, [2]_):
.. math::
f(r) = \frac{{(1-r^2)}^{n/2-2}}{\mathrm{B}(\frac{1}{2},\frac{n}{2}-1)}
where n is the number of samples, and B is the beta function. This
is sometimes referred to as the exact distribution of r. This is
the distribution that is used in `pearsonr` to compute the p-value.
The distribution is a beta distribution on the interval [-1, 1],
with equal shape parameters a = b = n/2 - 1. In terms of SciPy's
implementation of the beta distribution, the distribution of r is::
dist = scipy.stats.beta(n/2 - 1, n/2 - 1, loc=-1, scale=2)
The p-value returned by `pearsonr` is a two-sided p-value. The p-value
roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets. More precisely, for a
given sample with correlation coefficient r, the p-value is
the probability that abs(r') of a random sample x' and y' drawn from
the population with zero correlation would be greater than or equal
to abs(r). In terms of the object ``dist`` shown above, the p-value
for a given r and length n can be computed as::
p = 2*dist.cdf(-abs(r))
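As a small illustrative check (using the same data as in the Examples
below), this formula reproduces the p-value that `pearsonr` itself returns:

>>> from scipy import stats
>>> r, p = stats.pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
>>> n = 5
>>> dist = stats.beta(n/2 - 1, n/2 - 1, loc=-1, scale=2)
>>> bool(np.isclose(p, 2*dist.cdf(-abs(r))))
True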
When n is 2, the above continuous distribution is not well-defined.
One can interpret the limit of the beta distribution as the shape
parameters a and b approach a = b = 0 as a discrete distribution with
equal probability masses at r = 1 and r = -1. More directly, one
can observe that, given the data x = [x1, x2] and y = [y1, y2], and
assuming x1 != x2 and y1 != y2, the only possible values for r are 1
and -1. Because abs(r') for any sample x' and y' with length 2 will
be 1, the two-sided p-value for a sample of length 2 is always 1.
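For instance, a minimal added illustration with length-2 inputs:

>>> from scipy import stats
>>> r, p = stats.pearsonr([1, 2], [3, 5])
>>> float(r), float(p)
(1.0, 1.0)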
References
----------
.. [1] "Pearson correlation coefficient", Wikipedia,
https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
.. [2] Student, "Probable error of a correlation coefficient",
Biometrika, Volume 6, Issue 2-3, 1 September 1908, pp. 302-310.
.. [3] C. J. Kowalski, "On the Effects of Non-Normality on the Distribution
of the Sample Product-Moment Correlation Coefficient"
Journal of the Royal Statistical Society. Series C (Applied
Statistics), Vol. 21, No. 1 (1972), pp. 1-12.
Examples
--------
>>> from scipy import stats
>>> stats.pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
(-0.7426106572325057, 0.1505558088534455)
There is a linear dependence between x and y if y = a + b*x + e, where
a,b are constants and e is a random error term, assumed to be independent
of x. For simplicity, assume that x is standard normal, a=0, b=1 and let
e follow a normal distribution with mean zero and standard deviation s>0.
>>> s = 0.5
>>> x = stats.norm.rvs(size=500)
>>> e = stats.norm.rvs(scale=s, size=500)
>>> y = x + e
>>> stats.pearsonr(x, y)
(0.9029601878969703, 8.428978827629898e-185) # may vary
This should be close to the exact value given by
>>> 1/np.sqrt(1 + s**2)
0.8944271909999159
For s=0.5, we observe a high level of correlation. In general, a large
variance of the noise reduces the correlation, while the correlation
approaches one as the variance of the error goes to zero.
It is important to keep in mind that no correlation does not imply
independence unless (x, y) is jointly normal. Correlation can even be zero
when there is a very simple dependence structure: if X follows a
standard normal distribution, let y = abs(x). Note that the correlation
between x and y is zero. Indeed, since the expectation of x is zero,
cov(x, y) = E[x*y]. By definition, this equals E[x*abs(x)] which is zero
by symmetry. The following lines of code illustrate this observation:
>>> y = np.abs(x)
>>> stats.pearsonr(x, y)
(-0.016172891856853524, 0.7182823678751942) # may vary
A non-zero correlation coefficient can be misleading. For example, if X has
a standard normal distribution, define y = x if x < 0 and y = 0 otherwise.
A simple calculation shows that corr(x, y) = sqrt(Pi/(2*(Pi-1))) = 0.856...,
implying a high level of correlation:
>>> y = np.where(x < 0, x, 0)
>>> stats.pearsonr(x, y)
(0.8537091583771509, 3.183461621422181e-143) # may vary
This is counterintuitive since there is no dependence between x and y if x
is larger than zero, which happens in about half of the cases if we sample x
and y.
"""
n = len(x)
if n != len(y):
raise ValueError('x and y must have the same length.')
if n < 2:
raise ValueError('x and y must have length at least 2.')
x = np.asarray(x)
y = np.asarray(y)
# If an input is constant, the correlation coefficient is not defined.
if (x == x[0]).all() or (y == y[0]).all():
warnings.warn(PearsonRConstantInputWarning())
return np.nan, np.nan
# dtype is the data type for the calculations. This expression ensures
# that the data type is at least 64 bit floating point. It might have
# more precision if the input is, for example, np.longdouble.
dtype = type(1.0 + x[0] + y[0])
if n == 2:
return dtype(np.sign(x[1] - x[0])*np.sign(y[1] - y[0])), 1.0
xmean = x.mean(dtype=dtype)
ymean = y.mean(dtype=dtype)
# By using `astype(dtype)`, we ensure that the intermediate calculations
# use at least 64 bit floating point.
xm = x.astype(dtype) - xmean
ym = y.astype(dtype) - ymean
# Unlike np.linalg.norm or the expression sqrt((xm*xm).sum()),
# scipy.linalg.norm(xm) does not overflow if xm is, for example,
# [-5e210, 5e210, 3e200, -3e200]
normxm = linalg.norm(xm)
normym = linalg.norm(ym)
threshold = 1e-13
if normxm < threshold*abs(xmean) or normym < threshold*abs(ymean):
# If all the values in x (likewise y) are very close to the mean,
# the loss of precision that occurs in the subtraction xm = x - xmean
# might result in large errors in r.
warnings.warn(PearsonRNearConstantInputWarning())
r = np.dot(xm/normxm, ym/normym)
# Presumably, if abs(r) > 1, then it is only some small artifact of
# floating point arithmetic.
r = max(min(r, 1.0), -1.0)
# As explained in the docstring, the p-value can be computed as
# p = 2*dist.cdf(-abs(r))
# where dist is the beta distribution on [-1, 1] with shape parameters
# a = b = n/2 - 1. `special.btdtr` is the CDF for the beta distribution
# on [0, 1]. To use it, we make the transformation x = (r + 1)/2; the
# shape parameters do not change. Then -abs(r) used in `cdf(-abs(r))`
# becomes x = (-abs(r) + 1)/2 = 0.5*(1 - abs(r)). (r is cast to float64
# to avoid a TypeError raised by btdtr when r is higher precision.)
ab = n/2 - 1
prob = 2*special.btdtr(ab, ab, 0.5*(1 - abs(np.float64(r))))
return r, prob
def fisher_exact(table, alternative='two-sided'):
"""Perform a Fisher exact test on a 2x2 contingency table.
Parameters
----------
table : array_like of ints
A 2x2 contingency table. Elements must be non-negative integers.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided'
* 'less': one-sided
* 'greater': one-sided
See the Notes for more details.
Returns
-------
oddsratio : float
This is the prior odds ratio, not a posterior estimate.
p_value : float
P-value, the probability of obtaining a distribution at least as
extreme as the one that was actually observed, assuming that the
null hypothesis is true.
See Also
--------
chi2_contingency : Chi-square test of independence of variables in a
contingency table. This can be used as an alternative to
`fisher_exact` when the numbers in the table are large.
barnard_exact : Barnard's exact test, which is a more powerful alternative
to Fisher's exact test for 2x2 contingency tables.
boschloo_exact : Boschloo's exact test, which is a more powerful alternative
to Fisher's exact test for 2x2 contingency tables.
Notes
-----
*Null hypothesis and p-values*
The null hypothesis is that the input table is from the hypergeometric
distribution with parameters (as used in `hypergeom`)
``M = a + b + c + d``, ``n = a + b`` and ``N = a + c``, where the
input table is ``[[a, b], [c, d]]``. This distribution has support
``max(0, N + n - M) <= x <= min(N, n)``, or, in terms of the values
in the input table, ``max(0, a - d) <= x <= a + min(b, c)``. ``x``
can be interpreted as the upper-left element of a 2x2 table, so the
tables in the distribution have form::
[  x           n - x     ]
[N - x    M - (n + N) + x]
For example, if::
table = [6  2]
        [1  4]
then the support is ``2 <= x <= 7``, and the tables in the distribution
are::
[2 6] [3 5] [4 4] [5 3] [6 2] [7 1]
[5 0] [4 1] [3 2] [2 3] [1 4] [0 5]
The probability of each table is given by the hypergeometric distribution
``hypergeom.pmf(x, M, n, N)``. For this example, these are (rounded to
three significant digits)::
x       2      3      4      5       6        7
p  0.0163  0.163  0.408  0.326  0.0816  0.00466
These can be computed with::
>>> from scipy.stats import hypergeom
>>> table = np.array([[6, 2], [1, 4]])
>>> M = table.sum()
>>> n = table[0].sum()
>>> N = table[:, 0].sum()
>>> start, end = hypergeom.support(M, n, N)
>>> hypergeom.pmf(np.arange(start, end+1), M, n, N)
array([0.01631702, 0.16317016, 0.40792541, 0.32634033, 0.08158508,
0.004662 ])
The two-sided p-value is the probability that, under the null hypothesis,
a random table would have a probability equal to or less than the
probability of the input table. For our example, the probability of
the input table (where ``x = 6``) is 0.0816. The x values where the
probability does not exceed this are 2, 6 and 7, so the two-sided p-value
is ``0.0163 + 0.0816 + 0.00466 ~= 0.10256``::
>>> from scipy.stats import fisher_exact
>>> oddsr, p = fisher_exact(table, alternative='two-sided')
>>> p
0.10256410256410257
The one-sided p-value for ``alternative='greater'`` is the probability
that a random table has ``x >= a``, which in our example is ``x >= 6``,
or ``0.0816 + 0.00466 ~= 0.08626``::
>>> oddsr, p = fisher_exact(table, alternative='greater')
>>> p
0.08624708624708627
This is equivalent to computing the survival function of the
distribution at ``x = 5`` (one less than ``x`` from the input table,
because we want to include the probability of ``x = 6`` in the sum)::
>>> hypergeom.sf(5, M, n, N)
0.08624708624708627
For ``alternative='less'``, the one-sided p-value is the probability
that a random table has ``x <= a``, (i.e. ``x <= 6`` in our example),
or ``0.0163 + 0.163 + 0.408 + 0.326 + 0.0816 ~= 0.9949``::
>>> oddsr, p = fisher_exact(table, alternative='less')
>>> p
0.9953379953379957
This is equivalent to computing the cumulative distribution function
of the distribution at ``x = 6``:
>>> hypergeom.cdf(6, M, n, N)
0.9953379953379957
*Odds ratio*
The calculated odds ratio is different from the one R uses. This SciPy
implementation returns the (more common) "unconditional Maximum
Likelihood Estimate", while R uses the "conditional Maximum Likelihood
Estimate".
Examples
--------
Say we spend a few days counting whales and sharks in the Atlantic and
Indian oceans. In the Atlantic ocean we find 8 whales and 1 shark, in the
Indian ocean 2 whales and 5 sharks. Then our contingency table is::
Atlantic Indian
whales 8 2
sharks 1 5
We use this table to find the p-value:
>>> from scipy.stats import fisher_exact
>>> oddsratio, pvalue = fisher_exact([[8, 2], [1, 5]])
>>> pvalue
0.0349...
The probability that we would observe this or an even more imbalanced ratio
by chance is about 3.5%. A commonly used significance level is 5%--if we
adopt that, we can therefore conclude that our observed imbalance is
statistically significant; whales prefer the Atlantic while sharks prefer
the Indian ocean.
"""
hypergeom = distributions.hypergeom
# int32 is not enough for the algorithm
c = np.asarray(table, dtype=np.int64)
if not c.shape == (2, 2):
raise ValueError("The input `table` must be of shape (2, 2).")
if np.any(c < 0):
raise ValueError("All values in `table` must be nonnegative.")
if 0 in c.sum(axis=0) or 0 in c.sum(axis=1):
# If both values in a row or column are zero, the p-value is 1 and
# the odds ratio is NaN.
return np.nan, 1.0
if c[1, 0] > 0 and c[0, 1] > 0:
oddsratio = c[0, 0] * c[1, 1] / (c[1, 0] * c[0, 1])
else:
oddsratio = np.inf
n1 = c[0, 0] + c[0, 1]
n2 = c[1, 0] + c[1, 1]
n = c[0, 0] + c[1, 0]
def binary_search(n, n1, n2, side):
"""Binary search for where to begin halves in two-sided test."""
if side == "upper":
minval = mode
maxval = n
else:
minval = 0
maxval = mode
guess = -1
while maxval - minval > 1:
if maxval == minval + 1 and guess == minval:
guess = maxval
else:
guess = (maxval + minval) // 2
pguess = hypergeom.pmf(guess, n1 + n2, n1, n)
if side == "upper":
ng = guess - 1
else:
ng = guess + 1
if pguess <= pexact < hypergeom.pmf(ng, n1 + n2, n1, n):
break
elif pguess < pexact:
maxval = guess
else:
minval = guess
if guess == -1:
guess = minval
if side == "upper":
while guess > 0 and \
hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess -= 1
while hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess += 1
else:
while hypergeom.pmf(guess, n1 + n2, n1, n) < pexact * epsilon:
guess += 1
while guess > 0 and \
hypergeom.pmf(guess, n1 + n2, n1, n) > pexact / epsilon:
guess -= 1
return guess
if alternative == 'less':
pvalue = hypergeom.cdf(c[0, 0], n1 + n2, n1, n)
elif alternative == 'greater':
# Same formula as the 'less' case, but with the second column.
pvalue = hypergeom.cdf(c[0, 1], n1 + n2, n1, c[0, 1] + c[1, 1])
elif alternative == 'two-sided':
mode = int((n + 1) * (n1 + 1) / (n1 + n2 + 2))
pexact = hypergeom.pmf(c[0, 0], n1 + n2, n1, n)
pmode = hypergeom.pmf(mode, n1 + n2, n1, n)
epsilon = 1 - 1e-4
if np.abs(pexact - pmode) / np.maximum(pexact, pmode) <= 1 - epsilon:
return oddsratio, 1.
elif c[0, 0] < mode:
plower = hypergeom.cdf(c[0, 0], n1 + n2, n1, n)
if hypergeom.pmf(n, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, plower
guess = binary_search(n, n1, n2, "upper")
pvalue = plower + hypergeom.sf(guess - 1, n1 + n2, n1, n)
else:
pupper = hypergeom.sf(c[0, 0] - 1, n1 + n2, n1, n)
if hypergeom.pmf(0, n1 + n2, n1, n) > pexact / epsilon:
return oddsratio, pupper
guess = binary_search(n, n1, n2, "lower")
pvalue = pupper + hypergeom.cdf(guess, n1 + n2, n1, n)
else:
msg = "`alternative` should be one of {'two-sided', 'less', 'greater'}"
raise ValueError(msg)
pvalue = min(pvalue, 1.0)
return oddsratio, pvalue
class SpearmanRConstantInputWarning(RuntimeWarning):
"""Warning generated by `spearmanr` when an input is constant."""
def __init__(self, msg=None):
if msg is None:
msg = ("An input array is constant; the correlation coefficient "
"is not defined.")
self.args = (msg,)
SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))
def spearmanr(a, b=None, axis=0, nan_policy='propagate',
alternative='two-sided'):
"""Calculate a Spearman correlation coefficient with associated p-value.
The Spearman rank-order correlation coefficient is a nonparametric measure
of the monotonicity of the relationship between two datasets. Unlike the
Pearson correlation, the Spearman correlation does not assume that both
datasets are normally distributed. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Correlations of -1 or +1 imply an exact monotonic relationship. Positive
correlations imply that as x increases, so does y. Negative correlations
imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
Parameters
----------
a, b : 1D or 2D array_like, b is optional
One or two 1-D or 2-D arrays containing multiple variables and
observations. When these are 1-D, each represents a vector of
observations of a single variable. For the behavior in the 2-D case,
see under ``axis``, below.
Both arrays need to have the same length in the ``axis`` dimension.
axis : int or None, optional
If axis=0 (default), then each column represents a variable, with
observations in the rows. If axis=1, the relationship is transposed:
each row represents a variable, while the columns contain observations.
If axis=None, then both arrays will be raveled.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': the correlation is nonzero
* 'less': the correlation is negative (less than zero)
* 'greater': the correlation is positive (greater than zero)
.. versionadded:: 1.7.0
Returns
-------
correlation : float or ndarray (2-D square)
Spearman correlation matrix or correlation coefficient (if only 2
variables are given as parameters). The correlation matrix is square,
with side length equal to the total number of variables (columns or
rows) in ``a`` and ``b`` combined.
pvalue : float
The p-value for a hypothesis test whose null hypothesis
is that two sets of data are uncorrelated. See `alternative` above
for alternative hypotheses. `pvalue` has the same
shape as `correlation`.
References
----------
.. [1] Zwillinger, D. and Kokoska, S. (2000). CRC Standard
Probability and Statistics Tables and Formulae. Chapman & Hall: New
York. 2000.
Section 14.7
Examples
--------
>>> from scipy import stats
>>> stats.spearmanr([1,2,3,4,5], [5,6,7,8,7])
SpearmanrResult(correlation=0.82078..., pvalue=0.08858...)
>>> rng = np.random.default_rng()
>>> x2n = rng.standard_normal((100, 2))
>>> y2n = rng.standard_normal((100, 2))
>>> stats.spearmanr(x2n)
SpearmanrResult(correlation=-0.07960396039603959, pvalue=0.4311168705769747)
>>> stats.spearmanr(x2n[:,0], x2n[:,1])
SpearmanrResult(correlation=-0.07960396039603959, pvalue=0.4311168705769747)
>>> rho, pval = stats.spearmanr(x2n, y2n)
>>> rho
array([[ 1. , -0.07960396, -0.08314431, 0.09662166],
[-0.07960396, 1. , -0.14448245, 0.16738074],
[-0.08314431, -0.14448245, 1. , 0.03234323],
[ 0.09662166, 0.16738074, 0.03234323, 1. ]])
>>> pval
array([[0. , 0.43111687, 0.41084066, 0.33891628],
[0.43111687, 0. , 0.15151618, 0.09600687],
[0.41084066, 0.15151618, 0. , 0.74938561],
[0.33891628, 0.09600687, 0.74938561, 0. ]])
>>> rho, pval = stats.spearmanr(x2n.T, y2n.T, axis=1)
>>> rho
array([[ 1. , -0.07960396, -0.08314431, 0.09662166],
[-0.07960396, 1. , -0.14448245, 0.16738074],
[-0.08314431, -0.14448245, 1. , 0.03234323],
[ 0.09662166, 0.16738074, 0.03234323, 1. ]])
>>> stats.spearmanr(x2n, y2n, axis=None)
SpearmanrResult(correlation=0.044981624540613524, pvalue=0.5270803651336189)
>>> stats.spearmanr(x2n.ravel(), y2n.ravel())
SpearmanrResult(correlation=0.044981624540613524, pvalue=0.5270803651336189)
>>> rng = np.random.default_rng()
>>> xint = rng.integers(10, size=(100, 2))
>>> stats.spearmanr(xint)
SpearmanrResult(correlation=0.09800224850707953, pvalue=0.3320271757932076)
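As a small added illustration of the ``alternative`` parameter: for a
positive correlation, the one-sided p-value under ``alternative='greater'``
is half the two-sided p-value.

>>> res_two_sided = stats.spearmanr([1, 2, 3, 4, 5], [5, 6, 7, 8, 7])
>>> res_greater = stats.spearmanr([1, 2, 3, 4, 5], [5, 6, 7, 8, 7],
...                               alternative='greater')
>>> bool(np.isclose(res_greater.pvalue, res_two_sided.pvalue / 2))
True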
"""
if axis is not None and axis > 1:
raise ValueError("spearmanr only handles 1-D or 2-D arrays, "
"supplied axis argument {}, please use only "
"values 0, 1 or None for axis".format(axis))
a, axisout = _chk_asarray(a, axis)
if a.ndim > 2:
raise ValueError("spearmanr only handles 1-D or 2-D arrays")
if b is None:
if a.ndim < 2:
raise ValueError("`spearmanr` needs at least 2 "
"variables to compare")
else:
# Concatenate a and b, so that we now only have to handle the case
# of a 2-D `a`.
b, _ = _chk_asarray(b, axis)
if axisout == 0:
a = np.column_stack((a, b))
else:
a = np.row_stack((a, b))
n_vars = a.shape[1 - axisout]
n_obs = a.shape[axisout]
if n_obs <= 1:
# Handle empty arrays or single observations.
return SpearmanrResult(np.nan, np.nan)
if axisout == 0:
if (a[:, 0][0] == a[:, 0]).all() or (a[:, 1][0] == a[:, 1]).all():
# If an input is constant, the correlation coefficient
# is not defined.
warnings.warn(SpearmanRConstantInputWarning())
return SpearmanrResult(np.nan, np.nan)
else: # case when axisout == 1 b/c a is 2 dim only
if (a[0, :][0] == a[0, :]).all() or (a[1, :][0] == a[1, :]).all():
# If an input is constant, the correlation coefficient
# is not defined.
warnings.warn(SpearmanRConstantInputWarning())
return SpearmanrResult(np.nan, np.nan)
a_contains_nan, nan_policy = _contains_nan(a, nan_policy)
variable_has_nan = np.zeros(n_vars, dtype=bool)
if a_contains_nan:
if nan_policy == 'omit':
return mstats_basic.spearmanr(a, axis=axis, nan_policy=nan_policy,
alternative=alternative)
elif nan_policy == 'propagate':
if a.ndim == 1 or n_vars <= 2:
return SpearmanrResult(np.nan, np.nan)
else:
# Keep track of variables with NaNs, set the outputs to NaN
# only for those variables
variable_has_nan = np.isnan(a).any(axis=axisout)
a_ranked = np.apply_along_axis(rankdata, axisout, a)
rs = np.corrcoef(a_ranked, rowvar=axisout)
dof = n_obs - 2 # degrees of freedom
# rs can have elements equal to 1, so avoid zero division warnings
with np.errstate(divide='ignore'):
# clip the small negative values possibly caused by rounding
# errors before taking the square root
t = rs * np.sqrt((dof/((rs+1.0)*(1.0-rs))).clip(0))
t, prob = _ttest_finish(dof, t, alternative)
# For backwards compatibility, return scalars when comparing 2 columns
if rs.shape == (2, 2):
return SpearmanrResult(rs[1, 0], prob[1, 0])
else:
rs[variable_has_nan, :] = np.nan
rs[:, variable_has_nan] = np.nan
return SpearmanrResult(rs, prob)
PointbiserialrResult = namedtuple('PointbiserialrResult',
('correlation', 'pvalue'))
def pointbiserialr(x, y):
r"""Calculate a point biserial correlation coefficient and its p-value.
The point biserial correlation is used to measure the relationship
between a binary variable, x, and a continuous variable, y. Like other
correlation coefficients, this one varies between -1 and +1 with 0
implying no correlation. Correlations of -1 or +1 imply a determinative
relationship.
This function uses a shortcut formula but produces the same result as
`pearsonr`.
Parameters
----------
x : array_like of bools
Input array.
y : array_like
Input array.
Returns
-------
correlation : float
R value.
pvalue : float
Two-sided p-value.
Notes
-----
`pointbiserialr` uses a t-test with ``n-2`` degrees of freedom.
It is equivalent to `pearsonr`.
The value of the point-biserial correlation can be calculated from:
.. math::
r_{pb} = \frac{\overline{Y_{1}} -
\overline{Y_{0}}}{s_{y}}\sqrt{\frac{N_{0} N_{1}}{N (N - 1)}}
Where :math:`Y_{0}` and :math:`Y_{1}` are means of the metric
observations coded 0 and 1 respectively; :math:`N_{0}` and :math:`N_{1}`
are number of observations coded 0 and 1 respectively; :math:`N` is the
total number of observations and :math:`s_{y}` is the standard
deviation of all the metric observations.
A value of :math:`r_{pb}` that is significantly different from zero is
completely equivalent to a significant difference in means between the two
groups. Thus, an independent groups t Test with :math:`N-2` degrees of
freedom may be used to test whether :math:`r_{pb}` is nonzero. The
relation between the t-statistic for comparing two independent groups and
:math:`r_{pb}` is given by:
.. math::
t = \sqrt{N - 2}\frac{r_{pb}}{\sqrt{1 - r^{2}_{pb}}}
References
----------
.. [1] J. Lev, "The Point Biserial Coefficient of Correlation", Ann. Math.
Statist., Vol. 20, no.1, pp. 125-126, 1949.
.. [2] R.F. Tate, "Correlation Between a Discrete and a Continuous
Variable. Point-Biserial Correlation.", Ann. Math. Statist., Vol. 25,
no. 3, pp. 603-607, 1954.
.. [3] D. Kornbrot "Point Biserial Correlation", In Wiley StatsRef:
Statistics Reference Online (eds N. Balakrishnan, et al.), 2014.
:doi:`10.1002/9781118445112.stat06227`
Examples
--------
>>> from scipy import stats
>>> a = np.array([0, 0, 0, 1, 1, 1, 1])
>>> b = np.arange(7)
>>> stats.pointbiserialr(a, b)
(0.8660254037844386, 0.011724811003954652)
>>> stats.pearsonr(a, b)
(0.86602540378443871, 0.011724811003954626)
>>> np.corrcoef(a, b)
array([[ 1. , 0.8660254],
[ 0.8660254, 1. ]])
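As an added illustrative check, the t-statistic relation given in the Notes
reproduces the returned two-sided p-value:

>>> rpb, p = stats.pointbiserialr(a, b)
>>> t = np.sqrt(len(a) - 2) * rpb / np.sqrt(1 - rpb**2)
>>> bool(np.isclose(p, 2 * stats.t.sf(abs(t), len(a) - 2)))
True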
"""
rpb, prob = pearsonr(x, y)
return PointbiserialrResult(rpb, prob)
KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
def kendalltau(x, y, initial_lexsort=None, nan_policy='propagate',
method='auto', variant='b'):
"""Calculate Kendall's tau, a correlation measure for ordinal data.
Kendall's tau is a measure of the correspondence between two rankings.
Values close to 1 indicate strong agreement, and values close to -1
indicate strong disagreement. This implements two variants of Kendall's
tau: tau-b (the default) and tau-c (also known as Stuart's tau-c). These
differ only in how they are normalized to lie within the range -1 to 1;
the hypothesis tests (their p-values) are identical. Kendall's original
tau-a is not implemented separately because both tau-b and tau-c reduce
to tau-a in the absence of ties.
Parameters
----------
x, y : array_like
Arrays of rankings, of the same shape. If arrays are not 1-D, they
will be flattened to 1-D.
initial_lexsort : bool, optional
Unused (deprecated).
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
method : {'auto', 'asymptotic', 'exact'}, optional
Defines which method is used to calculate the p-value [5]_.
The following options are available (default is 'auto'):
* 'auto': selects the appropriate method based on a trade-off
between speed and accuracy
* 'asymptotic': uses a normal approximation valid for large samples
* 'exact': computes the exact p-value, but can only be used if no ties
are present. As the sample size increases, the 'exact' computation
time may grow and the result may lose some precision.
variant: {'b', 'c'}, optional
Defines which variant of Kendall's tau is returned. Default is 'b'.
Returns
-------
correlation : float
The tau statistic.
pvalue : float
The two-sided p-value for a hypothesis test whose null hypothesis is
an absence of association, tau = 0.
See Also
--------
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
weightedtau : Computes a weighted version of Kendall's tau.
Notes
-----
The definition of Kendall's tau that is used is [2]_::
tau_b = (P - Q) / sqrt((P + Q + T) * (P + Q + U))
tau_c = 2 (P - Q) / (n**2 * (m - 1) / m)
where P is the number of concordant pairs, Q the number of discordant
pairs, T the number of ties only in `x`, and U the number of ties only in
`y`. If a tie occurs for the same pair in both `x` and `y`, it is not
added to either T or U. n is the total number of samples, and m is the
number of unique values in either `x` or `y`, whichever is smaller.
References
----------
.. [1] Maurice G. Kendall, "A New Measure of Rank Correlation", Biometrika
Vol. 30, No. 1/2, pp. 81-93, 1938.
.. [2] Maurice G. Kendall, "The treatment of ties in ranking problems",
Biometrika Vol. 33, No. 3, pp. 239-251. 1945.
.. [3] Gottfried E. Noether, "Elements of Nonparametric Statistics", John
Wiley & Sons, 1967.
.. [4] Peter M. Fenwick, "A new data structure for cumulative frequency
tables", Software: Practice and Experience, Vol. 24, No. 3,
pp. 327-336, 1994.
.. [5] Maurice G. Kendall, "Rank Correlation Methods" (4th Edition),
Charles Griffin & Co., 1970.
Examples
--------
>>> from scipy import stats
>>> x1 = [12, 2, 1, 12, 2]
>>> x2 = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.kendalltau(x1, x2)
>>> tau
-0.47140452079103173
>>> p_value
0.2827454599327748
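As a small added illustration of the tau-c variant defined in the Notes:
for this data P - Q = -4, n = 5 and m = 3, so
tau_c = 2*(-4) / (5**2 * (3 - 1)/3) = -0.48.

>>> tau_c, _ = stats.kendalltau(x1, x2, variant='c')
>>> bool(np.isclose(tau_c, -0.48))
True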
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if x.size != y.size:
raise ValueError("All inputs to `kendalltau` must be of the same "
f"size, found x-size {x.size} and y-size {y.size}")
elif not x.size or not y.size:
# Return NaN if arrays are empty
return KendalltauResult(np.nan, np.nan)
# check both x and y
cnx, npx = _contains_nan(x, nan_policy)
cny, npy = _contains_nan(y, nan_policy)
contains_nan = cnx or cny
if npx == 'omit' or npy == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'propagate':
return KendalltauResult(np.nan, np.nan)
elif contains_nan and nan_policy == 'omit':
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
if variant == 'b':
return mstats_basic.kendalltau(x, y, method=method, use_ties=True)
else:
raise ValueError("Only variant 'b' is supported for masked arrays")
if initial_lexsort is not None:  # deprecated; the argument is ignored
warnings.warn('"initial_lexsort" is deprecated and no longer used.')
def count_rank_tie(ranks):
cnt = np.bincount(ranks).astype('int64', copy=False)
cnt = cnt[cnt > 1]
return ((cnt * (cnt - 1) // 2).sum(),
(cnt * (cnt - 1.) * (cnt - 2)).sum(),
(cnt * (cnt - 1.) * (2*cnt + 5)).sum())
size = x.size
perm = np.argsort(y) # sort on y and convert y to dense ranks
x, y = x[perm], y[perm]
y = np.r_[True, y[1:] != y[:-1]].cumsum(dtype=np.intp)
# stable sort on x and convert x to dense ranks
perm = np.argsort(x, kind='mergesort')
x, y = x[perm], y[perm]
x = np.r_[True, x[1:] != x[:-1]].cumsum(dtype=np.intp)
dis = _kendall_dis(x, y) # discordant pairs
obs = np.r_[True, (x[1:] != x[:-1]) | (y[1:] != y[:-1]), True]
cnt = np.diff(np.nonzero(obs)[0]).astype('int64', copy=False)
ntie = (cnt * (cnt - 1) // 2).sum() # joint ties
xtie, x0, x1 = count_rank_tie(x) # ties in x, stats
ytie, y0, y1 = count_rank_tie(y) # ties in y, stats
tot = (size * (size - 1)) // 2
if xtie == tot or ytie == tot:
return KendalltauResult(np.nan, np.nan)
# Note that tot = con + dis + (xtie - ntie) + (ytie - ntie) + ntie
# = con + dis + xtie + ytie - ntie
con_minus_dis = tot - xtie - ytie + ntie - 2 * dis
if variant == 'b':
tau = con_minus_dis / np.sqrt(tot - xtie) / np.sqrt(tot - ytie)
elif variant == 'c':
minclasses = min(len(set(x)), len(set(y)))
tau = 2*con_minus_dis / (size**2 * (minclasses-1)/minclasses)
else:
raise ValueError(f"Unknown variant of the method chosen: {variant}. "
"variant must be 'b' or 'c'.")
# Limit range to fix computational errors
tau = min(1., max(-1., tau))
# The p-value calculation is the same for all variants since the p-value
# depends only on con_minus_dis.
if method == 'exact' and (xtie != 0 or ytie != 0):
raise ValueError("Ties found, exact method cannot be used.")
if method == 'auto':
if (xtie == 0 and ytie == 0) and (size <= 33 or
min(dis, tot-dis) <= 1):
method = 'exact'
else:
method = 'asymptotic'
if xtie == 0 and ytie == 0 and method == 'exact':
pvalue = mstats_basic._kendall_p_exact(size, min(dis, tot-dis))
elif method == 'asymptotic':
# con_minus_dis is approx normally distributed with this variance [3]_
m = size * (size - 1.)
var = ((m * (2*size + 5) - x1 - y1) / 18 +
(2 * xtie * ytie) / m + x0 * y0 / (9 * m * (size - 2)))
pvalue = (special.erfc(np.abs(con_minus_dis) /
np.sqrt(var) / np.sqrt(2)))
else:
raise ValueError(f"Unknown method {method} specified. Use 'auto', "
"'exact' or 'asymptotic'.")
return KendalltauResult(tau, pvalue)
WeightedTauResult = namedtuple('WeightedTauResult', ('correlation', 'pvalue'))
def weightedtau(x, y, rank=True, weigher=None, additive=True):
r"""Compute a weighted version of Kendall's :math:`\tau`.
The weighted :math:`\tau` is a weighted version of Kendall's
:math:`\tau` in which exchanges of high weight are more influential than
exchanges of low weight. The default parameters compute the additive
hyperbolic version of the index, :math:`\tau_\mathrm h`, which has
been shown to provide the best balance between important and
unimportant elements [1]_.
The weighting is defined by means of a rank array, which assigns a
nonnegative rank to each element (higher importance ranks being
associated with smaller values, e.g., 0 is the highest possible rank),
and a weigher function, which assigns a weight based on the rank to
each element. The weight of an exchange is then the sum or the product
of the weights of the ranks of the exchanged elements. The default
parameters compute :math:`\tau_\mathrm h`: an exchange between
elements with rank :math:`r` and :math:`s` (starting from zero) has
weight :math:`1/(r+1) + 1/(s+1)`.
Specifying a rank array is meaningful only if you have in mind an
external criterion of importance. If, as it usually happens, you do
not have in mind a specific rank, the weighted :math:`\tau` is
defined by averaging the values obtained using the decreasing
lexicographical rank by (`x`, `y`) and by (`y`, `x`). This is the
behavior with default parameters. Note that the convention used
here for ranking (lower values imply higher importance) is opposite
to that used by other SciPy statistical functions.
Parameters
----------
x, y : array_like
Arrays of scores, of the same shape. If arrays are not 1-D, they will
be flattened to 1-D.
rank : array_like of ints or bool, optional
A nonnegative rank assigned to each element. If it is None, the
decreasing lexicographical rank by (`x`, `y`) will be used: elements of
higher rank will be those with larger `x`-values, using `y`-values to
break ties (in particular, swapping `x` and `y` will give a different
result). If it is False, the element indices will be used
directly as ranks. The default is True, in which case this
function returns the average of the values obtained using the
decreasing lexicographical rank by (`x`, `y`) and by (`y`, `x`).
weigher : callable, optional
The weigher function. Must map nonnegative integers (zero
representing the most important element) to a nonnegative weight.
The default, None, provides hyperbolic weighting, that is,
rank :math:`r` is mapped to weight :math:`1/(r+1)`.
additive : bool, optional
If True, the weight of an exchange is computed by adding the
weights of the ranks of the exchanged elements; otherwise, the weights
are multiplied. The default is True.
Returns
-------
correlation : float
The weighted :math:`\tau` correlation index.
pvalue : float
Presently ``np.nan``, as the null distribution of the statistic is
unknown (even in the additive hyperbolic case).
See Also
--------
kendalltau : Calculates Kendall's tau.
spearmanr : Calculates a Spearman rank-order correlation coefficient.
theilslopes : Computes the Theil-Sen estimator for a set of points (x, y).
Notes
-----
This function uses an :math:`O(n \log n)`, mergesort-based algorithm
[1]_ that is a weighted extension of Knight's algorithm for Kendall's
:math:`\tau` [2]_. It can compute Shieh's weighted :math:`\tau` [3]_
between rankings without ties (i.e., permutations) by setting
`additive` and `rank` to False, as the definition given in [1]_ is a
generalization of Shieh's.
NaNs are considered the smallest possible score.
.. versionadded:: 0.19.0
References
----------
.. [1] Sebastiano Vigna, "A weighted correlation index for rankings with
ties", Proceedings of the 24th international conference on World
Wide Web, pp. 1166-1176, ACM, 2015.
.. [2] W.R. Knight, "A Computer Method for Calculating Kendall's Tau with
Ungrouped Data", Journal of the American Statistical Association,
Vol. 61, No. 314, Part 1, pp. 436-439, 1966.
.. [3] Grace S. Shieh. "A weighted Kendall's tau statistic", Statistics &
Probability Letters, Vol. 39, No. 1, pp. 17-24, 1998.
Examples
--------
>>> from scipy import stats
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> tau, p_value = stats.weightedtau(x, y)
>>> tau
-0.56694968153682723
>>> p_value
nan
>>> tau, p_value = stats.weightedtau(x, y, additive=False)
>>> tau
-0.62205716951801038
NaNs are considered the smallest possible score:
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, np.nan]
>>> tau, _ = stats.weightedtau(x, y)
>>> tau
-0.56694968153682723
This is exactly Kendall's tau:
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> tau, _ = stats.weightedtau(x, y, weigher=lambda x: 1)
>>> tau
-0.47140452079103173
>>> x = [12, 2, 1, 12, 2]
>>> y = [1, 4, 7, 1, 0]
>>> stats.weightedtau(x, y, rank=None)
WeightedTauResult(correlation=-0.4157652301037516, pvalue=nan)
>>> stats.weightedtau(y, x, rank=None)
WeightedTauResult(correlation=-0.7181341329699028, pvalue=nan)
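The default hyperbolic weighting can also be supplied explicitly; as a
small added check, ``weigher=lambda r: 1/(r + 1)`` reproduces the default
result:

>>> tau_default, _ = stats.weightedtau(x, y)
>>> tau_explicit, _ = stats.weightedtau(x, y, weigher=lambda r: 1/(r + 1))
>>> bool(np.isclose(tau_default, tau_explicit))
True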
"""
x = np.asarray(x).ravel()
y = np.asarray(y).ravel()
if x.size != y.size:
raise ValueError("All inputs to `weightedtau` must be "
"of the same size, "
"found x-size %s and y-size %s" % (x.size, y.size))
if not x.size:
# Return NaN if arrays are empty
return WeightedTauResult(np.nan, np.nan)
# If there are NaNs we apply _toint64()
if np.isnan(np.sum(x)):
x = _toint64(x)
if np.isnan(np.sum(y)):
y = _toint64(y)
# Convert unsupported dtypes to integer ranks
if x.dtype != y.dtype:
if x.dtype != np.int64:
x = _toint64(x)
if y.dtype != np.int64:
y = _toint64(y)
else:
if x.dtype not in (np.int32, np.int64, np.float32, np.float64):
x = _toint64(x)
y = _toint64(y)
if rank is True:
return WeightedTauResult((
_weightedrankedtau(x, y, None, weigher, additive) +
_weightedrankedtau(y, x, None, weigher, additive)
) / 2, np.nan)
if rank is False:
rank = np.arange(x.size, dtype=np.intp)
elif rank is not None:
rank = np.asarray(rank).ravel()
if rank.size != x.size:
raise ValueError(
"All inputs to `weightedtau` must be of the same size, "
"found x-size %s and rank-size %s" % (x.size, rank.size)
)
return WeightedTauResult(_weightedrankedtau(x, y, rank, weigher, additive),
np.nan)
# FROM MGCPY: https://github.com/neurodata/mgcpy
class _ParallelP:
"""Helper function to calculate parallel p-value."""
def __init__(self, x, y, random_states):
self.x = x
self.y = y
self.random_states = random_states
def __call__(self, index):
order = self.random_states[index].permutation(self.y.shape[0])
permy = self.y[order][:, order]
# calculate permuted stats, store in null distribution
perm_stat = _mgc_stat(self.x, permy)[0]
return perm_stat
def _perm_test(x, y, stat, reps=1000, workers=-1, random_state=None):
r"""Helper function that calculates the p-value. See below for uses.
Parameters
----------
x, y : ndarray
`x` and `y` have shapes `(n, p)` and `(n, q)`.
stat : float
The sample test statistic.
reps : int, optional
The number of replications used to estimate the null when using the
permutation test. The default is 1000 replications.
workers : int or map-like callable, optional
If `workers` is an int the population is subdivided into `workers`
sections and evaluated in parallel (uses
`multiprocessing.Pool <multiprocessing>`). Supply `-1` to use all cores
available to the Process. Alternatively supply a map-like callable,
such as `multiprocessing.Pool.map` for evaluating the population in
parallel. This evaluation is carried out as `workers(func, iterable)`.
Requires that `func` be pickleable.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Returns
-------
pvalue : float
The sample test p-value.
null_dist : list
The approximated null distribution.
"""
# generate seeds for each rep (change to new parallel random number
# capabilities in numpy >= 1.17+)
random_state = check_random_state(random_state)
random_states = [np.random.RandomState(rng_integers(random_state, 1 << 32,
size=4, dtype=np.uint32)) for _ in range(reps)]
# parallelizes with specified workers over number of reps and set seeds
parallelp = _ParallelP(x=x, y=y, random_states=random_states)
with MapWrapper(workers) as mapwrapper:
null_dist = np.array(list(mapwrapper(parallelp, range(reps))))
# calculate p-value and significant permutation map through list
pvalue = (null_dist >= stat).sum() / reps
# Correct a p-value of 0: with a finite number of permutation
# replicates, an estimated p-value of exactly 0 is not meaningful.
if pvalue == 0:
pvalue = 1 / reps
return pvalue, null_dist
def _euclidean_dist(x):
return cdist(x, x)
MGCResult = namedtuple('MGCResult', ('stat', 'pvalue', 'mgc_dict'))
def multiscale_graphcorr(x, y, compute_distance=_euclidean_dist, reps=1000,
workers=1, is_twosamp=False, random_state=None):
r"""Computes the Multiscale Graph Correlation (MGC) test statistic.
Specifically, for each point, MGC finds the :math:`k`-nearest neighbors for
one property (e.g. cloud density), and the :math:`l`-nearest neighbors for
the other property (e.g. grass wetness) [1]_. This pair :math:`(k, l)` is
called the "scale". A priori, however, it is not know which scales will be
most informative. So, MGC computes all distance pairs, and then efficiently
computes the distance correlations for all scales. The local correlations
illustrate which scales are relatively informative about the relationship.
The key, therefore, to successfully discover and decipher relationships
between disparate data modalities is to adaptively determine which scales
are the most informative, and the geometric implication for the most
informative scales. Doing so not only provides an estimate of whether the
modalities are related, but also provides insight into how the
determination was made. This is especially important in high-dimensional
data, where simple visualizations do not reveal relationships to the
unaided human eye. Characterizations of this implementation in particular
have been derived from and benchmarked within [2]_.
Parameters
----------
x, y : ndarray
If ``x`` and ``y`` have shapes ``(n, p)`` and ``(n, q)`` where `n` is
the number of samples and `p` and `q` are the number of dimensions,
then the MGC independence test will be run. Alternatively, ``x`` and
``y`` can have shapes ``(n, n)`` if they are distance or similarity
matrices, and ``compute_distance`` must be set to ``None``. If ``x``
and ``y`` have shapes ``(n, p)`` and ``(m, p)``, an unpaired
two-sample MGC test will be run.
compute_distance : callable, optional
A function that computes the distance or similarity among the samples
within each data matrix. Set to ``None`` if ``x`` and ``y`` are
already distance matrices. The default uses the euclidean norm metric.
If you are calling a custom function, either create the distance
matrix before-hand or create a function of the form
``compute_distance(x)`` where `x` is the data matrix for which
pairwise distances are calculated.
reps : int, optional
The number of replications used to estimate the null when using the
permutation test. The default is ``1000``.
workers : int or map-like callable, optional
If ``workers`` is an int the population is subdivided into ``workers``
sections and evaluated in parallel (uses ``multiprocessing.Pool
<multiprocessing>``). Supply ``-1`` to use all cores available to the
Process. Alternatively supply a map-like callable, such as
``multiprocessing.Pool.map`` for evaluating the p-value in parallel.
This evaluation is carried out as ``workers(func, iterable)``.
Requires that `func` be pickleable. The default is ``1``.
is_twosamp : bool, optional
If `True`, a two sample test will be run. If ``x`` and ``y`` have
shapes ``(n, p)`` and ``(m, p)``, this option will be overridden and
set to ``True``. Set to ``True`` if ``x`` and ``y`` both have shapes
``(n, p)`` and a two sample test is desired. The default is ``False``.
Note that this will not run if inputs are distance matrices.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Returns
-------
stat : float
The sample MGC test statistic within `[-1, 1]`.
pvalue : float
The p-value obtained via permutation.
mgc_dict : dict
Contains additional useful returns containing the following
keys:
- mgc_map : ndarray
A 2D representation of the latent geometry of the relationship.
- opt_scale : (int, int)
The estimated optimal scale as a `(x, y)` pair.
- null_dist : list
The null distribution derived from the permuted matrices
See Also
--------
pearsonr : Pearson correlation coefficient and p-value for testing
non-correlation.
kendalltau : Calculates Kendall's tau.
spearmanr : Calculates a Spearman rank-order correlation coefficient.
Notes
-----
A description of the process of MGC and applications on neuroscience data
can be found in [1]_. It is performed using the following steps:
#. Two distance matrices :math:`D^X` and :math:`D^Y` are computed and
modified to be mean zero columnwise. This results in two
:math:`n \times n` distance matrices :math:`A` and :math:`B` (the
centering and unbiased modification) [3]_.
#. For all values :math:`k` and :math:`l` from :math:`1, ..., n`,
* The :math:`k`-nearest neighbor and :math:`l`-nearest neighbor graphs
are calculated for each property. Here, :math:`G_k (i, j)` indicates
the :math:`k`-smallest values of the :math:`i`-th row of :math:`A`
and :math:`H_l (i, j)` indicates the :math:`l` smallest values of
the :math:`i`-th row of :math:`B`
* Let :math:`\circ` denote the entry-wise matrix product; then local
correlations are summed and normalized using the following statistic:
.. math::
c^{kl} = \frac{\sum_{ij} A G_k B H_l}
{\sqrt{\sum_{ij} A^2 G_k \times \sum_{ij} B^2 H_l}}
#. The MGC test statistic is the smoothed optimal local correlation of
:math:`\{ c^{kl} \}`. Denote the smoothing operation as :math:`R(\cdot)`,
which essentially sets all isolated large correlations to 0 and leaves
connected large correlations unchanged (see [3]_). MGC is,
.. math::
MGC_n (x, y) = \max_{(k, l)} R \left(c^{kl} \left( x_n, y_n \right)
\right)
The test statistic returns a value between :math:`(-1, 1)` since it is
normalized.
The p-value returned is calculated using a permutation test. This process
is completed by first randomly permuting :math:`y` to estimate the null
distribution and then calculating the probability of observing a test
statistic, under the null, at least as extreme as the observed test
statistic.
MGC requires at least 5 samples to run with reliable results. It can also
handle high-dimensional data sets.
In addition, by manipulating the input data matrices, the two-sample
testing problem can be reduced to the independence testing problem [4]_.
Given sample data :math:`U` and :math:`V` of sizes :math:`p \times n` and
:math:`p \times m`, data matrices :math:`X` and :math:`Y` can be created as
follows:
.. math::
X = [U | V] \in \mathcal{R}^{p \times (n + m)}
Y = [0_{1 \times n} | 1_{1 \times m}] \in \mathcal{R}^{(n + m)}
Then, the MGC statistic can be calculated as normal. This methodology can
be extended to similar tests such as distance correlation [4]_.
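A minimal NumPy sketch of this construction (shapes follow the
:math:`p \times n` convention used in this paragraph, not the ``(n, p)``
convention of the function arguments):

>>> rng = np.random.default_rng()
>>> U = rng.standard_normal((3, 10))                # p x n
>>> V = rng.standard_normal((3, 7))                 # p x m
>>> X = np.concatenate([U, V], axis=1)              # p x (n + m)
>>> Y = np.concatenate([np.zeros(10), np.ones(7)])  # n + m labels
>>> X.shape, Y.shape
((3, 17), (17,))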
.. versionadded:: 1.4.0
References
----------
.. [1] Vogelstein, J. T., Bridgeford, E. W., Wang, Q., Priebe, C. E.,
Maggioni, M., & Shen, C. (2019). Discovering and deciphering
relationships across disparate data modalities. ELife.
.. [2] Panda, S., Palaniappan, S., Xiong, J., Swaminathan, A.,
Ramachandran, S., Bridgeford, E. W., ... Vogelstein, J. T. (2019).
mgcpy: A Comprehensive High Dimensional Independence Testing Python
Package. :arXiv:`1907.02088`
.. [3] Shen, C., Priebe, C.E., & Vogelstein, J. T. (2019). From distance
correlation to multiscale graph correlation. Journal of the American
Statistical Association.
.. [4] Shen, C. & Vogelstein, J. T. (2018). The Exact Equivalence of
Distance and Kernel Methods for Hypothesis Testing.
:arXiv:`1806.05514`
Examples
--------
>>> from scipy.stats import multiscale_graphcorr
>>> x = np.arange(100)
>>> y = x
>>> stat, pvalue, _ = multiscale_graphcorr(x, y, workers=-1)
>>> '%.1f, %.3f' % (stat, pvalue)
'1.0, 0.001'
Alternatively,
>>> x = np.arange(100)
>>> y = x
>>> mgc = multiscale_graphcorr(x, y)
>>> '%.1f, %.3f' % (mgc.stat, mgc.pvalue)
'1.0, 0.001'
To run an unpaired two-sample test,
>>> x = np.arange(100)
>>> y = np.arange(79)
>>> mgc = multiscale_graphcorr(x, y)
>>> '%.3f, %.2f' % (mgc.stat, mgc.pvalue) # doctest: +SKIP
'0.033, 0.02'
or, if the shapes of the inputs are the same,
>>> x = np.arange(100)
>>> y = x
>>> mgc = multiscale_graphcorr(x, y, is_twosamp=True)
>>> '%.3f, %.1f' % (mgc.stat, mgc.pvalue) # doctest: +SKIP
'-0.008, 1.0'
"""
if not isinstance(x, np.ndarray) or not isinstance(y, np.ndarray):
raise ValueError("x and y must be ndarrays")
# convert arrays of type (n,) to (n, 1)
if x.ndim == 1:
x = x[:, np.newaxis]
elif x.ndim != 2:
raise ValueError("Expected a 2-D array `x`, found shape "
"{}".format(x.shape))
if y.ndim == 1:
y = y[:, np.newaxis]
elif y.ndim != 2:
raise ValueError("Expected a 2-D array `y`, found shape "
"{}".format(y.shape))
nx, px = x.shape
ny, py = y.shape
# check for NaNs
_contains_nan(x, nan_policy='raise')
_contains_nan(y, nan_policy='raise')
# check for positive or negative infinity and raise error
if np.sum(np.isinf(x)) > 0 or np.sum(np.isinf(y)) > 0:
raise ValueError("Inputs contain infinities")
if nx != ny:
if px == py:
# reshape x and y for two sample testing
is_twosamp = True
else:
raise ValueError("Shape mismatch, x and y must have shape [n, p] "
"and [n, q] or have shape [n, p] and [m, p].")
if nx < 5 or ny < 5:
raise ValueError("MGC requires at least 5 samples to give reasonable "
"results.")
# convert x and y to float
x = x.astype(np.float64)
y = y.astype(np.float64)
# check if compute_distance is a callable
if not callable(compute_distance) and compute_distance is not None:
raise ValueError("Compute_distance must be a function.")
# check that the number of reps is a non-negative integer (a warning is
# raised if under 1000)
if not isinstance(reps, int) or reps < 0:
raise ValueError("Number of reps must be an integer greater than 0.")
elif reps < 1000:
msg = ("The number of replications is low (under 1000), and p-value "
"calculations may be unreliable. Use the p-value result, with "
"caution!")
warnings.warn(msg, RuntimeWarning)
if is_twosamp:
if compute_distance is None:
raise ValueError("Cannot run if inputs are distance matrices")
x, y = _two_sample_transform(x, y)
if compute_distance is not None:
# compute distance matrices for x and y
x = compute_distance(x)
y = compute_distance(y)
# calculate MGC stat
stat, stat_dict = _mgc_stat(x, y)
stat_mgc_map = stat_dict["stat_mgc_map"]
opt_scale = stat_dict["opt_scale"]
# calculate permutation MGC p-value
pvalue, null_dist = _perm_test(x, y, stat, reps=reps, workers=workers,
random_state=random_state)
# save all stats (other than stat/p-value) in dictionary
mgc_dict = {"mgc_map": stat_mgc_map,
"opt_scale": opt_scale,
"null_dist": null_dist}
return MGCResult(stat, pvalue, mgc_dict)
def _mgc_stat(distx, disty):
r"""Helper function that calculates the MGC stat. See above for use.
Parameters
----------
distx, disty : ndarray
`distx` and `disty` have shapes `(n, p)` and `(n, q)` or `(n, n)` and
`(n, n)` if distance matrices.
Returns
-------
stat : float
The sample MGC test statistic within `[-1, 1]`.
stat_dict : dict
Contains additional useful returns containing the following
keys:
- stat_mgc_map : ndarray
MGC-map of the statistics.
- opt_scale : (float, float)
The estimated optimal scale as a `(x, y)` pair.
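A minimal usage sketch (illustrative only; `x` and `y` below are hypothetical
data arrays turned into `(n, n)` Euclidean distance matrices):
>>> from scipy.spatial.distance import cdist  # doctest: +SKIP
>>> stat, stat_dict = _mgc_stat(cdist(x, x), cdist(y, y))  # doctest: +SKIP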
"""
# calculate MGC map and optimal scale
stat_mgc_map = _local_correlations(distx, disty, global_corr='mgc')
n, m = stat_mgc_map.shape
if m == 1 or n == 1:
# the global scale is the statistic calculated at maximal nearest
# neighbors. There is not enough local scale to search over, so
# default to global scale
stat = stat_mgc_map[m - 1][n - 1]
opt_scale = m * n
else:
samp_size = len(distx) - 1
# threshold to find connected region of significant local correlations
sig_connect = _threshold_mgc_map(stat_mgc_map, samp_size)
# maximum within the significant region
stat, opt_scale = _smooth_mgc_map(sig_connect, stat_mgc_map)
stat_dict = {"stat_mgc_map": stat_mgc_map,
"opt_scale": opt_scale}
return stat, stat_dict
def _threshold_mgc_map(stat_mgc_map, samp_size):
r"""
Finds a connected region of significance in the MGC-map by thresholding.
Parameters
----------
stat_mgc_map : ndarray
All local correlations within `[-1,1]`.
samp_size : int
The sample size of original data.
Returns
-------
sig_connect : ndarray
A binary matrix with 1's indicating the significant region.
"""
m, n = stat_mgc_map.shape
# 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05
# with varying levels of performance. Threshold is based on a beta
# approximation.
per_sig = 1 - (0.02 / samp_size) # Percentile to consider as significant
threshold = samp_size * (samp_size - 3)/4 - 1/2 # Beta approximation
threshold = distributions.beta.ppf(per_sig, threshold, threshold) * 2 - 1
# the global scale is the statistic calculated at maximal nearest
# neighbors. Threshold is the maximum of the global and local scales
threshold = max(threshold, stat_mgc_map[m - 1][n - 1])
# find the largest connected component of significant correlations
sig_connect = stat_mgc_map > threshold
if np.sum(sig_connect) > 0:
sig_connect, _ = measurements.label(sig_connect)
_, label_counts = np.unique(sig_connect, return_counts=True)
# skip the first element in label_counts, as it is count(zeros)
max_label = np.argmax(label_counts[1:]) + 1
sig_connect = sig_connect == max_label
else:
sig_connect = np.array([[False]])
return sig_connect
def _smooth_mgc_map(sig_connect, stat_mgc_map):
"""Finds the smoothed maximal within the significant region R.
If area of R is too small it returns the last local correlation. Otherwise,
returns the maximum within significant_connected_region.
Parameters
----------
sig_connect: ndarray
A binary matrix with 1's indicating the significant region.
stat_mgc_map: ndarray
All local correlations within `[-1, 1]`.
Returns
-------
stat : float
The sample MGC statistic within `[-1, 1]`.
opt_scale: (float, float)
The estimated optimal scale as an `(x, y)` pair.
"""
m, n = stat_mgc_map.shape
# the global scale is the statistic calculated at maximal nearest
# neighbors. By default, statistic and optimal scale are global.
stat = stat_mgc_map[m - 1][n - 1]
opt_scale = [m, n]
if np.linalg.norm(sig_connect) != 0:
# proceed only when the connected region's area is sufficiently large
# 0.02 is simply an empirical threshold, this can be set to 0.01 or 0.05
# with varying levels of performance
if np.sum(sig_connect) >= np.ceil(0.02 * max(m, n)) * min(m, n):
max_corr = max(stat_mgc_map[sig_connect])
# find all scales within significant_connected_region that maximize
# the local correlation
max_corr_index = np.where((stat_mgc_map >= max_corr) & sig_connect)
if max_corr >= stat:
stat = max_corr
k, l = max_corr_index
one_d_indices = k * n + l # 2D to 1D indexing
k = np.max(one_d_indices) // n
l = np.max(one_d_indices) % n
opt_scale = [k+1, l+1] # adding 1s to match R indexing
return stat, opt_scale
def _two_sample_transform(u, v):
"""Helper function that concatenates x and y for two sample MGC stat.
See above for use.
Parameters
----------
u, v : ndarray
`u` and `v` have shapes `(n, p)` and `(m, p)`.
Returns
-------
x : ndarray
Concatenation of `u` and `v` along ``axis=0``. `x` thus has shape
`(n + m, p)`.
y : ndarray
Label matrix for `x` where 0 refers to samples that come from `u` and
1 refers to samples that come from `v`. `y` thus has shape `(n + m, 1)`.
"""
nx = u.shape[0]
ny = v.shape[0]
x = np.concatenate([u, v], axis=0)
y = np.concatenate([np.zeros(nx), np.ones(ny)], axis=0).reshape(-1, 1)
return x, y
#####################################
# INFERENTIAL STATISTICS #
#####################################
Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
def ttest_1samp(a, popmean, axis=0, nan_policy='propagate',
alternative="two-sided"):
"""Calculate the T-test for the mean of ONE group of scores.
This is a test for the null hypothesis that the expected value
(mean) of a sample of independent observations `a` is equal to the given
population mean, `popmean`.
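As a sketch of the standard formula used here, the statistic is
``t = (mean(a) - popmean) / sqrt(var(a, ddof=1) / n)`` with ``n - 1``
degrees of freedom, where ``n`` is the number of observations along `axis`.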
Parameters
----------
a : array_like
Sample observation.
popmean : float or array_like
Expected value in null hypothesis. If array_like, then it must have the
same shape as `a` excluding the axis dimension.
axis : int or None, optional
Axis along which to compute test; default is 0. If None, compute over
the whole array `a`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the mean of the underlying distribution of the sample
is different than the given population mean (`popmean`)
* 'less': the mean of the underlying distribution of the sample is
less than the given population mean (`popmean`)
* 'greater': the mean of the underlying distribution of the sample is
greater than the given population mean (`popmean`)
.. versionadded:: 1.6.0
Returns
-------
statistic : float or array
t-statistic.
pvalue : float or array
Two-sided p-value.
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> rvs = stats.norm.rvs(loc=5, scale=10, size=(50, 2), random_state=rng)
Test if mean of random sample is equal to true mean, and different mean.
We reject the null hypothesis in the second case and don't reject it in
the first case.
>>> stats.ttest_1samp(rvs, 5.0)
Ttest_1sampResult(statistic=array([-2.09794637, -1.75977004]), pvalue=array([0.04108952, 0.08468867]))
>>> stats.ttest_1samp(rvs, 0.0)
Ttest_1sampResult(statistic=array([1.64495065, 1.62095307]), pvalue=array([0.10638103, 0.11144602]))
Examples using axis and non-scalar dimension for population mean.
>>> result = stats.ttest_1samp(rvs, [5.0, 0.0])
>>> result.statistic
array([-2.09794637, 1.62095307])
>>> result.pvalue
array([0.04108952, 0.11144602])
>>> result = stats.ttest_1samp(rvs.T, [5.0, 0.0], axis=1)
>>> result.statistic
array([-2.09794637, 1.62095307])
>>> result.pvalue
array([0.04108952, 0.11144602])
>>> result = stats.ttest_1samp(rvs, [[5.0], [0.0]])
>>> result.statistic
array([[-2.09794637, -1.75977004],
[ 1.64495065, 1.62095307]])
>>> result.pvalue
array([[0.04108952, 0.08468867],
[0.10638103, 0.11144602]])
"""
a, axis = _chk_asarray(a, axis)
contains_nan, nan_policy = _contains_nan(a, nan_policy)
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
return mstats_basic.ttest_1samp(a, popmean, axis, alternative)
n = a.shape[axis]
df = n - 1
d = np.mean(a, axis) - popmean
v = np.var(a, axis, ddof=1)
denom = np.sqrt(v / n)
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t, alternative)
return Ttest_1sampResult(t, prob)
def _ttest_finish(df, t, alternative):
"""Common code between all 3 t-test functions."""
# We use ``stdtr`` directly here as it handles the case when ``nan``
# values are present in the data and masked arrays are passed
# while ``t.cdf`` emits runtime warnings. This way ``_ttest_finish``
# can be shared between the ``stats`` and ``mstats`` versions.
if alternative == 'less':
pval = special.stdtr(df, t)
elif alternative == 'greater':
pval = special.stdtr(df, -t)
elif alternative == 'two-sided':
pval = special.stdtr(df, -np.abs(t))*2
else:
raise ValueError("alternative must be "
"'less', 'greater' or 'two-sided'")
if t.ndim == 0:
t = t[()]
if pval.ndim == 0:
pval = pval[()]
return t, pval
def _ttest_ind_from_stats(mean1, mean2, denom, df, alternative):
d = mean1 - mean2
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(d, denom)
t, prob = _ttest_finish(df, t, alternative)
return (t, prob)
def _unequal_var_ttest_denom(v1, n1, v2, n2):
vn1 = v1 / n1
vn2 = v2 / n2
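# Welch-Satterthwaite approximation to the effective degrees of freedom.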
with np.errstate(divide='ignore', invalid='ignore'):
df = (vn1 + vn2)**2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = np.where(np.isnan(df), 1, df)
denom = np.sqrt(vn1 + vn2)
return df, denom
def _equal_var_ttest_denom(v1, n1, v2, n2):
df = n1 + n2 - 2.0
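# Pooled sample variance across the two groups.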
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df
denom = np.sqrt(svar * (1.0 / n1 + 1.0 / n2))
return df, denom
Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
def ttest_ind_from_stats(mean1, std1, nobs1, mean2, std2, nobs2,
equal_var=True, alternative="two-sided"):
r"""
T-test for means of two independent samples from descriptive statistics.
This is a test for the null hypothesis that two independent
samples have identical average (expected) values.
Parameters
----------
mean1 : array_like
The mean(s) of sample 1.
std1 : array_like
The standard deviation(s) of sample 1.
nobs1 : array_like
The number(s) of observations of sample 1.
mean2 : array_like
The mean(s) of sample 2.
std2 : array_like
The standard deviation(s) of sample 2.
nobs2 : array_like
The number(s) of observations of sample 2.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the means of the distributions are unequal.
* 'less': the mean of the first distribution is less than the
mean of the second distribution.
* 'greater': the mean of the first distribution is greater than the
mean of the second distribution.
.. versionadded:: 1.6.0
Returns
-------
statistic : float or array
The calculated t-statistics.
pvalue : float or array
The two-tailed p-value.
See Also
--------
scipy.stats.ttest_ind
Notes
-----
.. versionadded:: 0.16.0
References
----------
.. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test
Examples
--------
Suppose we have the summary data for two samples, as follows::
                  Sample   Sample
            Size   Mean   Variance
Sample 1     13    15.0     87.5
Sample 2     11    12.0     39.0
Apply the t-test to this data (with the assumption that the population
variances are equal):
>>> from scipy.stats import ttest_ind_from_stats
>>> ttest_ind_from_stats(mean1=15.0, std1=np.sqrt(87.5), nobs1=13,
... mean2=12.0, std2=np.sqrt(39.0), nobs2=11)
Ttest_indResult(statistic=0.9051358093310269, pvalue=0.3751996797581487)
For comparison, here is the data from which those summary statistics
were taken. With this data, we can compute the same result using
`scipy.stats.ttest_ind`:
>>> a = np.array([1, 3, 4, 6, 11, 13, 15, 19, 22, 24, 25, 26, 26])
>>> b = np.array([2, 4, 6, 9, 11, 13, 14, 15, 18, 19, 21])
>>> from scipy.stats import ttest_ind
>>> ttest_ind(a, b)
Ttest_indResult(statistic=0.905135809331027, pvalue=0.3751996797581486)
Suppose we instead have binary data and would like to apply a t-test to
compare the proportion of 1s in two independent groups::
            Number of  Sample   Sample
      Size    ones      Mean   Variance
Sample 1  150    30      0.2     0.16
Sample 2  200    45     0.225    0.174375
The sample mean :math:`\hat{p}` is the proportion of ones in the sample
and the variance for a binary observation is estimated by
:math:`\hat{p}(1-\hat{p})`.
>>> ttest_ind_from_stats(mean1=0.2, std1=np.sqrt(0.16), nobs1=150,
... mean2=0.225, std2=np.sqrt(0.17437), nobs2=200)
Ttest_indResult(statistic=-0.564327545549774, pvalue=0.5728947691244874)
For comparison, we could compute the t statistic and p-value using
arrays of 0s and 1s and `scipy.stat.ttest_ind`, as above.
>>> group1 = np.array([1]*30 + [0]*(150-30))
>>> group2 = np.array([1]*45 + [0]*(200-45))
>>> ttest_ind(group1, group2)
Ttest_indResult(statistic=-0.5627179589855622, pvalue=0.573989277115258)
"""
mean1 = np.asarray(mean1)
std1 = np.asarray(std1)
mean2 = np.asarray(mean2)
std2 = np.asarray(std2)
if equal_var:
df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)
else:
df, denom = _unequal_var_ttest_denom(std1**2, nobs1,
std2**2, nobs2)
res = _ttest_ind_from_stats(mean1, mean2, denom, df, alternative)
return Ttest_indResult(*res)
def _ttest_nans(a, b, axis, namedtuple_type):
"""
Generate an array of `nan`, with shape determined by `a`, `b` and `axis`.
This function is used by ttest_ind and ttest_rel to create the return
value when one of the inputs has size 0.
The shapes of the arrays are determined by dropping `axis` from the
shapes of `a` and `b` and broadcasting what is left.
The return value is a named tuple of the type given in `namedtuple_type`.
Examples
--------
>>> a = np.zeros((9, 2))
>>> b = np.zeros((5, 1))
>>> _ttest_nans(a, b, 0, Ttest_indResult)
Ttest_indResult(statistic=array([nan, nan]), pvalue=array([nan, nan]))
>>> a = np.zeros((3, 0, 9))
>>> b = np.zeros((1, 10))
>>> stat, p = _ttest_nans(a, b, -1, Ttest_indResult)
>>> stat
array([], shape=(3, 0), dtype=float64)
>>> p
array([], shape=(3, 0), dtype=float64)
>>> a = np.zeros(10)
>>> b = np.zeros(7)
>>> _ttest_nans(a, b, 0, Ttest_indResult)
Ttest_indResult(statistic=nan, pvalue=nan)
"""
shp = _broadcast_shapes_with_dropped_axis(a, b, axis)
if len(shp) == 0:
t = np.nan
p = np.nan
else:
t = np.full(shp, fill_value=np.nan)
p = t.copy()
return namedtuple_type(t, p)
def ttest_ind(a, b, axis=0, equal_var=True, nan_policy='propagate',
permutations=None, random_state=None, alternative="two-sided",
trim=0):
"""
Calculate the T-test for the means of *two independent* samples of scores.
This is a test for the null hypothesis that 2 independent samples
have identical average (expected) values. This test assumes that the
populations have identical variances by default.
Parameters
----------
a, b : array_like
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
equal_var : bool, optional
If True (default), perform a standard independent 2 sample test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
.. versionadded:: 0.11.0
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
The 'omit' option is not currently available for permutation tests or
one-sided asymptotic tests.
permutations : non-negative int, np.inf, or None (default), optional
If 0 or None (default), use the t-distribution to calculate p-values.
Otherwise, `permutations` is the number of random permutations that
will be used to estimate p-values using a permutation test. If
`permutations` equals or exceeds the number of distinct partitions of
the pooled data, an exact test is performed instead (i.e. each
distinct partition is used exactly once). See Notes for details.
.. versionadded:: 1.7.0
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Pseudorandom number generator state used to generate permutations
(used only when `permutations` is not None).
.. versionadded:: 1.7.0
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the means of the distributions underlying the samples
are unequal.
* 'less': the mean of the distribution underlying the first sample
is less than the mean of the distribution underlying the second
sample.
* 'greater': the mean of the distribution underlying the first
sample is greater than the mean of the distribution underlying
the second sample.
.. versionadded:: 1.6.0
trim : float, optional
If nonzero, performs a trimmed (Yuen's) t-test.
Defines the fraction of elements to be trimmed from each end of the
input samples. If 0 (default), no elements will be trimmed from either
side. The number of trimmed elements from each tail is the floor of the
trim times the number of elements. Valid range is [0, .5).
.. versionadded:: 1.7
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The p-value.
Notes
-----
Suppose we observe two independent samples, e.g. flower petal lengths, and
we are considering whether the two samples were drawn from the same
population (e.g. the same species of flower or two species with similar
petal characteristics) or two different populations.
The t-test quantifies the difference between the arithmetic means
of the two samples. The p-value quantifies the probability of observing
as or more extreme values assuming the null hypothesis, that the
samples are drawn from populations with the same population means, is true.
A p-value larger than a chosen threshold (e.g. 5% or 1%) indicates that
our observation is not so unlikely to have occurred by chance. Therefore,
we do not reject the null hypothesis of equal population means.
If the p-value is smaller than our threshold, then we have evidence
against the null hypothesis of equal population means.
By default, the p-value is determined by comparing the t-statistic of the
observed data against a theoretical t-distribution.
When ``1 < permutations < binom(n, k)``, where
* ``k`` is the number of observations in `a`,
* ``n`` is the total number of observations in `a` and `b`, and
* ``binom(n, k)`` is the binomial coefficient (``n`` choose ``k``),
the data are pooled (concatenated), randomly assigned to either group `a`
or `b`, and the t-statistic is calculated. This process is performed
repeatedly (`permutations` times), generating a distribution of the
t-statistic under the null hypothesis, and the t-statistic of the observed
data is compared to this distribution to determine the p-value. When
``permutations >= binom(n, k)``, an exact test is performed: the data are
partitioned between the groups in each distinct way exactly once.
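As a conceptual sketch only (not the vectorized internal routine), a single
draw from the permutation null has the flavor of:
>>> pooled = np.concatenate((a, b))  # doctest: +SKIP
>>> perm = rng.permutation(pooled)  # doctest: +SKIP
>>> t_null = stats.ttest_ind(perm[:len(a)], perm[len(a):]).statistic  # doctest: +SKIP
where ``rng`` is a hypothetical `numpy.random.Generator`.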
The permutation test can be computationally expensive and not necessarily
more accurate than the analytical test, but it does not make strong
assumptions about the shape of the underlying distribution.
Use of trimming is commonly referred to as the trimmed t-test. At times
called Yuen's t-test, this is an extension of Welch's t-test, with the
difference being the use of winsorized means in calculation of the variance
and the trimmed sample size in calculation of the statistic. Trimming is
recommended if the underlying distribution is long-tailed or contaminated
with outliers [4]_.
References
----------
.. [1] https://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] https://en.wikipedia.org/wiki/Welch%27s_t-test
.. [3] http://en.wikipedia.org/wiki/Resampling_%28statistics%29
.. [4] Yuen, Karen K. "The Two-Sample Trimmed t for Unequal Population
Variances." Biometrika, vol. 61, no. 1, 1974, pp. 165-170. JSTOR,
www.jstor.org/stable/2334299. Accessed 30 Mar. 2021.
.. [5] Yuen, Karen K., and W. J. Dixon. "The Approximate Behaviour and
Performance of the Two-Sample Trimmed t." Biometrika, vol. 60,
no. 2, 1973, pp. 369-374. JSTOR, www.jstor.org/stable/2334550.
Accessed 30 Mar. 2021.
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
Test with sample with identical means:
>>> rvs1 = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
>>> rvs2 = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
>>> stats.ttest_ind(rvs1, rvs2)
Ttest_indResult(statistic=-0.4390847099199348, pvalue=0.6606952038870015)
>>> stats.ttest_ind(rvs1, rvs2, equal_var=False)
Ttest_indResult(statistic=-0.4390847099199348, pvalue=0.6606952553131064)
`ttest_ind` underestimates p for unequal variances:
>>> rvs3 = stats.norm.rvs(loc=5, scale=20, size=500, random_state=rng)
>>> stats.ttest_ind(rvs1, rvs3)
Ttest_indResult(statistic=-1.6370984482905417, pvalue=0.1019251574705033)
>>> stats.ttest_ind(rvs1, rvs3, equal_var=False)
Ttest_indResult(statistic=-1.637098448290542, pvalue=0.10202110497954867)
When ``n1 != n2``, the equal variance t-statistic is no longer equal to the
unequal variance t-statistic:
>>> rvs4 = stats.norm.rvs(loc=5, scale=20, size=100, random_state=rng)
>>> stats.ttest_ind(rvs1, rvs4)
Ttest_indResult(statistic=-1.9481646859513422, pvalue=0.05186270935842703)
>>> stats.ttest_ind(rvs1, rvs4, equal_var=False)
Ttest_indResult(statistic=-1.3146566100751664, pvalue=0.1913495266513811)
T-test with different means, variance, and n:
>>> rvs5 = stats.norm.rvs(loc=8, scale=20, size=100, random_state=rng)
>>> stats.ttest_ind(rvs1, rvs5)
Ttest_indResult(statistic=-2.8415950600298774, pvalue=0.0046418707568707885)
>>> stats.ttest_ind(rvs1, rvs5, equal_var=False)
Ttest_indResult(statistic=-1.8686598649188084, pvalue=0.06434714193919686)
When performing a permutation test, more permutations typically yield
more accurate results. Use a ``np.random.Generator`` to ensure
reproducibility:
>>> stats.ttest_ind(rvs1, rvs5, permutations=10000,
... random_state=rng)
Ttest_indResult(statistic=-2.8415950600298774, pvalue=0.0052)
Take these two samples, one of which has an extreme tail.
>>> a = (56, 128.6, 12, 123.8, 64.34, 78, 763.3)
>>> b = (1.1, 2.9, 4.2)
Use the `trim` keyword to perform a trimmed (Yuen) t-test. For example,
using 20% trimming, ``trim=.2``, the test will reduce the impact of one
(``np.floor(trim*len(a))``) element from each tail of sample `a`. It will
have no effect on sample `b` because ``np.floor(trim*len(b))`` is 0.
>>> stats.ttest_ind(a, b, trim=.2)
Ttest_indResult(statistic=3.4463884028073513,
pvalue=0.01369338726499547)
"""
if not (0 <= trim < .5):
raise ValueError("Trimming percentage should be 0 <= `trim` < .5.")
a, b, axis = _chk2_asarray(a, b, axis)
# check both a and b
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
if permutations or trim != 0:
raise ValueError("nan-containing/masked inputs with "
"nan_policy='omit' are currently not "
"supported by permutation tests or "
"trimmed tests.")
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
return mstats_basic.ttest_ind(a, b, axis, equal_var, alternative)
if a.size == 0 or b.size == 0:
return _ttest_nans(a, b, axis, Ttest_indResult)
if permutations is not None and permutations != 0:
if trim != 0:
raise ValueError("Permutations are currently not supported "
"with trimming.")
if permutations < 0 or (np.isfinite(permutations) and
int(permutations) != permutations):
raise ValueError("Permutations must be a non-negative integer.")
res = _permutation_ttest(a, b, permutations=permutations,
axis=axis, equal_var=equal_var,
nan_policy=nan_policy,
random_state=random_state,
alternative=alternative)
else:
n1 = a.shape[axis]
n2 = b.shape[axis]
if trim == 0:
v1 = np.var(a, axis, ddof=1)
v2 = np.var(b, axis, ddof=1)
m1 = np.mean(a, axis)
m2 = np.mean(b, axis)
else:
v1, m1, n1 = _ttest_trim_var_mean_len(a, trim, axis)
v2, m2, n2 = _ttest_trim_var_mean_len(b, trim, axis)
if equal_var:
df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
else:
df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)
res = _ttest_ind_from_stats(m1, m2, denom, df, alternative)
return Ttest_indResult(*res)
def _ttest_trim_var_mean_len(a, trim, axis):
"""Variance, mean, and length of winsorized input along specified axis"""
# for use with `ttest_ind` when trimming.
# further calculations in this test assume that the inputs are sorted.
# From [4] Section 1 "Let x_1, ..., x_n be n ordered observations..."
a = np.sort(a, axis=axis)
# `g` is the number of elements to be replaced on each tail, converted
# from a percentage amount of trimming
n = a.shape[axis]
g = int(n * trim)
# Calculate the Winsorized variance of the input samples according to
# specified `g`
v = _calculate_winsorized_variance(a, g, axis)
# the total number of elements in the trimmed samples
n -= 2 * g
# calculate the g-times trimmed mean, as defined in [4] (1-1)
m = trim_mean(a, trim, axis=axis)
return v, m, n
def _calculate_winsorized_variance(a, g, axis):
"""Calculates g-times winsorized variance along specified axis"""
# it is expected that the input `a` is sorted along the correct axis
if g == 0:
return np.var(a, ddof=1, axis=axis)
# move the intended axis to the end that way it is easier to manipulate
a_win = np.moveaxis(a, axis, -1)
# save where NaNs are for later use.
nans_indices = np.any(np.isnan(a_win), axis=-1)
# Winsorization and variance calculation are done in one step in [4]
# (1-3), but here winsorization is done first; replace the left and
# right sides with the repeating value. This can be seen in effect in (
# 1-3) in [4], where the leftmost and rightmost tails are replaced with
# `(g + 1) * x_{g + 1}` on the left and `(g + 1) * x_{n - g}` on the
# right. Zero-indexing turns `g + 1` to `g`, and `n - g` to `- g - 1` in
# array indexing.
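# For example, with g=1, a sorted row [1, 2, 3, 9] is winsorized to
# [2, 2, 3, 3].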
a_win[..., :g] = a_win[..., [g]]
a_win[..., -g:] = a_win[..., [-g - 1]]
# Determine the variance. In [4], the degrees of freedom is expressed as
# `h - 1`, where `h = n - 2g` (unnumbered equations in Section 1, end of
# page 369, beginning of page 370). This is converted to NumPy's format,
# `n - ddof` for use with `np.var`. The result is converted to an
# array to accommodate indexing later.
var_win = np.asarray(np.var(a_win, ddof=(2 * g + 1), axis=-1))
# with `nan_policy='propagate'`, NaNs may be completely trimmed out
# because they were sorted into the tail of the array. In these cases,
# replace computed variances with `np.nan`.
var_win[nans_indices] = np.nan
return var_win
def _broadcast_concatenate(xs, axis):
"""Concatenate arrays along an axis with broadcasting."""
# move the axis we're concatenating along to the end
xs = [np.swapaxes(x, axis, -1) for x in xs]
# determine final shape of all but the last axis
shape = np.broadcast(*[x[..., 0] for x in xs]).shape
# broadcast along all but the last axis
xs = [np.broadcast_to(x, shape + (x.shape[-1],)) for x in xs]
# concatenate along last axis
res = np.concatenate(xs, axis=-1)
# move the last axis back to where it was
res = np.swapaxes(res, axis, -1)
return res
def _data_partitions(data, permutations, size_a, axis=-1, random_state=None):
"""All partitions of data into sets of given lengths, ignoring order"""
random_state = check_random_state(random_state)
if axis < 0: # we'll be adding a new dimension at the end
axis = data.ndim + axis
# prepare permutation indices
size = data.shape[axis]
# number of distinct combinations
n_max = special.comb(size, size_a)
if permutations < n_max:
indices = np.array([random_state.permutation(size)
for i in range(permutations)]).T
else:
permutations = n_max
indices = np.array([np.concatenate(z)
for z in _all_partitions(size_a, size-size_a)]).T
data = data.swapaxes(axis, -1) # so we can index along a new dimension
data = data[..., indices] # generate permutations
data = data.swapaxes(-2, axis) # restore original axis order
data = np.moveaxis(data, -1, 0) # permutations indexed along axis 0
return data, permutations
def _calc_t_stat(a, b, equal_var, axis=-1):
"""Calculate the t statistic along the given dimension."""
na = a.shape[axis]
nb = b.shape[axis]
avg_a = np.mean(a, axis=axis)
avg_b = np.mean(b, axis=axis)
var_a = np.var(a, axis=axis, ddof=1)
var_b = np.var(b, axis=axis, ddof=1)
if not equal_var:
denom = _unequal_var_ttest_denom(var_a, na, var_b, nb)[1]
else:
denom = _equal_var_ttest_denom(var_a, na, var_b, nb)[1]
return (avg_a-avg_b)/denom
def _permutation_ttest(a, b, permutations, axis=0, equal_var=True,
nan_policy='propagate', random_state=None,
alternative="two-sided"):
"""
Calculates the T-test for the means of TWO INDEPENDENT samples of scores
using permutation methods.
This test is similar to `stats.ttest_ind`, except it doesn't rely on an
approximate normality assumption since it uses a permutation test.
This function is only called from ttest_ind when permutations is not None.
Parameters
----------
a, b : array_like
The arrays must be broadcastable, except along the dimension
corresponding to `axis` (the zeroth, by default).
axis : int, optional
The axis over which to operate on a and b.
permutations: int, optional
Number of permutations used to calculate p-value. If greater than or
equal to the number of distinct permutations, perform an exact test.
equal_var: bool, optional
If False, Welch's t-test, which does not assume equal population
variance, is conducted. Otherwise, an ordinary (equal variance) t-test
is conducted.
random_state : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Pseudorandom number generator state used for generating random
permutations.
Returns
-------
statistic : float or array
The calculated t-statistic.
pvalue : float or array
The p-value.
"""
random_state = check_random_state(random_state)
t_stat_observed = _calc_t_stat(a, b, equal_var, axis=axis)
na = a.shape[axis]
mat = _broadcast_concatenate((a, b), axis=axis)
mat = np.moveaxis(mat, axis, -1)
mat_perm, permutations = _data_partitions(mat, permutations, size_a=na,
random_state=random_state)
a = mat_perm[..., :na]
b = mat_perm[..., na:]
t_stat = _calc_t_stat(a, b, equal_var)
compare = {"less": np.less_equal,
"greater": np.greater_equal,
"two-sided": lambda x, y: (x <= -np.abs(y)) | (x >= np.abs(y))}
# Calculate the p-values
cmps = compare[alternative](t_stat, t_stat_observed)
pvalues = cmps.sum(axis=0) / permutations
# nans propagate naturally in statistic calculation, but need to be
# propagated manually into pvalues
if nan_policy == 'propagate' and np.isnan(t_stat_observed).any():
if np.ndim(pvalues) == 0:
pvalues = np.float64(np.nan)
else:
pvalues[np.isnan(t_stat_observed)] = np.nan
return (t_stat_observed, pvalues)
def _get_len(a, axis, msg):
try:
n = a.shape[axis]
except IndexError:
raise np.AxisError(axis, a.ndim, msg) from None
return n
Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
def ttest_rel(a, b, axis=0, nan_policy='propagate', alternative="two-sided"):
"""Calculate the t-test on TWO RELATED samples of scores, a and b.
This is a test for the null hypothesis that two related or
repeated samples have identical average (expected) values.
Parameters
----------
a, b : array_like
The arrays must have the same shape.
axis : int or None, optional
Axis along which to compute test. If None, compute over the whole
arrays, `a`, and `b`.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided': the means of the distributions underlying the samples
are unequal.
* 'less': the mean of the distribution underlying the first sample
is less than the mean of the distribution underlying the second
sample.
* 'greater': the mean of the distribution underlying the first
sample is greater than the mean of the distribution underlying
the second sample.
.. versionadded:: 1.6.0
Returns
-------
statistic : float or array
t-statistic.
pvalue : float or array
The p-value.
Notes
-----
Examples for use are scores of the same set of students in
different exams, or repeated sampling from the same units. The
test measures whether the average score differs significantly
across samples (e.g. exams). If we observe a large p-value, for
example greater than 0.05 or 0.1 then we cannot reject the null
hypothesis of identical average scores. If the p-value is smaller
than the threshold, e.g. 1%, 5% or 10%, then we reject the null
hypothesis of equal averages. Small p-values are associated with
large t-statistics.
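As a sketch of the computation, the paired statistic is equivalent to
applying the one-sample test to the element-wise differences, i.e.
``ttest_rel(a, b)`` gives the same result as ``ttest_1samp(a - b, popmean=0)``
for the same `axis`, `nan_policy` and `alternative`.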
References
----------
https://en.wikipedia.org/wiki/T-test#Dependent_t-test_for_paired_samples
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> rvs1 = stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
>>> rvs2 = (stats.norm.rvs(loc=5, scale=10, size=500, random_state=rng)
... + stats.norm.rvs(scale=0.2, size=500, random_state=rng))
>>> stats.ttest_rel(rvs1, rvs2)
Ttest_relResult(statistic=-0.4549717054410304, pvalue=0.6493274702088672)
>>> rvs3 = (stats.norm.rvs(loc=8, scale=10, size=500, random_state=rng)
... + stats.norm.rvs(scale=0.2, size=500, random_state=rng))
>>> stats.ttest_rel(rvs1, rvs3)
Ttest_relResult(statistic=-5.879467544540889, pvalue=7.540777129099917e-09)
"""
a, b, axis = _chk2_asarray(a, b, axis)
cna, npa = _contains_nan(a, nan_policy)
cnb, npb = _contains_nan(b, nan_policy)
contains_nan = cna or cnb
if npa == 'omit' or npb == 'omit':
nan_policy = 'omit'
if contains_nan and nan_policy == 'omit':
a = ma.masked_invalid(a)
b = ma.masked_invalid(b)
m = ma.mask_or(ma.getmask(a), ma.getmask(b))
aa = ma.array(a, mask=m, copy=True)
bb = ma.array(b, mask=m, copy=True)
return mstats_basic.ttest_rel(aa, bb, axis, alternative)
na = _get_len(a, axis, "first argument")
nb = _get_len(b, axis, "second argument")
if na != nb:
raise ValueError('unequal length arrays')
if na == 0:
return _ttest_nans(a, b, axis, Ttest_relResult)
n = a.shape[axis]
df = n - 1
d = (a - b).astype(np.float64)
v = np.var(d, axis, ddof=1)
dm = np.mean(d, axis)
denom = np.sqrt(v / n)
with np.errstate(divide='ignore', invalid='ignore'):
t = np.divide(dm, denom)
t, prob = _ttest_finish(df, t, alternative)
return Ttest_relResult(t, prob)
# Map from names to lambda_ values used in power_divergence().
_power_div_lambda_names = {
"pearson": 1,
"log-likelihood": 0,
"freeman-tukey": -0.5,
"mod-log-likelihood": -1,
"neyman": -2,
"cressie-read": 2/3,
}
def _count(a, axis=None):
"""Count the number of non-masked elements of an array.
This function behaves like `np.ma.count`, but is much faster
for ndarrays.
"""
if hasattr(a, 'count'):
num = a.count(axis=axis)
if isinstance(num, np.ndarray) and num.ndim == 0:
# In some cases, the `count` method returns a scalar array (e.g.
# np.array(3)), but we want a plain integer.
num = int(num)
else:
if axis is None:
num = a.size
else:
num = a.shape[axis]
return num
def _m_broadcast_to(a, shape):
if np.ma.isMaskedArray(a):
return np.ma.masked_array(np.broadcast_to(a, shape),
mask=np.broadcast_to(a.mask, shape))
return np.broadcast_to(a, shape, subok=True)
Power_divergenceResult = namedtuple('Power_divergenceResult',
('statistic', 'pvalue'))
def power_divergence(f_obs, f_exp=None, ddof=0, axis=0, lambda_=None):
"""Cressie-Read power divergence statistic and goodness of fit test.
This function tests the null hypothesis that the categorical data
has the given frequencies, using the Cressie-Read power divergence
statistic.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
lambda_ : float or str, optional
The power in the Cressie-Read power divergence statistic. The default
is 1. For convenience, `lambda_` may be assigned one of the following
strings, in which case the corresponding numerical value is used::
String               Value   Description
"pearson"              1     Pearson's chi-squared statistic.
                             In this case, the function is
                             equivalent to `stats.chisquare`.
"log-likelihood"       0     Log-likelihood ratio. Also known as
                             the G-test [3]_.
"freeman-tukey"      -1/2    Freeman-Tukey statistic.
"mod-log-likelihood"  -1     Modified log-likelihood ratio.
"neyman"              -2     Neyman's statistic.
"cressie-read"        2/3    The power recommended in [5]_.
Returns
-------
statistic : float or ndarray
The Cressie-Read power divergence test statistic. The value is
a float if `axis` is None or if `f_obs` and `f_exp` are 1-D.
pvalue : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `stat` are scalars.
See Also
--------
chisquare
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5.
Also, the sum of the observed and expected frequencies must be the same
for the test to be valid; `power_divergence` raises an error if the sums
do not agree within a relative tolerance of ``1e-8``.
When `lambda_` is less than zero, the formula for the statistic involves
dividing by `f_obs`, so a warning or error may be generated if any value
in `f_obs` is 0.
Similarly, a warning or error may be generated if any value in `f_exp` is
zero when `lambda_` >= 0.
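As a sketch of the general case (for `lambda_` other than 0 and -1), the
statistic computed is
``2.0 / (lambda_ * (lambda_ + 1)) * np.sum(f_obs * ((f_obs / f_exp)**lambda_ - 1), axis=axis)``.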
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not a chisquare, in which case this
test is not appropriate.
This function handles masked arrays. If an element of `f_obs` or `f_exp`
is masked, then data at that position is ignored, and does not count
towards the size of the data set.
.. versionadded:: 0.13.0
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8.
https://web.archive.org/web/20171015035606/http://faculty.vassar.edu/lowry/ch8pt1.html
.. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test
.. [3] "G-test", https://en.wikipedia.org/wiki/G-test
.. [4] Sokal, R. R. and Rohlf, F. J. "Biometry: the principles and
practice of statistics in biological research", New York: Freeman
(1981)
.. [5] Cressie, N. and Read, T. R. C., "Multinomial Goodness-of-Fit
Tests", J. Royal Stat. Soc. Series B, Vol. 46, No. 3 (1984),
pp. 440-464.
Examples
--------
(See `chisquare` for more examples.)
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies. Here we
perform a G-test (i.e. use the log-likelihood ratio statistic):
>>> from scipy.stats import power_divergence
>>> power_divergence([16, 18, 16, 14, 12, 12], lambda_='log-likelihood')
(2.006573162632538, 0.84823476779463769)
The expected frequencies can be given with the `f_exp` argument:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[16, 16, 16, 16, 16, 8],
... lambda_='log-likelihood')
(3.3281031458963746, 0.6495419288047497)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> power_divergence(obs, lambda_="log-likelihood")
(array([ 2.00657316, 6.77634498]), array([ 0.84823477, 0.23781225]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> power_divergence(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> power_divergence(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
test statistic with `ddof`.
>>> power_divergence([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we must use ``axis=1``:
>>> power_divergence([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8],
... [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
# Convert the input argument `lambda_` to a numerical value.
if isinstance(lambda_, str):
if lambda_ not in _power_div_lambda_names:
names = repr(list(_power_div_lambda_names.keys()))[1:-1]
raise ValueError("invalid string for lambda_: {0!r}. "
"Valid strings are {1}".format(lambda_, names))
lambda_ = _power_div_lambda_names[lambda_]
elif lambda_ is None:
lambda_ = 1
f_obs = np.asanyarray(f_obs)
f_obs_float = f_obs.astype(np.float64)
if f_exp is not None:
f_exp = np.asanyarray(f_exp)
bshape = _broadcast_shapes(f_obs_float.shape, f_exp.shape)
f_obs_float = _m_broadcast_to(f_obs_float, bshape)
f_exp = _m_broadcast_to(f_exp, bshape)
rtol = 1e-8 # to pass existing tests
with np.errstate(invalid='ignore'):
f_obs_sum = f_obs_float.sum(axis=axis)
f_exp_sum = f_exp.sum(axis=axis)
relative_diff = (np.abs(f_obs_sum - f_exp_sum) /
np.minimum(f_obs_sum, f_exp_sum))
diff_gt_tol = (relative_diff > rtol).any()
if diff_gt_tol:
msg = (f"For each axis slice, the sum of the observed "
f"frequencies must agree with the sum of the "
f"expected frequencies to a relative tolerance "
f"of {rtol}, but the percent differences are:\n"
f"{relative_diff}")
raise ValueError(msg)
else:
# Ignore 'invalid' errors so the edge case of a data set with length 0
# is handled without spurious warnings.
with np.errstate(invalid='ignore'):
f_exp = f_obs.mean(axis=axis, keepdims=True)
# `terms` is the array of terms that are summed along `axis` to create
# the test statistic. We use some specialized code for a few special
# cases of lambda_.
if lambda_ == 1:
# Pearson's chi-squared statistic
terms = (f_obs_float - f_exp)**2 / f_exp
elif lambda_ == 0:
# Log-likelihood ratio (i.e. G-test)
terms = 2.0 * special.xlogy(f_obs, f_obs / f_exp)
elif lambda_ == -1:
# Modified log-likelihood ratio
terms = 2.0 * special.xlogy(f_exp, f_exp / f_obs)
else:
# General Cressie-Read power divergence.
terms = f_obs * ((f_obs / f_exp)**lambda_ - 1)
terms /= 0.5 * lambda_ * (lambda_ + 1)
stat = terms.sum(axis=axis)
num_obs = _count(terms, axis=axis)
ddof = asarray(ddof)
p = distributions.chi2.sf(stat, num_obs - 1 - ddof)
return Power_divergenceResult(stat, p)
def chisquare(f_obs, f_exp=None, ddof=0, axis=0):
"""Calculate a one-way chi-square test.
The chi-square test tests the null hypothesis that the categorical data
has the given frequencies.
Parameters
----------
f_obs : array_like
Observed frequencies in each category.
f_exp : array_like, optional
Expected frequencies in each category. By default the categories are
assumed to be equally likely.
ddof : int, optional
"Delta degrees of freedom": adjustment to the degrees of freedom
for the p-value. The p-value is computed using a chi-squared
distribution with ``k - 1 - ddof`` degrees of freedom, where `k`
is the number of observed frequencies. The default value of `ddof`
is 0.
axis : int or None, optional
The axis of the broadcast result of `f_obs` and `f_exp` along which to
apply the test. If axis is None, all values in `f_obs` are treated
as a single data set. Default is 0.
Returns
-------
chisq : float or ndarray
The chi-squared test statistic. The value is a float if `axis` is
None or `f_obs` and `f_exp` are 1-D.
p : float or ndarray
The p-value of the test. The value is a float if `ddof` and the
return value `chisq` are scalars.
See Also
--------
scipy.stats.power_divergence
scipy.stats.fisher_exact : Fisher exact test on a 2x2 contingency table.
scipy.stats.barnard_exact : An unconditional exact test. An alternative
to chi-squared test for small sample sizes.
Notes
-----
This test is invalid when the observed or expected frequencies in each
category are too small. A typical rule is that all of the observed
and expected frequencies should be at least 5. According to [3]_, the
total number of samples is recommended to be greater than 13,
otherwise exact tests (such as Barnard's Exact test) should be used
because they do not overreject.
Also, the sum of the observed and expected frequencies must be the same
for the test to be valid; `chisquare` raises an error if the sums do not
agree within a relative tolerance of ``1e-8``.
The default degrees of freedom, k-1, are for the case when no parameters
of the distribution are estimated. If p parameters are estimated by
efficient maximum likelihood then the correct degrees of freedom are
k-1-p. If the parameters are estimated in a different way, then the
dof can be between k-1-p and k-1. However, it is also possible that
the asymptotic distribution is not chi-square, in which case this test
is not appropriate.
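As a sketch, the statistic computed here is
``np.sum((f_obs - f_exp)**2 / f_exp, axis=axis)``, i.e. the
``lambda_='pearson'`` case of `power_divergence`.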
References
----------
.. [1] Lowry, Richard. "Concepts and Applications of Inferential
Statistics". Chapter 8.
https://web.archive.org/web/20171022032306/http://vassarstats.net:80/textbook/ch8pt1.html
.. [2] "Chi-squared test", https://en.wikipedia.org/wiki/Chi-squared_test
.. [3] Pearson, Karl. "On the criterion that a given system of deviations from the probable
in the case of a correlated system of variables is such that it can be reasonably
supposed to have arisen from random sampling", Philosophical Magazine. Series 5. 50
(1900), pp. 157-175.
Examples
--------
When just `f_obs` is given, it is assumed that the expected frequencies
are uniform and given by the mean of the observed frequencies.
>>> from scipy.stats import chisquare
>>> chisquare([16, 18, 16, 14, 12, 12])
(2.0, 0.84914503608460956)
With `f_exp` the expected frequencies can be given.
>>> chisquare([16, 18, 16, 14, 12, 12], f_exp=[16, 16, 16, 16, 16, 8])
(3.5, 0.62338762774958223)
When `f_obs` is 2-D, by default the test is applied to each column.
>>> obs = np.array([[16, 18, 16, 14, 12, 12], [32, 24, 16, 28, 20, 24]]).T
>>> obs.shape
(6, 2)
>>> chisquare(obs)
(array([ 2. , 6.66666667]), array([ 0.84914504, 0.24663415]))
By setting ``axis=None``, the test is applied to all data in the array,
which is equivalent to applying the test to the flattened array.
>>> chisquare(obs, axis=None)
(23.31034482758621, 0.015975692534127565)
>>> chisquare(obs.ravel())
(23.31034482758621, 0.015975692534127565)
`ddof` is the change to make to the default degrees of freedom.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=1)
(2.0, 0.73575888234288467)
The calculation of the p-values is done by broadcasting the
chi-squared statistic with `ddof`.
>>> chisquare([16, 18, 16, 14, 12, 12], ddof=[0,1,2])
(2.0, array([ 0.84914504, 0.73575888, 0.5724067 ]))
`f_obs` and `f_exp` are also broadcast. In the following, `f_obs` has
shape (6,) and `f_exp` has shape (2, 6), so the result of broadcasting
`f_obs` and `f_exp` has shape (2, 6). To compute the desired chi-squared
statistics, we use ``axis=1``:
>>> chisquare([16, 18, 16, 14, 12, 12],
... f_exp=[[16, 16, 16, 16, 16, 8], [8, 20, 20, 16, 12, 12]],
... axis=1)
(array([ 3.5 , 9.25]), array([ 0.62338763, 0.09949846]))
"""
return power_divergence(f_obs, f_exp=f_exp, ddof=ddof, axis=axis,
lambda_="pearson")
KstestResult = namedtuple('KstestResult', ('statistic', 'pvalue'))
def _compute_dplus(cdfvals):
"""Computes D+ as used in the Kolmogorov-Smirnov test.
Parameters
----------
cdfvals: array_like
Sorted array of CDF values between 0 and 1
Returns
-------
Maximum distance of the CDF values below Uniform(0, 1)
"""
n = len(cdfvals)
return (np.arange(1.0, n + 1) / n - cdfvals).max()
def _compute_dminus(cdfvals):
"""Computes D- as used in the Kolmogorov-Smirnov test.
Parameters
----------
cdfvals: array_like
Sorted array of CDF values between 0 and 1
Returns
-------
Maximum distance of the CDF values above Uniform(0, 1)
"""
n = len(cdfvals)
return (cdfvals - np.arange(0.0, n)/n).max()
def ks_1samp(x, cdf, args=(), alternative='two-sided', mode='auto'):
"""
Performs the one-sample Kolmogorov-Smirnov test for goodness of fit.
This test compares the underlying distribution F(x) of a sample
against a given continuous distribution G(x). See Notes for a description
of the available null and alternative hypotheses.
Parameters
----------
x : array_like
a 1-D array of observations of iid random variables.
cdf : callable
callable used to calculate the cdf.
args : tuple, sequence, optional
Distribution parameters, used with `cdf`.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the null and alternative hypotheses. Default is 'two-sided'.
Please see explanations in the Notes below.
mode : {'auto', 'exact', 'approx', 'asymp'}, optional
Defines the distribution used for calculating the p-value.
The following options are available (default is 'auto'):
* 'auto' : selects one of the other options.
* 'exact' : uses the exact distribution of test statistic.
* 'approx' : approximates the two-sided probability with twice
the one-sided probability
* 'asymp': uses asymptotic distribution of test statistic
Returns
-------
statistic : float
KS test statistic, either D, D+ or D- (depending on the value
of 'alternative')
pvalue : float
One-tailed or two-tailed p-value.
See Also
--------
ks_2samp, kstest
Notes
-----
There are three options for the null and corresponding alternative
hypothesis that can be selected using the `alternative` parameter.
- `two-sided`: The null hypothesis is that the two distributions are
identical, F(x)=G(x) for all x; the alternative is that they are not
identical.
- `less`: The null hypothesis is that F(x) >= G(x) for all x; the
alternative is that F(x) < G(x) for at least one x.
- `greater`: The null hypothesis is that F(x) <= G(x) for all x; the
alternative is that F(x) > G(x) for at least one x.
Note that the alternative hypotheses describe the *CDFs* of the
underlying distributions, not the observed values. For example,
suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in
x1 tend to be less than those in x2.
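As a sketch of the computation, with the sample sorted and ``cdfvals = cdf(x)``,
the one-sided statistics are ``D+ = max(i/n - cdfvals[i-1])`` and
``D- = max(cdfvals[i-1] - (i-1)/n)`` for ``i = 1, ..., n``, and the two-sided
statistic is ``max(D+, D-)``.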
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> x = np.linspace(-15, 15, 9)
>>> stats.ks_1samp(x, stats.norm.cdf)
(0.44435602715924361, 0.038850142705171065)
>>> stats.ks_1samp(stats.norm.rvs(size=100, random_state=rng),
... stats.norm.cdf)
KstestResult(statistic=0.165471391799..., pvalue=0.007331283245...)
*Test against one-sided alternative hypothesis*
Shift distribution to larger values, so that ``CDF(x) < norm.cdf(x)``:
>>> x = stats.norm.rvs(loc=0.2, size=100, random_state=rng)
>>> stats.ks_1samp(x, stats.norm.cdf, alternative='less')
KstestResult(statistic=0.100203351482..., pvalue=0.125544644447...)
Reject null hypothesis in favor of alternative hypothesis: less
>>> stats.ks_1samp(x, stats.norm.cdf, alternative='greater')
KstestResult(statistic=0.018749806388..., pvalue=0.920581859791...)
Don't reject null hypothesis in favor of alternative hypothesis: greater
>>> stats.ks_1samp(x, stats.norm.cdf)
KstestResult(statistic=0.100203351482..., pvalue=0.250616879765...)
Don't reject null hypothesis in favor of alternative hypothesis: two-sided
*Testing t distributed random variables against normal distribution*
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the K-S test does not reject the hypothesis that the
sample came from the normal distribution:
>>> stats.ks_1samp(stats.t.rvs(100,size=100, random_state=rng),
... stats.norm.cdf)
KstestResult(statistic=0.064273776544..., pvalue=0.778737758305...)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at the 10% level:
>>> stats.ks_1samp(stats.t.rvs(3,size=100, random_state=rng),
... stats.norm.cdf)
KstestResult(statistic=0.128678487493..., pvalue=0.066569081515...)
"""
alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get(
alternative.lower()[0], alternative)
if alternative not in ['two-sided', 'greater', 'less']:
raise ValueError("Unexpected alternative %s" % alternative)
if np.ma.is_masked(x):
x = x.compressed()
N = len(x)
x = np.sort(x)
cdfvals = cdf(x, *args)
if alternative == 'greater':
Dplus = _compute_dplus(cdfvals)
return KstestResult(Dplus, distributions.ksone.sf(Dplus, N))
if alternative == 'less':
Dminus = _compute_dminus(cdfvals)
return KstestResult(Dminus, distributions.ksone.sf(Dminus, N))
# alternative == 'two-sided':
Dplus = _compute_dplus(cdfvals)
Dminus = _compute_dminus(cdfvals)
D = np.max([Dplus, Dminus])
if mode == 'auto': # Always select exact
mode = 'exact'
if mode == 'exact':
prob = distributions.kstwo.sf(D, N)
elif mode == 'asymp':
prob = distributions.kstwobign.sf(D * np.sqrt(N))
else:
# mode == 'approx'
prob = 2 * distributions.ksone.sf(D, N)
prob = np.clip(prob, 0, 1)
return KstestResult(D, prob)
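# Illustrative cross-check of the statistic computed above (not part of the
# public API; the helper name below is hypothetical). For a sorted sample
# x_1 <= ... <= x_n, D+ = max_i (i/n - F(x_i)), D- = max_i (F(x_i) - (i-1)/n),
# and the two-sided statistic is max(D+, D-). A minimal sketch assuming a
# vectorized `cdf` callable:
def _ks_1samp_statistic_demo(x, cdf):
    import numpy as np
    x = np.sort(np.asarray(x, dtype=float))
    n = len(x)
    cdfvals = cdf(x)
    dplus = (np.arange(1.0, n + 1) / n - cdfvals).max()
    dminus = (cdfvals - np.arange(0.0, n) / n).max()
    return max(dplus, dminus)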
Ks_2sampResult = KstestResult
def _compute_prob_inside_method(m, n, g, h):
"""
Count the proportion of paths that stay strictly inside two diagonal lines.
Parameters
----------
m : integer
m > 0
n : integer
n > 0
g : integer
g is greatest common divisor of m and n
h : integer
0 <= h <= lcm(m,n)
Returns
-------
p : float
The proportion of paths that stay inside the two lines.
Count the integer lattice paths from (0, 0) to (m, n) which satisfy
|x/m - y/n| < h / lcm(m, n).
The paths make steps of size +1 in either positive x or positive y
directions.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk.
Hodges, J.L. Jr.,
"The Significance Probability of the Smirnov Two-Sample Test,"
    Arkiv för Matematik, 3, No. 43 (1958), 469-86.
"""
# Probability is symmetrical in m, n. Computation below uses m >= n.
if m < n:
m, n = n, m
mg = m // g
ng = n // g
# Count the integer lattice paths from (0, 0) to (m, n) which satisfy
# |nx/g - my/g| < h.
# Compute matrix A such that:
# A(x, 0) = A(0, y) = 1
# A(x, y) = A(x, y-1) + A(x-1, y), for x,y>=1, except that
# A(x, y) = 0 if |x/m - y/n|>= h
# Probability is A(m, n)/binom(m+n, n)
# Optimizations exist for m==n, m==n*p.
# Only need to preserve a single column of A, and only a
# sliding window of it.
# minj keeps track of the slide.
minj, maxj = 0, min(int(np.ceil(h / mg)), n + 1)
curlen = maxj - minj
# Make a vector long enough to hold maximum window needed.
lenA = min(2 * maxj + 2, n + 1)
# This is an integer calculation, but the entries are essentially
# binomial coefficients, hence grow quickly.
# Scaling after each column is computed avoids dividing by a
# large binomial coefficient at the end, but is not sufficient to avoid
    # the large dynamic range which appears during the calculation.
    # Instead we rescale based on the magnitude of the rightmost term in
    # the column and keep track of an exponent separately, applying
    # it at the end of the calculation. The same rescaling is done when
    # multiplying by the binomial coefficient.
dtype = np.float64
A = np.zeros(lenA, dtype=dtype)
# Initialize the first column
A[minj:maxj] = 1
expnt = 0
for i in range(1, m + 1):
# Generate the next column.
# First calculate the sliding window
lastminj, lastlen = minj, curlen
minj = max(int(np.floor((ng * i - h) / mg)) + 1, 0)
minj = min(minj, n)
maxj = min(int(np.ceil((ng * i + h) / mg)), n + 1)
if maxj <= minj:
return 0
# Now fill in the values
A[0:maxj - minj] = np.cumsum(A[minj - lastminj:maxj - lastminj])
curlen = maxj - minj
if lastlen > curlen:
# Set some carried-over elements to 0
A[maxj - minj:maxj - minj + (lastlen - curlen)] = 0
# Rescale if the right most value is over 2**900
val = A[maxj - minj - 1]
_, valexpt = math.frexp(val)
if valexpt > 900:
# Scaling to bring down to about 2**800 appears
# sufficient for sizes under 10000.
valexpt -= 800
A = np.ldexp(A, -valexpt)
expnt += valexpt
val = A[maxj - minj - 1]
# Now divide by the binomial (m+n)!/m!/n!
for i in range(1, n + 1):
val = (val * i) / (m + i)
_, valexpt = math.frexp(val)
if valexpt < -128:
val = np.ldexp(val, -valexpt)
expnt += valexpt
# Finally scale if needed.
return np.ldexp(val, expnt)
def _compute_prob_outside_square(n, h):
"""
Compute the proportion of paths that pass outside the two diagonal lines.
Parameters
----------
n : integer
n > 0
h : integer
0 <= h <= n
Returns
-------
p : float
The proportion of paths that pass outside the lines x-y = +/-h.
"""
# Compute Pr(D_{n,n} >= h/n)
    # Prob = 2 * ( binom(2n, n-h) - binom(2n, n-2h) + binom(2n, n-3h) - ... )
# / binom(2n, n)
# This formulation exhibits subtractive cancellation.
# Instead divide each term by binom(2n, n), then factor common terms
# and use a Horner-like algorithm
# P = 2 * A0 * (1 - A1*(1 - A2*(1 - A3*(1 - A4*(...)))))
P = 0.0
k = int(np.floor(n / h))
while k >= 0:
p1 = 1.0
# Each of the Ai terms has numerator and denominator with
# h simple terms.
for j in range(h):
p1 = (n - k * h - j) * p1 / (n + k * h + j + 1)
P = p1 * (1.0 - P)
k -= 1
return 2 * P
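# Brute-force sanity check for the closed form above (illustrative only; the
# helper name is hypothetical and the cost is exponential, so keep n small).
# It enumerates all C(2n, n) step orderings of a path from (0, 0) to (n, n)
# and counts those whose running difference between the two step counts ever
# reaches h in absolute value, which is the event D_{n,n} >= h/n.
def _prob_outside_square_bruteforce(n, h):
    from itertools import combinations
    total = outside = 0
    for xsteps in combinations(range(2 * n), n):
        total += 1
        xset = set(xsteps)
        diff = 0
        for step in range(2 * n):
            diff += 1 if step in xset else -1
            if abs(diff) >= h:
                outside += 1
                break
    return outside / total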
def _count_paths_outside_method(m, n, g, h):
"""Count the number of paths that pass outside the specified diagonal.
Parameters
----------
m : integer
m > 0
n : integer
n > 0
g : integer
g is greatest common divisor of m and n
h : integer
0 <= h <= lcm(m,n)
Returns
-------
p : float
        The number of paths that touch or cross the lower boundary.
The calculation may overflow - check for a finite answer.
Raises
------
FloatingPointError: Raised if the intermediate computation goes outside
the range of a float.
Notes
-----
Count the integer lattice paths from (0, 0) to (m, n), which at some
point (x, y) along the path, satisfy:
m*y <= n*x - h*g
The paths make steps of size +1 in either positive x or positive y
directions.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk.
Hodges, J.L. Jr.,
"The Significance Probability of the Smirnov Two-Sample Test,"
    Arkiv för Matematik, 3, No. 43 (1958), 469-86.
"""
# Compute #paths which stay lower than x/m-y/n = h/lcm(m,n)
# B(x, y) = #{paths from (0,0) to (x,y) without
# previously crossing the boundary}
# = binom(x, y) - #{paths which already reached the boundary}
# Multiply by the number of path extensions going from (x, y) to (m, n)
# Sum.
# Probability is symmetrical in m, n. Computation below assumes m >= n.
if m < n:
m, n = n, m
mg = m // g
ng = n // g
# Not every x needs to be considered.
# xj holds the list of x values to be checked.
# Wherever n*x/m + ng*h crosses an integer
lxj = n + (mg-h)//mg
xj = [(h + mg * j + ng-1)//ng for j in range(lxj)]
# B is an array just holding a few values of B(x,y), the ones needed.
# B[j] == B(x_j, j)
if lxj == 0:
return np.round(special.binom(m + n, n))
B = np.zeros(lxj)
B[0] = 1
# Compute the B(x, y) terms
# The binomial coefficient is an integer, but special.binom()
# may return a float. Round it to the nearest integer.
for j in range(1, lxj):
Bj = np.round(special.binom(xj[j] + j, j))
if not np.isfinite(Bj):
raise FloatingPointError()
for i in range(j):
bin = np.round(special.binom(xj[j] - xj[i] + j - i, j-i))
Bj -= bin * B[i]
B[j] = Bj
if not np.isfinite(Bj):
raise FloatingPointError()
# Compute the number of path extensions...
num_paths = 0
for j in range(lxj):
bin = np.round(special.binom((m-xj[j]) + (n - j), n-j))
term = B[j] * bin
if not np.isfinite(term):
raise FloatingPointError()
num_paths += term
return np.round(num_paths)
def _attempt_exact_2kssamp(n1, n2, g, d, alternative):
"""Attempts to compute the exact 2sample probability.
n1, n2 are the sample sizes
g is the gcd(n1, n2)
d is the computed max difference in ECDFs
Returns (success, d, probability)
"""
lcm = (n1 // g) * n2
h = int(np.round(d * lcm))
d = h * 1.0 / lcm
if h == 0:
return True, d, 1.0
saw_fp_error, prob = False, np.nan
try:
if alternative == 'two-sided':
if n1 == n2:
prob = _compute_prob_outside_square(n1, h)
else:
prob = 1 - _compute_prob_inside_method(n1, n2, g, h)
else:
if n1 == n2:
# prob = binom(2n, n-h) / binom(2n, n)
# Evaluating in that form incurs roundoff errors
# from special.binom. Instead calculate directly
jrange = np.arange(h)
prob = np.prod((n1 - jrange) / (n1 + jrange + 1.0))
else:
num_paths = _count_paths_outside_method(n1, n2, g, h)
bin = special.binom(n1 + n2, n1)
if not np.isfinite(bin) or not np.isfinite(num_paths)\
or num_paths > bin:
saw_fp_error = True
else:
prob = num_paths / bin
except FloatingPointError:
saw_fp_error = True
if saw_fp_error:
return False, d, np.nan
if not (0 <= prob <= 1):
return False, d, prob
return True, d, prob
def ks_2samp(data1, data2, alternative='two-sided', mode='auto'):
"""
Performs the two-sample Kolmogorov-Smirnov test for goodness of fit.
This test compares the underlying continuous distributions F(x) and G(x)
of two independent samples. See Notes for a description
of the available null and alternative hypotheses.
Parameters
----------
data1, data2 : array_like, 1-Dimensional
        Two arrays of sample observations assumed to be drawn from a
        continuous distribution; sample sizes can be different.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the null and alternative hypotheses. Default is 'two-sided'.
Please see explanations in the Notes below.
mode : {'auto', 'exact', 'asymp'}, optional
Defines the method used for calculating the p-value.
The following options are available (default is 'auto'):
* 'auto' : use 'exact' for small size arrays, 'asymp' for large
* 'exact' : use exact distribution of test statistic
* 'asymp' : use asymptotic distribution of test statistic
Returns
-------
statistic : float
KS statistic.
pvalue : float
One-tailed or two-tailed p-value.
See Also
--------
kstest, ks_1samp, epps_singleton_2samp, anderson_ksamp
Notes
-----
There are three options for the null and corresponding alternative
hypothesis that can be selected using the `alternative` parameter.
- `two-sided`: The null hypothesis is that the two distributions are
identical, F(x)=G(x) for all x; the alternative is that they are not
identical.
- `less`: The null hypothesis is that F(x) >= G(x) for all x; the
alternative is that F(x) < G(x) for at least one x.
- `greater`: The null hypothesis is that F(x) <= G(x) for all x; the
alternative is that F(x) > G(x) for at least one x.
Note that the alternative hypotheses describe the *CDFs* of the
underlying distributions, not the observed values. For example,
suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in
x1 tend to be less than those in x2.
If the KS statistic is small or the p-value is high, then we cannot
reject the null hypothesis in favor of the alternative.
If the mode is 'auto', the computation is exact if the sample sizes are
less than 10000. For larger sizes, the computation uses the
Kolmogorov-Smirnov distributions to compute an approximate value.
The 'two-sided' 'exact' computation computes the complementary probability
and then subtracts from 1. As such, the minimum probability it can return
is about 1e-16. While the algorithm itself is exact, numerical
errors may accumulate for large sample sizes. It is most suited to
situations in which one of the sample sizes is only a few thousand.
We generally follow Hodges' treatment of Drion/Gnedenko/Korolyuk [1]_.
References
----------
.. [1] Hodges, J.L. Jr., "The Significance Probability of the Smirnov
           Two-Sample Test," Arkiv för Matematik, 3, No. 43 (1958), 469-86.
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> n1 = 200 # size of first sample
>>> n2 = 300 # size of second sample
For a different distribution, we can reject the null hypothesis since the
pvalue is below 1%:
>>> rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1, random_state=rng)
>>> rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5, random_state=rng)
>>> stats.ks_2samp(rvs1, rvs2)
KstestResult(statistic=0.24833333333333332, pvalue=5.846586728086578e-07)
    For a slightly different distribution, we cannot reject the null hypothesis
    at a 10% or lower alpha since the p-value, about 0.44, is higher than 10%:
>>> rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0, random_state=rng)
>>> stats.ks_2samp(rvs1, rvs3)
KstestResult(statistic=0.07833333333333334, pvalue=0.4379658456442945)
    For an identical distribution, we cannot reject the null hypothesis at the
    5% level since the p-value, about 0.054, is above 5%:
>>> rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0, random_state=rng)
>>> stats.ks_2samp(rvs1, rvs4)
KstestResult(statistic=0.12166666666666667, pvalue=0.05401863039081145)
"""
if mode not in ['auto', 'exact', 'asymp']:
raise ValueError(f'Invalid value for mode: {mode}')
alternative = {'t': 'two-sided', 'g': 'greater', 'l': 'less'}.get(
alternative.lower()[0], alternative)
if alternative not in ['two-sided', 'less', 'greater']:
raise ValueError(f'Invalid value for alternative: {alternative}')
MAX_AUTO_N = 10000 # 'auto' will attempt to be exact if n1,n2 <= MAX_AUTO_N
if np.ma.is_masked(data1):
data1 = data1.compressed()
if np.ma.is_masked(data2):
data2 = data2.compressed()
data1 = np.sort(data1)
data2 = np.sort(data2)
n1 = data1.shape[0]
n2 = data2.shape[0]
if min(n1, n2) == 0:
raise ValueError('Data passed to ks_2samp must not be empty')
data_all = np.concatenate([data1, data2])
# using searchsorted solves equal data problem
cdf1 = np.searchsorted(data1, data_all, side='right') / n1
cdf2 = np.searchsorted(data2, data_all, side='right') / n2
cddiffs = cdf1 - cdf2
# Ensure sign of minS is not negative.
minS = np.clip(-np.min(cddiffs), 0, 1)
maxS = np.max(cddiffs)
alt2Dvalue = {'less': minS, 'greater': maxS, 'two-sided': max(minS, maxS)}
d = alt2Dvalue[alternative]
g = gcd(n1, n2)
n1g = n1 // g
n2g = n2 // g
prob = -np.inf
original_mode = mode
if mode == 'auto':
mode = 'exact' if max(n1, n2) <= MAX_AUTO_N else 'asymp'
elif mode == 'exact':
# If lcm(n1, n2) is too big, switch from exact to asymp
if n1g >= np.iinfo(np.int32).max / n2g:
mode = 'asymp'
            warnings.warn(
                f"Exact ks_2samp calculation not possible with sample sizes "
                f"{n1} and {n2}. Switching to 'asymp'.", RuntimeWarning)
if mode == 'exact':
success, d, prob = _attempt_exact_2kssamp(n1, n2, g, d, alternative)
if not success:
mode = 'asymp'
if original_mode == 'exact':
warnings.warn(f"ks_2samp: Exact calculation unsuccessful. "
f"Switching to mode={mode}.", RuntimeWarning)
if mode == 'asymp':
        # The product n1*n2 is large. Use Smirnov's asymptotic formula.
# Ensure float to avoid overflow in multiplication
# sorted because the one-sided formula is not symmetric in n1, n2
m, n = sorted([float(n1), float(n2)], reverse=True)
en = m * n / (m + n)
if alternative == 'two-sided':
prob = distributions.kstwo.sf(d, np.round(en))
else:
z = np.sqrt(en) * d
# Use Hodges' suggested approximation Eqn 5.3
# Requires m to be the larger of (n1, n2)
expt = -2 * z**2 - 2 * z * (m + 2*n)/np.sqrt(m*n*(m+n))/3.0
prob = np.exp(expt)
prob = np.clip(prob, 0, 1)
return KstestResult(d, prob)
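# Illustrative cross-check of the two-sided two-sample statistic (the helper
# name is hypothetical and not part of the public API): D is the sup-norm
# distance between the two empirical CDFs evaluated over the pooled sample.
# A minimal sketch assuming 1-D inputs and no masked values:
def _ks_2samp_statistic_demo(data1, data2):
    import numpy as np
    data1 = np.sort(np.asarray(data1, dtype=float))
    data2 = np.sort(np.asarray(data2, dtype=float))
    pooled = np.concatenate([data1, data2])
    cdf1 = np.searchsorted(data1, pooled, side='right') / data1.size
    cdf2 = np.searchsorted(data2, pooled, side='right') / data2.size
    return np.max(np.abs(cdf1 - cdf2))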
def _parse_kstest_args(data1, data2, args, N):
# kstest allows many different variations of arguments.
# Pull out the parsing into a separate function
# (xvals, yvals, ) # 2sample
# (xvals, cdf function,..)
# (xvals, name of distribution, ...)
# (name of distribution, name of distribution, ...)
# Returns xvals, yvals, cdf
# where cdf is a cdf function, or None
# and yvals is either an array_like of values, or None
# and xvals is array_like.
rvsfunc, cdf = None, None
if isinstance(data1, str):
rvsfunc = getattr(distributions, data1).rvs
elif callable(data1):
rvsfunc = data1
if isinstance(data2, str):
cdf = getattr(distributions, data2).cdf
data2 = None
elif callable(data2):
cdf = data2
data2 = None
data1 = np.sort(rvsfunc(*args, size=N) if rvsfunc else data1)
return data1, data2, cdf
def kstest(rvs, cdf, args=(), N=20, alternative='two-sided', mode='auto'):
"""
Performs the (one-sample or two-sample) Kolmogorov-Smirnov test for
goodness of fit.
The one-sample test compares the underlying distribution F(x) of a sample
against a given distribution G(x). The two-sample test compares the
underlying distributions of two independent samples. Both tests are valid
only for continuous distributions.
Parameters
----------
rvs : str, array_like, or callable
If an array, it should be a 1-D array of observations of random
variables.
If a callable, it should be a function to generate random variables;
it is required to have a keyword argument `size`.
If a string, it should be the name of a distribution in `scipy.stats`,
which will be used to generate random variables.
cdf : str, array_like or callable
If array_like, it should be a 1-D array of observations of random
variables, and the two-sample test is performed
(and rvs must be array_like).
If a callable, that callable is used to calculate the cdf.
If a string, it should be the name of a distribution in `scipy.stats`,
which will be used as the cdf function.
args : tuple, sequence, optional
Distribution parameters, used if `rvs` or `cdf` are strings or
callables.
N : int, optional
Sample size if `rvs` is string or callable. Default is 20.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the null and alternative hypotheses. Default is 'two-sided'.
Please see explanations in the Notes below.
mode : {'auto', 'exact', 'approx', 'asymp'}, optional
Defines the distribution used for calculating the p-value.
The following options are available (default is 'auto'):
* 'auto' : selects one of the other options.
* 'exact' : uses the exact distribution of test statistic.
* 'approx' : approximates the two-sided probability with twice the
one-sided probability
* 'asymp': uses asymptotic distribution of test statistic
Returns
-------
statistic : float
KS test statistic, either D, D+ or D-.
pvalue : float
One-tailed or two-tailed p-value.
See Also
--------
ks_2samp
Notes
-----
There are three options for the null and corresponding alternative
hypothesis that can be selected using the `alternative` parameter.
- `two-sided`: The null hypothesis is that the two distributions are
identical, F(x)=G(x) for all x; the alternative is that they are not
identical.
- `less`: The null hypothesis is that F(x) >= G(x) for all x; the
alternative is that F(x) < G(x) for at least one x.
- `greater`: The null hypothesis is that F(x) <= G(x) for all x; the
alternative is that F(x) > G(x) for at least one x.
Note that the alternative hypotheses describe the *CDFs* of the
underlying distributions, not the observed values. For example,
suppose x1 ~ F and x2 ~ G. If F(x) > G(x) for all x, the values in
x1 tend to be less than those in x2.
Examples
--------
>>> from scipy import stats
>>> rng = np.random.default_rng()
>>> x = np.linspace(-15, 15, 9)
>>> stats.kstest(x, 'norm')
KstestResult(statistic=0.444356027159..., pvalue=0.038850140086...)
>>> stats.kstest(stats.norm.rvs(size=100, random_state=rng), stats.norm.cdf)
KstestResult(statistic=0.165471391799..., pvalue=0.007331283245...)
The above lines are equivalent to:
>>> stats.kstest(stats.norm.rvs, 'norm', N=100)
KstestResult(statistic=0.113810164200..., pvalue=0.138690052319...) # may vary
*Test against one-sided alternative hypothesis*
Shift distribution to larger values, so that ``CDF(x) < norm.cdf(x)``:
>>> x = stats.norm.rvs(loc=0.2, size=100, random_state=rng)
>>> stats.kstest(x, 'norm', alternative='less')
KstestResult(statistic=0.1002033514..., pvalue=0.1255446444...)
Reject null hypothesis in favor of alternative hypothesis: less
>>> stats.kstest(x, 'norm', alternative='greater')
KstestResult(statistic=0.018749806388..., pvalue=0.920581859791...)
Don't reject null hypothesis in favor of alternative hypothesis: greater
>>> stats.kstest(x, 'norm')
KstestResult(statistic=0.100203351482..., pvalue=0.250616879765...)
*Testing t distributed random variables against normal distribution*
With 100 degrees of freedom the t distribution looks close to the normal
distribution, and the K-S test does not reject the hypothesis that the
sample came from the normal distribution:
>>> stats.kstest(stats.t.rvs(100, size=100, random_state=rng), 'norm')
KstestResult(statistic=0.064273776544..., pvalue=0.778737758305...)
With 3 degrees of freedom the t distribution looks sufficiently different
from the normal distribution, that we can reject the hypothesis that the
sample came from the normal distribution at the 10% level:
>>> stats.kstest(stats.t.rvs(3, size=100, random_state=rng), 'norm')
KstestResult(statistic=0.128678487493..., pvalue=0.066569081515...)
"""
# to not break compatibility with existing code
if alternative == 'two_sided':
alternative = 'two-sided'
if alternative not in ['two-sided', 'greater', 'less']:
raise ValueError("Unexpected alternative %s" % alternative)
xvals, yvals, cdf = _parse_kstest_args(rvs, cdf, args, N)
if cdf:
return ks_1samp(xvals, cdf, args=args, alternative=alternative,
mode=mode)
return ks_2samp(xvals, yvals, alternative=alternative, mode=mode)
def tiecorrect(rankvals):
"""Tie correction factor for Mann-Whitney U and Kruskal-Wallis H tests.
Parameters
----------
rankvals : array_like
A 1-D sequence of ranks. Typically this will be the array
returned by `~scipy.stats.rankdata`.
Returns
-------
factor : float
Correction factor for U or H.
See Also
--------
rankdata : Assign ranks to the data
mannwhitneyu : Mann-Whitney rank test
kruskal : Kruskal-Wallis H test
References
----------
.. [1] Siegel, S. (1956) Nonparametric Statistics for the Behavioral
Sciences. New York: McGraw-Hill.
Examples
--------
>>> from scipy.stats import tiecorrect, rankdata
>>> tiecorrect([1, 2.5, 2.5, 4])
0.9
>>> ranks = rankdata([1, 3, 2, 4, 5, 7, 2, 8, 4])
>>> ranks
array([ 1. , 4. , 2.5, 5.5, 7. , 8. , 2.5, 9. , 5.5])
>>> tiecorrect(ranks)
0.9833333333333333
"""
arr = np.sort(rankvals)
idx = np.nonzero(np.r_[True, arr[1:] != arr[:-1], True])[0]
cnt = np.diff(idx).astype(np.float64)
size = np.float64(arr.size)
return 1.0 if size < 2 else 1.0 - (cnt**3 - cnt).sum() / (size**3 - size)
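# Equivalent formulation of the tie correction (illustrative; the helper name
# is hypothetical): factor = 1 - sum_j (t_j**3 - t_j) / (n**3 - n), where t_j
# are the sizes of the groups of tied values. Computing it from the raw data
# with collections.Counter gives the same result as tiecorrect(rankdata(data)).
def _tiecorrect_from_values_demo(values):
    from collections import Counter
    n = len(values)
    if n < 2:
        return 1.0
    ties = sum(t**3 - t for t in Counter(values).values())
    return 1.0 - ties / float(n**3 - n)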
RanksumsResult = namedtuple('RanksumsResult', ('statistic', 'pvalue'))
def ranksums(x, y, alternative='two-sided'):
"""Compute the Wilcoxon rank-sum statistic for two samples.
The Wilcoxon rank-sum test tests the null hypothesis that two sets
of measurements are drawn from the same distribution. The alternative
hypothesis is that values in one sample are more likely to be
larger than the values in the other sample.
This test should be used to compare two samples from continuous
distributions. It does not handle ties between measurements
in x and y. For tie-handling and an optional continuity correction
see `scipy.stats.mannwhitneyu`.
Parameters
----------
x,y : array_like
The data from the two samples.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis. Default is 'two-sided'.
The following options are available:
* 'two-sided': one of the distributions (underlying `x` or `y`) is
stochastically greater than the other.
* 'less': the distribution underlying `x` is stochastically less
than the distribution underlying `y`.
* 'greater': the distribution underlying `x` is stochastically greater
than the distribution underlying `y`.
.. versionadded:: 1.7.0
Returns
-------
statistic : float
The test statistic under the large-sample approximation that the
rank sum statistic is normally distributed.
pvalue : float
The p-value of the test.
References
----------
.. [1] https://en.wikipedia.org/wiki/Wilcoxon_rank-sum_test
Examples
--------
    We can test the hypothesis that two independent unequal-sized samples are
    drawn from the same distribution by computing the Wilcoxon rank-sum
    statistic.
>>> from scipy.stats import ranksums
>>> rng = np.random.default_rng()
>>> sample1 = rng.uniform(-1, 1, 200)
>>> sample2 = rng.uniform(-0.5, 1.5, 300) # a shifted distribution
>>> ranksums(sample1, sample2)
RanksumsResult(statistic=-7.887059, pvalue=3.09390448e-15) # may vary
>>> ranksums(sample1, sample2, alternative='less')
RanksumsResult(statistic=-7.750585297581713, pvalue=4.573497606342543e-15) # may vary
>>> ranksums(sample1, sample2, alternative='greater')
RanksumsResult(statistic=-7.750585297581713, pvalue=0.9999999999999954) # may vary
    A p-value of less than ``0.05`` indicates that this test rejects the
    null hypothesis at the 5% significance level.
"""
x, y = map(np.asarray, (x, y))
n1 = len(x)
n2 = len(y)
alldata = np.concatenate((x, y))
ranked = rankdata(alldata)
x = ranked[:n1]
s = np.sum(x, axis=0)
expected = n1 * (n1+n2+1) / 2.0
z = (s - expected) / np.sqrt(n1*n2*(n1+n2+1)/12.0)
z, prob = _normtest_finish(z, alternative)
return RanksumsResult(z, prob)
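# Relation to the Mann-Whitney U statistic (a sketch, assuming no ties; the
# helper name is hypothetical): if W is the rank sum of `x` within the pooled
# ranking, then U = W - n1*(n1 + 1)/2 and the z statistic above can
# equivalently be written as z = (U - n1*n2/2) / sqrt(n1*n2*(n1 + n2 + 1)/12).
def _ranksum_to_u_demo(x, y):
    import numpy as np
    n1, n2 = len(x), len(y)
    ranked = rankdata(np.concatenate((np.asarray(x, float),
                                      np.asarray(y, float))))
    W = ranked[:n1].sum()
    U = W - n1 * (n1 + 1) / 2.0
    z = (U - n1 * n2 / 2.0) / np.sqrt(n1 * n2 * (n1 + n2 + 1) / 12.0)
    return U, z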
KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
def kruskal(*args, nan_policy='propagate'):
"""Compute the Kruskal-Wallis H-test for independent samples.
The Kruskal-Wallis H-test tests the null hypothesis that the population
    medians of all of the groups are equal. It is a non-parametric version of
ANOVA. The test works on 2 or more independent samples, which may have
different sizes. Note that rejecting the null hypothesis does not
indicate which of the groups differs. Post hoc comparisons between
groups are required to determine which groups are different.
Parameters
----------
sample1, sample2, ... : array_like
Two or more arrays with the sample measurements can be given as
arguments. Samples must be one-dimensional.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float
The Kruskal-Wallis H statistic, corrected for ties.
pvalue : float
The p-value for the test using the assumption that H has a chi
square distribution. The p-value returned is the survival function of
the chi square distribution evaluated at H.
See Also
--------
f_oneway : 1-way ANOVA.
mannwhitneyu : Mann-Whitney rank test on two samples.
friedmanchisquare : Friedman test for repeated measurements.
Notes
-----
Due to the assumption that H has a chi square distribution, the number
of samples in each group must not be too small. A typical rule is
that each sample must have at least 5 measurements.
References
----------
.. [1] W. H. Kruskal & W. W. Wallis, "Use of Ranks in
One-Criterion Variance Analysis", Journal of the American Statistical
Association, Vol. 47, Issue 260, pp. 583-621, 1952.
.. [2] https://en.wikipedia.org/wiki/Kruskal-Wallis_one-way_analysis_of_variance
Examples
--------
>>> from scipy import stats
>>> x = [1, 3, 5, 7, 9]
>>> y = [2, 4, 6, 8, 10]
>>> stats.kruskal(x, y)
KruskalResult(statistic=0.2727272727272734, pvalue=0.6015081344405895)
>>> x = [1, 1, 1]
>>> y = [2, 2, 2]
>>> z = [2, 2]
>>> stats.kruskal(x, y, z)
KruskalResult(statistic=7.0, pvalue=0.0301973834223185)
"""
args = list(map(np.asarray, args))
num_groups = len(args)
if num_groups < 2:
raise ValueError("Need at least two groups in stats.kruskal()")
for arg in args:
if arg.size == 0:
return KruskalResult(np.nan, np.nan)
elif arg.ndim != 1:
raise ValueError("Samples must be one-dimensional.")
n = np.asarray(list(map(len, args)))
if nan_policy not in ('propagate', 'raise', 'omit'):
raise ValueError("nan_policy must be 'propagate', 'raise' or 'omit'")
contains_nan = False
for arg in args:
cn = _contains_nan(arg, nan_policy)
if cn[0]:
contains_nan = True
break
    if contains_nan and nan_policy == 'omit':
        args = [ma.masked_invalid(a) for a in args]
        return mstats_basic.kruskal(*args)
if contains_nan and nan_policy == 'propagate':
return KruskalResult(np.nan, np.nan)
alldata = np.concatenate(args)
ranked = rankdata(alldata)
ties = tiecorrect(ranked)
if ties == 0:
raise ValueError('All numbers are identical in kruskal')
# Compute sum^2/n for each group and sum
j = np.insert(np.cumsum(n), 0, 0)
ssbn = 0
for i in range(num_groups):
ssbn += _square_of_sums(ranked[j[i]:j[i+1]]) / n[i]
totaln = np.sum(n, dtype=float)
h = 12.0 / (totaln * (totaln + 1)) * ssbn - 3 * (totaln + 1)
df = num_groups - 1
h /= ties
return KruskalResult(h, distributions.chi2.sf(h, df))
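# Consistency note (illustrative; the helper name is hypothetical): with
# exactly two groups and no ties (so the tie correction equals 1), the
# Kruskal-Wallis statistic reduces to the square of the Wilcoxon rank-sum
# z statistic, i.e. kruskal(x, y).statistic == ranksums(x, y).statistic**2.
def _kruskal_vs_ranksums_demo(x, y):
    h = kruskal(x, y).statistic
    z = ranksums(x, y).statistic
    return h, z ** 2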
FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
('statistic', 'pvalue'))
def friedmanchisquare(*args):
"""Compute the Friedman test for repeated measurements.
The Friedman test tests the null hypothesis that repeated measurements of
the same individuals have the same distribution. It is often used
to test for consistency among measurements obtained in different ways.
For example, if two measurement techniques are used on the same set of
individuals, the Friedman test can be used to determine if the two
measurement techniques are consistent.
Parameters
----------
measurements1, measurements2, measurements3... : array_like
Arrays of measurements. All of the arrays must have the same number
of elements. At least 3 sets of measurements must be given.
Returns
-------
statistic : float
The test statistic, correcting for ties.
pvalue : float
The associated p-value assuming that the test statistic has a chi
squared distribution.
Notes
-----
Due to the assumption that the test statistic has a chi squared
distribution, the p-value is only reliable for n > 10 and more than
6 repeated measurements.
References
----------
.. [1] https://en.wikipedia.org/wiki/Friedman_test
"""
k = len(args)
if k < 3:
raise ValueError('At least 3 sets of measurements must be given '
'for Friedman test, got {}.'.format(k))
n = len(args[0])
for i in range(1, k):
if len(args[i]) != n:
raise ValueError('Unequal N in friedmanchisquare. Aborting.')
# Rank data
data = np.vstack(args).T
data = data.astype(float)
for i in range(len(data)):
data[i] = rankdata(data[i])
# Handle ties
ties = 0
for i in range(len(data)):
replist, repnum = find_repeats(array(data[i]))
for t in repnum:
ties += t * (t*t - 1)
c = 1 - ties / (k*(k*k - 1)*n)
ssbn = np.sum(data.sum(axis=0)**2)
chisq = (12.0 / (k*n*(k+1)) * ssbn - 3*n*(k+1)) / c
return FriedmanchisquareResult(chisq, distributions.chi2.sf(chisq, k - 1))
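# Minimal usage sketch for the test above. The numbers are hypothetical
# repeated measurements (three conditions on the same seven subjects) and are
# used only to illustrate the calling convention.
def _friedman_demo():
    before = [72, 96, 88, 92, 74, 76, 82]
    after_1h = [120, 120, 132, 120, 101, 96, 112]
    after_2h = [76, 95, 104, 96, 84, 72, 76]
    return friedmanchisquare(before, after_1h, after_2h)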
BrunnerMunzelResult = namedtuple('BrunnerMunzelResult',
('statistic', 'pvalue'))
def brunnermunzel(x, y, alternative="two-sided", distribution="t",
nan_policy='propagate'):
"""Compute the Brunner-Munzel test on samples x and y.
The Brunner-Munzel test is a nonparametric test of the null hypothesis that
when values are taken one by one from each group, the probabilities of
getting large values in both groups are equal.
    Unlike the Wilcoxon-Mann-Whitney U test, this does not require the
    assumption of equal variances in the two groups. Note that this does not
    assume that the distributions are the same. This test works on two
    independent samples, which may have different sizes.
Parameters
----------
x, y : array_like
Array of samples, should be one-dimensional.
alternative : {'two-sided', 'less', 'greater'}, optional
Defines the alternative hypothesis.
The following options are available (default is 'two-sided'):
* 'two-sided'
* 'less': one-sided
* 'greater': one-sided
distribution : {'t', 'normal'}, optional
Defines how to get the p-value.
The following options are available (default is 't'):
* 't': get the p-value by t-distribution
* 'normal': get the p-value by standard normal distribution.
nan_policy : {'propagate', 'raise', 'omit'}, optional
Defines how to handle when input contains nan.
The following options are available (default is 'propagate'):
* 'propagate': returns nan
* 'raise': throws an error
* 'omit': performs the calculations ignoring nan values
Returns
-------
statistic : float
        The Brunner-Munzel W statistic.
pvalue : float
        The p-value assuming a t distribution. One-sided or
two-sided, depending on the choice of `alternative` and `distribution`.
See Also
--------
mannwhitneyu : Mann-Whitney rank test on two samples.
Notes
-----
    Brunner and Munzel recommended estimating the p-value using the
    t-distribution when the size of the data is 50 or less. If the size is
    smaller than 10, it is better to use the permuted Brunner-Munzel test
    (see [2]_).
References
----------
    .. [1] Brunner, E. and Munzel, U. "The nonparametric Behrens-Fisher
problem: Asymptotic theory and a small-sample approximation".
Biometrical Journal. Vol. 42(2000): 17-25.
.. [2] Neubert, K. and Brunner, E. "A studentized permutation test for the
non-parametric Behrens-Fisher problem". Computational Statistics and
Data Analysis. Vol. 51(2007): 5192-5204.
Examples
--------
>>> from scipy import stats
>>> x1 = [1,2,1,1,1,1,1,1,1,1,2,4,1,1]
>>> x2 = [3,3,4,3,1,2,3,1,1,5,4]
>>> w, p_value = stats.brunnermunzel(x1, x2)
>>> w
3.1374674823029505
>>> p_value
0.0057862086661515377
"""
x = np.asarray(x)
y = np.asarray(y)
# check both x and y
cnx, npx = _contains_nan(x, nan_policy)
cny, npy = _contains_nan(y, nan_policy)
contains_nan = cnx or cny
if npx == "omit" or npy == "omit":
nan_policy = "omit"
if contains_nan and nan_policy == "propagate":
return BrunnerMunzelResult(np.nan, np.nan)
elif contains_nan and nan_policy == "omit":
x = ma.masked_invalid(x)
y = ma.masked_invalid(y)
return mstats_basic.brunnermunzel(x, y, alternative, distribution)
nx = len(x)
ny = len(y)
if nx == 0 or ny == 0:
return BrunnerMunzelResult(np.nan, np.nan)
rankc = rankdata(np.concatenate((x, y)))
rankcx = rankc[0:nx]
rankcy = rankc[nx:nx+ny]
rankcx_mean = np.mean(rankcx)
rankcy_mean = np.mean(rankcy)
rankx = rankdata(x)
ranky = rankdata(y)
rankx_mean = np.mean(rankx)
ranky_mean = np.mean(ranky)
Sx = np.sum(np.power(rankcx - rankx - rankcx_mean + rankx_mean, 2.0))
Sx /= nx - 1
Sy = np.sum(np.power(rankcy - ranky - rankcy_mean + ranky_mean, 2.0))
Sy /= ny - 1
wbfn = nx * ny * (rankcy_mean - rankcx_mean)
wbfn /= (nx + ny) * np.sqrt(nx * Sx + ny * Sy)
if distribution == "t":
df_numer = np.power(nx * Sx + ny * Sy, 2.0)
df_denom = np.power(nx * Sx, 2.0) / (nx - 1)
df_denom += np.power(ny * Sy, 2.0) / (ny - 1)
df = df_numer / df_denom
p = distributions.t.cdf(wbfn, df)
elif distribution == "normal":
p = distributions.norm.cdf(wbfn)
else:
raise ValueError(
"distribution should be 't' or 'normal'")
if alternative == "greater":
pass
elif alternative == "less":
p = 1 - p
elif alternative == "two-sided":
p = 2 * np.min([p, 1-p])
else:
raise ValueError(
"alternative should be 'less', 'greater' or 'two-sided'")
return BrunnerMunzelResult(wbfn, p)
def combine_pvalues(pvalues, method='fisher', weights=None):
"""
Combine p-values from independent tests bearing upon the same hypothesis.
Parameters
----------
pvalues : array_like, 1-D
Array of p-values assumed to come from independent tests.
method : {'fisher', 'pearson', 'tippett', 'stouffer',
'mudholkar_george'}, optional
Name of method to use to combine p-values.
The following methods are available (default is 'fisher'):
* 'fisher': Fisher's method (Fisher's combined probability test), the
sum of the logarithm of the p-values
* 'pearson': Pearson's method (similar to Fisher's but uses sum of the
complement of the p-values inside the logarithms)
* 'tippett': Tippett's method (minimum of p-values)
* 'stouffer': Stouffer's Z-score method
* 'mudholkar_george': the difference of Fisher's and Pearson's methods
divided by 2
weights : array_like, 1-D, optional
Optional array of weights used only for Stouffer's Z-score method.
Returns
-------
statistic: float
The statistic calculated by the specified method.
pval: float
The combined p-value.
Notes
-----
Fisher's method (also known as Fisher's combined probability test) [1]_ uses
a chi-squared statistic to compute a combined p-value. The closely related
Stouffer's Z-score method [2]_ uses Z-scores rather than p-values. The
advantage of Stouffer's method is that it is straightforward to introduce
weights, which can make Stouffer's method more powerful than Fisher's
method when the p-values are from studies of different size [6]_ [7]_.
    Pearson's method uses :math:`log(1-p_i)` inside the sum whereas Fisher's
    method uses :math:`log(p_i)` [4]_. For Fisher's and Pearson's methods, the
    sum of the logarithms is multiplied by -2 in the implementation. This
    quantity has a chi-square distribution that determines the p-value. The
    `mudholkar_george` method is the difference of Fisher's and Pearson's
    test statistics, each of which includes the -2 factor [4]_. However, the
    `mudholkar_george` statistic as implemented omits these -2 factors. The
    test statistic of `mudholkar_george` is the sum of logistic random
    variables, and equation 3.6 in [3]_ is used to approximate the p-value
    based on Student's t-distribution.
Fisher's method may be extended to combine p-values from dependent tests
[5]_. Extensions such as Brown's method and Kost's method are not currently
implemented.
.. versionadded:: 0.15.0
References
----------
.. [1] https://en.wikipedia.org/wiki/Fisher%27s_method
.. [2] https://en.wikipedia.org/wiki/Fisher%27s_method#Relation_to_Stouffer.27s_Z-score_method
.. [3] George, E. O., and G. S. Mudholkar. "On the convolution of logistic
random variables." Metrika 30.1 (1983): 1-13.
.. [4] Heard, N. and Rubin-Delanchey, P. "Choosing between methods of
combining p-values." Biometrika 105.1 (2018): 239-246.
.. [5] Whitlock, M. C. "Combining probability from independent tests: the
weighted Z-method is superior to Fisher's approach." Journal of
Evolutionary Biology 18, no. 5 (2005): 1368-1373.
.. [6] Zaykin, Dmitri V. "Optimally weighted Z-test is a powerful method
for combining probabilities in meta-analysis." Journal of
Evolutionary Biology 24, no. 8 (2011): 1836-1841.
.. [7] https://en.wikipedia.org/wiki/Extensions_of_Fisher%27s_method
"""
pvalues = np.asarray(pvalues)
if pvalues.ndim != 1:
raise ValueError("pvalues is not 1-D")
if method == 'fisher':
statistic = -2 * np.sum(np.log(pvalues))
pval = distributions.chi2.sf(statistic, 2 * len(pvalues))
elif method == 'pearson':
statistic = -2 * np.sum(np.log1p(-pvalues))
pval = distributions.chi2.sf(statistic, 2 * len(pvalues))
elif method == 'mudholkar_george':
normalizing_factor = np.sqrt(3/len(pvalues))/np.pi
statistic = -np.sum(np.log(pvalues)) + np.sum(np.log1p(-pvalues))
nu = 5 * len(pvalues) + 4
approx_factor = np.sqrt(nu / (nu - 2))
pval = distributions.t.sf(statistic * normalizing_factor
* approx_factor, nu)
elif method == 'tippett':
statistic = np.min(pvalues)
pval = distributions.beta.sf(statistic, 1, len(pvalues))
elif method == 'stouffer':
if weights is None:
weights = np.ones_like(pvalues)
elif len(weights) != len(pvalues):
raise ValueError("pvalues and weights must be of the same size.")
weights = np.asarray(weights)
if weights.ndim != 1:
raise ValueError("weights is not 1-D")
Zi = distributions.norm.isf(pvalues)
statistic = np.dot(weights, Zi) / np.linalg.norm(weights)
pval = distributions.norm.sf(statistic)
else:
        raise ValueError(
            "Invalid method %r. Options are 'fisher', 'pearson', "
            "'mudholkar_george', 'tippett', or 'stouffer'." % method)
return (statistic, pval)
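# Illustrative cross-check of Fisher's method (the helper name is
# hypothetical): the statistic is -2 * sum(log(p_i)) and the combined p-value
# is the chi-square survival function with 2*k degrees of freedom evaluated
# at that statistic.
def _fisher_combined_demo(pvalues):
    import numpy as np
    pvalues = np.asarray(pvalues, dtype=float)
    statistic = -2.0 * np.sum(np.log(pvalues))
    return statistic, distributions.chi2.sf(statistic, 2 * len(pvalues))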
#####################################
# STATISTICAL DISTANCES #
#####################################
def wasserstein_distance(u_values, v_values, u_weights=None, v_weights=None):
r"""
Compute the first Wasserstein distance between two 1D distributions.
This distance is also known as the earth mover's distance, since it can be
seen as the minimum amount of "work" required to transform :math:`u` into
:math:`v`, where "work" is measured as the amount of distribution weight
that must be moved, multiplied by the distance it has to be moved.
.. versionadded:: 1.0.0
Parameters
----------
u_values, v_values : array_like
Values observed in the (empirical) distribution.
u_weights, v_weights : array_like, optional
Weight for each value. If unspecified, each value is assigned the same
weight.
`u_weights` (resp. `v_weights`) must have the same length as
`u_values` (resp. `v_values`). If the weight sum differs from 1, it
must still be positive and finite so that the weights can be normalized
to sum to 1.
Returns
-------
distance : float
The computed distance between the distributions.
Notes
-----
The first Wasserstein distance between the distributions :math:`u` and
:math:`v` is:
.. math::
l_1 (u, v) = \inf_{\pi \in \Gamma (u, v)} \int_{\mathbb{R} \times
\mathbb{R}} |x-y| \mathrm{d} \pi (x, y)
where :math:`\Gamma (u, v)` is the set of (probability) distributions on
:math:`\mathbb{R} \times \mathbb{R}` whose marginals are :math:`u` and
:math:`v` on the first and second factors respectively.
If :math:`U` and :math:`V` are the respective CDFs of :math:`u` and
    :math:`v`, this distance also equals:
.. math::
l_1(u, v) = \int_{-\infty}^{+\infty} |U-V|
See [2]_ for a proof of the equivalence of both definitions.
The input distributions can be empirical, therefore coming from samples
whose values are effectively inputs of the function, or they can be seen as
generalized functions, in which case they are weighted sums of Dirac delta
functions located at the specified values.
References
----------
.. [1] "Wasserstein metric", https://en.wikipedia.org/wiki/Wasserstein_metric
.. [2] Ramdas, Garcia, Cuturi "On Wasserstein Two Sample Testing and Related
Families of Nonparametric Tests" (2015). :arXiv:`1509.02237`.
Examples
--------
>>> from scipy.stats import wasserstein_distance
>>> wasserstein_distance([0, 1, 3], [5, 6, 8])
5.0
>>> wasserstein_distance([0, 1], [0, 1], [3, 1], [2, 2])
0.25
>>> wasserstein_distance([3.4, 3.9, 7.5, 7.8], [4.5, 1.4],
... [1.4, 0.9, 3.1, 7.2], [3.2, 3.5])
4.0781331438047861
"""
return _cdf_distance(1, u_values, v_values, u_weights, v_weights)
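# Special case worth noting (a sketch; the helper name is hypothetical): for
# two unweighted samples of equal size, the 1-D Wasserstein distance reduces
# to the mean absolute difference of the sorted samples; e.g. the first
# doctest above gives (|0-5| + |1-6| + |3-8|) / 3 = 5.
def _wasserstein_equal_size_demo(u_values, v_values):
    import numpy as np
    u = np.sort(np.asarray(u_values, dtype=float))
    v = np.sort(np.asarray(v_values, dtype=float))
    return np.mean(np.abs(u - v))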
def energy_distance(u_values, v_values, u_weights=None, v_weights=None):
r"""Compute the energy distance between two 1D distributions.
.. versionadded:: 1.0.0
Parameters
----------
u_values, v_values : array_like
Values observed in the (empirical) distribution.
u_weights, v_weights : array_like, optional
Weight for each value. If unspecified, each value is assigned the same
weight.
`u_weights` (resp. `v_weights`) must have the same length as
`u_values` (resp. `v_values`). If the weight sum differs from 1, it
must still be positive and finite so that the weights can be normalized
to sum to 1.
Returns
-------
distance : float
The computed distance between the distributions.
Notes
-----
The energy distance between two distributions :math:`u` and :math:`v`, whose
    respective CDFs are :math:`U` and :math:`V`, equals:
.. math::
D(u, v) = \left( 2\mathbb E|X - Y| - \mathbb E|X - X'| -
\mathbb E|Y - Y'| \right)^{1/2}
where :math:`X` and :math:`X'` (resp. :math:`Y` and :math:`Y'`) are
independent random variables whose probability distribution is :math:`u`
(resp. :math:`v`).
As shown in [2]_, for one-dimensional real-valued variables, the energy
distance is linked to the non-distribution-free version of the Cramér-von
Mises distance:
.. math::
D(u, v) = \sqrt{2} l_2(u, v) = \left( 2 \int_{-\infty}^{+\infty} (U-V)^2
\right)^{1/2}
Note that the common Cramér-von Mises criterion uses the distribution-free
version of the distance. See [2]_ (section 2), for more details about both
versions of the distance.
The input distributions can be empirical, therefore coming from samples
whose values are effectively inputs of the function, or they can be seen as
generalized functions, in which case they are weighted sums of Dirac delta
functions located at the specified values.
References
----------
.. [1] "Energy distance", https://en.wikipedia.org/wiki/Energy_distance
.. [2] Szekely "E-statistics: The energy of statistical samples." Bowling
Green State University, Department of Mathematics and Statistics,
Technical Report 02-16 (2002).
.. [3] Rizzo, Szekely "Energy distance." Wiley Interdisciplinary Reviews:
Computational Statistics, 8(1):27-38 (2015).
.. [4] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer,
Munos "The Cramer Distance as a Solution to Biased Wasserstein
Gradients" (2017). :arXiv:`1705.10743`.
Examples
--------
>>> from scipy.stats import energy_distance
>>> energy_distance([0], [2])
2.0000000000000004
>>> energy_distance([0, 8], [0, 8], [3, 1], [2, 2])
1.0000000000000002
>>> energy_distance([0.7, 7.4, 2.4, 6.8], [1.4, 8. ],
... [2.1, 4.2, 7.4, 8. ], [7.6, 8.8])
0.88003340976158217
"""
return np.sqrt(2) * _cdf_distance(2, u_values, v_values,
u_weights, v_weights)
def _cdf_distance(p, u_values, v_values, u_weights=None, v_weights=None):
r"""
Compute, between two one-dimensional distributions :math:`u` and
:math:`v`, whose respective CDFs are :math:`U` and :math:`V`, the
statistical distance that is defined as:
.. math::
l_p(u, v) = \left( \int_{-\infty}^{+\infty} |U-V|^p \right)^{1/p}
p is a positive parameter; p = 1 gives the Wasserstein distance, p = 2
gives the energy distance.
Parameters
----------
u_values, v_values : array_like
Values observed in the (empirical) distribution.
u_weights, v_weights : array_like, optional
Weight for each value. If unspecified, each value is assigned the same
weight.
`u_weights` (resp. `v_weights`) must have the same length as
`u_values` (resp. `v_values`). If the weight sum differs from 1, it
must still be positive and finite so that the weights can be normalized
to sum to 1.
Returns
-------
distance : float
The computed distance between the distributions.
Notes
-----
The input distributions can be empirical, therefore coming from samples
whose values are effectively inputs of the function, or they can be seen as
generalized functions, in which case they are weighted sums of Dirac delta
functions located at the specified values.
References
----------
.. [1] Bellemare, Danihelka, Dabney, Mohamed, Lakshminarayanan, Hoyer,
Munos "The Cramer Distance as a Solution to Biased Wasserstein
Gradients" (2017). :arXiv:`1705.10743`.
"""
u_values, u_weights = _validate_distribution(u_values, u_weights)
v_values, v_weights = _validate_distribution(v_values, v_weights)
u_sorter = np.argsort(u_values)
v_sorter = np.argsort(v_values)
all_values = np.concatenate((u_values, v_values))
all_values.sort(kind='mergesort')
# Compute the differences between pairs of successive values of u and v.
deltas = np.diff(all_values)
# Get the respective positions of the values of u and v among the values of
# both distributions.
u_cdf_indices = u_values[u_sorter].searchsorted(all_values[:-1], 'right')
v_cdf_indices = v_values[v_sorter].searchsorted(all_values[:-1], 'right')
# Calculate the CDFs of u and v using their weights, if specified.
if u_weights is None:
u_cdf = u_cdf_indices / u_values.size
else:
u_sorted_cumweights = np.concatenate(([0],
np.cumsum(u_weights[u_sorter])))
u_cdf = u_sorted_cumweights[u_cdf_indices] / u_sorted_cumweights[-1]
if v_weights is None:
v_cdf = v_cdf_indices / v_values.size
else:
v_sorted_cumweights = np.concatenate(([0],
np.cumsum(v_weights[v_sorter])))
v_cdf = v_sorted_cumweights[v_cdf_indices] / v_sorted_cumweights[-1]
# Compute the value of the integral based on the CDFs.
# If p = 1 or p = 2, we avoid using np.power, which introduces an overhead
# of about 15%.
if p == 1:
return np.sum(np.multiply(np.abs(u_cdf - v_cdf), deltas))
if p == 2:
return np.sqrt(np.sum(np.multiply(np.square(u_cdf - v_cdf), deltas)))
return np.power(np.sum(np.multiply(np.power(np.abs(u_cdf - v_cdf), p),
deltas)), 1/p)
def _validate_distribution(values, weights):
"""
    Validate the values and weights from a distribution input of `_cdf_distance`
and return them as ndarray objects.
Parameters
----------
values : array_like
Values observed in the (empirical) distribution.
weights : array_like
Weight for each value.
Returns
-------
values : ndarray
Values as ndarray.
weights : ndarray
Weights as ndarray.
"""
# Validate the value array.
values = np.asarray(values, dtype=float)
if len(values) == 0:
raise ValueError("Distribution can't be empty.")
# Validate the weight array, if specified.
if weights is not None:
weights = np.asarray(weights, dtype=float)
if len(weights) != len(values):
raise ValueError('Value and weight array-likes for the same '
'empirical distribution must be of the same size.')
if np.any(weights < 0):
raise ValueError('All weights must be non-negative.')
if not 0 < np.sum(weights) < np.inf:
raise ValueError('Weight array-like sum must be positive and '
'finite. Set as None for an equal distribution of '
'weight.')
return values, weights
return values, None
#####################################
# SUPPORT FUNCTIONS #
#####################################
RepeatedResults = namedtuple('RepeatedResults', ('values', 'counts'))
def find_repeats(arr):
"""Find repeats and repeat counts.
Parameters
----------
arr : array_like
Input array. This is cast to float64.
Returns
-------
values : ndarray
The unique values from the (flattened) input that are repeated.
counts : ndarray
Number of times the corresponding 'value' is repeated.
Notes
-----
In numpy >= 1.9 `numpy.unique` provides similar functionality. The main
difference is that `find_repeats` only returns repeated values.
Examples
--------
>>> from scipy import stats
>>> stats.find_repeats([2, 1, 2, 3, 2, 2, 5])
RepeatedResults(values=array([2.]), counts=array([4]))
>>> stats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]])
RepeatedResults(values=array([4., 5.]), counts=array([2, 2]))
"""
# Note: always copies.
return RepeatedResults(*_find_repeats(np.array(arr, dtype=np.float64)))
def _sum_of_squares(a, axis=0):
"""Square each element of the input array, and return the sum(s) of that.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
sum_of_squares : ndarray
The sum along the given axis for (a**2).
See Also
--------
_square_of_sums : The square(s) of the sum(s) (the opposite of
`_sum_of_squares`).
"""
a, axis = _chk_asarray(a, axis)
return np.sum(a*a, axis)
def _square_of_sums(a, axis=0):
"""Sum elements of the input array, and return the square(s) of that sum.
Parameters
----------
a : array_like
Input array.
axis : int or None, optional
Axis along which to calculate. Default is 0. If None, compute over
the whole array `a`.
Returns
-------
square_of_sums : float or ndarray
The square of the sum over `axis`.
See Also
--------
_sum_of_squares : The sum of squares (the opposite of `square_of_sums`).
"""
a, axis = _chk_asarray(a, axis)
s = np.sum(a, axis)
if not np.isscalar(s):
return s.astype(float) * s
else:
return float(s) * s
def rankdata(a, method='average', *, axis=None):
"""Assign ranks to data, dealing with ties appropriately.
By default (``axis=None``), the data array is first flattened, and a flat
array of ranks is returned. Separately reshape the rank array to the
shape of the data array if desired (see Examples).
Ranks begin at 1. The `method` argument controls how ranks are assigned
to equal values. See [1]_ for further discussion of ranking methods.
Parameters
----------
a : array_like
The array of values to be ranked.
method : {'average', 'min', 'max', 'dense', 'ordinal'}, optional
The method used to assign ranks to tied elements.
The following methods are available (default is 'average'):
* 'average': The average of the ranks that would have been assigned to
all the tied values is assigned to each value.
* 'min': The minimum of the ranks that would have been assigned to all
the tied values is assigned to each value. (This is also
referred to as "competition" ranking.)
* 'max': The maximum of the ranks that would have been assigned to all
the tied values is assigned to each value.
* 'dense': Like 'min', but the rank of the next highest element is
assigned the rank immediately after those assigned to the tied
elements.
* 'ordinal': All values are given a distinct rank, corresponding to
the order that the values occur in `a`.
axis : {None, int}, optional
Axis along which to perform the ranking. If ``None``, the data array
is first flattened.
Returns
-------
ranks : ndarray
An array of size equal to the size of `a`, containing rank
scores.
References
----------
.. [1] "Ranking", https://en.wikipedia.org/wiki/Ranking
Examples
--------
>>> from scipy.stats import rankdata
>>> rankdata([0, 2, 3, 2])
array([ 1. , 2.5, 4. , 2.5])
>>> rankdata([0, 2, 3, 2], method='min')
array([ 1, 2, 4, 2])
>>> rankdata([0, 2, 3, 2], method='max')
array([ 1, 3, 4, 3])
>>> rankdata([0, 2, 3, 2], method='dense')
array([ 1, 2, 3, 2])
>>> rankdata([0, 2, 3, 2], method='ordinal')
array([ 1, 2, 4, 3])
>>> rankdata([[0, 2], [3, 2]]).reshape(2,2)
array([[1. , 2.5],
[4. , 2.5]])
>>> rankdata([[0, 2, 2], [3, 2, 5]], axis=1)
array([[1. , 2.5, 2.5],
[2. , 1. , 3. ]])
"""
if method not in ('average', 'min', 'max', 'dense', 'ordinal'):
raise ValueError('unknown method "{0}"'.format(method))
if axis is not None:
a = np.asarray(a)
if a.size == 0:
# The return values of `normalize_axis_index` are ignored. The
# call validates `axis`, even though we won't use it.
# use scipy._lib._util._normalize_axis_index when available
np.core.multiarray.normalize_axis_index(axis, a.ndim)
dt = np.float64 if method == 'average' else np.int_
return np.empty(a.shape, dtype=dt)
return np.apply_along_axis(rankdata, axis, a, method)
arr = np.ravel(np.asarray(a))
algo = 'mergesort' if method == 'ordinal' else 'quicksort'
sorter = np.argsort(arr, kind=algo)
inv = np.empty(sorter.size, dtype=np.intp)
inv[sorter] = np.arange(sorter.size, dtype=np.intp)
if method == 'ordinal':
return inv + 1
arr = arr[sorter]
obs = np.r_[True, arr[1:] != arr[:-1]]
dense = obs.cumsum()[inv]
if method == 'dense':
return dense
# cumulative counts of each unique value
count = np.r_[np.nonzero(obs)[0], len(obs)]
if method == 'max':
return count[dense]
if method == 'min':
return count[dense - 1] + 1
# average method
return .5 * (count[dense] + count[dense - 1] + 1)
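# Illustrative identity (the helper name is hypothetical): 'ordinal' ranks are
# the double argsort with a stable first sort; the other methods can be
# derived from them by grouping tied values.
def _ordinal_rank_demo(a):
    import numpy as np
    a = np.ravel(np.asarray(a))
    return np.argsort(np.argsort(a, kind='mergesort')) + 1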
| bsd-3-clause |
annayqho/TheCannon | presentations/for_michigan/make_talk_plots.py | 1 | 4618 | import pickle
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# make sample spectra
# NOTE: this script assumes `dataset` (a TheCannon dataset object with .wl
# and .tr_flux) is already defined in the session; the bare pyplot calls
# below rely on the names imported here.
from matplotlib.pyplot import (plot, title, xlim, ylim, xlabel, ylabel,
                               tick_params, tight_layout, savefig)
plot(dataset.wl, dataset.tr_flux[2,:], alpha=0.7, c='k')
title(r"Typical High-S/N LAMOST Spectrum", fontsize=27)
xlim(3500, 9500)
tick_params(axis='x', labelsize=27)
tick_params(axis='y', labelsize=27)
xlabel("Wavelength ($\AA$)", fontsize=27)
ylabel("Flux", fontsize=27)
savefig("typical_spec_snr186.png")
ID = "spec-55938-B5593806_sp04-159.fits"
# now find it in APOGEE...
ID = "aspcapStar-r5-v603-2M12252154+2732475.fits"
import pyfits
fits_file = ID
file_in = pyfits.open(fits_file)
flux = np.array(file_in[1].data)
npixels = len(flux)
start_wl = file_in[1].header['CRVAL1']
diff_wl = file_in[1].header['CDELT1']
val = diff_wl * (npixels) + start_wl
wl_full_log = np.arange(start_wl,val, diff_wl)
wl_full = [10 ** aval for aval in wl_full_log]
wl = np.array(wl_full)
bad = flux == 0
wl = np.ma.array(wl, mask=bad)
flux = np.ma.array(flux, mask=bad)
plot(wl, flux, alpha=0.7, c='k')
xlim(15100, 17000)
ylim(0.6, 1.15)
title(r"Typical High-S/N APOGEE Spectrum", fontsize=27)
tight_layout()
savefig("typical_spec_snr186_apogee.png")
label_file = 'reference_labels.csv'
(test_ID, test_SNR) = pickle.load(open("test_ID_SNR.p", "r"))
# for each test ID, find its index in label_file IDs
ids = np.loadtxt(label_file, usecols=(0,), dtype=str, delimiter=',')
inds = [np.where(ids==test_ID_val) for test_ID_val in test_ID]
names = ['T_{eff}', '\log g', '[Fe/H]', '[\\alpha/Fe]']
lims = [[3900,6000], [0,5], [-2, 1], [-0.1,0.4]]
#id,teff,logg,feh,alpha,snr
teff = np.loadtxt(label_file, usecols=(2,), dtype=float, delimiter=',')
logg = np.loadtxt(label_file, usecols=(3,), dtype=float, delimiter=',')
feh = np.loadtxt(label_file, usecols=(4,), dtype=float, delimiter=',')
alpha = np.loadtxt(label_file, usecols=(5,), dtype=float, delimiter=',')
apogee_label_vals = np.vstack(
(teff[inds].flatten(), logg[inds].flatten(), feh[inds].flatten(), alpha[inds].flatten())).T
test_labels = pickle.load(open("test_labels.p", "r"))
for i in range(0, len(names)):
name = names[i]
cannon = np.array(test_labels[:,i])
orig = np.array(apogee_label_vals[:,i], dtype=float)
snr = test_SNR
#bad = orig < -8000
#good = snr > 50
#orig = np.ma.array(orig, mask=bad)
#cannon = np.ma.array(cannon, mask=bad)
#snr = np.ma.array(snr, mask=bad)
#orig = orig[good]
#cannon = cannon[good]
#snr = snr[good]
scatter = np.round(np.std(orig-cannon),3)
scatter = int(scatter)
bias = np.round(np.mean(orig-cannon),4)
bias = np.round(bias, 3)
low = np.minimum(min(orig), min(cannon))
high = np.maximum(max(orig), max(cannon))
fig = plt.figure(figsize=(10,6))
gs = gridspec.GridSpec(1,2,width_ratios=[2,1], wspace=0.3)
ax1 = plt.subplot(gs[0])
ax2 = plt.subplot(gs[1])
ax1.plot([low, high], [low, high], 'k-', linewidth=2.0, label="x=y")
low = lims[i][0]
high = lims[i][1]
ax1.set_xlim(low, high)
ax1.set_ylim(low, high)
c = np.zeros(len(snr))
take = snr < 100
ax1.scatter(orig[take], cannon[take], marker='x', c='0.10', alpha=0.3, label="snr < 100")
take = snr > 100
ax1.scatter(orig[take], cannon[take], marker='x', c='k', label="snr > 100", alpha=0.7)
ax1.legend(fontsize=14, loc='lower right')
textstr = 'Scatter: %s \nBias: %s' %(scatter, bias)
ax1.text(0.05, 0.95, textstr, transform=ax1.transAxes,
fontsize=14, verticalalignment='top')
ax1.tick_params(axis='x', labelsize=14)
ax1.tick_params(axis='y', labelsize=14)
ax1.set_xlabel("APOGEE $%s$" %name, fontsize=14)
ax1.set_ylabel("Cannon-LAMOST $%s$" %name, fontsize=14)
ax1.set_title("Cannon-LAMOST Output vs. APOGEE $%s$ " %name, fontsize=14)
diff = cannon - orig
npoints = len(diff)
mu = np.mean(diff)
sig = np.std(diff)
    ax2.hist(diff, range=[-3*sig, 3*sig], color='k', bins=int(np.sqrt(npoints)),
             orientation='horizontal', alpha=0.3, histtype='stepfilled')
textstr = r"$\sigma=%s$" %(np.round(sig,2))
ax2.text(0.05, 0.95, textstr, transform=ax2.transAxes,
fontsize=14, verticalalignment='top')
ax2.tick_params(axis='x', labelsize=14)
ax2.tick_params(axis='y', labelsize=14)
ax2.set_xlabel("Count", fontsize=14)
ax2.set_ylabel("Difference", fontsize=14)
ax2.axhline(y=0, c='k', lw=3, label='Difference=0')
ax2.set_title("Cannon-LAMOST Output Minus \n APOGEE Labels for $%s$" %name,
fontsize=14)
ax2.legend(fontsize=14, loc='lower center')
plt.savefig('1to1_%s.png'%i)
| mit |
fabianp/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
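# Follow-up sketch (not part of the original example): the fitted BayesianRidge
# also exposes the estimated noise precision alpha_ and weight precision lambda_,
# which can be compared with the alpha_ = 50 and lambda_ = 4 used to simulate the data.
print("Estimated alpha_ (noise precision): %.3f" % clf.alpha_)
print("Estimated lambda_ (weight precision): %.3f" % clf.lambda_)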
| bsd-3-clause |
sanketloke/scikit-learn | examples/model_selection/plot_confusion_matrix.py | 47 | 2495 | """
================
Confusion matrix
================
Example of confusion matrix usage to evaluate the quality
of the output of a classifier on the iris data set. The
diagonal elements represent the number of points for which
the predicted label is equal to the true label, while
off-diagonal elements are those that are mislabeled by the
classifier. The higher the diagonal values of the confusion
matrix the better, indicating many correct predictions.
The figures show the confusion matrix with and without
normalization by class support size (number of elements
in each class). This kind of normalization can be
interesting in case of class imbalance to have a more
visual interpretation of which class is being misclassified.
Here the results are not as good as they could be as our
choice for the regularization parameter C was not the best.
In real life applications this parameter is usually chosen
using :ref:`grid_search`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(iris.target_names))
plt.xticks(tick_marks, iris.target_names, rotation=45)
plt.yticks(tick_marks, iris.target_names)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# Compute confusion matrix
cm = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)
print('Confusion matrix, without normalization')
print(cm)
plt.figure()
plot_confusion_matrix(cm)
# Normalize the confusion matrix by row (i.e. by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print('Normalized confusion matrix')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, title='Normalized confusion matrix')
plt.show()
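# Quick sanity check (an addition to the original example): after row-wise
# normalization every row of the confusion matrix should sum to 1, since each
# row now holds per-class fractions rather than raw counts.
print('Row sums of the normalized confusion matrix (expected to be ~1):')
print(cm_normalized.sum(axis=1))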
| bsd-3-clause |
Cadair/solarbextrapolation | solarbextrapolation/analyticalmodels/base.py | 1 | 8605 | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 28 19:30:22 2015
@author: alex_
"""
# General Imports
import matplotlib as mpl
mpl.use('TkAgg') # Force mpl backend not to use qt. Else we have a conflict.
import numpy as np
#import pickle
import time
from datetime import datetime
#from collections import namedtuple
import warnings
import inspect
#from sunpy.sun._constants import physical_constants as con
# SunPy imports
import sunpy.map
from sunpy.sun import constants, sun
from sunpy.time import parse_time, is_time
from astropy.table import Table
import astropy.units as u
from mayavi import mlab
# Internal imports
#from solarbextrapolation.utilities import si_this_map
from solarbextrapolation.map3dclasses import Map3D
class AnalyticalModel(object):
"""
Common class for the development of analytical models of magnetic fields.
Use the models to evaluate the accuracy of an extrapolation routine with
the figures of merit.
"""
def __init__(self, **kwargs):
# Default grid shape and physical ranges for the volume the model covers.
self.shape = kwargs.get('shape', u.Quantity([5, 5, 5] * u.pixel)) # (x,y,z)
self.xrange = kwargs.get('xrange', u.Quantity([-10, 10] * u.Mm))
self.yrange = kwargs.get('yrange', u.Quantity([-10, 10] * u.Mm))
        self.zrange = kwargs.get('zrange', u.Quantity([0, 20] * u.Mm))
# Metadata
        self.meta = {'ZNAXIS': 3, 'ZNAXIS1': self.shape[0].value, 'ZNAXIS2': self.shape[1].value, 'ZNAXIS3': self.shape[2].value}
self.meta['analytical_model_notes'] = kwargs.get('notes', '')
self.meta['BUNIT'] = kwargs.get('bunit', u.T)
        # CRVALn, CDELTn and NAXIS (already in meta) used for storing range in 2D fits files.
self.filepath = kwargs.get('filepath', None)
self.routine = kwargs.get('analytical_model_routine', type(self))
# Default 3D magnetic field
#X,Y,Z = np.zeros(self.shape.value), np.zeros(self.shape.value), np.zeros(self.shape.value)
npField = np.zeros([3]+list(np.array(self.shape.value, dtype=np.int)))
self.field = Map3D(npField, self.meta)
# Default magnetic field on boundary
magnetogram = np.zeros(np.array(self.shape[0:2].value, dtype=np.int))
magnetogram_header = {'ZNAXIS': 2, 'ZNAXIS1': self.shape[0].value, 'ZNAXIS2': self.shape[1].value}
self.magnetogram = sunpy.map.Map((magnetogram, magnetogram_header))
def _generate_field(self, **kwargs):
"""
The method for running a model to generate the field.
This is the primary method to be edited in subclasses for specific
model implementations.
"""
# Model code goes here.
        arr_4d = np.zeros([int(self.magnetogram.data.shape[0]), int(self.magnetogram.data.shape[1]), 1, 3])
# Turn the 4D array into a Map3D object.
map_output = Map3D( arr_4d, self.meta, xrange=self.xrange, yrange=self.yrange, zrange=self.zrange, xobsrange=self.xrange, yobsrange=self.yrange )
return map_output
def generate(self, **kwargs):
"""
Method to be called to calculate the vector field and return as a Map3D object.
Times and saves the extrapolation where applicable.
"""
# Record the time and duration of the extrapolation.
dt_start = datetime.now()
tim_start = time.time()
arr_output = self._generate_field(**kwargs)
tim_duration = time.time() - tim_start
# Add the duration and time to the meta/header data.
arr_output.meta['extrapolator_start_time'] = dt_start.isoformat()
arr_output.meta['extrapolator_duration'] = tim_duration
arr_output.meta['extrapolator_duration_unit'] = u.s
        # Save the Map3D if a filepath has been set. (to avoid losing work)
if self.filepath:
arr_output.save(self.filepath)
# Add the output map to the object and return.
self.map = arr_output
return arr_output
def to_los_magnetogram(self, **kwargs):
"""
Calculate the LoS vector field as a SunPy map and return.
Generally this will require that you have run generate(self, ``**kwargs``)
first, so in the base class this is checked, but it is not always the
case as some models may allow this to be determined without calculating
the full field.
.. I'm not sure if this is a good default.
"""
return self.magnetogram
def to_vec_magnetogram(self, **kwargs):
"""
Calculate the vector field as a SunPy map and return.
Generally this will require that you have run ``generate(self, **kwargs)``
first, so in the base class this is checked, but it is not always the
case as some models may allow this to be determined without calculating
the full field. ######### I'm not sure if this is a good default.
"""
return self.magnetogram
if __name__ == '__main__':
# User-specified parameters
tup_shape = ( 20, 20, 20 )
x_range = ( -80.0, 80 ) * u.Mm
y_range = ( -80.0, 80 ) * u.Mm
z_range = ( 0.0, 120 ) * u.Mm
# Derived parameters (make SI where applicable)
x_0 = x_range[0].to(u.m).value
Dx = (( x_range[1] - x_range[0] ) / ( tup_shape[0] * 1.0 )).to(u.m).value
x_size = Dx * tup_shape[0]
y_0 = y_range[0].to(u.m).value
Dy = (( y_range[1] - y_range[0] ) / ( tup_shape[1] * 1.0 )).to(u.m).value
y_size = Dy * tup_shape[1]
z_0 = z_range[0].to(u.m).value
Dz = (( z_range[1] - z_range[0] ) / ( tup_shape[2] * 1.0 )).to(u.m).value
    z_size = Dz * tup_shape[2]
# Define the extrapolator as a child of the Extrapolators class
class AnaOnes(AnalyticalModel):
def __init__(self, **kwargs):
super(AnaOnes, self).__init__(**kwargs)
def _generate_field(self, **kwargs):
# Adding in custom parameters to the metadata
self.meta['analytical_model_routine'] = 'Ones Model'
# Generate a trivial field and return (X,Y,Z,Vec)
outshape = list(np.array(self.shape.value, dtype=np.int)) + [3]
arr_4d = np.ones(outshape)
return Map3D(arr_4d, self.meta)
    # Setup an analytical model
xrange = u.Quantity([ 50, 300] * u.arcsec)
yrange = u.Quantity([-350, -100] * u.arcsec)
zrange = u.Quantity([ 0, 250] * u.arcsec)
aAnaMod = AnaOnes()
aMap3D = aAnaMod.generate()
# Visualise the 3D vector field
from solarbextrapolation.visualisation_functions import visualise
"""
fig = visualise(aMap3D,
show_boundary_axes=False,
boundary_units=[1.0*u.arcsec, 1.0*u.arcsec],
show_volume_axes=True,
debug=False)
"""
fig = visualise(aMap3D,
show_boundary_axes=False,
show_volume_axes=False,
debug=False)
mlab.show()
"""
# For B_I field only, to save re-creating this interpolator for every cell.
    A_I_r_perp_interpolator = interpolate_A_I_from_r_perp(flo_TD_R, flo_TD_a, flo_TD_d, flo_TD_I, (x_size**2 + y_size**2 + z_size**2)**(0.5)*1.2, 10000)
field = np.zeros( ( tup_shape[0], tup_shape[1], tup_shape[2], 3 ) )
for i in range(0, tup_shape[0]):
for j in range(0, tup_shape[1]):
for k in range(0, tup_shape[2]):
# Position of this point in space
x_pos = x_0 + ( i + 0.5 ) * Dx
y_pos = y_0 + ( j + 0.5 ) * Dy
z_pos = z_0 + ( k + 0.5 ) * Dz
#field[i,j,k] = B_theta(x_pos, y_pos, z_pos, flo_TD_a, flo_TD_d, flo_TD_R, flo_TD_I, flo_TD_I_0)
#field[i,j,k] = B_q(x_pos, y_pos, z_pos, flo_TD_L, flo_TD_d, flo_TD_q)
#field[i,j,k] = B_I(x_pos, y_pos, z_pos, flo_TD_R, flo_TD_a, flo_TD_d, flo_TD_I, Dx, A_I_r_perp_interpolator)
field[i,j,k] = B_theta(x_pos, y_pos, z_pos, flo_TD_a, flo_TD_d, flo_TD_R, flo_TD_I, flo_TD_I_0) + B_q(x_pos, y_pos, z_pos, flo_TD_L, flo_TD_d, flo_TD_q) + B_I(x_pos, y_pos, z_pos, flo_TD_R, flo_TD_a, flo_TD_d, flo_TD_I, Dx, A_I_r_perp_interpolator)
map_field = Map3D( field, {}, xrange=x_range, yrange=y_range, zrange=z_range )
np_boundary_data = field[:,:,0,2].T
dummyDataToMap(np_boundary_data, x_range, y_range)
#dic_boundary_data = { 'datavals': np_boundary_data.data.shape[0]**2, 'dsun_obs': 147065396219.34, }
visualise(map_field, scale=1.0*u.Mm, show_volume_axes=True, debug=True)
"""
| mit |
zorojean/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) are compared with a single decision tree
regressor. As the number of boosts is increased the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
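# Optional extension (a sketch, not part of the original example): staged_predict()
# yields the ensemble prediction after each boosting iteration, so the effect of the
# number of boosts on the training error can be tracked directly.
staged_mse = [np.mean((y - y_staged) ** 2) for y_staged in regr_2.staged_predict(X)]
print("Training MSE after 1 boost: %.4f, after %d boosts: %.4f"
      % (staged_mse[0], len(staged_mse), staged_mse[-1]))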
| bsd-3-clause |
lifeinoppo/littlefishlet-scode | RES/REF/python_sourcecode/ipython-master/IPython/sphinxext/ipython_directive.py | 12 | 42845 | # -*- coding: utf-8 -*-
"""
Sphinx directive to support embedded IPython code.
This directive allows pasting of entire interactive IPython sessions, prompts
and all, and their code will actually get re-executed at doc build time, with
all prompts renumbered sequentially. It also allows you to input code as a pure
python input by giving the argument python to the directive. The output looks
like an interactive ipython section.
To enable this directive, simply list it in your Sphinx ``conf.py`` file
(making sure the directory where you placed it is visible to sphinx, as is
needed for all Sphinx directives). For example, to enable syntax highlighting
and the IPython directive::
extensions = ['IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive']
The IPython directive outputs code-blocks with the language 'ipython'. So
if you do not have the syntax highlighting extension enabled as well, then
all rendered code-blocks will be uncolored. By default this directive assumes
that your prompts are unchanged IPython ones, but this can be customized.
The configurable options that can be placed in conf.py are:
ipython_savefig_dir:
The directory in which to save the figures. This is relative to the
Sphinx source directory. The default is `html_static_path`.
ipython_rgxin:
The compiled regular expression to denote the start of IPython input
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_rgxout:
The compiled regular expression to denote the start of IPython output
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
shouldn't need to change this.
ipython_promptin:
The string to represent the IPython input prompt in the generated ReST.
The default is 'In [%d]:'. This expects that the line numbers are used
in the prompt.
ipython_promptout:
    The string to represent the IPython output prompt in the generated ReST. The
    default is 'Out[%d]:'. This expects that the line numbers are used
in the prompt.
ipython_mplbackend:
The string which specifies if the embedded Sphinx shell should import
Matplotlib and set the backend. The value specifies a backend that is
passed to `matplotlib.use()` before any lines in `ipython_execlines` are
executed. If not specified in conf.py, then the default value of 'agg' is
used. To use the IPython directive without matplotlib as a dependency, set
the value to `None`. It may end up that matplotlib is still imported
if the user specifies so in `ipython_execlines` or makes use of the
@savefig pseudo decorator.
ipython_execlines:
A list of strings to be exec'd in the embedded Sphinx shell. Typical
usage is to make certain packages always available. Set this to an empty
list if you wish to have no imports always available. If specified in
conf.py as `None`, then it has the effect of making no imports available.
If omitted from conf.py altogether, then the default value of
['import numpy as np', 'import matplotlib.pyplot as plt'] is used.
ipython_holdcount
When the @suppress pseudo-decorator is used, the execution count can be
incremented or not. The default behavior is to hold the execution count,
corresponding to a value of `True`. Set this to `False` to increment
the execution count after each suppressed command.
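A typical ``conf.py`` setup pulling these options together might look like the
following (illustrative values only, mirroring the documented defaults)::

    ipython_promptin = 'In [%d]:'
    ipython_promptout = 'Out[%d]:'
    ipython_mplbackend = 'agg'
    ipython_execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
    ipython_holdcount = True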
As an example, to use the IPython directive when `matplotlib` is not available,
one sets the backend to `None`::
ipython_mplbackend = None
An example usage of the directive is:
.. code-block:: rst
.. ipython::
In [1]: x = 1
In [2]: y = x**2
In [3]: print(y)
See http://matplotlib.org/sampledoc/ipython_directive.html for additional
documentation.
Pseudo-Decorators
=================
Note: Only one decorator is supported per input. If more than one decorator
is specified, then only the last one is used.
In addition to the Pseudo-Decorators/options described at the above link,
several enhancements have been made. The directive will emit a message to the
console at build-time if code-execution resulted in an exception or warning.
You can suppress these on a per-block basis by specifying the :okexcept:
or :okwarning: options:
.. code-block:: rst
.. ipython::
:okexcept:
:okwarning:
In [1]: 1/0
In [2]: # raise warning.
ToDo
----
- Turn the ad-hoc test() function into a real test suite.
- Break up ipython-specific functionality from matplotlib stuff into better
separated code.
Authors
-------
- John D Hunter: original author.
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
- Václav Šmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
- Skipper Seabold, refactoring, cleanups, pure python addition
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib
import atexit
import os
import re
import sys
import tempfile
import ast
import warnings
import shutil
# Third-party
from docutils.parsers.rst import directives
from sphinx.util.compat import Directive
# Our own
from traitlets.config import Config
from IPython import InteractiveShell
from IPython.core.profiledir import ProfileDir
from IPython.utils import io
from IPython.utils.py3compat import PY3
if PY3:
from io import StringIO
else:
from StringIO import StringIO
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)
#-----------------------------------------------------------------------------
# Functions and class declarations
#-----------------------------------------------------------------------------
def block_parser(part, rgxin, rgxout, fmtin, fmtout):
"""
part is a string of ipython text, comprised of at most one
input, one output, comments, and blank lines. The block parser
parses the text into a list of::
blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]
where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
data is, depending on the type of token::
COMMENT : the comment string
INPUT: the (DECORATOR, INPUT_LINE, REST) where
DECORATOR: the input decorator (or None)
INPUT_LINE: the input as string (possibly multi-line)
REST : any stdout generated by the input line (not OUTPUT)
OUTPUT: the output string, possibly multi-line
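    As a small illustration (added for clarity), a part such as::

        In [1]: x = 2
        Out[1]: 2

    is parsed into ``[(INPUT, (None, 'x = 2', '')), (OUTPUT, '2')]``.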
"""
block = []
lines = part.split('\n')
N = len(lines)
i = 0
decorator = None
while 1:
if i==N:
# nothing left to parse -- the last line
break
line = lines[i]
i += 1
line_stripped = line.strip()
if line_stripped.startswith('#'):
block.append((COMMENT, line))
continue
if line_stripped.startswith('@'):
# Here is where we assume there is, at most, one decorator.
# Might need to rethink this.
decorator = line_stripped
continue
# does this look like an input line?
matchin = rgxin.match(line)
if matchin:
lineno, inputline = int(matchin.group(1)), matchin.group(2)
# the ....: continuation string
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
Nc = len(continuation)
# input lines can continue on for more than one line, if
# we have a '\' line continuation char or a function call
# echo line 'print'. The input line can only be
# terminated by the end of the block or an output line, so
# we parse out the rest of the input line if it is
# multiline as well as any echo text
rest = []
while i<N:
# look ahead; if the next line is blank, or a comment, or
# an output line, we're done
nextline = lines[i]
matchout = rgxout.match(nextline)
#print "nextline=%s, continuation=%s, starts=%s"%(nextline, continuation, nextline.startswith(continuation))
if matchout or nextline.startswith('#'):
break
elif nextline.startswith(continuation):
# The default ipython_rgx* treat the space following the colon as optional.
# However, If the space is there we must consume it or code
# employing the cython_magic extension will fail to execute.
#
# This works with the default ipython_rgx* patterns,
# If you modify them, YMMV.
nextline = nextline[Nc:]
if nextline and nextline[0] == ' ':
nextline = nextline[1:]
inputline += '\n' + nextline
else:
rest.append(nextline)
i+= 1
block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
continue
# if it looks like an output line grab all the text to the end
# of the block
matchout = rgxout.match(line)
if matchout:
lineno, output = int(matchout.group(1)), matchout.group(2)
if i<N-1:
output = '\n'.join([output] + lines[i:])
block.append((OUTPUT, output))
break
return block
class EmbeddedSphinxShell(object):
"""An embedded IPython instance to run inside Sphinx"""
def __init__(self, exec_lines=None):
self.cout = StringIO()
if exec_lines is None:
exec_lines = []
# Create config object for IPython
config = Config()
config.HistoryManager.hist_file = ':memory:'
config.InteractiveShell.autocall = False
config.InteractiveShell.autoindent = False
config.InteractiveShell.colors = 'NoColor'
# create a profile so instance history isn't saved
tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
profname = 'auto_profile_sphinx_build'
pdir = os.path.join(tmp_profile_dir,profname)
profile = ProfileDir.create_profile_dir(pdir)
# Create and initialize global ipython, but don't start its mainloop.
# This will persist across different EmbededSphinxShell instances.
IP = InteractiveShell.instance(config=config, profile_dir=profile)
atexit.register(self.cleanup)
# io.stdout redirect must be done after instantiating InteractiveShell
io.stdout = self.cout
io.stderr = self.cout
# For debugging, so we can see normal output, use this:
#from IPython.utils.io import Tee
#io.stdout = Tee(self.cout, channel='stdout') # dbg
#io.stderr = Tee(self.cout, channel='stderr') # dbg
# Store a few parts of IPython we'll need.
self.IP = IP
self.user_ns = self.IP.user_ns
self.user_global_ns = self.IP.user_global_ns
self.input = ''
self.output = ''
self.tmp_profile_dir = tmp_profile_dir
self.is_verbatim = False
self.is_doctest = False
self.is_suppress = False
# Optionally, provide more detailed information to shell.
# this is assigned by the SetUp method of IPythonDirective
# to point at itself.
#
# So, you can access handy things at self.directive.state
self.directive = None
# on the first call to the savefig decorator, we'll import
# pyplot as plt so we can make a call to the plt.gcf().savefig
self._pyplot_imported = False
# Prepopulate the namespace.
for line in exec_lines:
self.process_input_line(line, store_history=False)
def cleanup(self):
shutil.rmtree(self.tmp_profile_dir, ignore_errors=True)
def clear_cout(self):
self.cout.seek(0)
self.cout.truncate(0)
def process_input_line(self, line, store_history=True):
"""process the input, capturing stdout"""
stdout = sys.stdout
splitter = self.IP.input_splitter
try:
sys.stdout = self.cout
splitter.push(line)
more = splitter.push_accepts_more()
if not more:
source_raw = splitter.raw_reset()
self.IP.run_cell(source_raw, store_history=store_history)
finally:
sys.stdout = stdout
def process_image(self, decorator):
"""
# build out an image directive like
# .. image:: somefile.png
# :width 4in
#
# from an input like
# savefig somefile.png width=4in
"""
savefig_dir = self.savefig_dir
source_dir = self.source_dir
saveargs = decorator.split(' ')
filename = saveargs[1]
# insert relative path to image file in source
outfile = os.path.relpath(os.path.join(savefig_dir,filename),
source_dir)
imagerows = ['.. image:: %s'%outfile]
for kwarg in saveargs[2:]:
arg, val = kwarg.split('=')
arg = arg.strip()
val = val.strip()
imagerows.append(' :%s: %s'%(arg, val))
image_file = os.path.basename(outfile) # only return file name
image_directive = '\n'.join(imagerows)
return image_file, image_directive
# Callbacks for each type of token
def process_input(self, data, input_prompt, lineno):
"""
Process data block for INPUT token.
"""
decorator, input, rest = data
image_file = None
image_directive = None
is_verbatim = decorator=='@verbatim' or self.is_verbatim
is_doctest = (decorator is not None and \
decorator.startswith('@doctest')) or self.is_doctest
is_suppress = decorator=='@suppress' or self.is_suppress
is_okexcept = decorator=='@okexcept' or self.is_okexcept
is_okwarning = decorator=='@okwarning' or self.is_okwarning
is_savefig = decorator is not None and \
decorator.startswith('@savefig')
input_lines = input.split('\n')
if len(input_lines) > 1:
if input_lines[-1] != "":
input_lines.append('') # make sure there's a blank line
# so splitter buffer gets reset
continuation = ' %s:'%''.join(['.']*(len(str(lineno))+2))
if is_savefig:
image_file, image_directive = self.process_image(decorator)
ret = []
is_semicolon = False
# Hold the execution count, if requested to do so.
if is_suppress and self.hold_count:
store_history = False
else:
store_history = True
# Note: catch_warnings is not thread safe
with warnings.catch_warnings(record=True) as ws:
for i, line in enumerate(input_lines):
if line.endswith(';'):
is_semicolon = True
if i == 0:
# process the first input line
if is_verbatim:
self.process_input_line('')
self.IP.execution_count += 1 # increment it anyway
else:
# only submit the line in non-verbatim mode
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(input_prompt, line)
else:
# process a continuation line
if not is_verbatim:
self.process_input_line(line, store_history=store_history)
formatted_line = '%s %s'%(continuation, line)
if not is_suppress:
ret.append(formatted_line)
if not is_suppress and len(rest.strip()) and is_verbatim:
# The "rest" is the standard output of the input. This needs to be
# added when in verbatim mode. If there is no "rest", then we don't
# add it, as the new line will be added by the processed output.
ret.append(rest)
# Fetch the processed output. (This is not the submitted output.)
self.cout.seek(0)
processed_output = self.cout.read()
if not is_suppress and not is_semicolon:
#
# In IPythonDirective.run, the elements of `ret` are eventually
# combined such that '' entries correspond to newlines. So if
# `processed_output` is equal to '', then the adding it to `ret`
# ensures that there is a blank line between consecutive inputs
# that have no outputs, as in:
#
# In [1]: x = 4
#
# In [2]: x = 5
#
# When there is processed output, it has a '\n' at the tail end. So
# adding the output to `ret` will provide the necessary spacing
# between consecutive input/output blocks, as in:
#
# In [1]: x
# Out[1]: 5
#
# In [2]: x
# Out[2]: 5
#
# When there is stdout from the input, it also has a '\n' at the
# tail end, and so this ensures proper spacing as well. E.g.:
#
# In [1]: print x
# 5
#
# In [2]: x = 5
#
# When in verbatim mode, `processed_output` is empty (because
# nothing was passed to IP. Sometimes the submitted code block has
# an Out[] portion and sometimes it does not. When it does not, we
# need to ensure proper spacing, so we have to add '' to `ret`.
# However, if there is an Out[] in the submitted code, then we do
# not want to add a newline as `process_output` has stuff to add.
# The difficulty is that `process_input` doesn't know if
# `process_output` will be called---so it doesn't know if there is
            # Out[] in the code block. This requires that we include a hack in
# `process_block`. See the comments there.
#
ret.append(processed_output)
elif is_semicolon:
# Make sure there is a newline after the semicolon.
ret.append('')
# context information
filename = "Unknown"
lineno = 0
if self.directive.state:
filename = self.directive.state.document.current_source
lineno = self.directive.state.document.current_line
# output any exceptions raised during execution to stdout
# unless :okexcept: has been specified.
if not is_okexcept and "Traceback" in processed_output:
s = "\nException in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okexcept: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write(processed_output)
sys.stdout.write('<<<' + ('-' * 73) + '\n\n')
# output any warning raised during execution to stdout
# unless :okwarning: has been specified.
if not is_okwarning:
for w in ws:
s = "\nWarning in %s at block ending on line %s\n" % (filename, lineno)
s += "Specify :okwarning: as an option in the ipython:: block to suppress this message\n"
sys.stdout.write('\n\n>>>' + ('-' * 73))
sys.stdout.write(s)
sys.stdout.write(('-' * 76) + '\n')
s=warnings.formatwarning(w.message, w.category,
w.filename, w.lineno, w.line)
sys.stdout.write(s)
sys.stdout.write('<<<' + ('-' * 73) + '\n')
self.cout.truncate(0)
return (ret, input_lines, processed_output,
is_doctest, decorator, image_file, image_directive)
def process_output(self, data, output_prompt, input_lines, output,
is_doctest, decorator, image_file):
"""
Process data block for OUTPUT token.
"""
# Recall: `data` is the submitted output, and `output` is the processed
# output from `input_lines`.
TAB = ' ' * 4
if is_doctest and output is not None:
found = output # This is the processed output
found = found.strip()
submitted = data.strip()
if self.directive is None:
source = 'Unavailable'
content = 'Unavailable'
else:
source = self.directive.state.document.current_source
content = self.directive.content
# Add tabs and join into a single string.
content = '\n'.join([TAB + line for line in content])
# Make sure the output contains the output prompt.
ind = found.find(output_prompt)
if ind < 0:
e = ('output does not contain output prompt\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'Input line(s):\n{TAB}{2}\n\n'
'Output line(s):\n{TAB}{3}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), TAB=TAB)
raise RuntimeError(e)
found = found[len(output_prompt):].strip()
# Handle the actual doctest comparison.
if decorator.strip() == '@doctest':
# Standard doctest
if found != submitted:
e = ('doctest failure\n\n'
'Document source: {0}\n\n'
'Raw content: \n{1}\n\n'
'On input line(s):\n{TAB}{2}\n\n'
'we found output:\n{TAB}{3}\n\n'
'instead of the expected:\n{TAB}{4}\n\n')
e = e.format(source, content, '\n'.join(input_lines),
repr(found), repr(submitted), TAB=TAB)
raise RuntimeError(e)
else:
self.custom_doctest(decorator, input_lines, found, submitted)
# When in verbatim mode, this holds additional submitted output
# to be written in the final Sphinx output.
# https://github.com/ipython/ipython/issues/5776
out_data = []
is_verbatim = decorator=='@verbatim' or self.is_verbatim
if is_verbatim and data.strip():
# Note that `ret` in `process_block` has '' as its last element if
# the code block was in verbatim mode. So if there is no submitted
# output, then we will have proper spacing only if we do not add
# an additional '' to `out_data`. This is why we condition on
# `and data.strip()`.
# The submitted output has no output prompt. If we want the
# prompt and the code to appear, we need to join them now
# instead of adding them separately---as this would create an
# undesired newline. How we do this ultimately depends on the
# format of the output regex. I'll do what works for the default
# prompt for now, and we might have to adjust if it doesn't work
# in other cases. Finally, the submitted output does not have
# a trailing newline, so we must add it manually.
out_data.append("{0} {1}\n".format(output_prompt, data))
return out_data
def process_comment(self, data):
"""Process data fPblock for COMMENT token."""
if not self.is_suppress:
return [data]
def save_image(self, image_file):
"""
Saves the image file to disk.
"""
self.ensure_pyplot()
command = 'plt.gcf().savefig("%s")'%image_file
#print 'SAVEFIG', command # dbg
self.process_input_line('bookmark ipy_thisdir', store_history=False)
self.process_input_line('cd -b ipy_savedir', store_history=False)
self.process_input_line(command, store_history=False)
self.process_input_line('cd -b ipy_thisdir', store_history=False)
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
self.clear_cout()
def process_block(self, block):
"""
process block from the block_parser and return a list of processed lines
"""
ret = []
output = None
input_lines = None
lineno = self.IP.execution_count
input_prompt = self.promptin % lineno
output_prompt = self.promptout % lineno
image_file = None
image_directive = None
found_input = False
for token, data in block:
if token == COMMENT:
out_data = self.process_comment(data)
elif token == INPUT:
found_input = True
(out_data, input_lines, output, is_doctest,
decorator, image_file, image_directive) = \
self.process_input(data, input_prompt, lineno)
elif token == OUTPUT:
if not found_input:
TAB = ' ' * 4
linenumber = 0
source = 'Unavailable'
content = 'Unavailable'
if self.directive:
linenumber = self.directive.state.document.current_line
source = self.directive.state.document.current_source
content = self.directive.content
# Add tabs and join into a single string.
content = '\n'.join([TAB + line for line in content])
e = ('\n\nInvalid block: Block contains an output prompt '
'without an input prompt.\n\n'
'Document source: {0}\n\n'
'Content begins at line {1}: \n\n{2}\n\n'
'Problematic block within content: \n\n{TAB}{3}\n\n')
e = e.format(source, linenumber, content, block, TAB=TAB)
# Write, rather than include in exception, since Sphinx
# will truncate tracebacks.
sys.stdout.write(e)
raise RuntimeError('An invalid block was detected.')
out_data = \
self.process_output(data, output_prompt, input_lines,
output, is_doctest, decorator,
image_file)
if out_data:
# Then there was user submitted output in verbatim mode.
# We need to remove the last element of `ret` that was
# added in `process_input`, as it is '' and would introduce
# an undesirable newline.
assert(ret[-1] == '')
del ret[-1]
if out_data:
ret.extend(out_data)
# save the image files
if image_file is not None:
self.save_image(image_file)
return ret, image_directive
def ensure_pyplot(self):
"""
Ensures that pyplot has been imported into the embedded IPython shell.
Also, makes sure to set the backend appropriately if not set already.
"""
# We are here if the @figure pseudo decorator was used. Thus, it's
# possible that we could be here even if python_mplbackend were set to
# `None`. That's also strange and perhaps worthy of raising an
# exception, but for now, we just set the backend to 'agg'.
if not self._pyplot_imported:
if 'matplotlib.backends' not in sys.modules:
# Then ipython_matplotlib was set to None but there was a
# call to the @figure decorator (and ipython_execlines did
# not set a backend).
#raise Exception("No backend was set, but @figure was used!")
import matplotlib
matplotlib.use('agg')
# Always import pyplot into embedded shell.
self.process_input_line('import matplotlib.pyplot as plt',
store_history=False)
self._pyplot_imported = True
def process_pure_python(self, content):
"""
content is a list of strings. it is unedited directive content
This runs it line by line in the InteractiveShell, prepends
prompts as needed capturing stderr and stdout, then returns
the content as a list as if it were ipython code
"""
output = []
savefig = False # keep up with this to clear figure
multiline = False # to handle line continuation
multiline_start = None
fmtin = self.promptin
ct = 0
for lineno, line in enumerate(content):
line_stripped = line.strip()
if not len(line):
output.append(line)
continue
# handle decorators
if line_stripped.startswith('@'):
output.extend([line])
if 'savefig' in line:
savefig = True # and need to clear figure
continue
# handle comments
if line_stripped.startswith('#'):
output.extend([line])
continue
# deal with lines checking for multiline
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
if not multiline:
modified = u"%s %s" % (fmtin % ct, line_stripped)
output.append(modified)
ct += 1
try:
ast.parse(line_stripped)
output.append(u'')
except Exception: # on a multiline
multiline = True
multiline_start = lineno
else: # still on a multiline
modified = u'%s %s' % (continuation, line)
output.append(modified)
# if the next line is indented, it should be part of multiline
if len(content) > lineno + 1:
nextline = content[lineno + 1]
if len(nextline) - len(nextline.lstrip()) > 3:
continue
try:
mod = ast.parse(
'\n'.join(content[multiline_start:lineno+1]))
if isinstance(mod.body[0], ast.FunctionDef):
# check to see if we have the whole function
for element in mod.body[0].body:
if isinstance(element, ast.Return):
multiline = False
else:
output.append(u'')
multiline = False
except Exception:
pass
if savefig: # clear figure if plotted
self.ensure_pyplot()
self.process_input_line('plt.clf()', store_history=False)
self.clear_cout()
savefig = False
return output
def custom_doctest(self, decorator, input_lines, found, submitted):
"""
Perform a specialized doctest.
"""
from .custom_doctests import doctests
args = decorator.split()
doctest_type = args[1]
if doctest_type in doctests:
doctests[doctest_type](self, args, input_lines, found, submitted)
else:
e = "Invalid option to @doctest: {0}".format(doctest_type)
raise Exception(e)
class IPythonDirective(Directive):
has_content = True
required_arguments = 0
optional_arguments = 4 # python, suppress, verbatim, doctest
    final_argument_whitespace = True
option_spec = { 'python': directives.unchanged,
'suppress' : directives.flag,
'verbatim' : directives.flag,
'doctest' : directives.flag,
'okexcept': directives.flag,
'okwarning': directives.flag
}
shell = None
seen_docs = set()
def get_config_options(self):
# contains sphinx configuration variables
config = self.state.document.settings.env.config
# get config variables to set figure output directory
outdir = self.state.document.settings.env.app.outdir
savefig_dir = config.ipython_savefig_dir
source_dir = os.path.dirname(self.state.document.current_source)
if savefig_dir is None:
savefig_dir = config.html_static_path or '_static'
if isinstance(savefig_dir, list):
savefig_dir = os.path.join(*savefig_dir)
savefig_dir = os.path.join(outdir, savefig_dir)
# get regex and prompt stuff
rgxin = config.ipython_rgxin
rgxout = config.ipython_rgxout
promptin = config.ipython_promptin
promptout = config.ipython_promptout
mplbackend = config.ipython_mplbackend
exec_lines = config.ipython_execlines
hold_count = config.ipython_holdcount
return (savefig_dir, source_dir, rgxin, rgxout,
promptin, promptout, mplbackend, exec_lines, hold_count)
def setup(self):
# Get configuration values.
(savefig_dir, source_dir, rgxin, rgxout, promptin, promptout,
mplbackend, exec_lines, hold_count) = self.get_config_options()
if self.shell is None:
# We will be here many times. However, when the
# EmbeddedSphinxShell is created, its interactive shell member
# is the same for each instance.
if mplbackend:
import matplotlib
# Repeated calls to use() will not hurt us since `mplbackend`
# is the same each time.
matplotlib.use(mplbackend)
# Must be called after (potentially) importing matplotlib and
# setting its backend since exec_lines might import pylab.
self.shell = EmbeddedSphinxShell(exec_lines)
# Store IPython directive to enable better error messages
self.shell.directive = self
# reset the execution count if we haven't processed this doc
#NOTE: this may be borked if there are multiple seen_doc tmp files
#check time stamp?
if not self.state.document.current_source in self.seen_docs:
self.shell.IP.history_manager.reset()
self.shell.IP.execution_count = 1
self.shell.IP.prompt_manager.width = 0
self.seen_docs.add(self.state.document.current_source)
# and attach to shell so we don't have to pass them around
self.shell.rgxin = rgxin
self.shell.rgxout = rgxout
self.shell.promptin = promptin
self.shell.promptout = promptout
self.shell.savefig_dir = savefig_dir
self.shell.source_dir = source_dir
self.shell.hold_count = hold_count
# setup bookmark for saving figures directory
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
store_history=False)
self.shell.clear_cout()
return rgxin, rgxout, promptin, promptout
def teardown(self):
# delete last bookmark
self.shell.process_input_line('bookmark -d ipy_savedir',
store_history=False)
self.shell.clear_cout()
def run(self):
debug = False
#TODO, any reason block_parser can't be a method of embeddable shell
# then we wouldn't have to carry these around
rgxin, rgxout, promptin, promptout = self.setup()
options = self.options
self.shell.is_suppress = 'suppress' in options
self.shell.is_doctest = 'doctest' in options
self.shell.is_verbatim = 'verbatim' in options
self.shell.is_okexcept = 'okexcept' in options
self.shell.is_okwarning = 'okwarning' in options
# handle pure python code
if 'python' in self.arguments:
content = self.content
self.content = self.shell.process_pure_python(content)
# parts consists of all text within the ipython-block.
# Each part is an input/output block.
parts = '\n'.join(self.content).split('\n\n')
lines = ['.. code-block:: ipython', '']
figures = []
for part in parts:
block = block_parser(part, rgxin, rgxout, promptin, promptout)
if len(block):
rows, figure = self.shell.process_block(block)
for row in rows:
lines.extend([' {0}'.format(line)
for line in row.split('\n')])
if figure is not None:
figures.append(figure)
for figure in figures:
lines.append('')
lines.extend(figure.split('\n'))
lines.append('')
if len(lines) > 2:
if debug:
print('\n'.join(lines))
else:
# This has to do with input, not output. But if we comment
# these lines out, then no IPython code will appear in the
# final output.
self.state_machine.insert_input(
lines, self.state_machine.input_lines.source(0))
# cleanup
self.teardown()
return []
# Enable as a proper Sphinx directive
def setup(app):
setup.app = app
app.add_directive('ipython', IPythonDirective)
app.add_config_value('ipython_savefig_dir', None, 'env')
app.add_config_value('ipython_rgxin',
re.compile('In \[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_rgxout',
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), 'env')
app.add_config_value('ipython_promptin', 'In [%d]:', 'env')
app.add_config_value('ipython_promptout', 'Out[%d]:', 'env')
# We could just let matplotlib pick whatever is specified as the default
# backend in the matplotlibrc file, but this would cause issues if the
# backend didn't work in headless environments. For this reason, 'agg'
# is a good default backend choice.
app.add_config_value('ipython_mplbackend', 'agg', 'env')
# If the user sets this config value to `None`, then EmbeddedSphinxShell's
# __init__ method will treat it as [].
execlines = ['import numpy as np', 'import matplotlib.pyplot as plt']
app.add_config_value('ipython_execlines', execlines, 'env')
app.add_config_value('ipython_holdcount', True, 'env')
metadata = {'parallel_read_safe': True, 'parallel_write_safe': True}
return metadata
# Simple smoke test, needs to be converted to a proper automatic test.
def test():
examples = [
r"""
In [9]: pwd
Out[9]: '/home/jdhunter/py4science/book'
In [10]: cd bookdata/
/home/jdhunter/py4science/book/bookdata
In [2]: from pylab import *
In [2]: ion()
In [3]: im = imread('stinkbug.png')
@savefig mystinkbug.png width=4in
In [4]: imshow(im)
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
""",
r"""
In [1]: x = 'hello world'
# string methods can be
# used to alter the string
@doctest
In [2]: x.upper()
Out[2]: 'HELLO WORLD'
@verbatim
In [3]: x.st<TAB>
x.startswith x.strip
""",
r"""
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
   .....: &d=9&e=22&f=2009&g=d&a=1&b=8&c=2006&ignore=.csv'
In [131]: print url.split('&')
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
In [60]: import urllib
""",
r"""\
In [133]: import numpy.random
@suppress
In [134]: numpy.random.seed(2358)
@doctest
In [135]: numpy.random.rand(10,2)
Out[135]:
array([[ 0.64524308, 0.59943846],
[ 0.47102322, 0.8715456 ],
[ 0.29370834, 0.74776844],
[ 0.99539577, 0.1313423 ],
[ 0.16250302, 0.21103583],
[ 0.81626524, 0.1312433 ],
[ 0.67338089, 0.72302393],
[ 0.7566368 , 0.07033696],
[ 0.22591016, 0.77731835],
[ 0.0072729 , 0.34273127]])
""",
r"""
In [106]: print x
jdh
In [109]: for i in range(10):
.....: print i
.....:
.....:
0
1
2
3
4
5
6
7
8
9
""",
r"""
In [144]: from pylab import *
In [145]: ion()
# use a semicolon to suppress the output
@savefig test_hist.png width=4in
In [151]: hist(np.random.randn(10000), 100);
@savefig test_plot.png width=4in
In [151]: plot(np.random.randn(10000), 'o');
""",
r"""
# use a semicolon to suppress the output
In [151]: plt.clf()
@savefig plot_simple.png width=4in
In [151]: plot([1,2,3])
@savefig hist_simple.png width=4in
In [151]: hist(np.random.randn(10000), 100);
""",
r"""
# update the current fig
In [151]: ylabel('number')
In [152]: title('normal distribution')
@savefig hist_with_text.png
In [153]: grid(True)
@doctest float
In [154]: 0.1 + 0.2
Out[154]: 0.3
@doctest float
In [155]: np.arange(16).reshape(4,4)
Out[155]:
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
In [1]: x = np.arange(16, dtype=float).reshape(4,4)
In [2]: x[0,0] = np.inf
In [3]: x[0,1] = np.nan
@doctest float
In [4]: x
Out[4]:
array([[ inf, nan, 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
""",
]
# skip local-file depending first example:
examples = examples[1:]
#ipython_directive.DEBUG = True # dbg
#options = dict(suppress=True) # dbg
options = dict()
for example in examples:
content = example.split('\n')
IPythonDirective('debug', arguments=None, options=options,
content=content, lineno=0,
content_offset=None, block_text=None,
state=None, state_machine=None,
)
# Run test suite as a script
if __name__=='__main__':
if not os.path.isdir('_static'):
os.mkdir('_static')
test()
print('All OK? Check figures in _static/')
| gpl-2.0 |
cs-chan/Deep-Plant | GRU-CFA/Codes/visualClef.py | 1 | 18504 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 2 15:24:59 2018
@author: root
"""
import cPickle as pkl
import numpy
import cv2
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import skimage
import skimage.transform
import skimage.io
from PIL import Image, ImageEnhance
import scipy.misc
import tensorflow as tf
import numpy as np
import os
import struct
import scipy.io as sio
from array import array as pyarray
from numpy import array, int8, uint8, zeros
import collections
import pickle
import functools
import sets
from tensorflow.python.ops import rnn, array_ops
from tensorflow.contrib.rnn import GRUCell, DropoutWrapper, MultiRNNCell
from tensorflow.python import debug as tf_debug
from attn_7_1_ex import VariableSequenceClassification
from temp_createStruct5 import ConstructLookupTable
from time import gmtime, strftime
from logging_util import makelog
logfile=makelog()
class DataSet(object):
def __init__(self, layername, numMap):
"""Construct a DataSet."""
mat_contents = sio.loadmat('/home/titanz/Documents/SueHan/matlab/PlantClefVGG_net/RNN_plantclef/train_obs_list.mat')
self._trainList = mat_contents['train_obs_list']
mat_contents = sio.loadmat('/home/titanz/Documents/SueHan/matlab/PlantClefVGG_net/RNN_plantclef/train_obs_class.mat')
self._trainLabels = mat_contents['train_obs_class']
mat_contents = sio.loadmat('/home/titanz/Documents/SueHan/matlab/PlantClefVGG_net/RNN_plantclef/test_obs_list.mat')
self._testList = mat_contents['test_obs_list']
mat_contents = sio.loadmat('/home/titanz/Documents/SueHan/matlab/PlantClefVGG_net/RNN_plantclef/test_obs_class.mat')
self._testLabels = mat_contents['test_obs_class']
self.layerextract = layername
self.numMap = numMap
self._num_examples = self._trainLabels.shape[0]
self._perm_list = np.arange(self._num_examples)
np.random.shuffle(self._perm_list)
self._trainLabelsPerm = self._trainLabels[self._perm_list]
self._num_testexamples = self._testLabels.shape[0]
self._perm_list_test = np.arange(self._num_testexamples)
self._batch_seq = 0
self._epochs_completed = 0
self._index_in_epoch = 0
self._index_in_epoch_test = 0
self._max_seq = 0
self.Batch_Up_model = ConstructLookupTable()
        self.mydict2_test256 = self.Batch_Up_model.main(self._testList, 2)  # for train_testID != 1
self.feature_size_conv = self.numMap*14*14
self.feature_size_fc = 4096
def trainList(self):
return self._trainList
def trainLabels(self):
return self._trainLabels
def trainLabelsPerm(self):
return self._trainLabelsPerm
def testList(self):
return self._testList
def testLabels(self):
return self._testLabels
def num_examples(self):
return self._num_examples
def num_testexamples(self):
return self._num_testexamples
def epochs_completed(self):
return self._epochs_completed
def index_in_epoch(self):
return self._index_in_epoch
def max_seq(self):
return self._max_seq
def batch_seq(self):
return self._batch_seq
def PrepareTrainingBatch(self,Newpermbatch, batch_size, indicator):
if indicator == 1:
mydictG = self.Batch_Up_model.main(self._trainList,1) # for train_testID == 1
else:
mydictG = self.mydict2_test256
i = 0
temp = np.zeros(batch_size)
while i < batch_size:
temp[i] = len(mydictG[Newpermbatch[i]][1])
i = i + 1
self._max_seq = int(np.amax(temp))
self._batch_seq = temp
batch_conv = np.zeros([batch_size,self._max_seq,self.feature_size_conv])
batch_fc = np.zeros([batch_size,self._max_seq,self.feature_size_fc])
i = 0
while i < batch_size:
media_length = len(mydictG[Newpermbatch[i]][1])
j = 0
while j < media_length:
### for 256 image size for testing
# pkl_file1 = open(mydictG[Newpermbatch[i]][1][j][0], 'rb')
# output = pickle.load(pkl_file1)
# pkl_file1.close()
#
# pkl_file2 = open(mydictG[Newpermbatch[i]][1][j][1], 'rb')
# output2 = pickle.load(pkl_file2)
# pkl_file2.close()
#
# pkl_file3 = open(mydictG[Newpermbatch[i]][1][j][2], 'rb')
# output3 = pickle.load(pkl_file3)
# pkl_file3.close()
#
# output.update(output2)
# output.update(output3)
# mat_contents = output[self.layerextract[0]]
# batch_conv[i][j][:] = mat_contents.reshape(self.feature_size_conv) #'conv5_3'
#
# mat_contents = output[self.layerextract[1]]
## batch_fc[i][j][:] = mat_contents.reshape(self.feature_size_conv) #'conv5_3_O'
# batch_fc[i][j][:] = mat_contents #'convfc7'
#
# j = j + 1
######################################################################
## for 384,512 image size for testing
if indicator == 1: # training ###################
pkl_file1 = open(mydictG[Newpermbatch[i]][1][j][0], 'rb')
output = pickle.load(pkl_file1)
pkl_file1.close()
pkl_file2 = open(mydictG[Newpermbatch[i]][1][j][1], 'rb')
output2 = pickle.load(pkl_file2)
pkl_file2.close()
pkl_file3 = open(mydictG[Newpermbatch[i]][1][j][2], 'rb')
output3 = pickle.load(pkl_file3)
pkl_file3.close()
output.update(output2)
output.update(output3)
mat_contents = output[self.layerextract[0]]
batch_conv[i][j][:] = mat_contents.reshape(self.feature_size_conv) #'conv5_3'
mat_contents = output[self.layerextract[1]]
batch_fc[i][j][:] = mat_contents.reshape(self.feature_size_fc) #'conv5_3_O'
j = j + 1
else: # testing
pkl_file1 = open(mydictG[Newpermbatch[i]][1][j][0], 'rb')
output = pickle.load(pkl_file1)
pkl_file1.close()
pkl_file2 = open(mydictG[Newpermbatch[i]][1][j][1], 'rb')
output2 = pickle.load(pkl_file2)
pkl_file2.close()
output.update(output2)
mat_contents = output[self.layerextract[0]]
batch_conv[i][j][:] = mat_contents.reshape(self.feature_size_conv) #'conv5_3'
mat_contents = output[self.layerextract[1]]
batch_fc[i][j][:] = mat_contents.reshape(self.feature_size_fc) #'conv5_3_O'
j = j + 1
#########################################################
if indicator == 1:
J = np.arange(media_length)
np.random.shuffle(J)
temp_arr = batch_conv[i,:media_length,:]
temp_arr = temp_arr[J,:]
batch_conv[i,:media_length,:] = temp_arr
temp_arr = batch_fc[i,:media_length,:]
temp_arr = temp_arr[J,:]
batch_fc[i,:media_length,:] = temp_arr
i = i + 1
return batch_fc, batch_conv
def dense_to_one_hot(self,labels_dense, num_classes=1000):
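        # Added note: labels_dense arrives with shape (batch_size, 1); each label is
        # turned into a one-hot vector of length num_classes and then repeated over
        # the first batch_seq[i] time steps of observation i, giving an output of
        # shape (batch_size, max_seq, num_classes) with all-zero rows as padding.
        # For example, label 3 with batch_seq[i] = 2 and max_seq = 4 yields two
        # identical one-hot rows followed by two zero rows.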
labels_dense = labels_dense.astype(int)
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
labels_one_hot = labels_one_hot.astype(np.float32)
temp = zeros((labels_one_hot.shape[0],self._max_seq,num_classes))
i=0
while i < labels_one_hot.shape[0]:
temp[i][0:int(self._batch_seq[i])] = labels_one_hot[i]
i=i+1
return temp
def next_batch(self,batch_size):
start = self._index_in_epoch
self._index_in_epoch += batch_size
if self._index_in_epoch > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Shuffle the data
self._perm_list = np.arange(self._num_examples)
np.random.shuffle(self._perm_list)
self._trainLabelsPerm = self._trainLabels[self._perm_list]
# Start next epoch
start = 0
self._index_in_epoch = batch_size
assert batch_size <= self._num_examples
end = self._index_in_epoch
return self.PrepareTrainingBatch(self._perm_list[start:end], batch_size, 1), self.dense_to_one_hot(self._trainLabelsPerm[start:end])
def PrepareTestingBatch(self,test_total):
start = self._index_in_epoch_test
self._index_in_epoch_test += test_total
if self._index_in_epoch_test > self._num_testexamples:
start = 0
self._index_in_epoch_test = test_total
assert test_total <= self._num_testexamples
end = self._index_in_epoch_test
return self.PrepareTrainingBatch(self._perm_list_test[start:end], test_total, 0), self.dense_to_one_hot(self._testLabels[start:end])
## Testing
def Reset_index_in_epoch_test(self, init_v = 0):
self._index_in_epoch_test = init_v
def crop_image(self, x, target_height=224, target_width=224):
image = skimage.img_as_float(skimage.io.imread(x)).astype(np.float32)
if len(image.shape) == 2:
image = np.tile(image[:,:,None], 3)
elif len(image.shape) == 4:
image = image[:,:,:,0]
height, width, rgb = image.shape
if width == height:
resized_image = cv2.resize(image, (target_height,target_width))
elif height < width:
resized_image = cv2.resize(image, (int(width * float(target_height)/height), target_width))
cropping_length = int((resized_image.shape[1] - target_height) / 2)
resized_image = resized_image[:,cropping_length:resized_image.shape[1] - cropping_length]
else:
resized_image = cv2.resize(image, (target_height, int(height * float(target_width) / width)))
cropping_length = int((resized_image.shape[0] - target_width) / 2)
resized_image = resized_image[cropping_length:resized_image.shape[0] - cropping_length,:]
return cv2.resize(resized_image, (target_height, target_width))
####### Network Parameters ########
training_iters = 10000000 # run 10000 epoch
batch_size = 15
display_step = 280 #280
test_num_total = 15
layername_conv = 'conv5_3'
layername_fc = 'fc7_final'
layername = [layername_conv, layername_fc]
numMap = 512
featMap = 14*14
num_classes = 1000
dropTrain = 0.5
dropTest = 1
prob_path = '/media/titanz/Data3TB/tensorboard_log/model_20180211/prob_256/'
savefigfile ='/media/titanz/Data3TB/tensorboard_log/model_20180309/attn_visual_imsz384/'
#################################
plantclefdata = DataSet(layername,numMap)
# tf Graph input
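# x holds the per-image fully-connected features (batch, max_seq, 4096), data the
# flattened conv5_3 maps (batch, max_seq, numMap*14*14), target the one-hot class
# labels tiled over the sequence, and batch_size2 the actual batch size at run time.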
x = tf.placeholder("float", [None, None, 4096])
data = tf.placeholder("float", [None, None, numMap*14*14])
target = tf.placeholder("float", [None, None, num_classes])
dropout = tf.placeholder(tf.float32)
batch_size2 = tf.placeholder(tf.int32)
model = VariableSequenceClassification(x, data, target, dropout, batch_size2)
sess = tf.InteractiveSession()
#sess = tf.Session()
#sess = tf_debug.LocalCLIDebugWrapperSession(sess)
saver = tf.train.Saver(max_to_keep = None)
saver.restore(sess, "/media/titanz/Data3TB/tensorboard_log/model_20180309/model_55160")
#################################################################################
mat_contents = sio.loadmat('/home/titanz/Documents/SueHan/matlab/PlantClefVGG_net/RNN_plantclef/test_obs_list.mat')
testList = mat_contents['test_obs_list']
mat_contents = sio.loadmat('/media/titanz/Data3TB/tensorboard_log/model_20180309/obs_tp_media_re.mat')
obs_tp_media_re = mat_contents['obs_tp_media_re']
imagesz = 1 # choose image size 1 = 256 , 2 = 384, 3 = 512
if imagesz == 1: #256
path_folder = '/media/titanz/Data3TB/PlantclefVGA2/PlantClefImageTest/resize_species/species/'
elif imagesz == 2: #384
path_folder = '/media/titanz/data3TB_02/PlantClefolddata_augmented/PlantClefImageTest_SR/resize_species_384/'
else: #512
path_folder = '/media/titanz/data3TB_02/PlantClefolddata_augmented/PlantClefImageTest_SR/resize_species_512/'
smooth = True
# read python dict back from the testing_256 file
pkl_file_test = open('/home/titanz/tensor_flow/tensorflow-master/tensorflow/examples/RNN/myfile_test_256.pkl', 'rb')
mydict2_test = pickle.load(pkl_file_test)
pkl_file_test.close()
mat_contents = sio.loadmat('/media/titanz/Data3TB/tensorflowlist/species/ClassID_CNNID.mat')
classIDList = mat_contents['ClassID_CNNID']
mediafolderTest_content = '/media/titanz/Data3TB/tensorflowlist/VGG_multipath_res_bn_lastconv/test_obs_media_content_256/'
mat_contents = sio.loadmat('/home/titanz/Documents/SueHan/matlab/PlantClefVGG_net/RNN_plantclef/test_obs_class.mat')
testLabels = mat_contents['test_obs_class']
################################################################################
for count in xrange(obs_tp_media_re.shape[0]):
print('Obs num {:7.0f}'.format(count))
    ObsIDchosen = obs_tp_media_re[count][0].astype(int) - 1  # choose 0 <= x < 13887
Obs_name = testList[ObsIDchosen].astype(int)
Obs_name = str(Obs_name).split('[')
Obs_name = Obs_name[1].split(']')
directory = savefigfile + str(Obs_name[0])
if not os.path.exists(directory):
os.makedirs(directory)
plantclefdata.Reset_index_in_epoch_test(init_v = ObsIDchosen)
(test_data_x, test_data_conv), test_label = plantclefdata.PrepareTestingBatch(test_num_total)
pred, alpha_forward1, alpha_forward2, alpha_forward3, alpha_backward1, alpha_backward2, alpha_backward3 = sess.run(model.alpha_list_com, feed_dict={x: test_data_x, data: test_data_conv, batch_size2: test_num_total, target: test_label, dropout: dropTest})#, batch_size: batch_size})
pred_re = pred[0,:,:]
B = np.argmax(pred_re,axis=1)
alpha_forward1 = np.array(alpha_forward1).swapaxes(1,0) # alpha(max_seq, batch, 196)
alpha_forward2 = np.array(alpha_forward2).swapaxes(1,0) # alpha(max_seq, batch, 196)
alpha_forward3 = np.array(alpha_forward3).swapaxes(1,0) # alpha(max_seq, batch, 196)
mat_contents2 = sio.loadmat(mediafolderTest_content + str(mydict2_test[ObsIDchosen][0]) + '.mat',mat_dtype=True)
used = mat_contents2['v']
alphas1 = np.array(alpha_forward1).swapaxes(1,0) # alpha(max_seq, batch, 196)
alphas1 = alphas1[0:used.shape[1],:,:]
alphas2 = np.array(alpha_forward2).swapaxes(1,0) # alpha(max_seq, batch, 196)
alphas2 = alphas2[0:used.shape[1],:,:]
alphas3 = np.array(alpha_forward3).swapaxes(1,0) # alpha(max_seq, batch, 196)
alphas3 = alphas3[0:used.shape[1],:,:]
pred_re = pred[0,:,:]
pred_re = pred_re[0:used.shape[1],:]
B = np.argmax(pred_re,axis=1)
B = B[0:used.shape[1]]
class_picken = testLabels[ObsIDchosen]
class_picken = class_picken.astype(int)
    index_plot = 1
    index_plotnc = 1
    for ii in xrange(alphas1.shape[0]):  # one iteration per image in this observation
organlabel = int(used[0,ii])
if organlabel == 0:
organlabelD = 'Branch'
elif organlabel == 1:
organlabelD = 'Entire'
elif organlabel == 2:
organlabelD = 'Flower'
elif organlabel == 3:
organlabelD = 'Fruit'
elif organlabel == 4:
organlabelD = 'Leaf'
elif organlabel == 5:
organlabelD = 'LeafScan'
else:
organlabelD = 'Stem'
plt.figure(1)
L = mydict2_test[ObsIDchosen][1][ii].split('/')
name_str = L[len(L)-1]
name_str2 = name_str.split('.mat')
name_str3 = name_str2[0]
path = path_folder + '{:04d}'.format(class_picken[0]) + '-' + str(classIDList[class_picken[0]][0]) +'/' + name_str3
img = plantclefdata.crop_image(path)
plt.imshow(img)
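        # Upsample the 14x14 attention map to the 224x224 crop before overlaying:
        # pyramid_expand(upscale=16, sigma=20) gives a smoothed heat map, while a
        # plain resize gives a blockier one.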
if smooth:
alpha_img = skimage.transform.pyramid_expand(alphas1[ii,0,:].reshape(14,14), upscale=16, sigma=20)
else:
alpha_img = skimage.transform.resize(alphas1[ii,0,:].reshape(14,14), [img.shape[0], img.shape[1]])
plt.imshow(alpha_img, alpha=0.7)
plt.set_cmap(cm.Greys_r)
plt.axis('off')
plt.savefig(directory + '/' + "course" + str(ii) + ".png")
plt.imshow(img)
plt.axis('off')
lab2 = organlabelD + '_'+ str(class_picken[0]) + '-' + str(B[ii])
plt.savefig(directory + '/' + lab2 + '_' + str(ii) + ".png")
plt.figure(2)
plt.imshow(img)
if smooth:
alpha_img = skimage.transform.pyramid_expand(alphas2[ii,0,:].reshape(14,14), upscale=16, sigma=20)
else:
alpha_img = skimage.transform.resize(alphas2[ii,0,:].reshape(14,14), [img.shape[0], img.shape[1]])
plt.imshow(alpha_img, alpha=0.7) # show attn
plt.set_cmap(cm.Greys_r)
plt.axis('off')
plt.savefig(directory + '/' + "fine" + str(ii) + ".png")
| bsd-3-clause |
lin-credible/scikit-learn | examples/tree/plot_iris.py | 271 | 2186 | """
================================================================
Plot the decision surface of a decision tree on the iris dataset
================================================================
Plot the decision surface of a decision tree trained on pairs
of features of the iris dataset.
See :ref:`decision tree <tree>` for more information on the estimator.
For each pair of iris features, the decision tree learns decision
boundaries made of combinations of simple thresholding rules inferred from
the training samples.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
plot_colors = "bry"
plot_step = 0.02
# Load data
iris = load_iris()
for pairidx, pair in enumerate([[0, 1], [0, 2], [0, 3],
[1, 2], [1, 3], [2, 3]]):
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = DecisionTreeClassifier().fit(X, y)
# Plot the decision boundary
plt.subplot(2, 3, pairidx + 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
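    # Evaluate the fitted tree over a dense grid and colour each cell by its
    # predicted class to reveal the axis-aligned decision boundaries.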
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.xlabel(iris.feature_names[pair[0]])
plt.ylabel(iris.feature_names[pair[1]])
plt.axis("tight")
# Plot the training points
for i, color in zip(range(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.axis("tight")
plt.suptitle("Decision surface of a decision tree using paired features")
plt.legend()
plt.show()
| bsd-3-clause |
rhyolight/nupic.research | projects/sequence_prediction/continuous_sequence/run_elm.py | 10 | 8061 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import csv
from optparse import OptionParser
from matplotlib import pyplot as plt
import numpy as np
from hpelm import ELM
from swarm_runner import SwarmRunner
from scipy import random
import pandas as pd
from htmresearch.support.sequence_learning_utils import *
from htmresearch.algorithms.online_extreme_learning_machine import OSELM
plt.ion()
def initializeELMnet(nDimInput, nDimOutput, numNeurons=10):
# Build ELM network with nDim input units,
# numNeurons hidden units (LSTM cells) and nDimOutput cells
# net = ELM(nDimInput, nDimOutput)
# net.add_neurons(numNeurons, "sigm")
net = OSELM(nDimInput, nDimOutput,
numHiddenNeurons=numNeurons, activationFunction='sig')
return net
def readDataSet(dataSet):
filePath = 'data/'+dataSet+'.csv'
# df = pd.read_csv(filePath, header=0, skiprows=[1, 2], names=['time', 'data'])
# sequence = df['data']
if dataSet=='nyc_taxi':
df = pd.read_csv(filePath, header=0, skiprows=[1,2],
names=['time', 'data', 'timeofday', 'dayofweek'])
sequence = df['data']
dayofweek = df['dayofweek']
timeofday = df['timeofday']
seq = pd.DataFrame(np.array(pd.concat([sequence, timeofday, dayofweek], axis=1)),
columns=['data', 'timeofday', 'dayofweek'])
elif dataSet=='sine':
df = pd.read_csv(filePath, header=0, skiprows=[1, 2], names=['time', 'data'])
sequence = df['data']
seq = pd.DataFrame(np.array(sequence), columns=['data'])
else:
    raise ValueError('unrecognized dataset type')
return seq
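# Build a lagged (time-embedded) design matrix: row i holds the numLags values
# ending at index i, optionally followed by the current time-of-day and
# day-of-week, and the target is the value predictionStep steps ahead.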
def getTimeEmbeddedMatrix(sequence, numLags=100, predictionStep=1,
useTimeOfDay=True, useDayOfWeek=True):
print "generate time embedded matrix "
print "the training data contains ", str(nTrain-predictionStep), "records"
inDim = numLags + int(useTimeOfDay) + int(useDayOfWeek)
if useTimeOfDay:
print "include time of day as input field"
if useDayOfWeek:
print "include day of week as input field"
X = np.zeros(shape=(len(sequence), inDim))
T = np.zeros(shape=(len(sequence), 1))
for i in xrange(numLags-1, len(sequence)-predictionStep):
if useTimeOfDay and useDayOfWeek:
sample = np.concatenate([np.array(sequence['data'][(i-numLags+1):(i+1)]),
np.array([sequence['timeofday'][i]]),
np.array([sequence['dayofweek'][i]])])
elif useTimeOfDay:
sample = np.concatenate([np.array(sequence['data'][(i-numLags+1):(i+1)]),
np.array([sequence['timeofday'][i]])])
elif useDayOfWeek:
sample = np.concatenate([np.array(sequence['data'][(i-numLags+1):(i+1)]),
np.array([sequence['dayofweek'][i]])])
else:
sample = np.array(sequence['data'][(i-numLags+1):(i+1)])
X[i, :] = sample
T[i, :] = sequence['data'][i+predictionStep]
return (X, T)
def _getArgs():
parser = OptionParser(usage="%prog PARAMS_DIR OUTPUT_DIR [options]"
"\n\nCompare TM performance with trivial predictor using "
"model outputs in prediction directory "
"and outputting results to result directory.")
parser.add_option("-d",
"--dataSet",
type=str,
default='nyc_taxi',
dest="dataSet",
help="DataSet Name, choose from sine, SantaFe_A, MackeyGlass")
# parser.add_option("-n",
# "--predictionstep",
# type=int,
# default=1,
# dest="predictionstep",
# help="number of steps ahead to be predicted")
(options, remainder) = parser.parse_args()
print options
return options, remainder
def saveResultToFile(dataSet, predictedInput, algorithmName):
inputFileName = 'data/' + dataSet + '.csv'
inputFile = open(inputFileName, "rb")
csvReader = csv.reader(inputFile)
# skip header rows
csvReader.next()
csvReader.next()
csvReader.next()
outputFileName = './prediction/' + dataSet + '_' + algorithmName + '_pred.csv'
outputFile = open(outputFileName, "w")
csvWriter = csv.writer(outputFile)
csvWriter.writerow(
['timestamp', 'data', 'prediction-' + str(predictionStep) + 'step'])
csvWriter.writerow(['datetime', 'float', 'float'])
csvWriter.writerow(['', '', ''])
for i in xrange(len(sequence)):
row = csvReader.next()
csvWriter.writerow([row[0], row[1], predictedInput[i]])
inputFile.close()
outputFile.close()
if __name__ == "__main__":
(_options, _args) = _getArgs()
dataSet = _options.dataSet
print "run ELM on ", dataSet
SWARM_CONFIG = SwarmRunner.importSwarmDescription(dataSet)
predictedField = SWARM_CONFIG['inferenceArgs']['predictedField']
nTrain = SWARM_CONFIG["streamDef"]['streams'][0]['last_record']
predictionStep = SWARM_CONFIG['inferenceArgs']['predictionSteps'][0]
useTimeOfDay = True
useDayOfWeek = True
nTrain = 500
numLags = 100
# prepare dataset as pyBrain sequential dataset
sequence = readDataSet(dataSet)
# standardize data by subtracting mean and dividing by std
meanSeq = np.mean(sequence['data'])
stdSeq = np.std(sequence['data'])
sequence['data'] = (sequence['data'] - meanSeq)/stdSeq
meanTimeOfDay = np.mean(sequence['timeofday'])
stdTimeOfDay = np.std(sequence['timeofday'])
sequence['timeofday'] = (sequence['timeofday'] - meanTimeOfDay)/stdTimeOfDay
meanDayOfWeek = np.mean(sequence['dayofweek'])
stdDayOfWeek = np.std(sequence['dayofweek'])
sequence['dayofweek'] = (sequence['dayofweek'] - meanDayOfWeek)/stdDayOfWeek
(X, T) = getTimeEmbeddedMatrix(sequence, numLags, predictionStep,
useTimeOfDay, useDayOfWeek)
random.seed(6)
net = initializeELMnet(nDimInput=X.shape[1],
nDimOutput=1, numNeurons=50)
net.initializePhase(X[:nTrain, :], T[:nTrain, :])
predictedInput = np.zeros((len(sequence),))
targetInput = np.zeros((len(sequence),))
trueData = np.zeros((len(sequence),))
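  # Online phase: each remaining sample first updates the OS-ELM via net.train
  # and is then passed through net.predict; the target is the series value
  # predictionStep steps ahead of the current index.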
for i in xrange(nTrain, len(sequence)-predictionStep):
net.train(X[[i], :], T[[i], :])
Y = net.predict(X[[i], :])
predictedInput[i] = Y[-1]
targetInput[i] = sequence['data'][i+predictionStep]
trueData[i] = sequence['data'][i]
print "Iteration {} target input {:2.2f} predicted Input {:2.2f} ".format(
i, targetInput[i], predictedInput[i])
predictedInput = (predictedInput * stdSeq) + meanSeq
targetInput = (targetInput * stdSeq) + meanSeq
trueData = (trueData * stdSeq) + meanSeq
saveResultToFile(dataSet, predictedInput, 'elm')
plt.figure()
plt.plot(targetInput)
plt.plot(predictedInput)
plt.xlim([12800, 13500])
plt.ylim([0, 30000])
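  # Score the online predictions with NRMSE = RMSE / std(target), discarding the
  # first skipTrain points as a warm-up period.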
skipTrain = 6000
from plot import computeSquareDeviation
squareDeviation = computeSquareDeviation(predictedInput, targetInput)
squareDeviation[:skipTrain] = None
nrmse = np.sqrt(np.nanmean(squareDeviation)) / np.nanstd(targetInput)
print "NRMSE {}".format(nrmse) | gpl-3.0 |
niun/pyoscope | tests/display_wr.py | 2 | 1164 | #!/usr/bin/env python
#
# PyUSBtmc
# display_channel.py
#
# Copyright (c) 2011 Mike Hadmack
# Copyright (c) 2010 Matt Mets
# This code is distributed under the MIT license
#
# This script is just to test waverunner functionality as a module
#
import numpy
from matplotlib import pyplot
import sys
import os
sys.path.append(os.path.expanduser('.'))
from waverunner import Waverunner
""" Example program to plot the Y-T data from one scope channel
derived from capture_channel_1.py but using new interface methods """
# Initialize our scope
scope = Waverunner("127.0.0.1")
scope.grabData()
data1 = scope.getScaledWaveform(1)
data2 = scope.getScaledWaveform(2)
# Now, generate a time axis.
time = scope.getTimeAxis()
# See if we should use a different time axis
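# (the trace is assumed to contain 600 samples, hence the hard-coded index 599)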
if (time[599] < 1e-3):
time = time * 1e6
tUnit = "uS"
elif (time[599] < 1):
time = time * 1e3
tUnit = "mS"
else:
tUnit = "S"
# close interface
scope.close()
# Plot the data
pyplot.plot(time,data1)
pyplot.plot(time,data2)
pyplot.title("Oscilloscope Data")
pyplot.ylabel("Voltage (V)")
pyplot.xlabel("Time (" + tUnit + ")")
pyplot.xlim(time[0], time[599])
pyplot.show()
| mit |
rgommers/numpy | numpy/core/numeric.py | 7 | 76727 | import functools
import itertools
import operator
import sys
import warnings
import numbers
import numpy as np
from . import multiarray
from .multiarray import (
_fastCopyAndTranspose as fastCopyAndTranspose, ALLOW_THREADS,
BUFSIZE, CLIP, MAXDIMS, MAY_SHARE_BOUNDS, MAY_SHARE_EXACT, RAISE,
WRAP, arange, array, asarray, asanyarray, ascontiguousarray,
asfortranarray, broadcast, can_cast, compare_chararrays,
concatenate, copyto, dot, dtype, empty,
empty_like, flatiter, frombuffer, fromfile, fromiter, fromstring,
inner, lexsort, matmul, may_share_memory,
min_scalar_type, ndarray, nditer, nested_iters, promote_types,
putmask, result_type, set_numeric_ops, shares_memory, vdot, where,
zeros, normalize_axis_index)
from . import overrides
from . import umath
from . import shape_base
from .overrides import set_array_function_like_doc, set_module
from .umath import (multiply, invert, sin, PINF, NAN)
from . import numerictypes
from .numerictypes import longlong, intc, int_, float_, complex_, bool_
from ._exceptions import TooHardError, AxisError
from ._ufunc_config import errstate
bitwise_not = invert
ufunc = type(sin)
newaxis = None
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
__all__ = [
'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
'arange', 'array', 'asarray', 'asanyarray', 'ascontiguousarray',
'asfortranarray', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype',
'fromstring', 'fromfile', 'frombuffer', 'where',
'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose', 'lexsort',
'set_numeric_ops', 'can_cast', 'promote_types', 'min_scalar_type',
'result_type', 'isfortran', 'empty_like', 'zeros_like', 'ones_like',
'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll',
'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian',
'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction',
'isclose', 'isscalar', 'binary_repr', 'base_repr', 'ones',
'identity', 'allclose', 'compare_chararrays', 'putmask',
'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN',
'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS',
'BUFSIZE', 'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like',
'matmul', 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS',
'MAY_SHARE_EXACT', 'TooHardError', 'AxisError']
@set_module('numpy')
class ComplexWarning(RuntimeWarning):
"""
The warning raised when casting a complex dtype to a real dtype.
As implemented, casting a complex number to a real discards its imaginary
part, but this behavior may not be what the user actually wants.
"""
pass
def _zeros_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None):
return (a,)
@array_function_dispatch(_zeros_like_dispatcher)
def zeros_like(a, dtype=None, order='K', subok=True, shape=None):
"""
Return an array of zeros with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
.. versionadded:: 1.6.0
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible.
.. versionadded:: 1.6.0
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of `a`, otherwise it will be a base-class array. Defaults
to True.
shape : int or sequence of ints, optional.
Overrides the shape of the result. If order='K' and the number of
dimensions is unchanged, will try to keep order, otherwise,
order='C' is implied.
.. versionadded:: 1.17.0
Returns
-------
out : ndarray
Array of zeros with the same shape and type as `a`.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
full_like : Return a new array with shape of input filled with value.
zeros : Return a new array setting values to zero.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.zeros_like(x)
array([[0, 0, 0],
[0, 0, 0]])
>>> y = np.arange(3, dtype=float)
>>> y
array([0., 1., 2.])
>>> np.zeros_like(y)
array([0., 0., 0.])
"""
res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
    # needed instead of a 0 to get same result as zeros for string dtypes
z = zeros(1, dtype=res.dtype)
multiarray.copyto(res, z, casting='unsafe')
return res
def _ones_dispatcher(shape, dtype=None, order=None, *, like=None):
return(like,)
@set_array_function_like_doc
@set_module('numpy')
def ones(shape, dtype=None, order='C', *, like=None):
"""
Return a new array of given shape and type, filled with ones.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
The desired data-type for the array, e.g., `numpy.int8`. Default is
`numpy.float64`.
order : {'C', 'F'}, optional, default: C
Whether to store multi-dimensional data in row-major
(C-style) or column-major (Fortran-style) order in
memory.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
Array of ones with the given shape, dtype, and order.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
empty : Return a new uninitialized array.
zeros : Return a new array setting values to zero.
full : Return a new array of given shape filled with value.
Examples
--------
>>> np.ones(5)
array([1., 1., 1., 1., 1.])
>>> np.ones((5,), dtype=int)
array([1, 1, 1, 1, 1])
>>> np.ones((2, 1))
array([[1.],
[1.]])
>>> s = (2,2)
>>> np.ones(s)
array([[1., 1.],
[1., 1.]])
"""
if like is not None:
return _ones_with_like(shape, dtype=dtype, order=order, like=like)
a = empty(shape, dtype, order)
multiarray.copyto(a, 1, casting='unsafe')
return a
_ones_with_like = array_function_dispatch(
_ones_dispatcher
)(ones)
def _ones_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None):
return (a,)
@array_function_dispatch(_ones_like_dispatcher)
def ones_like(a, dtype=None, order='K', subok=True, shape=None):
"""
Return an array of ones with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
.. versionadded:: 1.6.0
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible.
.. versionadded:: 1.6.0
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of `a`, otherwise it will be a base-class array. Defaults
to True.
shape : int or sequence of ints, optional.
Overrides the shape of the result. If order='K' and the number of
dimensions is unchanged, will try to keep order, otherwise,
order='C' is implied.
.. versionadded:: 1.17.0
Returns
-------
out : ndarray
Array of ones with the same shape and type as `a`.
See Also
--------
empty_like : Return an empty array with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
ones : Return a new array setting values to one.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.ones_like(x)
array([[1, 1, 1],
[1, 1, 1]])
>>> y = np.arange(3, dtype=float)
>>> y
array([0., 1., 2.])
>>> np.ones_like(y)
array([1., 1., 1.])
"""
res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
multiarray.copyto(res, 1, casting='unsafe')
return res
def _full_dispatcher(shape, fill_value, dtype=None, order=None, *, like=None):
return(like,)
@set_array_function_like_doc
@set_module('numpy')
def full(shape, fill_value, dtype=None, order='C', *, like=None):
"""
Return a new array of given shape and type, filled with `fill_value`.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
fill_value : scalar or array_like
Fill value.
dtype : data-type, optional
The desired data-type for the array The default, None, means
``np.array(fill_value).dtype``.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
Array of `fill_value` with the given shape, dtype, and order.
See Also
--------
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
Examples
--------
>>> np.full((2, 2), np.inf)
array([[inf, inf],
[inf, inf]])
>>> np.full((2, 2), 10)
array([[10, 10],
[10, 10]])
>>> np.full((2, 2), [1, 2])
array([[1, 2],
[1, 2]])
"""
if like is not None:
return _full_with_like(shape, fill_value, dtype=dtype, order=order, like=like)
if dtype is None:
fill_value = asarray(fill_value)
dtype = fill_value.dtype
a = empty(shape, dtype, order)
multiarray.copyto(a, fill_value, casting='unsafe')
return a
_full_with_like = array_function_dispatch(
_full_dispatcher
)(full)
def _full_like_dispatcher(a, fill_value, dtype=None, order=None, subok=None, shape=None):
return (a,)
@array_function_dispatch(_full_like_dispatcher)
def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None):
"""
Return a full array with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
fill_value : scalar
Fill value.
dtype : data-type, optional
Overrides the data type of the result.
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
'C' otherwise. 'K' means match the layout of `a` as closely
as possible.
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of `a`, otherwise it will be a base-class array. Defaults
to True.
shape : int or sequence of ints, optional.
Overrides the shape of the result. If order='K' and the number of
dimensions is unchanged, will try to keep order, otherwise,
order='C' is implied.
.. versionadded:: 1.17.0
Returns
-------
out : ndarray
Array of `fill_value` with the same shape and type as `a`.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full : Return a new array of given shape filled with value.
Examples
--------
>>> x = np.arange(6, dtype=int)
>>> np.full_like(x, 1)
array([1, 1, 1, 1, 1, 1])
>>> np.full_like(x, 0.1)
array([0, 0, 0, 0, 0, 0])
>>> np.full_like(x, 0.1, dtype=np.double)
array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
>>> np.full_like(x, np.nan, dtype=np.double)
array([nan, nan, nan, nan, nan, nan])
>>> y = np.arange(6, dtype=np.double)
>>> np.full_like(y, 0.1)
array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
"""
res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
multiarray.copyto(res, fill_value, casting='unsafe')
return res
def _count_nonzero_dispatcher(a, axis=None, *, keepdims=None):
return (a,)
@array_function_dispatch(_count_nonzero_dispatcher)
def count_nonzero(a, axis=None, *, keepdims=False):
"""
Counts the number of non-zero values in the array ``a``.
The word "non-zero" is in reference to the Python 2.x
built-in method ``__nonzero__()`` (renamed ``__bool__()``
in Python 3.x) of Python objects that tests an object's
"truthfulness". For example, any number is considered
truthful if it is nonzero, whereas any string is considered
truthful if it is not the empty string. Thus, this function
(recursively) counts how many elements in ``a`` (and in
sub-arrays thereof) have their ``__nonzero__()`` or ``__bool__()``
method evaluated to ``True``.
Parameters
----------
a : array_like
The array for which to count non-zeros.
axis : int or tuple, optional
Axis or tuple of axes along which to count non-zeros.
Default is None, meaning that non-zeros will be counted
along a flattened version of ``a``.
.. versionadded:: 1.12.0
keepdims : bool, optional
If this is set to True, the axes that are counted are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
.. versionadded:: 1.19.0
Returns
-------
count : int or array of int
Number of non-zero values in the array along a given axis.
Otherwise, the total number of non-zero values in the array
is returned.
See Also
--------
nonzero : Return the coordinates of all the non-zero values.
Examples
--------
>>> np.count_nonzero(np.eye(4))
4
>>> a = np.array([[0, 1, 7, 0],
... [3, 0, 2, 19]])
>>> np.count_nonzero(a)
5
>>> np.count_nonzero(a, axis=0)
array([1, 1, 2, 1])
>>> np.count_nonzero(a, axis=1)
array([2, 3])
>>> np.count_nonzero(a, axis=1, keepdims=True)
array([[2],
[3]])
"""
if axis is None and not keepdims:
return multiarray.count_nonzero(a)
a = asanyarray(a)
# TODO: this works around .astype(bool) not working properly (gh-9847)
if np.issubdtype(a.dtype, np.character):
a_bool = a != a.dtype.type()
else:
a_bool = a.astype(np.bool_, copy=False)
return a_bool.sum(axis=axis, dtype=np.intp, keepdims=keepdims)
@set_module('numpy')
def isfortran(a):
"""
Check if the array is Fortran contiguous but *not* C contiguous.
This function is obsolete and, because of changes due to relaxed stride
checking, its return value for the same array may differ for versions
of NumPy >= 1.10.0 and previous versions. If you only want to check if an
array is Fortran contiguous use ``a.flags.f_contiguous`` instead.
Parameters
----------
a : ndarray
Input array.
Returns
-------
isfortran : bool
Returns True if the array is Fortran contiguous but *not* C contiguous.
Examples
--------
    np.array allows you to specify whether the array is written in C-contiguous
order (last index varies the fastest), or FORTRAN-contiguous order in
memory (first index varies the fastest).
>>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(a)
False
>>> b = np.array([[1, 2, 3], [4, 5, 6]], order='F')
>>> b
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(b)
True
The transpose of a C-ordered array is a FORTRAN-ordered array.
>>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.isfortran(a)
False
>>> b = a.T
>>> b
array([[1, 4],
[2, 5],
[3, 6]])
>>> np.isfortran(b)
True
C-ordered arrays evaluate as False even if they are also FORTRAN-ordered.
>>> np.isfortran(np.array([1, 2], order='F'))
False
"""
return a.flags.fnc
def _argwhere_dispatcher(a):
return (a,)
@array_function_dispatch(_argwhere_dispatcher)
def argwhere(a):
"""
Find the indices of array elements that are non-zero, grouped by element.
Parameters
----------
a : array_like
Input data.
Returns
-------
index_array : (N, a.ndim) ndarray
Indices of elements that are non-zero. Indices are grouped by element.
This array will have shape ``(N, a.ndim)`` where ``N`` is the number of
non-zero items.
See Also
--------
where, nonzero
Notes
-----
``np.argwhere(a)`` is almost the same as ``np.transpose(np.nonzero(a))``,
but produces a result of the correct shape for a 0D array.
The output of ``argwhere`` is not suitable for indexing arrays.
For this purpose use ``nonzero(a)`` instead.
Examples
--------
>>> x = np.arange(6).reshape(2,3)
>>> x
array([[0, 1, 2],
[3, 4, 5]])
>>> np.argwhere(x>1)
array([[0, 2],
[1, 0],
[1, 1],
[1, 2]])
"""
# nonzero does not behave well on 0d, so promote to 1d
if np.ndim(a) == 0:
a = shape_base.atleast_1d(a)
# then remove the added dimension
return argwhere(a)[:,:0]
return transpose(nonzero(a))
def _flatnonzero_dispatcher(a):
return (a,)
@array_function_dispatch(_flatnonzero_dispatcher)
def flatnonzero(a):
"""
Return indices that are non-zero in the flattened version of a.
This is equivalent to np.nonzero(np.ravel(a))[0].
Parameters
----------
a : array_like
Input data.
Returns
-------
res : ndarray
Output array, containing the indices of the elements of `a.ravel()`
that are non-zero.
See Also
--------
nonzero : Return the indices of the non-zero elements of the input array.
ravel : Return a 1-D array containing the elements of the input array.
Examples
--------
>>> x = np.arange(-2, 3)
>>> x
array([-2, -1, 0, 1, 2])
>>> np.flatnonzero(x)
array([0, 1, 3, 4])
Use the indices of the non-zero elements as an index array to extract
these elements:
>>> x.ravel()[np.flatnonzero(x)]
array([-2, -1, 1, 2])
"""
return np.nonzero(np.ravel(a))[0]
def _correlate_dispatcher(a, v, mode=None):
return (a, v)
@array_function_dispatch(_correlate_dispatcher)
def correlate(a, v, mode='valid'):
"""
Cross-correlation of two 1-dimensional sequences.
This function computes the correlation as generally defined in signal
processing texts::
c_{av}[k] = sum_n a[n+k] * conj(v[n])
with a and v sequences being zero-padded where necessary and conj being
the conjugate.
Parameters
----------
a, v : array_like
Input sequences.
mode : {'valid', 'same', 'full'}, optional
Refer to the `convolve` docstring. Note that the default
is 'valid', unlike `convolve`, which uses 'full'.
old_behavior : bool
`old_behavior` was removed in NumPy 1.10. If you need the old
behavior, use `multiarray.correlate`.
Returns
-------
out : ndarray
Discrete cross-correlation of `a` and `v`.
See Also
--------
convolve : Discrete, linear convolution of two one-dimensional sequences.
multiarray.correlate : Old, no conjugate, version of correlate.
scipy.signal.correlate : uses FFT which has superior performance on large arrays.
Notes
-----
The definition of correlation above is not unique and sometimes correlation
may be defined differently. Another common definition is::
c'_{av}[k] = sum_n a[n] conj(v[n+k])
which is related to ``c_{av}[k]`` by ``c'_{av}[k] = c_{av}[-k]``.
    `numpy.correlate` may perform slowly in large arrays (e.g. n = 1e5) because it does
not use the FFT to compute the convolution; in that case, `scipy.signal.correlate` might
be preferable.
Examples
--------
>>> np.correlate([1, 2, 3], [0, 1, 0.5])
array([3.5])
>>> np.correlate([1, 2, 3], [0, 1, 0.5], "same")
array([2. , 3.5, 3. ])
>>> np.correlate([1, 2, 3], [0, 1, 0.5], "full")
array([0.5, 2. , 3.5, 3. , 0. ])
Using complex sequences:
>>> np.correlate([1+1j, 2, 3-1j], [0, 1, 0.5j], 'full')
array([ 0.5-0.5j, 1.0+0.j , 1.5-1.5j, 3.0-1.j , 0.0+0.j ])
Note that you get the time reversed, complex conjugated result
when the two input sequences change places, i.e.,
``c_{va}[k] = c^{*}_{av}[-k]``:
>>> np.correlate([0, 1, 0.5j], [1+1j, 2, 3-1j], 'full')
array([ 0.0+0.j , 3.0+1.j , 1.5+1.5j, 1.0+0.j , 0.5+0.5j])
"""
return multiarray.correlate2(a, v, mode)
def _convolve_dispatcher(a, v, mode=None):
return (a, v)
@array_function_dispatch(_convolve_dispatcher)
def convolve(a, v, mode='full'):
"""
Returns the discrete, linear convolution of two one-dimensional sequences.
The convolution operator is often seen in signal processing, where it
models the effect of a linear time-invariant system on a signal [1]_. In
probability theory, the sum of two independent random variables is
distributed according to the convolution of their individual
distributions.
If `v` is longer than `a`, the arrays are swapped before computation.
Parameters
----------
a : (N,) array_like
First one-dimensional input array.
v : (M,) array_like
Second one-dimensional input array.
mode : {'full', 'valid', 'same'}, optional
'full':
By default, mode is 'full'. This returns the convolution
at each point of overlap, with an output shape of (N+M-1,). At
the end-points of the convolution, the signals do not overlap
completely, and boundary effects may be seen.
'same':
Mode 'same' returns output of length ``max(M, N)``. Boundary
effects are still visible.
'valid':
Mode 'valid' returns output of length
``max(M, N) - min(M, N) + 1``. The convolution product is only given
for points where the signals overlap completely. Values outside
the signal boundary have no effect.
Returns
-------
out : ndarray
Discrete, linear convolution of `a` and `v`.
See Also
--------
scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier
Transform.
scipy.linalg.toeplitz : Used to construct the convolution operator.
polymul : Polynomial multiplication. Same output as convolve, but also
accepts poly1d objects as input.
Notes
-----
The discrete convolution operation is defined as
.. math:: (a * v)[n] = \\sum_{m = -\\infty}^{\\infty} a[m] v[n - m]
It can be shown that a convolution :math:`x(t) * y(t)` in time/space
is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier
domain, after appropriate padding (padding is necessary to prevent
circular convolution). Since multiplication is more efficient (faster)
than convolution, the function `scipy.signal.fftconvolve` exploits the
FFT to calculate the convolution of large data-sets.
References
----------
.. [1] Wikipedia, "Convolution",
https://en.wikipedia.org/wiki/Convolution
Examples
--------
Note how the convolution operator flips the second array
before "sliding" the two across one another:
>>> np.convolve([1, 2, 3], [0, 1, 0.5])
array([0. , 1. , 2.5, 4. , 1.5])
Only return the middle values of the convolution.
Contains boundary effects, where zeros are taken
into account:
>>> np.convolve([1,2,3],[0,1,0.5], 'same')
array([1. , 2.5, 4. ])
The two arrays are of the same length, so there
is only one position where they completely overlap:
>>> np.convolve([1,2,3],[0,1,0.5], 'valid')
array([2.5])
"""
a, v = array(a, copy=False, ndmin=1), array(v, copy=False, ndmin=1)
if (len(v) > len(a)):
a, v = v, a
if len(a) == 0:
raise ValueError('a cannot be empty')
if len(v) == 0:
raise ValueError('v cannot be empty')
return multiarray.correlate(a, v[::-1], mode)
def _outer_dispatcher(a, b, out=None):
return (a, b, out)
@array_function_dispatch(_outer_dispatcher)
def outer(a, b, out=None):
"""
Compute the outer product of two vectors.
Given two vectors, ``a = [a0, a1, ..., aM]`` and
``b = [b0, b1, ..., bN]``,
the outer product [1]_ is::
[[a0*b0 a0*b1 ... a0*bN ]
[a1*b0 .
[ ... .
[aM*b0 aM*bN ]]
Parameters
----------
a : (M,) array_like
First input vector. Input is flattened if
not already 1-dimensional.
b : (N,) array_like
Second input vector. Input is flattened if
not already 1-dimensional.
out : (M, N) ndarray, optional
A location where the result is stored
.. versionadded:: 1.9.0
Returns
-------
out : (M, N) ndarray
``out[i, j] = a[i] * b[j]``
See also
--------
inner
einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.
ufunc.outer : A generalization to dimensions other than 1D and other
operations. ``np.multiply.outer(a.ravel(), b.ravel())``
is the equivalent.
tensordot : ``np.tensordot(a.ravel(), b.ravel(), axes=((), ()))``
is the equivalent.
References
----------
.. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd
ed., Baltimore, MD, Johns Hopkins University Press, 1996,
pg. 8.
Examples
--------
Make a (*very* coarse) grid for computing a Mandelbrot set:
>>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
>>> rl
array([[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.]])
>>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,)))
>>> im
array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j],
[0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j],
[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j],
[0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]])
>>> grid = rl + im
>>> grid
array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j],
[-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j],
[-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j],
[-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j],
[-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]])
An example using a "vector" of letters:
>>> x = np.array(['a', 'b', 'c'], dtype=object)
>>> np.outer(x, [1, 2, 3])
array([['a', 'aa', 'aaa'],
['b', 'bb', 'bbb'],
['c', 'cc', 'ccc']], dtype=object)
"""
a = asarray(a)
b = asarray(b)
return multiply(a.ravel()[:, newaxis], b.ravel()[newaxis, :], out)
def _tensordot_dispatcher(a, b, axes=None):
return (a, b)
@array_function_dispatch(_tensordot_dispatcher)
def tensordot(a, b, axes=2):
"""
Compute tensor dot product along specified axes.
Given two tensors, `a` and `b`, and an array_like object containing
two array_like objects, ``(a_axes, b_axes)``, sum the products of
`a`'s and `b`'s elements (components) over the axes specified by
``a_axes`` and ``b_axes``. The third argument can be a single non-negative
integer_like scalar, ``N``; if it is such, then the last ``N`` dimensions
of `a` and the first ``N`` dimensions of `b` are summed over.
Parameters
----------
a, b : array_like
Tensors to "dot".
axes : int or (2,) array_like
* integer_like
If an int N, sum over the last N axes of `a` and the first N axes
of `b` in order. The sizes of the corresponding axes must match.
* (2,) array_like
Or, a list of axes to be summed over, first sequence applying to `a`,
second to `b`. Both elements array_like must be of the same length.
Returns
-------
output : ndarray
The tensor dot product of the input.
See Also
--------
dot, einsum
Notes
-----
Three common use cases are:
* ``axes = 0`` : tensor product :math:`a\\otimes b`
* ``axes = 1`` : tensor dot product :math:`a\\cdot b`
* ``axes = 2`` : (default) tensor double contraction :math:`a:b`
When `axes` is integer_like, the sequence for evaluation will be: first
the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and
Nth axis in `b` last.
When there is more than one axis to sum over - and they are not the last
(first) axes of `a` (`b`) - the argument `axes` should consist of
two sequences of the same length, with the first axis to sum over given
first in both sequences, the second axis second, and so forth.
The shape of the result consists of the non-contracted axes of the
first tensor, followed by the non-contracted axes of the second.
Examples
--------
A "traditional" example:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
>>> c.shape
(5, 2)
>>> c
array([[4400., 4730.],
[4532., 4874.],
[4664., 5018.],
[4796., 5162.],
[4928., 5306.]])
>>> # A slower but equivalent way of computing the same...
>>> d = np.zeros((5,2))
>>> for i in range(5):
... for j in range(2):
... for k in range(3):
... for n in range(4):
... d[i,j] += a[k,n,i] * b[n,k,j]
>>> c == d
array([[ True, True],
[ True, True],
[ True, True],
[ True, True],
[ True, True]])
An extended example taking advantage of the overloading of + and \\*:
>>> a = np.array(range(1, 9))
>>> a.shape = (2, 2, 2)
>>> A = np.array(('a', 'b', 'c', 'd'), dtype=object)
>>> A.shape = (2, 2)
>>> a; A
array([[[1, 2],
[3, 4]],
[[5, 6],
[7, 8]]])
array([['a', 'b'],
['c', 'd']], dtype=object)
>>> np.tensordot(a, A) # third argument default is 2 for double-contraction
array(['abbcccdddd', 'aaaaabbbbbbcccccccdddddddd'], dtype=object)
>>> np.tensordot(a, A, 1)
array([[['acc', 'bdd'],
['aaacccc', 'bbbdddd']],
[['aaaaacccccc', 'bbbbbdddddd'],
['aaaaaaacccccccc', 'bbbbbbbdddddddd']]], dtype=object)
>>> np.tensordot(a, A, 0) # tensor product (result too long to incl.)
array([[[[['a', 'b'],
['c', 'd']],
...
>>> np.tensordot(a, A, (0, 1))
array([[['abbbbb', 'cddddd'],
['aabbbbbb', 'ccdddddd']],
[['aaabbbbbbb', 'cccddddddd'],
['aaaabbbbbbbb', 'ccccdddddddd']]], dtype=object)
>>> np.tensordot(a, A, (2, 1))
array([[['abb', 'cdd'],
['aaabbbb', 'cccdddd']],
[['aaaaabbbbbb', 'cccccdddddd'],
['aaaaaaabbbbbbbb', 'cccccccdddddddd']]], dtype=object)
>>> np.tensordot(a, A, ((0, 1), (0, 1)))
array(['abbbcccccddddddd', 'aabbbbccccccdddddddd'], dtype=object)
>>> np.tensordot(a, A, ((2, 1), (1, 0)))
array(['acccbbdddd', 'aaaaacccccccbbbbbbdddddddd'], dtype=object)
"""
try:
iter(axes)
except Exception:
axes_a = list(range(-axes, 0))
axes_b = list(range(0, axes))
else:
axes_a, axes_b = axes
try:
na = len(axes_a)
axes_a = list(axes_a)
except TypeError:
axes_a = [axes_a]
na = 1
try:
nb = len(axes_b)
axes_b = list(axes_b)
except TypeError:
axes_b = [axes_b]
nb = 1
a, b = asarray(a), asarray(b)
as_ = a.shape
nda = a.ndim
bs = b.shape
ndb = b.ndim
equal = True
if na != nb:
equal = False
else:
for k in range(na):
if as_[axes_a[k]] != bs[axes_b[k]]:
equal = False
break
if axes_a[k] < 0:
axes_a[k] += nda
if axes_b[k] < 0:
axes_b[k] += ndb
if not equal:
raise ValueError("shape-mismatch for sum")
# Move the axes to sum over to the end of "a"
# and to the front of "b"
notin = [k for k in range(nda) if k not in axes_a]
newaxes_a = notin + axes_a
N2 = 1
for axis in axes_a:
N2 *= as_[axis]
newshape_a = (int(multiply.reduce([as_[ax] for ax in notin])), N2)
olda = [as_[axis] for axis in notin]
notin = [k for k in range(ndb) if k not in axes_b]
newaxes_b = axes_b + notin
N2 = 1
for axis in axes_b:
N2 *= bs[axis]
newshape_b = (N2, int(multiply.reduce([bs[ax] for ax in notin])))
oldb = [bs[axis] for axis in notin]
at = a.transpose(newaxes_a).reshape(newshape_a)
bt = b.transpose(newaxes_b).reshape(newshape_b)
res = dot(at, bt)
return res.reshape(olda + oldb)
def _roll_dispatcher(a, shift, axis=None):
return (a,)
@array_function_dispatch(_roll_dispatcher)
def roll(a, shift, axis=None):
"""
Roll array elements along a given axis.
Elements that roll beyond the last position are re-introduced at
the first.
Parameters
----------
a : array_like
Input array.
shift : int or tuple of ints
The number of places by which elements are shifted. If a tuple,
then `axis` must be a tuple of the same size, and each of the
given axes is shifted by the corresponding number. If an int
while `axis` is a tuple of ints, then the same value is used for
all given axes.
axis : int or tuple of ints, optional
Axis or axes along which elements are shifted. By default, the
array is flattened before shifting, after which the original
shape is restored.
Returns
-------
res : ndarray
Output array, with the same shape as `a`.
See Also
--------
rollaxis : Roll the specified axis backwards, until it lies in a
given position.
Notes
-----
.. versionadded:: 1.12.0
Supports rolling over multiple dimensions simultaneously.
Examples
--------
>>> x = np.arange(10)
>>> np.roll(x, 2)
array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
>>> np.roll(x, -2)
array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1])
>>> x2 = np.reshape(x, (2,5))
>>> x2
array([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]])
>>> np.roll(x2, 1)
array([[9, 0, 1, 2, 3],
[4, 5, 6, 7, 8]])
>>> np.roll(x2, -1)
array([[1, 2, 3, 4, 5],
[6, 7, 8, 9, 0]])
>>> np.roll(x2, 1, axis=0)
array([[5, 6, 7, 8, 9],
[0, 1, 2, 3, 4]])
>>> np.roll(x2, -1, axis=0)
array([[5, 6, 7, 8, 9],
[0, 1, 2, 3, 4]])
>>> np.roll(x2, 1, axis=1)
array([[4, 0, 1, 2, 3],
[9, 5, 6, 7, 8]])
>>> np.roll(x2, -1, axis=1)
array([[1, 2, 3, 4, 0],
[6, 7, 8, 9, 5]])
"""
a = asanyarray(a)
if axis is None:
return roll(a.ravel(), shift, 0).reshape(a.shape)
else:
axis = normalize_axis_tuple(axis, a.ndim, allow_duplicate=True)
broadcasted = broadcast(shift, axis)
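        # Broadcasting `shift` against `axis` pairs every shift value with an axis,
        # so an integer shift can apply to several axes and repeated axes
        # accumulate their shifts.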
if broadcasted.ndim > 1:
raise ValueError(
"'shift' and 'axis' should be scalars or 1D sequences")
shifts = {ax: 0 for ax in range(a.ndim)}
for sh, ax in broadcasted:
shifts[ax] += sh
rolls = [((slice(None), slice(None)),)] * a.ndim
for ax, offset in shifts.items():
offset %= a.shape[ax] or 1 # If `a` is empty, nothing matters.
if offset:
# (original, result), (original, result)
rolls[ax] = ((slice(None, -offset), slice(offset, None)),
(slice(-offset, None), slice(None, offset)))
result = empty_like(a)
for indices in itertools.product(*rolls):
arr_index, res_index = zip(*indices)
result[res_index] = a[arr_index]
return result
def _rollaxis_dispatcher(a, axis, start=None):
return (a,)
@array_function_dispatch(_rollaxis_dispatcher)
def rollaxis(a, axis, start=0):
"""
Roll the specified axis backwards, until it lies in a given position.
This function continues to be supported for backward compatibility, but you
should prefer `moveaxis`. The `moveaxis` function was added in NumPy
1.11.
Parameters
----------
a : ndarray
Input array.
axis : int
The axis to be rolled. The positions of the other axes do not
change relative to one another.
start : int, optional
When ``start <= axis``, the axis is rolled back until it lies in
this position. When ``start > axis``, the axis is rolled until it
lies before this position. The default, 0, results in a "complete"
roll. The following table describes how negative values of ``start``
are interpreted:
.. table::
:align: left
+-------------------+----------------------+
| ``start`` | Normalized ``start`` |
+===================+======================+
| ``-(arr.ndim+1)`` | raise ``AxisError`` |
+-------------------+----------------------+
| ``-arr.ndim`` | 0 |
+-------------------+----------------------+
| |vdots| | |vdots| |
+-------------------+----------------------+
| ``-1`` | ``arr.ndim-1`` |
+-------------------+----------------------+
| ``0`` | ``0`` |
+-------------------+----------------------+
| |vdots| | |vdots| |
+-------------------+----------------------+
| ``arr.ndim`` | ``arr.ndim`` |
+-------------------+----------------------+
| ``arr.ndim + 1`` | raise ``AxisError`` |
+-------------------+----------------------+
.. |vdots| unicode:: U+22EE .. Vertical Ellipsis
Returns
-------
res : ndarray
For NumPy >= 1.10.0 a view of `a` is always returned. For earlier
NumPy versions a view of `a` is returned only if the order of the
axes is changed, otherwise the input array is returned.
See Also
--------
moveaxis : Move array axes to new positions.
roll : Roll the elements of an array by a number of positions along a
given axis.
Examples
--------
>>> a = np.ones((3,4,5,6))
>>> np.rollaxis(a, 3, 1).shape
(3, 6, 4, 5)
>>> np.rollaxis(a, 2).shape
(5, 3, 4, 6)
>>> np.rollaxis(a, 1, 4).shape
(3, 5, 6, 4)
"""
n = a.ndim
axis = normalize_axis_index(axis, n)
if start < 0:
start += n
msg = "'%s' arg requires %d <= %s < %d, but %d was passed in"
if not (0 <= start < n + 1):
raise AxisError(msg % ('start', -n, 'start', n + 1, start))
if axis < start:
# it's been removed
start -= 1
if axis == start:
return a[...]
axes = list(range(0, n))
axes.remove(axis)
axes.insert(start, axis)
return a.transpose(axes)
def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False):
"""
Normalizes an axis argument into a tuple of non-negative integer axes.
This handles shorthands such as ``1`` and converts them to ``(1,)``,
as well as performing the handling of negative indices covered by
`normalize_axis_index`.
By default, this forbids axes from being specified multiple times.
Used internally by multi-axis-checking logic.
.. versionadded:: 1.13.0
Parameters
----------
axis : int, iterable of int
The un-normalized index or indices of the axis.
ndim : int
The number of dimensions of the array that `axis` should be normalized
against.
argname : str, optional
A prefix to put before the error message, typically the name of the
argument.
allow_duplicate : bool, optional
If False, the default, disallow an axis from being specified twice.
Returns
-------
normalized_axes : tuple of int
The normalized axis index, such that `0 <= normalized_axis < ndim`
Raises
------
AxisError
If any axis provided is out of range
ValueError
If an axis is repeated
See also
--------
normalize_axis_index : normalizing a single scalar axis
"""
# Optimization to speed-up the most common cases.
if type(axis) not in (tuple, list):
try:
axis = [operator.index(axis)]
except TypeError:
pass
# Going via an iterator directly is slower than via list comprehension.
axis = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis])
if not allow_duplicate and len(set(axis)) != len(axis):
if argname:
raise ValueError('repeated axis in `{}` argument'.format(argname))
else:
raise ValueError('repeated axis')
return axis
def _moveaxis_dispatcher(a, source, destination):
return (a,)
@array_function_dispatch(_moveaxis_dispatcher)
def moveaxis(a, source, destination):
"""
Move axes of an array to new positions.
Other axes remain in their original order.
.. versionadded:: 1.11.0
Parameters
----------
a : np.ndarray
The array whose axes should be reordered.
source : int or sequence of int
Original positions of the axes to move. These must be unique.
destination : int or sequence of int
Destination positions for each of the original axes. These must also be
unique.
Returns
-------
result : np.ndarray
Array with moved axes. This array is a view of the input array.
See Also
--------
transpose : Permute the dimensions of an array.
swapaxes : Interchange two axes of an array.
Examples
--------
>>> x = np.zeros((3, 4, 5))
>>> np.moveaxis(x, 0, -1).shape
(4, 5, 3)
>>> np.moveaxis(x, -1, 0).shape
(5, 3, 4)
These all achieve the same result:
>>> np.transpose(x).shape
(5, 4, 3)
>>> np.swapaxes(x, 0, -1).shape
(5, 4, 3)
>>> np.moveaxis(x, [0, 1], [-1, -2]).shape
(5, 4, 3)
>>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape
(5, 4, 3)
"""
try:
# allow duck-array types if they define transpose
transpose = a.transpose
except AttributeError:
a = asarray(a)
transpose = a.transpose
source = normalize_axis_tuple(source, a.ndim, 'source')
destination = normalize_axis_tuple(destination, a.ndim, 'destination')
if len(source) != len(destination):
raise ValueError('`source` and `destination` arguments must have '
'the same number of elements')
order = [n for n in range(a.ndim) if n not in source]
for dest, src in sorted(zip(destination, source)):
order.insert(dest, src)
result = transpose(order)
return result
# fix hack in scipy which imports this function
def _move_axis_to_0(a, axis):
return moveaxis(a, axis, 0)
def _cross_dispatcher(a, b, axisa=None, axisb=None, axisc=None, axis=None):
return (a, b)
@array_function_dispatch(_cross_dispatcher)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
"""
Return the cross product of two (arrays of) vectors.
The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular
to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors
are defined by the last axis of `a` and `b` by default, and these axes
can have dimensions 2 or 3. Where the dimension of either `a` or `b` is
2, the third component of the input vector is assumed to be zero and the
cross product calculated accordingly. In cases where both input vectors
have dimension 2, the z-component of the cross product is returned.
Parameters
----------
a : array_like
Components of the first vector(s).
b : array_like
Components of the second vector(s).
axisa : int, optional
Axis of `a` that defines the vector(s). By default, the last axis.
axisb : int, optional
Axis of `b` that defines the vector(s). By default, the last axis.
axisc : int, optional
Axis of `c` containing the cross product vector(s). Ignored if
both input vectors have dimension 2, as the return is scalar.
By default, the last axis.
axis : int, optional
If defined, the axis of `a`, `b` and `c` that defines the vector(s)
and cross product(s). Overrides `axisa`, `axisb` and `axisc`.
Returns
-------
c : ndarray
Vector cross product(s).
Raises
------
ValueError
When the dimension of the vector(s) in `a` and/or `b` does not
equal 2 or 3.
See Also
--------
inner : Inner product
outer : Outer product.
ix_ : Construct index arrays.
Notes
-----
.. versionadded:: 1.9.0
Supports full broadcasting of the inputs.
Examples
--------
Vector cross-product.
>>> x = [1, 2, 3]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([-3, 6, -3])
One vector with dimension 2.
>>> x = [1, 2]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([12, -6, -3])
Equivalently:
>>> x = [1, 2, 0]
>>> y = [4, 5, 6]
>>> np.cross(x, y)
array([12, -6, -3])
Both vectors with dimension 2.
>>> x = [1,2]
>>> y = [4,5]
>>> np.cross(x, y)
array(-3)
Multiple vector cross-products. Note that the direction of the cross
product vector is defined by the `right-hand rule`.
>>> x = np.array([[1,2,3], [4,5,6]])
>>> y = np.array([[4,5,6], [1,2,3]])
>>> np.cross(x, y)
array([[-3, 6, -3],
[ 3, -6, 3]])
The orientation of `c` can be changed using the `axisc` keyword.
>>> np.cross(x, y, axisc=0)
array([[-3, 3],
[ 6, -6],
[-3, 3]])
Change the vector definition of `x` and `y` using `axisa` and `axisb`.
>>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]])
>>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]])
>>> np.cross(x, y)
array([[ -6, 12, -6],
[ 0, 0, 0],
[ 6, -12, 6]])
>>> np.cross(x, y, axisa=0, axisb=0)
array([[-24, 48, -24],
[-30, 60, -30],
[-36, 72, -36]])
"""
if axis is not None:
axisa, axisb, axisc = (axis,) * 3
a = asarray(a)
b = asarray(b)
# Check axisa and axisb are within bounds
axisa = normalize_axis_index(axisa, a.ndim, msg_prefix='axisa')
axisb = normalize_axis_index(axisb, b.ndim, msg_prefix='axisb')
# Move working axis to the end of the shape
a = moveaxis(a, axisa, -1)
b = moveaxis(b, axisb, -1)
msg = ("incompatible dimensions for cross product\n"
"(dimension must be 2 or 3)")
if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):
raise ValueError(msg)
# Create the output array
shape = broadcast(a[..., 0], b[..., 0]).shape
if a.shape[-1] == 3 or b.shape[-1] == 3:
shape += (3,)
# Check axisc is within bounds
axisc = normalize_axis_index(axisc, len(shape), msg_prefix='axisc')
dtype = promote_types(a.dtype, b.dtype)
cp = empty(shape, dtype)
# create local aliases for readability
a0 = a[..., 0]
a1 = a[..., 1]
if a.shape[-1] == 3:
a2 = a[..., 2]
b0 = b[..., 0]
b1 = b[..., 1]
if b.shape[-1] == 3:
b2 = b[..., 2]
if cp.ndim != 0 and cp.shape[-1] == 3:
cp0 = cp[..., 0]
cp1 = cp[..., 1]
cp2 = cp[..., 2]
if a.shape[-1] == 2:
if b.shape[-1] == 2:
# a0 * b1 - a1 * b0
multiply(a0, b1, out=cp)
cp -= a1 * b0
return cp
else:
assert b.shape[-1] == 3
# cp0 = a1 * b2 - 0 (a2 = 0)
# cp1 = 0 - a0 * b2 (a2 = 0)
# cp2 = a0 * b1 - a1 * b0
multiply(a1, b2, out=cp0)
multiply(a0, b2, out=cp1)
negative(cp1, out=cp1)
multiply(a0, b1, out=cp2)
cp2 -= a1 * b0
else:
assert a.shape[-1] == 3
if b.shape[-1] == 3:
# cp0 = a1 * b2 - a2 * b1
# cp1 = a2 * b0 - a0 * b2
# cp2 = a0 * b1 - a1 * b0
multiply(a1, b2, out=cp0)
tmp = array(a2 * b1)
cp0 -= tmp
multiply(a2, b0, out=cp1)
multiply(a0, b2, out=tmp)
cp1 -= tmp
multiply(a0, b1, out=cp2)
multiply(a1, b0, out=tmp)
cp2 -= tmp
else:
assert b.shape[-1] == 2
# cp0 = 0 - a2 * b1 (b2 = 0)
# cp1 = a2 * b0 - 0 (b2 = 0)
# cp2 = a0 * b1 - a1 * b0
multiply(a2, b1, out=cp0)
negative(cp0, out=cp0)
multiply(a2, b0, out=cp1)
multiply(a0, b1, out=cp2)
cp2 -= a1 * b0
return moveaxis(cp, -1, axisc)
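# Editorial note (illustrative, not part of the original source): the docstring's
# claim that the result is perpendicular to both inputs can be checked directly::
#
#     >>> x, y = [1, 2, 3], [4, 5, 6]
#     >>> np.dot(np.cross(x, y), x)
#     0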
little_endian = (sys.byteorder == 'little')
@set_module('numpy')
def indices(dimensions, dtype=int, sparse=False):
"""
Return an array representing the indices of a grid.
Compute an array where the subarrays contain index values 0, 1, ...
varying only along the corresponding axis.
Parameters
----------
dimensions : sequence of ints
The shape of the grid.
dtype : dtype, optional
Data type of the result.
sparse : boolean, optional
Return a sparse representation of the grid instead of a dense
representation. Default is False.
.. versionadded:: 1.17
Returns
-------
grid : one ndarray or tuple of ndarrays
If sparse is False:
Returns one array of grid indices,
``grid.shape = (len(dimensions),) + tuple(dimensions)``.
If sparse is True:
Returns a tuple of arrays, with
``grid[i].shape = (1, ..., 1, dimensions[i], 1, ..., 1)`` with
dimensions[i] in the ith place
See Also
--------
mgrid, ogrid, meshgrid
Notes
-----
The output shape in the dense case is obtained by prepending the number
of dimensions in front of the tuple of dimensions, i.e. if `dimensions`
is a tuple ``(r0, ..., rN-1)`` of length ``N``, the output shape is
``(N, r0, ..., rN-1)``.
Each subarray ``grid[k]`` contains the N-D array of indices along the
``k-th`` axis. Explicitly::
grid[k, i0, i1, ..., iN-1] = ik
Examples
--------
>>> grid = np.indices((2, 3))
>>> grid.shape
(2, 2, 3)
>>> grid[0] # row indices
array([[0, 0, 0],
[1, 1, 1]])
>>> grid[1] # column indices
array([[0, 1, 2],
[0, 1, 2]])
The indices can be used as an index into an array.
>>> x = np.arange(20).reshape(5, 4)
>>> row, col = np.indices((2, 3))
>>> x[row, col]
array([[0, 1, 2],
[4, 5, 6]])
Note that it would be more straightforward in the above example to
extract the required elements directly with ``x[:2, :3]``.
If sparse is set to true, the grid will be returned in a sparse
representation.
>>> i, j = np.indices((2, 3), sparse=True)
>>> i.shape
(2, 1)
>>> j.shape
(1, 3)
>>> i # row indices
array([[0],
[1]])
>>> j # column indices
array([[0, 1, 2]])
"""
dimensions = tuple(dimensions)
N = len(dimensions)
shape = (1,)*N
if sparse:
res = tuple()
else:
res = empty((N,)+dimensions, dtype=dtype)
for i, dim in enumerate(dimensions):
idx = arange(dim, dtype=dtype).reshape(
shape[:i] + (dim,) + shape[i+1:]
)
if sparse:
res = res + (idx,)
else:
res[i] = idx
return res
def _fromfunction_dispatcher(function, shape, *, dtype=None, like=None, **kwargs):
return (like,)
@set_array_function_like_doc
@set_module('numpy')
def fromfunction(function, shape, *, dtype=float, like=None, **kwargs):
"""
Construct an array by executing a function over each coordinate.
The resulting array therefore has a value ``fn(x, y, z)`` at
coordinate ``(x, y, z)``.
Parameters
----------
function : callable
The function is called with N parameters, where N is the rank of
`shape`. Each parameter represents the coordinates of the array
varying along a specific axis. For example, if `shape`
were ``(2, 2)``, then the parameters would be
``array([[0, 0], [1, 1]])`` and ``array([[0, 1], [0, 1]])``
shape : (N,) tuple of ints
Shape of the output array, which also determines the shape of
the coordinate arrays passed to `function`.
dtype : data-type, optional
Data-type of the coordinate arrays passed to `function`.
By default, `dtype` is float.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
fromfunction : any
The result of the call to `function` is passed back directly.
Therefore the shape of `fromfunction` is completely determined by
`function`. If `function` returns a scalar value, the shape of
`fromfunction` would not match the `shape` parameter.
See Also
--------
indices, meshgrid
Notes
-----
Keywords other than `dtype` are passed to `function`.
Examples
--------
>>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int)
array([[ True, False, False],
[False, True, False],
[False, False, True]])
>>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int)
array([[0, 1, 2],
[1, 2, 3],
[2, 3, 4]])
"""
if like is not None:
return _fromfunction_with_like(function, shape, dtype=dtype, like=like, **kwargs)
args = indices(shape, dtype=dtype)
return function(*args, **kwargs)
_fromfunction_with_like = array_function_dispatch(
_fromfunction_dispatcher
)(fromfunction)
def _frombuffer(buf, dtype, shape, order):
return frombuffer(buf, dtype=dtype).reshape(shape, order=order)
@set_module('numpy')
def isscalar(element):
"""
Returns True if the type of `element` is a scalar type.
Parameters
----------
element : any
Input argument, can be of any type and shape.
Returns
-------
val : bool
True if `element` is a scalar type, False if it is not.
See Also
--------
ndim : Get the number of dimensions of an array
Notes
-----
If you need a stricter way to identify a *numerical* scalar, use
``isinstance(x, numbers.Number)``, as that returns ``False`` for most
non-numerical elements such as strings.
In most cases ``np.ndim(x) == 0`` should be used instead of this function,
as that will also return true for 0d arrays. This is how numpy overloads
functions in the style of the ``dx`` arguments to `gradient` and the ``bins``
argument to `histogram`. Some key differences:
+--------------------------------------+---------------+-------------------+
| x |``isscalar(x)``|``np.ndim(x) == 0``|
+======================================+===============+===================+
| PEP 3141 numeric objects (including | ``True`` | ``True`` |
| builtins) | | |
+--------------------------------------+---------------+-------------------+
| builtin string and buffer objects | ``True`` | ``True`` |
+--------------------------------------+---------------+-------------------+
| other builtin objects, like | ``False`` | ``True`` |
| `pathlib.Path`, `Exception`, | | |
| the result of `re.compile` | | |
+--------------------------------------+---------------+-------------------+
| third-party objects like | ``False`` | ``True`` |
| `matplotlib.figure.Figure` | | |
+--------------------------------------+---------------+-------------------+
| zero-dimensional numpy arrays | ``False`` | ``True`` |
+--------------------------------------+---------------+-------------------+
| other numpy arrays | ``False`` | ``False`` |
+--------------------------------------+---------------+-------------------+
| `list`, `tuple`, and other sequence | ``False`` | ``False`` |
| objects | | |
+--------------------------------------+---------------+-------------------+
Examples
--------
>>> np.isscalar(3.1)
True
>>> np.isscalar(np.array(3.1))
False
>>> np.isscalar([3.1])
False
>>> np.isscalar(False)
True
>>> np.isscalar('numpy')
True
NumPy supports PEP 3141 numbers:
>>> from fractions import Fraction
>>> np.isscalar(Fraction(5, 17))
True
>>> from numbers import Number
>>> np.isscalar(Number())
True
"""
return (isinstance(element, generic)
or type(element) in ScalarType
or isinstance(element, numbers.Number))
@set_module('numpy')
def binary_repr(num, width=None):
"""
Return the binary representation of the input number as a string.
For negative numbers, if width is not given, a minus sign is added to the
front. If width is given, the two's complement of the number is
returned, with respect to that width.
In a two's-complement system negative numbers are represented by the two's
complement of the absolute value. This is the most common method of
representing signed integers on computers [1]_. An N-bit two's-complement
system can represent every integer in the range
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
Parameters
----------
num : int
Only an integer decimal number can be used.
width : int, optional
The length of the returned string if `num` is positive, or the length
of the two's complement if `num` is negative, provided that `width` is
at least a sufficient number of bits for `num` to be represented in the
designated form.
If the `width` value is insufficient, it will be ignored, and `num` will
be returned in binary (`num` > 0) or two's complement (`num` < 0) form
with its width equal to the minimum number of bits needed to represent
the number in the designated form. This behavior is deprecated and will
later raise an error.
.. deprecated:: 1.12.0
Returns
-------
bin : str
Binary representation of `num` or two's complement of `num`.
See Also
--------
base_repr: Return a string representation of a number in the given base
system.
bin: Python's built-in binary representation generator of an integer.
Notes
-----
`binary_repr` is equivalent to using `base_repr` with base 2, but about 25x
faster.
References
----------
.. [1] Wikipedia, "Two's complement",
https://en.wikipedia.org/wiki/Two's_complement
Examples
--------
>>> np.binary_repr(3)
'11'
>>> np.binary_repr(-3)
'-11'
>>> np.binary_repr(3, width=4)
'0011'
The two's complement is returned when the input number is negative and
width is specified:
>>> np.binary_repr(-3, width=3)
'101'
>>> np.binary_repr(-3, width=5)
'11101'
"""
def warn_if_insufficient(width, binwidth):
if width is not None and width < binwidth:
warnings.warn(
"Insufficient bit width provided. This behavior "
"will raise an error in the future.", DeprecationWarning,
stacklevel=3)
# Ensure that num is a Python integer to avoid overflow or unwanted
# casts to floating point.
num = operator.index(num)
if num == 0:
return '0' * (width or 1)
elif num > 0:
binary = bin(num)[2:]
binwidth = len(binary)
outwidth = (binwidth if width is None
else max(binwidth, width))
warn_if_insufficient(width, binwidth)
return binary.zfill(outwidth)
else:
if width is None:
return '-' + bin(-num)[2:]
else:
poswidth = len(bin(-num)[2:])
# See gh-8679: remove extra digit
# for numbers at boundaries.
if 2**(poswidth - 1) == -num:
poswidth -= 1
twocomp = 2**(poswidth + 1) + num
binary = bin(twocomp)[2:]
binwidth = len(binary)
outwidth = max(binwidth, width)
warn_if_insufficient(width, binwidth)
return '1' * (outwidth - binwidth) + binary
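# Editorial note (illustrative, not part of the original source): for positive
# inputs the equivalence with ``base_repr`` mentioned in the Notes can be
# checked directly::
#
#     >>> np.binary_repr(10) == np.base_repr(10, base=2)
#     True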
@set_module('numpy')
def base_repr(number, base=2, padding=0):
"""
Return a string representation of a number in the given base system.
Parameters
----------
number : int
The value to convert. Positive and negative values are handled.
base : int, optional
Convert `number` to the `base` number system. The valid range is 2-36,
the default value is 2.
padding : int, optional
Number of zeros padded on the left. Default is 0 (no padding).
Returns
-------
out : str
String representation of `number` in `base` system.
See Also
--------
binary_repr : Faster version of `base_repr` for base 2.
Examples
--------
>>> np.base_repr(5)
'101'
>>> np.base_repr(6, 5)
'11'
>>> np.base_repr(7, base=5, padding=3)
'00012'
>>> np.base_repr(10, base=16)
'A'
>>> np.base_repr(32, base=16)
'20'
"""
digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
if base > len(digits):
raise ValueError("Bases greater than 36 not handled in base_repr.")
elif base < 2:
raise ValueError("Bases less than 2 not handled in base_repr.")
num = abs(number)
res = []
while num:
res.append(digits[num % base])
num //= base
if padding:
res.append('0' * padding)
if number < 0:
res.append('-')
return ''.join(reversed(res or '0'))
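# Editorial note (illustrative, not part of the original source): the returned
# string round-trips through Python's ``int`` constructor, e.g.::
#
#     >>> int(np.base_repr(255, base=16), 16)
#     255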
# These are all essentially abbreviations
# These might wind up in a special abbreviations module
def _maketup(descr, val):
dt = dtype(descr)
# Place val in all scalar tuples:
fields = dt.fields
if fields is None:
return val
else:
res = [_maketup(fields[name][0], val) for name in dt.names]
return tuple(res)
def _identity_dispatcher(n, dtype=None, *, like=None):
return (like,)
@set_array_function_like_doc
@set_module('numpy')
def identity(n, dtype=None, *, like=None):
"""
Return the identity array.
The identity array is a square array with ones on
the main diagonal.
Parameters
----------
n : int
Number of rows (and columns) in `n` x `n` output.
dtype : data-type, optional
Data-type of the output. Defaults to ``float``.
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
`n` x `n` array with its main diagonal set to one,
and all other elements 0.
Examples
--------
>>> np.identity(3)
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
"""
if like is not None:
return _identity_with_like(n, dtype=dtype, like=like)
from numpy import eye
return eye(n, dtype=dtype, like=like)
_identity_with_like = array_function_dispatch(
_identity_dispatcher
)(identity)
def _allclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None):
return (a, b)
@array_function_dispatch(_allclose_dispatcher)
def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns True if two arrays are element-wise equal within a tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
NaNs are treated as equal if they are in the same place and if
``equal_nan=True``. Infs are treated as equal if they are in the same
place and of the same sign in both arrays.
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in `a` will be
considered equal to NaN's in `b` in the output array.
.. versionadded:: 1.10.0
Returns
-------
allclose : bool
Returns True if the two arrays are equal within the given
tolerance; False otherwise.
See Also
--------
isclose, all, any, equal
Notes
-----
If the following equation is element-wise True, then allclose returns
True.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
The above equation is not symmetric in `a` and `b`, so that
``allclose(a, b)`` might be different from ``allclose(b, a)`` in
some rare cases.
The comparison of `a` and `b` uses standard broadcasting, which
means that `a` and `b` need not have the same shape in order for
``allclose(a, b)`` to evaluate to True. The same is true for
`equal` but not `array_equal`.
`allclose` is not defined for non-numeric data types.
Examples
--------
>>> np.allclose([1e10,1e-7], [1.00001e10,1e-8])
False
>>> np.allclose([1e10,1e-8], [1.00001e10,1e-9])
True
>>> np.allclose([1e10,1e-8], [1.0001e10,1e-9])
False
>>> np.allclose([1.0, np.nan], [1.0, np.nan])
False
>>> np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
True
"""
res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan))
return bool(res)
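# Editorial note (illustrative, not part of the original source): because the
# tolerance test uses ``b`` as the reference value, ``allclose`` is not
# symmetric in its arguments::
#
#     >>> np.allclose(1.0, 1.11, rtol=0.1, atol=0.0)
#     True
#     >>> np.allclose(1.11, 1.0, rtol=0.1, atol=0.0)
#     False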
def _isclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None):
return (a, b)
@array_function_dispatch(_isclose_dispatcher)
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within a
tolerance.
The tolerance values are positive, typically very small numbers. The
relative difference (`rtol` * abs(`b`)) and the absolute difference
`atol` are added together to compare against the absolute difference
between `a` and `b`.
.. warning:: The default `atol` is not appropriate for comparing numbers
that are much smaller than one (see Notes).
Parameters
----------
a, b : array_like
Input arrays to compare.
rtol : float
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in `a` will be
considered equal to NaN's in `b` in the output array.
Returns
-------
y : array_like
Returns a boolean array of where `a` and `b` are equal within the
given tolerance. If both `a` and `b` are scalars, returns a single
boolean value.
See Also
--------
allclose
math.isclose
Notes
-----
.. versionadded:: 1.7.0
For finite values, isclose uses the following equation to test whether
two floating point values are equivalent.
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
Unlike the built-in `math.isclose`, the above equation is not symmetric
in `a` and `b` -- it assumes `b` is the reference value -- so that
`isclose(a, b)` might be different from `isclose(b, a)`. Furthermore,
the default value of atol is not zero, and is used to determine what
small values should be considered close to zero. The default value is
appropriate for expected values of order unity: if the expected values
are significantly smaller than one, it can result in false positives.
`atol` should be carefully selected for the use case at hand. A zero value
for `atol` will result in `False` if either `a` or `b` is zero.
`isclose` is not defined for non-numeric data types.
Examples
--------
>>> np.isclose([1e10,1e-7], [1.00001e10,1e-8])
array([ True, False])
>>> np.isclose([1e10,1e-8], [1.00001e10,1e-9])
array([ True, True])
>>> np.isclose([1e10,1e-8], [1.0001e10,1e-9])
array([False, True])
>>> np.isclose([1.0, np.nan], [1.0, np.nan])
array([ True, False])
>>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
array([ True, True])
>>> np.isclose([1e-8, 1e-7], [0.0, 0.0])
array([ True, False])
>>> np.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0)
array([False, False])
>>> np.isclose([1e-10, 1e-10], [1e-20, 0.0])
array([ True, True])
>>> np.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], atol=0.0)
array([False, True])
"""
def within_tol(x, y, atol, rtol):
with errstate(invalid='ignore'):
return less_equal(abs(x-y), atol + rtol * abs(y))
x = asanyarray(a)
y = asanyarray(b)
# Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
# This will cause casting of x later. Also, make sure to allow subclasses
# (e.g., for numpy.ma).
# NOTE: We explicitly allow timedelta, which used to work. This could
# possibly be deprecated. See also gh-18286.
# timedelta works if `atol` is an integer or also a timedelta.
# That said, the default tolerances are unlikely to be useful for timedeltas.
if y.dtype.kind != "m":
dt = multiarray.result_type(y, 1.)
y = asanyarray(y, dtype=dt)
xfin = isfinite(x)
yfin = isfinite(y)
if all(xfin) and all(yfin):
return within_tol(x, y, atol, rtol)
else:
finite = xfin & yfin
cond = zeros_like(finite, subok=True)
# Because we're using boolean indexing, x & y must be the same shape.
# Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
# lib.stride_tricks, though, so we can't import it here.
x = x * ones_like(cond)
y = y * ones_like(cond)
# Avoid subtraction with infinite/nan values...
cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
# Check for equality of infinite values...
cond[~finite] = (x[~finite] == y[~finite])
if equal_nan:
# Make NaN == NaN
both_nan = isnan(x) & isnan(y)
# Needed to treat masked arrays correctly; assigning True directly would not work.
cond[both_nan] = both_nan[both_nan]
return cond[()] # Flatten 0d arrays to scalars
def _array_equal_dispatcher(a1, a2, equal_nan=None):
return (a1, a2)
@array_function_dispatch(_array_equal_dispatcher)
def array_equal(a1, a2, equal_nan=False):
"""
True if two arrays have the same shape and elements, False otherwise.
Parameters
----------
a1, a2 : array_like
Input arrays.
equal_nan : bool
Whether to compare NaN's as equal. If the dtype of a1 and a2 is
complex, values will be considered equal if either the real or the
imaginary component of a given value is ``nan``.
.. versionadded:: 1.19.0
Returns
-------
b : bool
Returns True if the arrays are equal.
See Also
--------
allclose: Returns True if two arrays are element-wise equal within a
tolerance.
array_equiv: Returns True if input arrays are shape consistent and all
elements equal.
Examples
--------
>>> np.array_equal([1, 2], [1, 2])
True
>>> np.array_equal(np.array([1, 2]), np.array([1, 2]))
True
>>> np.array_equal([1, 2], [1, 2, 3])
False
>>> np.array_equal([1, 2], [1, 4])
False
>>> a = np.array([1, np.nan])
>>> np.array_equal(a, a)
False
>>> np.array_equal(a, a, equal_nan=True)
True
When ``equal_nan`` is True, complex values with nan components are
considered equal if either the real *or* the imaginary components are nan.
>>> a = np.array([1 + 1j])
>>> b = a.copy()
>>> a.real = np.nan
>>> b.imag = np.nan
>>> np.array_equal(a, b, equal_nan=True)
True
"""
try:
a1, a2 = asarray(a1), asarray(a2)
except Exception:
return False
if a1.shape != a2.shape:
return False
if not equal_nan:
return bool(asarray(a1 == a2).all())
# Handling NaN values if equal_nan is True
a1nan, a2nan = isnan(a1), isnan(a2)
# NaN's occur at different locations
if not (a1nan == a2nan).all():
return False
# Shapes of a1, a2 and masks are guaranteed to be consistent by this point
return bool(asarray(a1[~a1nan] == a2[~a1nan]).all())
def _array_equiv_dispatcher(a1, a2):
return (a1, a2)
@array_function_dispatch(_array_equiv_dispatcher)
def array_equiv(a1, a2):
"""
Returns True if input arrays are shape consistent and all elements equal.
Shape consistent means they are either the same shape, or one input array
can be broadcasted to create the same shape as the other one.
Parameters
----------
a1, a2 : array_like
Input arrays.
Returns
-------
out : bool
True if equivalent, False otherwise.
Examples
--------
>>> np.array_equiv([1, 2], [1, 2])
True
>>> np.array_equiv([1, 2], [1, 3])
False
Showing the shape equivalence:
>>> np.array_equiv([1, 2], [[1, 2], [1, 2]])
True
>>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]])
False
>>> np.array_equiv([1, 2], [[1, 2], [1, 3]])
False
"""
try:
a1, a2 = asarray(a1), asarray(a2)
except Exception:
return False
try:
multiarray.broadcast(a1, a2)
except Exception:
return False
return bool(asarray(a1 == a2).all())
Inf = inf = infty = Infinity = PINF
nan = NaN = NAN
False_ = bool_(False)
True_ = bool_(True)
def extend_all(module):
existing = set(__all__)
mall = getattr(module, '__all__')
for a in mall:
if a not in existing:
__all__.append(a)
from .umath import *
from .numerictypes import *
from . import fromnumeric
from .fromnumeric import *
from . import arrayprint
from .arrayprint import *
from . import _asarray
from ._asarray import *
from . import _ufunc_config
from ._ufunc_config import *
extend_all(fromnumeric)
extend_all(umath)
extend_all(numerictypes)
extend_all(arrayprint)
extend_all(_asarray)
extend_all(_ufunc_config)
| bsd-3-clause |
Balandat/cont_no_regret | old_code/testing.py | 1 | 3136 | '''
Created on Feb 24, 2015
@author: balandat
'''
import numpy as np
from scipy.integrate import quad
import matplotlib.pyplot as plt
from ContNoRegret.Domains import S
from ContNoRegret.Distributions import Uniform
from ContNoRegret.utils import create_random_Sigmas
from ContNoRegret.LossFunctions import GaussianLossFunction
from scipy.stats import expon
from scipy.interpolate import SmoothBivariateSpline, LSQBivariateSpline
# def compute_constants(gamma):
# c = (gamma-1)**(-1)
# a2 = gamma*(1+gamma)/2
# a1 = gamma - 2*c*a2
# a0 = 1 - c*a1 - c**2*a2
# return c, np.array([a0, a1, a2])
#
# def phi(u, gamma):
# c,a = compute_constants(gamma)
# return ( (u<c)*(gamma/(gamma-1)-np.minimum(u,c))**(-gamma) +
# (u>=c)*(a[0]+a[1]*np.maximum(u,c)+a[2]*np.maximum(u,c)**2) )
#
# def phi_prime(u, gamma):
# c,a = compute_constants(gamma)
# return (u<c)*gamma*(gamma/(gamma-1)-np.minimum(u,c))**(-(1+gamma)) + (u>=c)*(a[1]+2*a[2]*np.maximum(u,c))
#
# def phi_double_prime(u, gamma):
# c,a = compute_constants(gamma)
# return (u<c)*gamma*(1+gamma)*(gamma/(gamma-1)-np.minimum(u,c))**(-(2+gamma)) + (u>=c)*2*a[2]
#
# def phi_inv(u, gamma):
# c,a = compute_constants(gamma)
# b = phi(c, gamma)
# return ( (u<b)*(gamma/(gamma-1)-np.minimum(u,b)**(-1/gamma)) +
# (u>=b)*(-a[1]/2/a[2]+np.sqrt(a[1]**2/4/a[2]**2 - (a[0]-np.maximum(u,b))/a[2])) )
#
# def phi_inv_prime(u, gamma):
# return 1/phi_prime(phi_inv(u, gamma))
#
#
# # Plot some functions
# gammas = [1.25, 1.5, 1.75, 2, 3]
# u = np.linspace(-1.5,5,10000)
# v = np.linspace(0.001,10,10000)
# f,axs = plt.subplots(3,1)
# axs[0].plot(u, np.exp(u-1))
# axs[1].plot(u, np.exp(u-1))
# axs[2].plot(u, np.exp(u-1))
# for gamma in gammas:
# axs[0].plot(u, phi(u,gamma))
# axs[1].plot(u, phi_prime(u,gamma))
# axs[2].plot(u, phi_double_prime(u,gamma))
# plt.show()
# for gamma in gammas:
# # gamma = 1.5
# ctilde = gamma/(gamma-1)
# a2 = 0.5*gamma*(1+gamma)/((ctilde-1)**(2+gamma))
# a1 = gamma/((ctilde-1)**(1+gamma)) - 2*a2
# a0 = 1/((ctilde-1)**gamma) - a1 - a2
#
# def phi(u):
# return (u<1)*(ctilde-np.minimum(u,1))**(-gamma) + (u>=1)*(a0+a1*np.maximum(u,1)+a2*np.maximum(u,1)**2)
#
# def phiprime(u):
# return (u<1)*gamma*(ctilde-np.minimum(u,1))**(-(1+gamma)) + (u>=1)*(a1+2*a2*np.maximum(u,1))
#
# def phiinv(u):
# return (u<1)*(ctilde-np.minimum(u,1)**(-1/gamma)) + (u>=1)*(-a1/2/a2+np.sqrt(a1**2/4/a2**2 - (a0-np.maximum(u,1))/a2))
#
# def phiinvprime(u):
# return 1/phiprime(phiinv(u))
# # return (u<1)/gamma*u**(-1+1/gamma) + (u>=1)*(a1**2-4*a2*(a0-np.maximum(u,1)))**(-1/2)
#
#
# # fig2, (ax2, ax3) = plt.subplots(2, 1)
# # fig3, ax4 = plt.subplots(1)
#
# ax1.plot(u, phi(u))#, u, np.exp(u-1))
# # v = np.linspace(0.001, 5, 10000)
# # ax2.plot(v, phiinv(v), v, 1+np.log(v))
# # ax3.plot(v, phiinvprime(v), v, 1/v)
# # ax4.plot(v, phiinvprime(v)-1/(3*v))
# # print(np.min(phiinvprime(v)-1/(3+v))
# plt.show()
| mit |
mjudsp/Tsallis | examples/preprocessing/plot_robust_scaling.py | 85 | 2698 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Robust Scaling on Toy Data
=========================================================
Making sure that each feature has approximately the same scale can be a
crucial preprocessing step. However, when data contains outliers,
:class:`StandardScaler <sklearn.preprocessing.StandardScaler>` can often
be misled. In such cases, it is better to use a scaler that is robust
against outliers.
Here, we demonstrate this on a toy dataset, where one single datapoint
is a large outlier.
"""
from __future__ import print_function
print(__doc__)
# Code source: Thomas Unterthiner
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn.preprocessing import StandardScaler, RobustScaler
# Create training and test data
np.random.seed(42)
n_datapoints = 100
Cov = [[0.9, 0.0], [0.0, 20.0]]
mu1 = [100.0, -3.0]
mu2 = [101.0, -3.0]
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_train = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_train = np.vstack([X1, X2])
X1 = np.random.multivariate_normal(mean=mu1, cov=Cov, size=n_datapoints)
X2 = np.random.multivariate_normal(mean=mu2, cov=Cov, size=n_datapoints)
Y_test = np.hstack([[-1]*n_datapoints, [1]*n_datapoints])
X_test = np.vstack([X1, X2])
X_train[0, 0] = -1000 # a fairly large outlier
# Scale data
standard_scaler = StandardScaler()
Xtr_s = standard_scaler.fit_transform(X_train)
Xte_s = standard_scaler.transform(X_test)
robust_scaler = RobustScaler()
Xtr_r = robust_scaler.fit_transform(X_train)
Xte_r = robust_scaler.transform(X_test)
# Plot data
fig, ax = plt.subplots(1, 3, figsize=(12, 4))
ax[0].scatter(X_train[:, 0], X_train[:, 1],
color=np.where(Y_train > 0, 'r', 'b'))
ax[1].scatter(Xtr_s[:, 0], Xtr_s[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[2].scatter(Xtr_r[:, 0], Xtr_r[:, 1], color=np.where(Y_train > 0, 'r', 'b'))
ax[0].set_title("Unscaled data")
ax[1].set_title("After standard scaling (zoomed in)")
ax[2].set_title("After robust scaling (zoomed in)")
# for the scaled data, we zoom in to the data center (outlier can't be seen!)
for a in ax[1:]:
a.set_xlim(-3, 3)
a.set_ylim(-3, 3)
plt.tight_layout()
plt.show()
# Classify using k-NN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(Xtr_s, Y_train)
acc_s = knn.score(Xte_s, Y_test)
print("Testset accuracy using standard scaler: %.3f" % acc_s)
knn.fit(Xtr_r, Y_train)
acc_r = knn.score(Xte_r, Y_test)
print("Testset accuracy using robust scaler: %.3f" % acc_r)
| bsd-3-clause |
mne-tools/mne-tools.github.io | 0.19/_downloads/33b5e3cff5c172d72c79c6eec192b031/plot_label_from_stc.py | 20 | 4093 | """
=================================================
Generate a functional label from source estimates
=================================================
Threshold source estimates and produce a functional label. The label
is typically the region of interest that contains high values.
Here we compare the average time course in the anatomical label obtained
by FreeSurfer segmentation and the average time course from the
functional label. As expected the time course in the functional
label yields higher values.
"""
# Author: Luke Bloy <luke.bloy@gmail.com>
# Alex Gramfort <alexandre.gramfort@inria.fr>
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.minimum_norm import read_inverse_operator, apply_inverse
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
subjects_dir = data_path + '/subjects'
subject = 'sample'
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
# Compute a label/ROI based on the peak power between 80 and 120 ms.
# The label bankssts-lh is used for the comparison.
aparc_label_name = 'bankssts-lh'
tmin, tmax = 0.080, 0.120
# Load data
evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)
src = inverse_operator['src'] # get the source space
# Compute inverse solution
stc = apply_inverse(evoked, inverse_operator, lambda2, method,
pick_ori='normal')
# Make an STC in the time interval of interest and take the mean
stc_mean = stc.copy().crop(tmin, tmax).mean()
# use the stc_mean to generate a functional label
# region growing is halted at 60% of the peak value within the
# anatomical label / ROI specified by aparc_label_name
label = mne.read_labels_from_annot(subject, parc='aparc',
subjects_dir=subjects_dir,
regexp=aparc_label_name)[0]
stc_mean_label = stc_mean.in_label(label)
data = np.abs(stc_mean_label.data)
stc_mean_label.data[data < 0.6 * np.max(data)] = 0.
# 8.5% of original source space vertices were omitted during forward
# calculation, suppress the warning here with verbose='error'
func_labels, _ = mne.stc_to_label(stc_mean_label, src=src, smooth=True,
subjects_dir=subjects_dir, connected=True,
verbose='error')
# take first as func_labels are ordered based on maximum values in stc
func_label = func_labels[0]
# load the anatomical ROI for comparison
anat_label = mne.read_labels_from_annot(subject, parc='aparc',
subjects_dir=subjects_dir,
regexp=aparc_label_name)[0]
# extract the anatomical time course for each label
stc_anat_label = stc.in_label(anat_label)
pca_anat = stc.extract_label_time_course(anat_label, src, mode='pca_flip')[0]
stc_func_label = stc.in_label(func_label)
pca_func = stc.extract_label_time_course(func_label, src, mode='pca_flip')[0]
# flip the pca so that the max power between tmin and tmax is positive
pca_anat *= np.sign(pca_anat[np.argmax(np.abs(pca_anat))])
pca_func *= np.sign(pca_func[np.argmax(np.abs(pca_anat))])
###############################################################################
# plot the time courses....
plt.figure()
plt.plot(1e3 * stc_anat_label.times, pca_anat, 'k',
label='Anatomical %s' % aparc_label_name)
plt.plot(1e3 * stc_func_label.times, pca_func, 'b',
label='Functional %s' % aparc_label_name)
plt.legend()
plt.show()
###############################################################################
# plot brain in 3D with PySurfer if available
brain = stc_mean.plot(hemi='lh', subjects_dir=subjects_dir)
brain.show_view('lateral')
# show both labels
brain.add_label(anat_label, borders=True, color='k')
brain.add_label(func_label, borders=True, color='b')
| bsd-3-clause |
hfut721/RPN | tools/demo.py | 10 | 5028 | #!/usr/bin/env python
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
CLASSES = ('__background__',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
NETS = {'vgg16': ('VGG16',
'VGG16_faster_rcnn_final.caffemodel'),
'zf': ('ZF',
'ZF_faster_rcnn_final.caffemodel')}
def vis_detections(im, class_name, dets, thresh=0.5):
"""Draw detected bounding boxes."""
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
ax.set_title(('{} detections with '
'p({} | box) >= {:.1f}').format(class_name, class_name,
thresh),
fontsize=14)
plt.axis('off')
plt.tight_layout()
plt.draw()
def demo(net, image_name):
"""Detect object classes in an image using pre-computed object proposals."""
# Load the demo image
im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)
im = cv2.imread(im_file)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
scores, boxes = im_detect(net, im)
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0])
# Visualize detections for each class
CONF_THRESH = 0.8
NMS_THRESH = 0.3
for cls_ind, cls in enumerate(CLASSES[1:]):
cls_ind += 1 # because we skipped background
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
vis_detections(im, cls, dets, thresh=CONF_THRESH)
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Faster R-CNN demo')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16]',
choices=NETS.keys(), default='vgg16')
args = parser.parse_args()
return args
if __name__ == '__main__':
cfg.TEST.HAS_RPN = True # Use RPN for proposals
args = parse_args()
prototxt = os.path.join(cfg.MODELS_DIR, NETS[args.demo_net][0],
'faster_rcnn_alt_opt', 'faster_rcnn_test.pt')
caffemodel = os.path.join(cfg.DATA_DIR, 'faster_rcnn_models',
NETS[args.demo_net][1])
if not os.path.isfile(caffemodel):
raise IOError(('{:s} not found.\nDid you run ./data/script/'
'fetch_faster_rcnn_models.sh?').format(caffemodel))
if args.cpu_mode:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
cfg.GPU_ID = args.gpu_id
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
print '\n\nLoaded network {:s}'.format(caffemodel)
# Warmup on a dummy image
im = 128 * np.ones((300, 500, 3), dtype=np.uint8)
for i in xrange(2):
_, _= im_detect(net, im)
im_names = ['000456.jpg', '000542.jpg', '001150.jpg',
'001763.jpg', '004545.jpg']
for im_name in im_names:
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Demo for data/demo/{}'.format(im_name)
demo(net, im_name)
plt.show()
| mit |
ZENGXH/scikit-learn | examples/calibration/plot_compare_calibration.py | 241 | 5008 | """
========================================
Comparison of Calibration of Classifiers
========================================
Well calibrated classifiers are probabilistic classifiers for which the output
of the predict_proba method can be directly interpreted as a confidence level.
For instance a well calibrated (binary) classifier should classify the samples
such that among the samples to which it gave a predict_proba value close to
0.8, approx. 80% actually belong to the positive class.
LogisticRegression returns well calibrated predictions as it directly
optimizes log-loss. In contrast, the other methods return biased probabilities,
with different biases per method:
* GaussianNaiveBayes tends to push probabilities to 0 or 1 (note the counts in
the histograms). This is mainly because it makes the assumption that features
are conditionally independent given the class, which is not the case in this
dataset which contains 2 redundant features.
* RandomForestClassifier shows the opposite behavior: the histograms show
peaks at approx. 0.2 and 0.9 probability, while probabilities close to 0 or 1
are very rare. An explanation for this is given by Niculescu-Mizil and Caruana
[1]: "Methods such as bagging and random forests that average predictions from
a base set of models can have difficulty making predictions near 0 and 1
because variance in the underlying base models will bias predictions that
should be near zero or one away from these values. Because predictions are
restricted to the interval [0,1], errors caused by variance tend to be
one-sided near zero and one. For example, if a model should predict p = 0 for a
case, the only way bagging can achieve this is if all bagged trees predict
zero. If we add noise to the trees that bagging is averaging over, this noise
will cause some trees to predict values larger than 0 for this case, thus
moving the average prediction of the bagged ensemble away from 0. We observe
this effect most strongly with random forests because the base-level trees
trained with random forests have relatively high variance due to feature
subsetting." As a result, the calibration curve shows a characteristic sigmoid
shape, indicating that the classifier could trust its "intuition" more and
return probabilities closer to 0 or 1 typically.
* Support Vector Classification (SVC) shows an even more sigmoid curve than
the RandomForestClassifier, which is typical for maximum-margin methods
(compare Niculescu-Mizil and Caruana [1]), which focus on hard samples
that are close to the decision boundary (the support vectors).
.. topic:: References:
.. [1] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
print(__doc__)
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import numpy as np
np.random.seed(0)
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.calibration import calibration_curve
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=2)
train_samples = 100 # Samples used for training the models
X_train = X[:train_samples]
X_test = X[train_samples:]
y_train = y[:train_samples]
y_test = y[train_samples:]
# Create classifiers
lr = LogisticRegression()
gnb = GaussianNB()
svc = LinearSVC(C=1.0)
rfc = RandomForestClassifier(n_estimators=100)
###############################################################################
# Plot calibration plots
plt.figure(figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(gnb, 'Naive Bayes'),
(svc, 'Support Vector Classification'),
(rfc, 'Random Forest')]:
clf.fit(X_train, y_train)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s" % (name, ))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
plt.show()
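# ---------------------------------------------------------------------------
# Editorial addition (illustrative sketch, not part of the original example):
# the Brier score gives a single-number summary of calibration quality; here
# it is computed for the last classifier fitted in the loop above (the random
# forest).
from sklearn.metrics import brier_score_loss

print("Brier score of the last fitted classifier: %.3f"
      % brier_score_loss(y_test, prob_pos))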
| bsd-3-clause |
nismod/energy_demand | energy_demand/plotting/fig_p2_weather_val.py | 1 | 14554 | """Fig 2 figure
"""
import numpy as np
import matplotlib.pyplot as plt
#from scipy.stats import mstats
import pandas as pd
import geopandas as gpd
from scipy import stats
from shapely.geometry import Point
from collections import defaultdict
from matplotlib.colors import Normalize
from energy_demand.plotting import result_mapping
from energy_demand.technologies import tech_related
from energy_demand.plotting import basic_plot_functions
def run(
data_input,
regions,
simulation_yr_to_plot,
population,
fueltype_str,
path_shapefile,
fig_name
):
"""
"""
fueltype_int = tech_related.get_fueltype_int(fueltype_str)
# -----------------------------------------------------------
# Iterate overall weather_yrs and store data in dataframe
# (columns = timestep, rows: value of year)
# -----------------------------------------------------------
# List of selected data for every weather year and region (which is then converted to array)
weather_yrs_data = defaultdict(dict)
print("Weather yrs: " + str(list(data_input.keys())), flush=True)
for weather_yr, data_weather_yr in data_input.items():
# Weather year specific data for every region
regions_fuel = data_weather_yr[simulation_yr_to_plot][fueltype_int] # Select fueltype
for region_nr, region_name in enumerate(regions):
try:
weather_yrs_data[region_name].append(regions_fuel[region_nr])
except (KeyError, AttributeError):
weather_yrs_data[region_name] = [regions_fuel[region_nr]]
regional_statistics_columns = [
'name',
'mean_peak_h',
'diff_av_max',
'mean_peak_h_pp',
'diff_av_max_pp',
'std_dev_average_every_h',
'std_dev_peak_h_norm_pop']
df_stats = pd.DataFrame(columns=regional_statistics_columns)
for region_name, region_data in weather_yrs_data.items():
# Convert regional data to dataframe
region_data_array = np.array(region_data)
df = pd.DataFrame(
region_data_array,
columns=range(8760))
# Calculate regional statistics
mean = df.mean(axis=0)
std_dev = df.std(axis=0) #standard deviation across every hour
# Get maximum per colum
#max_every_h = df.max()
#colum_max_h = max_every_h.argmax() #get colum (respesctively hour) of maximum value
# Spread (standard deviation) of the hourly standard deviations
std_dev_average_every_h = np.std(list(std_dev))
max_entry = df.max(axis=0) #maximum entry for every hour
min_entry = df.min(axis=0) #minimum entry for every hour
# Get hour number with maximum demand
hour_nr_max = max_entry.argmax()
hour_nr_min = min_entry.argmin()
# standard deviation of peak hour
std_dev_peak_h = std_dev[hour_nr_max]
# Difference between average and max
diff_av_max = max_entry[hour_nr_max] - mean[hour_nr_max]
mean_peak_h = mean[hour_nr_max]
# Convert GW to KW
diff_av_max = diff_av_max * 1000000 #GW to KW
mean_peak_h = mean_peak_h * 1000000 #GW to KW
# Weight with population
for region_nr, n in enumerate(regions):
if region_name == n:
nr_of_reg = region_nr
break
pop = population[nr_of_reg]
# Divide standard deviation of peak hour by population
# which gives measure of weather variability in peak hour
std_dev_peak_h_norm_pop = std_dev_peak_h / pop
diff_av_max_pp = diff_av_max / pop
mean_peak_h_pp = mean_peak_h / pop
line_entry = [[
str(region_name),
mean_peak_h,
diff_av_max,
mean_peak_h_pp,
diff_av_max_pp,
std_dev_average_every_h,
std_dev_peak_h_norm_pop]]
line_df = pd.DataFrame(
line_entry, columns=regional_statistics_columns)
df_stats = df_stats.append(line_df)
print(df_stats['diff_av_max'].max())
print(df_stats['mean_peak_h'].max())
print(df_stats['std_dev_peak_h_norm_pop'].max())
print("-")
print(df_stats['diff_av_max_pp'].max())
print(df_stats['diff_av_max_pp'].min())
print("-")
print(df_stats['mean_peak_h_pp'].max())
print(df_stats['mean_peak_h_pp'].min())
# ---------------
# Create spatial maps
# http://darribas.org/gds15/content/labs/lab_03.html
# http://nbviewer.jupyter.org/gist/jorisvandenbossche/57d392c085901eb4981054402b37b6b1
# ---------------
# Load uk shapefile
uk_shapefile = gpd.read_file(path_shapefile)
# Merge stats to geopanda
shp_gdp_merged = uk_shapefile.merge(
df_stats,
on='name')
# Assign projection
crs = {'init': 'epsg:27700'} #27700: OSGB_1936_British_National_Grid
uk_gdf = gpd.GeoDataFrame(shp_gdp_merged, crs=crs)
ax = uk_gdf.plot()
# Assign bin colors according to defined cmap and whether
# plot with min_max values or only min/max values
#bin_values = [0, 0.0025, 0.005, 0.0075, 0.01]
#bin_values = [0, 0.02, 0.04, 0.06, 0.08, 0.1] #list(np.arange(0.0, 1.0, 0.1))
# Field to plot
field_to_plot = "diff_av_max_pp" # Difference between average and peak per person in KWh
#field_to_plot = "diff_av_max" # Difference between average and peak
field_to_plot = 'std_dev_peak_h_norm_pop'
nr_of_intervals = 6
bin_values = result_mapping.get_reasonable_bin_values_II(
data_to_plot=list(uk_gdf[field_to_plot]),
nr_of_intervals=nr_of_intervals)
print(list(uk_gdf[field_to_plot]))
print("BINS " + str(bin_values))
uk_gdf, cmap_rgb_colors, color_zero, min_value, max_value = user_defined_bin_classification(
uk_gdf,
field_to_plot,
bin_values=bin_values)
# plot with face color attribute
uk_gdf.plot(ax=ax, facecolor=uk_gdf['bin_color'], edgecolor='black', linewidth=0.5)
#shp_gdp_merged.plot(column='diff_av_max', scheme='QUANTILES', k=5, cmap='OrRd', linewidth=0.1)
#ax = uk_gdf.plot(column='diff_av_max', scheme='QUANTILES', k=5, cmap='OrRd', linewidth=0.1)
#uk_gdf[uk_gdf['name'] == 'E06000024'].plot(ax=ax, facecolor='green', edgecolor='black')
#uk_gdf[uk_gdf['diff_av_max'] < 0.01].plot(ax=ax, facecolor='blue', edgecolor='black')
# Get legend patches TODO IMPROVE
# TODO IMRPVE: MAKE CORRECT ONE FOR NEW PROCESSING
legend_handles = result_mapping.get_legend_handles(
bin_values[1:-1],
cmap_rgb_colors,
color_zero,
min_value,
max_value)
plt.legend(
handles=legend_handles,
title="Legend",
prop={'size': 8},
loc='upper center',
bbox_to_anchor=(0.5, -0.05),
frameon=False)
# PLot bins on plot
plt.text(
0,
-20,
bin_values[:-1], #leave away maximum value
fontsize=8)
plt.tight_layout()
plt.show()
plt.savefig(fig_name)
plt.close()
def norm_cmap(values, cmap, vmin=None, vmax=None):
"""
Normalize and set colormap
Parameters
----------
values : Series or array to be normalized
cmap : matplotlib Colormap
normalize : matplotlib.colors.Normalize
cm : matplotlib.cm
vmin : Minimum value of colormap. If None, uses min(values).
vmax : Maximum value of colormap. If None, uses max(values).
Returns
-------
n_cmap : mapping of normalized values to colormap (cmap)
Source
------
https://ocefpaf.github.io/python4oceanographers/blog/2015/08/24/choropleth/
"""
mn = vmin or min(values)
mx = vmax or max(values)
norm = Normalize(vmin=mn, vmax=mx)
n_cmap = plt.cm.ScalarMappable(norm=norm, cmap=cmap)
rgb_colors = [n_cmap.to_rgba(value) for value in values]
return n_cmap, rgb_colors
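# Editorial note (illustrative sketch, not part of the original module): a
# typical call maps a list of values onto RGBA colours for an assumed
# matplotlib colormap name::
#
#     >>> _, rgb = norm_cmap([0.0, 0.5, 1.0], cmap='coolwarm')
#     >>> len(rgb)   # one RGBA tuple per input value
#     3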
def plot_colors(rgb_colors):
"""function to plot colors
"""
nr_dots = len(rgb_colors)
dots = []
x = []
y = []
for i in range(nr_dots):
x.append(i + 20)
y.append(i + 20)
#plt.scatter(x, y, c=cmap, s=50)
plt.scatter(x, y, c=rgb_colors, s=50)
plt.show()
def user_defined_bin_classification(
input_df,
field_name,
bin_values,
cmap_diverging=None,
cmap_sequential=None
):
"""Classify values according to bins
Arguments
---------
input_df : dataframe
Dataframe to plot
field_name : str
Name of the column to classify
bin_values : list
User-defined bin edges; data extending beyond the edges is appended as an extra bin
cmap_sequential : str
'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds','YlOrBr',
'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu', 'GnBu', 'PuBu', 'YlGnBu',
'PuBuGn', 'BuGn', 'YlGn'
cmap_diverging : str
'PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu', 'RdYlBu', 'RdYlGn',
'Spectral', 'coolwarm', 'bwr', 'seismic'
Info
-----
Include 0 in min_max_plot == False
Python colors:
https://matplotlib.org/1.4.1/examples/color/colormaps_reference.html
https://ocefpaf.github.io/python4oceanographers/blog/2015/08/24/choropleth/
https://matplotlib.org/examples/color/colormaps_reference.html
"""
# Check if largest value is large than last bin
max_real_value = float(input_df[field_name].max())
min_real_value = float(input_df[field_name].min())
if max_real_value > 0 and min_real_value < 0:
min_max_plot = True
else:
min_max_plot = False
if not min_max_plot:
# If only minus values
if max_real_value < 0: #only min values
if min_real_value < bin_values[0]:
# add "lower as bin" edge
bin_values.insert(0, min_real_value)
elif bin_values[0] < min_real_value:
raise Exception("The minimum user-defined bin value is smaller than the minimum existing value")
if not cmap_sequential:
cmap, cmap_rgb_colors = norm_cmap(bin_values[:-1], cmap='Purples') #'YlOrBr'
else:
cmap, cmap_rgb_colors = norm_cmap(bin_values[:-1], cmap=cmap_sequential) #'YlOrBr'
else: #only positive values
if max_real_value > bin_values[-1]:
# add "higher as bin"
bin_values.append(max_real_value)
elif bin_values[-1] > max_real_value:
raise Exception("The maximum user-defined bin value ({}) is larger than the maximum existing value ({})".format(bin_values[-1], max_real_value))
if not cmap_sequential:
cmap, cmap_rgb_colors = norm_cmap(bin_values[1:], cmap='Purples')
else:
cmap, cmap_rgb_colors = norm_cmap(bin_values[1:], cmap=cmap_sequential)
# e.g. [0, 3, 6] --> generates (0, 3], and (3, 6] bin
input_df['bin_color'] = pd.cut(
input_df[field_name],
bin_values,
include_lowest=True,
right=True,
labels=cmap_rgb_colors)
color_zero = 'grey' # default
else:
if max_real_value < bin_values[-1]:
raise Exception("The maximum user-defined bin value ({}) is larger than the maximum existing value ({})".format(bin_values[-1], max_real_value))
elif min_real_value > bin_values[0]:
raise Exception("The minimum user-defined bin value is smaller than the minimum existing value")
else:
pass
# Add minimum and maximum value
bin_values.append(max_real_value)
bin_values.insert(0, min_real_value)
if not cmap_diverging:
cmap, cmap_rgb_colors = norm_cmap(bin_values, cmap='coolwarm')
else:
cmap, cmap_rgb_colors = norm_cmap(bin_values, cmap=cmap_diverging)
# Reclassify zero value
positive_bin_colors = []
minus_bin_colors = []
minus_bins = []
positive_bins = [0]
for cnt, i in enumerate(bin_values):
if i < 0:
minus_bin_colors.append(cmap_rgb_colors[cnt])
minus_bins.append(i)
elif i == 0:
color_zero = cmap_rgb_colors[cnt]
else:
positive_bin_colors.append(cmap_rgb_colors[cnt])
positive_bins.append(i)
minus_bins.append(0)
# ----
# Classify
# ----
# Classify values in dataframe and assign color value as "bin" column
minus_dataframe = input_df[field_name][input_df[field_name] < 0].to_frame()
zero_dataframe = input_df[field_name][input_df[field_name] == 0].to_frame()
plus_dataframe = input_df[field_name][input_df[field_name] > 0].to_frame()
# e.g. [0, 3, 6] --> generates (0, 3], and (3, 6] bin
minus_dataframe['bin_color'] = pd.cut(
minus_dataframe[field_name],
minus_bins,
include_lowest=True,
right=True,
labels=minus_bin_colors)
zero_dataframe['bin_color'] = [color_zero for _ in range(len(zero_dataframe))] #create list with zero color
plus_dataframe['bin_color'] = pd.cut(
plus_dataframe[field_name],
positive_bins,
include_lowest=True,
right=True,
labels=positive_bin_colors)
# Add bins
input_df = minus_dataframe.append(zero_dataframe)
input_df = input_df.append(plus_dataframe)
return input_df, cmap_rgb_colors, color_zero, min_real_value, max_real_value
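# Editorial note (illustrative sketch, not part of the original module): a
# hypothetical call on a plain DataFrame, with bin edges chosen to bracket
# zero and stay inside the data range, could look like::
#
#     demo_df = pd.DataFrame({'val': [-2.0, 0.0, 1.5, 3.0]})
#     classified_df, rgb_bins, zero_color, vmin, vmax = \
#         user_defined_bin_classification(demo_df, 'val', bin_values=[-1.0, 0.0, 2.0])
#
# The returned frame holds the field plus a 'bin_color' column with one RGBA
# colour per row.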
'''ax = input_df.plot()
# Calculate color values
#uk_gdf[uk_gdf['name'] == 'E06000024'].plot(ax=ax, facecolor='green', edgecolor='black')
#uk_gdf[uk_gdf['diff_av_max'] < 0.01].plot(ax=ax, facecolor='blue', edgecolor='black')
# Convert dict to dataframe
#df = pd.DataFrame.from_dict(input_df, orient='index')
#df['Coordinates'] = list(zip(df.longitude, df.latitude))
#df['Coordinates'] = df['Coordinates'].apply(Point)
# Load uk shapefile
uk_shapefile = gpd.read_file(path_shapefile)
# Assign correct projection
crs = {'init': 'epsg:27700'} #27700 == OSGB_1936_British_National_Grid
uk_gdf = gpd.GeoDataFrame(uk_shapefile, crs=crs)
# Transform
uk_gdf = uk_gdf.to_crs({'init' :'epsg:4326'})
# Plot
ax = uk_gdf.plot(color='white', edgecolor='black')
# print coordinates
#world.plot(column='gdp_per_cap', cmap='OrRd', scheme='quantiles');
plt.savefig(fig_path)'''
| mit |
xiaoxiamii/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 128 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both dataset,
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent variables:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
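# Not part of the original example: report the test-set correlation of the
# first pair of canonical variates, mirroring the PLSCanonical titles above.
print("CCA comp. 1: X vs Y (test corr = %.2f)"
      % np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])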
| bsd-3-clause |
rbalda/neural_ocr | env/lib/python2.7/site-packages/numpy/fft/fftpack.py | 72 | 45497 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
from numpy.core import (array, asarray, zeros, swapaxes, shape, conjugate,
take, sqrt)
from . import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache=_fft_cache):
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified."
% n)
try:
# Thread-safety note: We rely on list.pop() here to atomically
# retrieve-and-remove a wsave from the cache. This ensures that no
# other thread can get the same wsave while we're using it.
wsave = fft_cache.setdefault(n, []).pop()
    except IndexError:
wsave = init_function(n)
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0, n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0, s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
# As soon as we put wsave back into the cache, another thread could pick it
# up and start using it, so we must not do this until after we're
# completely done using it ourselves.
fft_cache[n].append(wsave)
return r
def _unitary(norm):
if norm not in (None, "ortho"):
raise ValueError("Invalid norm value %s, should be None or \"ortho\"."
% norm)
return norm is not None
def fft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation.
"""
a = asarray(a).astype(complex)
if n is None:
n = a.shape[axis]
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
if _unitary(norm):
output *= 1 / sqrt(n)
return output
def ifft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e., ``a[0]`` should contain the zero frequency term,
``a[1:n/2+1]`` should contain the positive-frequency terms, and
``a[n/2+1:]`` should contain the negative-frequency terms, in order of
decreasingly negative frequency. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.legend(('real', 'imaginary'))
<matplotlib.legend.Legend object at 0x...>
>>> plt.show()
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
def rfft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermitian-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n//2 + 1``.
When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
If `n` is even, ``A[-1]`` contains the term representing both positive
and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
the largest positive frequency (fs/2*(n-1)/n), and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf,
_real_fft_cache)
if _unitary(norm):
output *= 1 / sqrt(a.shape[axis])
return output
def irfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache)
return output * (1 / (sqrt(n) if unitary else n))
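# Illustrative sketch (not part of the library): the `irfft` Notes above
# describe resampling via ``irfft(rfft(a), m)``. The private helper below is a
# made-up demonstration of that idea; it is never called on import.
def _demo_fourier_resample(a, m):
    """Resample a real 1-D sequence to `m` points by Fourier interpolation.
    The extra ``m / len(a)`` factor is not part of the Notes formula; it
    compensates for the 1/n normalization of `irfft` so that, for example, a
    constant signal keeps its value after resampling.
    """
    a = asarray(a, dtype=float)
    return irfft(rfft(a), m) * (m / a.shape[0])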
def hfft(a, n=None, axis=-1, norm=None):
"""
Compute the FFT of a signal which has Hermitian symmetry (real spectrum).
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
    ``hfft(ihfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> signal = np.array([1, 2, 3, 4, 3, 2])
>>> np.fft.fft(signal)
array([ 15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j])
>>> np.fft.hfft(signal[:4]) # Input first half of signal
array([ 15., -4., 0., -1., 0., -4.])
>>> np.fft.hfft(signal, 6) # Input entire signal and truncate
array([ 15., -4., 0., -1., 0., -4.])
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
return irfft(conjugate(a), n, axis) * (sqrt(n) if unitary else n)
def ihfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse FFT of a signal which has Hermitian symmetry.
Parameters
----------
a : array_like
Input array.
n : int, optional
Length of the inverse FFT.
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
    ``hfft(ihfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
>>> np.fft.ifft(spectrum)
array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j])
>>> np.fft.ihfft(spectrum)
array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
output = conjugate(rfft(a, n, axis))
return output * (1 / (sqrt(n) if unitary else n))
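# Illustrative sketch (not part of the library): a minimal check of the
# `hfft`/`ihfft` round trip discussed in the Notes above, using made-up data.
def _demo_hfft_roundtrip():
    """Return True if ``hfft(ihfft(a), len(a))`` reproduces a real signal."""
    import numpy as np
    a = np.array([1.0, 2.0, 3.0, 4.0, 3.0])  # odd length, so len(a) must be passed
    return np.allclose(hfft(ihfft(a), len(a)), a)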
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = list(range(-len(s), 0))
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
s[-1] = (a.shape[axes[-1]] - 1) * 2
return s, axes
def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):
a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
itl = list(range(len(axes)))
itl.reverse()
for ii in itl:
a = function(a, n=s[ii], axis=axes[ii], norm=norm)
return a
def fftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform.
This function computes the *N*-dimensional discrete Fourier Transform over
any number of axes in an *M*-dimensional array by means of the Fast Fourier
Transform (FFT).
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the transform over that axis is
performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
fft : The one-dimensional FFT, with definitions and conventions used.
rfftn : The *n*-dimensional FFT of real input.
fft2 : The two-dimensional FFT.
fftshift : Shifts zero-frequency terms to centre of array
Notes
-----
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of all axes, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
See `numpy.fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 9.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[ 18.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> import matplotlib.pyplot as plt
>>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
... 2 * np.pi * np.arange(200) / 34)
>>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
>>> FS = np.fft.fftn(S)
>>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, fft, norm)
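# Illustrative sketch (not part of the library): `_raw_fftnd` above builds the
# N-D transform by applying the 1-D transform once per axis (last axis first),
# so `fftn` is equivalent to nested calls to `fft`. A made-up check:
def _demo_fftn_is_repeated_fft():
    """Return True if fftn matches fft applied along axis 1 and then axis 0."""
    import numpy as np
    a = np.random.rand(4, 6)
    return np.allclose(fftn(a), fft(fft(a, axis=1), axis=0))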
def ifftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the N-dimensional discrete
Fourier Transform over any number of axes in an M-dimensional array by
means of the Fast Fourier Transform (FFT). In other words,
``ifftn(fftn(a)) == a`` to within numerical accuracy.
For a description of the definitions and conventions used, see `numpy.fft`.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fftn`, i.e. it should have the term for zero frequency
in all axes in the low-order corner, the positive frequency terms in the
first half of all axes, the term for the Nyquist frequency in the middle
of all axes and the negative frequency terms in the second half of all
axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
This corresponds to ``n`` for ``ifft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the IFFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
ifft : The one-dimensional inverse FFT.
ifft2 : The two-dimensional inverse FFT.
ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
of array.
Notes
-----
See `numpy.fft` for definitions and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifftn` is called.
Examples
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
>>> import matplotlib.pyplot as plt
>>> n = np.zeros((200,200), dtype=complex)
>>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
>>> im = np.fft.ifftn(n).real
>>> plt.imshow(im)
<matplotlib.image.AxesImage object at 0x...>
>>> plt.show()
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def fft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional discrete Fourier Transform
This function computes the *n*-dimensional discrete Fourier Transform
over any axes in an *M*-dimensional array by means of the
Fast Fourier Transform (FFT). By default, the transform is computed over
the last two axes of the input array, i.e., a 2-dimensional FFT.
Parameters
----------
a : array_like
Input array, can be complex
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
This corresponds to `n` for `fft(x, n)`.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
ifft2 : The inverse two-dimensional FFT.
fft : The one-dimensional FFT.
fftn : The *n*-dimensional FFT.
fftshift : Shifts zero-frequency terms to the center of the array.
For two-dimensional input, swaps first and third quadrants, and second
and fourth quadrants.
Notes
-----
`fft2` is just `fftn` with a different default for `axes`.
The output, analogously to `fft`, contains the term for zero frequency in
the low-order corner of the transformed axes, the positive frequency terms
in the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
the axes, in order of decreasingly negative frequency.
See `fftn` for details and a plotting example, and `numpy.fft` for
definitions and conventions used.
Examples
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
array([[ 50.0 +0.j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5+17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 +4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5 -4.0614962j , 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ],
[-12.5-17.20477401j, 0.0 +0.j , 0.0 +0.j ,
0.0 +0.j , 0.0 +0.j ]])
"""
return _raw_fftnd(a, s, axes, fft, norm)
def ifft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the 2-dimensional discrete Fourier
Transform over any number of axes in an M-dimensional array by means of
the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
to within numerical accuracy. By default, the inverse transform is
computed over the last two axes of the input array.
The input, analogously to `ifft`, should be ordered in the same way as is
returned by `fft2`, i.e. it should have the term for zero frequency
in the low-order corner of the two axes, the positive frequency terms in
the first half of these axes, the term for the Nyquist frequency in the
middle of the axes and the negative frequency terms in the second half of
both axes, in order of decreasingly negative frequency.
Parameters
----------
a : array_like
Input array, can be complex.
s : sequence of ints, optional
Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
Along each axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used. See notes for issue on `ifft` zero padding.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last two
axes are used. A repeated index in `axes` means the transform over
that axis is performed multiple times. A one-element sequence means
that a one-dimensional FFT is performed.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or the last two axes if `axes` is not given.
Raises
------
ValueError
If `s` and `axes` have different length, or `axes` not given and
``len(s) != 2``.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
numpy.fft : Overall view of discrete Fourier transforms, with definitions
and conventions used.
fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
ifftn : The inverse of the *n*-dimensional FFT.
fft : The one-dimensional FFT.
ifft : The one-dimensional inverse FFT.
Notes
-----
`ifft2` is just `ifftn` with a different default for `axes`.
See `ifftn` for details and a plotting example, and `numpy.fft` for
definition and conventions used.
Zero-padding, analogously with `ifft`, is performed by appending zeros to
the input along the specified dimension. Although this is the common
approach, it might lead to surprising results. If another form of zero
padding is desired, it must be performed before `ifft2` is called.
Examples
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
[ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
[ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft, norm)
def rfftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
This function computes the N-dimensional discrete Fourier Transform over
any number of axes in an M-dimensional real array by means of the Fast
Fourier Transform (FFT). By default, all axes are transformed, with the
real transform performed over the last axis, while the remaining
transforms are complex.
Parameters
----------
a : array_like
Input array, taken to be real.
s : sequence of ints, optional
Shape (length along each transformed axis) to use from the input.
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
Along any axis, if the given shape is smaller than that of the input,
the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the FFT. If not given, the last ``len(s)``
axes are used, or all axes if `s` is also not specified.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` and `a`,
as explained in the parameters section above.
The length of the last axis transformed will be ``s[-1]//2+1``,
while the remaining transformed axes will have lengths according to
`s`, or unchanged from the input.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
of real input.
fft : The one-dimensional FFT, with definitions and conventions used.
rfft : The one-dimensional FFT of real input.
fftn : The n-dimensional FFT.
rfft2 : The two-dimensional FFT of real input.
Notes
-----
The transform for real input is performed over the last transformation
axis, as by `rfft`, then the transform over the remaining axes is
performed as by `fftn`. The order of the output is as for `rfft` for the
final transformation axis, and as for `fftn` for the remaining
transformation axes.
See `fft` for details, definitions and conventions used.
Examples
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
array([[[ 8.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
array([[[ 4.+0.j, 0.+0.j],
[ 4.+0.j, 0.+0.j]],
[[ 0.+0.j, 0.+0.j],
[ 0.+0.j, 0.+0.j]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=float)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1], norm)
for ii in range(len(axes)-1):
a = fft(a, s[ii], axes[ii], norm)
return a
def rfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional FFT of a real array.
Parameters
----------
a : array
Input array, taken to be real.
s : sequence of ints, optional
Shape of the FFT.
axes : sequence of ints, optional
Axes over which to compute the FFT.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the real 2-D FFT.
See Also
--------
rfftn : Compute the N-dimensional discrete Fourier Transform for real
input.
Notes
-----
This is really just `rfftn` with different default behavior.
For more details see `rfftn`.
"""
return rfftn(a, s, axes, norm)
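# Illustrative sketch (not part of the library): per the Notes, `rfft2` is just
# `rfftn` restricted to the last two axes; a quick made-up check of that claim.
def _demo_rfft2_equals_rfftn():
    """Return True if rfft2 and rfftn over the last two axes agree."""
    import numpy as np
    a = np.random.rand(3, 4, 5)
    return np.allclose(rfft2(a), rfftn(a, axes=(-2, -1)))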
def irfftn(a, s=None, axes=None, norm=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
This function computes the inverse of the N-dimensional discrete
Fourier Transform for real input over any number of axes in an
M-dimensional array by means of the Fast Fourier Transform (FFT). In
other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
and for the same reason.)
The input should be ordered in the same way as is returned by `rfftn`,
i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
along all the other axes.
Parameters
----------
a : array_like
Input array.
s : sequence of ints, optional
Shape (length of each transformed axis) of the output
(``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
number of input points used along this axis, except for the last axis,
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
with zeros. If `s` is not given, the shape of the input along the
axes specified by `axes` is used.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
Repeated indices in `axes` means that the inverse transform over that
axis is performed multiple times.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axes
indicated by `axes`, or by a combination of `s` or `a`,
as explained in the parameters section above.
The length of each transformed axis is as given by the corresponding
element of `s`, or the length of the input in every axis except for the
last one if `s` is not given. In the final transformed axis the length
of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
length of the final transformed axis of the input. To get an odd
number of output points in the final axis, `s` must be specified.
Raises
------
ValueError
If `s` and `axes` have different length.
IndexError
        If an element of `axes` is larger than the number of axes of `a`.
See Also
--------
rfftn : The forward n-dimensional FFT of real input,
        of which `irfftn` is the inverse.
fft : The one-dimensional FFT, with definitions and conventions used.
irfft : The inverse of the one-dimensional FFT of real input.
irfft2 : The inverse of the two-dimensional FFT of real input.
Notes
-----
See `fft` for definitions and conventions used.
See `rfft` for definitions and conventions used for real input.
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
array([[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]],
[[ 1., 1.],
[ 1., 1.]]])
"""
# The copy may be required for multithreading.
a = array(a, copy=True, dtype=complex)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii], norm)
a = irfft(a, s[-1], axes[-1], norm)
return a
def irfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input array
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
norm : {None, "ortho"}, optional
.. versionadded:: 1.10.0
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : ndarray
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
"""
return irfftn(a, s, axes, norm)
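# Illustrative sketch (not part of the library): the real-input round trip
# ``irfftn(rfftn(a), a.shape) == a`` documented for `irfftn`, checked on
# made-up data. Nothing here runs on import.
def _demo_real_nd_roundtrip():
    """Return True if irfftn undoes rfftn when the original shape is supplied."""
    import numpy as np
    a = np.random.rand(3, 5, 7)  # odd last axis: the shape must be passed explicitly
    return np.allclose(irfftn(rfftn(a), a.shape), a)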
| mit |
printedheart/h2o-3 | h2o-py/h2o/model/binomial.py | 5 | 24203 | """
Binomial Models
"""
from metrics_base import *
class H2OBinomialModel(ModelBase):
"""
Class for Binomial models.
"""
def __init__(self, dest_key, model_json):
"""
Create a new binomial model.
"""
    super(H2OBinomialModel, self).__init__(dest_key, model_json, H2OBinomialModelMetrics)
def F1(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the F1 for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"
    :param thresholds: thresholds parameter must be a list (e.g. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the F1 value for the training data.
:param valid: If valid is True, then return the F1 value for the validation data.
:param xval: If xval is True, then return the F1 value for the cross validation data.
:return: The F1 for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("f1", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
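  # Illustrative usage sketch (not part of the class). `model` stands for any
  # fitted H2OBinomialModel; the calls mirror the docstring above:
  #   model.F1()                       # F1 for the training data
  #   model.F1(thresholds=[0.3, 0.5])  # F1 at two chosen thresholds
  #   model.F1(valid=True)             # F1 for the validation data
  #   model.F1(train=True, xval=True)  # {"train": ..., "xval": ...}
  # The same calling pattern applies to F2, accuracy, precision, tpr, mcc, etc.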
def F2(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the F2 for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"
    :param thresholds: thresholds parameter must be a list (e.g. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the F2 value for the training data.
:param valid: If valid is True, then return the F2 value for the validation data.
:param xval: If xval is True, then return the F2 value for the cross validation data.
:return: The F2 for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("f2", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def F0point5(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the F0.5 for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"
    :param thresholds: thresholds parameter must be a list (e.g. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the F0point5 value for the training data.
:param valid: If valid is True, then return the F0point5 value for the validation data.
:param xval: If xval is True, then return the F0point5 value for the cross validation data.
:return: The F0point5 for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("f0point5", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def accuracy(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the accuracy for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"
    :param thresholds: thresholds parameter must be a list (e.g. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the accuracy value for the training data.
:param valid: If valid is True, then return the accuracy value for the validation data.
:param xval: If xval is True, then return the accuracy value for the cross validation data.
:return: The accuracy for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("accuracy", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def error(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the error for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"
    :param thresholds: thresholds parameter must be a list (e.g. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the error value for the training data.
:param valid: If valid is True, then return the error value for the validation data.
:param xval: If xval is True, then return the error value for the cross validation data.
:return: The error for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else [[acc[0],1-acc[1]] for acc in v.metric("accuracy", thresholds=thresholds)]
return m.values()[0] if len(m) == 1 else m
def precision(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the precision for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"
    :param thresholds: thresholds parameter must be a list (e.g. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the precision value for the training data.
:param valid: If valid is True, then return the precision value for the validation data.
:param xval: If xval is True, then return the precision value for the cross validation data.
:return: The precision for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("precision", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def tpr(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the True Positive Rate for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"
    :param thresholds: thresholds parameter must be a list (e.g. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the tpr value for the training data.
:param valid: If valid is True, then return the tpr value for the validation data.
:param xval: If xval is True, then return the tpr value for the cross validation data.
:return: The tpr for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("tpr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def tnr(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the True Negative Rate for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"
    :param thresholds: thresholds parameter must be a list (e.g. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the tnr value for the training data.
:param valid: If valid is True, then return the tnr value for the validation data.
:param xval: If xval is True, then return the tnr value for the cross validation data.
    :return: The tnr for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("tnr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def fnr(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the False Negative Rates for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"
    :param thresholds: thresholds parameter must be a list (e.g. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the fnr value for the training data.
:param valid: If valid is True, then return the fnr value for the validation data.
:param xval: If xval is True, then return the fnr value for the cross validation data.
:return: The fnr for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("fnr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def fpr(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the False Positive Rates for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"
    :param thresholds: thresholds parameter must be a list (e.g. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the fpr value for the training data.
:param valid: If valid is True, then return the fpr value for the validation data.
:param xval: If xval is True, then return the fpr value for the cross validation data.
:return: The fpr for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("fpr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def recall(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the Recall (AKA True Positive Rate) for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"
    :param thresholds: thresholds parameter must be a list (e.g. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the recall value for the training data.
:param valid: If valid is True, then return the recall value for the validation data.
:param xval: If xval is True, then return the recall value for the cross validation data.
:return: The recall for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("tpr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def sensitivity(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the sensitivity (AKA True Positive Rate or Recall) for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"
    :param thresholds: thresholds parameter must be a list (e.g. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the sensitivity value for the training data.
:param valid: If valid is True, then return the sensitivity value for the validation data.
:param xval: If xval is True, then return the sensitivity value for the cross validation data.
:return: The sensitivity for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("tpr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def fallout(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the Fallout (AKA False Positive Rate) for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"
    :param thresholds: thresholds parameter must be a list (e.g. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the fallout value for the training data.
:param valid: If valid is True, then return the fallout value for the validation data.
:param xval: If xval is True, then return the fallout value for the cross validation data.
:return: The fallout for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("fpr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def missrate(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the miss rate (AKA False Negative Rate) for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"
    :param thresholds: thresholds parameter must be a list (e.g. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the missrate value for the training data.
:param valid: If valid is True, then return the missrate value for the validation data.
:param xval: If xval is True, then return the missrate value for the cross validation data.
:return: The missrate for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("fnr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def specificity(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the specificity (AKA True Negative Rate) for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"
    :param thresholds: thresholds parameter must be a list (e.g. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the specificity value for the training data.
:param valid: If valid is True, then return the specificity value for the validation data.
:param xval: If xval is True, then return the specificity value for the cross validation data.
:return: The specificity for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("tnr", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def mcc(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the mcc for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"
    :param thresholds: thresholds parameter must be a list (e.g. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the mcc value for the training data.
:param valid: If valid is True, then return the mcc value for the validation data.
:param xval: If xval is True, then return the mcc value for the cross validation data.
:return: The mcc for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric("absolute_MCC", thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def max_per_class_error(self, thresholds=None, train=False, valid=False, xval=False):
"""
Get the max per class error for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"
    :param thresholds: thresholds parameter must be a list (e.g. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the max_per_class_error value for the training data.
:param valid: If valid is True, then return the max_per_class_error value for the validation data.
:param xval: If xval is True, then return the max_per_class_error value for the cross validation data.
:return: The max_per_class_error for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else [[mpca[0],1-mpca[1]] for mpca in v.metric("min_per_class_accuracy", thresholds=thresholds)]
return m.values()[0] if len(m) == 1 else m
def metric(self, metric, thresholds=None, train=False, valid=False, xval=False):
"""
Get the metric value for a set of thresholds.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"
    :param metric: name of the metric to retrieve, e.g. one of "min_per_class_accuracy", "absolute_MCC", "tnr", "fnr", "fpr", "tpr", "precision", "accuracy", "f0point5", "f2", "f1".
    :param thresholds: thresholds parameter must be a list (e.g. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
    :param train: If train is True, then return the metrics for the training data.
    :param valid: If valid is True, then return the metrics for the validation data.
    :param xval: If xval is True, then return the metrics for the cross validation data.
:return: The metrics for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.metric(metric,thresholds)
return m.values()[0] if len(m) == 1 else m
def plot(self, type="roc", train=False, valid=False, xval=False, **kwargs):
"""
Produce the desired metric plot
If all are False (default), then return the training metric value.
:param type: the type of metric plot (currently, only ROC supported)
:param train: If train is True, then plot for training data.
:param valid: If valid is True, then plot for validation data.
:param xval: If xval is True, then plot for cross validation data.
    :param show: if False, the plot is not shown (passed through **kwargs); matplotlib's show method is blocking.
:return: None
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
for k,v in zip(tm.keys(),tm.values()):
if v is not None: v.plot(type=type, **kwargs)
def roc(self, train=False, valid=False, xval=False):
"""
Return the coordinates of the ROC curve for a given set of data,
as a two-tuple containing the false positive rates as a list and true positive
rates as a list.
If all are False (default), then return is the training data.
If more than one ROC curve is requested, the data is returned as a dictionary
of two-tuples.
:param train: If train is true, then return the ROC coordinates for the training data.
:param valid: If valid is true, then return the ROC coordinates for the validation data.
:param xval: If xval is true, then return the ROC coordinates for the cross validation data.
    :return rocs_coordinates: the coordinates (false positive rates, true positive rates) of the ROC curve.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()):
if v is not None:
m[k] = (v.fprs, v.tprs)
return m.values()[0] if len(m) == 1 else m
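  # Hedged example (assumes `model` is a trained binomial model and that matplotlib
  # has been imported as plt by the caller):
  #   fprs, tprs = model.roc(valid=True)      # a single request returns one (fprs, tprs) tuple
  #   plt.plot(fprs, tprs)                    # sketch of plotting the ROC curve by hand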
def confusion_matrix(self, metrics=None, thresholds=None, train=False, valid=False, xval=False):
"""
Get the confusion matrix for the specified metrics/thresholds
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"
    :param metrics: A string (or list of strings) in {"min_per_class_accuracy", "absolute_MCC", "tnr", "fnr", "fpr", "tpr", "precision", "accuracy", "f0point5", "f2", "f1"}
    :param thresholds: thresholds parameter must be a list (e.g. [0.01, 0.5, 0.99]). If None, then the thresholds in this set of metrics will be used.
:param train: If train is True, then return the confusion matrix value for the training data.
:param valid: If valid is True, then return the confusion matrix value for the validation data.
:param xval: If xval is True, then return the confusion matrix value for the cross validation data.
:return: The confusion matrix for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.confusion_matrix(metrics=metrics, thresholds=thresholds)
return m.values()[0] if len(m) == 1 else m
def find_threshold_by_max_metric(self,metric,train=False, valid=False, xval=False):
"""
    Find the threshold that maximizes the given metric.
    If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"
    :param metric: the metric to maximize, e.g. one of "min_per_class_accuracy", "absolute_MCC", "tnr", "fnr", "fpr", "tpr", "precision", "accuracy", "f0point5", "f2", "f1".
    :param train: If train is True, then return the threshold_by_max_metric value for the training data.
:param valid: If valid is True, then return the threshold_by_max_metric value for the validation data.
:param xval: If xval is True, then return the threshold_by_max_metric value for the cross validation data.
:return: The threshold_by_max_metric for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.find_threshold_by_max_metric(metric)
return m.values()[0] if len(m) == 1 else m
def find_idx_by_threshold(self,threshold,train=False, valid=False, xval=False):
"""
Retrieve the index in this metric's threshold list at which the given threshold is located.
If all are False (default), then return the training metric value.
    If more than one option is set to True, then return a dictionary of metrics where the keys are "train", "valid",
    and "xval"
    :param threshold: the threshold whose index in this metric's threshold list should be returned.
    :param train: If train is True, then return the idx_by_threshold for the training data.
:param valid: If valid is True, then return the idx_by_threshold for the validation data.
:param xval: If xval is True, then return the idx_by_threshold for the cross validation data.
:return: The idx_by_threshold for this binomial model.
"""
tm = ModelBase._get_metrics(self, train, valid, xval)
m = {}
for k,v in zip(tm.keys(),tm.values()): m[k] = None if v is None else v.find_idx_by_threshold(threshold)
return m.values()[0] if len(m) == 1 else m
| apache-2.0 |
wdurhamh/statsmodels | statsmodels/sandbox/examples/ex_cusum.py | 33 | 3219 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 02 11:41:25 2010
Author: josef-pktd
"""
import numpy as np
from scipy import stats
from numpy.testing import assert_almost_equal
import statsmodels.api as sm
from statsmodels.sandbox.regression.onewaygls import OneWayLS
from statsmodels.stats.diagnostic import recursive_olsresiduals
from statsmodels.sandbox.stats.diagnostic import _recursive_olsresiduals2 as recursive_olsresiduals2
#examples from ex_onewaygls.py
#choose example
#--------------
example = ['null', 'smalldiff', 'mediumdiff', 'largediff'][1]
example_size = [20, 100][1]
example_groups = ['2', '2-2'][1]
#'2-2': 4 groups,
# groups 0 and 1 and groups 2 and 3 have identical parameters in DGP
#generate example
#----------------
#np.random.seed(87654589)
nobs = example_size
x1 = 0.1+np.random.randn(nobs)
y1 = 10 + 15*x1 + 2*np.random.randn(nobs)
x1 = sm.add_constant(x1, prepend=False)
#assert_almost_equal(x1, np.vander(x1[:,0],2), 16)
#res1 = sm.OLS(y1, x1).fit()
#print res1.params
#print np.polyfit(x1[:,0], y1, 1)
#assert_almost_equal(res1.params, np.polyfit(x1[:,0], y1, 1), 14)
#print res1.summary(xname=['x1','const1'])
#regression 2
x2 = 0.1+np.random.randn(nobs)
if example == 'null':
y2 = 10 + 15*x2 + 2*np.random.randn(nobs) # if H0 is true
elif example == 'smalldiff':
y2 = 11 + 16*x2 + 2*np.random.randn(nobs)
elif example == 'mediumdiff':
y2 = 12 + 16*x2 + 2*np.random.randn(nobs)
else:
y2 = 19 + 17*x2 + 2*np.random.randn(nobs)
x2 = sm.add_constant(x2, prepend=False)
# stack
x = np.concatenate((x1,x2),0)
y = np.concatenate((y1,y2))
if example_groups == '2':
groupind = (np.arange(2*nobs)>nobs-1).astype(int)
else:
groupind = np.mod(np.arange(2*nobs),4)
groupind.sort()
#x = np.column_stack((x,x*groupind[:,None]))
res1 = sm.OLS(y, x).fit()
skip = 8
rresid, rparams, rypred, rresid_standardized, rresid_scaled, rcusum, rcusumci = \
recursive_olsresiduals(res1, skip)
print(rcusum)
print(rresid_scaled[skip-1:])
assert_almost_equal(rparams[-1], res1.params)
import matplotlib.pyplot as plt
plt.plot(rcusum)
plt.plot(rcusumci[0])
plt.plot(rcusumci[1])
plt.figure()
plt.plot(rresid)
plt.plot(np.abs(rresid))
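# Structural stability is rejected when the recursive CUSUM path wanders outside the
# confidence band (rcusumci) at any point; the boolean printed below checks exactly that.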
print('cusum test reject:')
print(((rcusum[1:]>rcusumci[1])|(rcusum[1:]<rcusumci[0])).any())
rresid2, rparams2, rypred2, rresid_standardized2, rresid_scaled2, rcusum2, rcusumci2 = \
recursive_olsresiduals2(res1, skip)
#assert_almost_equal(rparams[skip+1:], rparams2[skip:-1],13)
assert_almost_equal(rparams[skip:], rparams2[skip:],13)
#np.c_[rparams[skip+1:], rparams2[skip:-1]]
#plt.show()
#################### Example break test
#import statsmodels.sandbox.tools.stattools
from statsmodels.sandbox.stats.diagnostic import breaks_hansen, \
breaks_cusumolsresid#, breaks_cusum
H, crit95, ft, s = breaks_hansen(res1)
print(H)
print(crit95)
supb, pval, crit = breaks_cusumolsresid(res1.resid)
print(supb, pval, crit)
##check whether this works directly: Ploberger/Kramer framing of standard cusum
##no, it's different, there is another denominator
#print breaks_cusumolsresid(rresid[skip:])
#this function is still completely wrong, cut and paste doesn't apply
#print breaks_cusum(rresid[skip:])
| bsd-3-clause |
Alecardv/College-projects | Metodos Numericos 2012/trapecio.py | 1 | 1308 | import function
from matplotlib.pyplot import *
from pylab import *
import numpy as np
import math
class Trapecio:
def __init__(self, fun, xi, xf):
self.fun = function.Function(fun,'x')
self.a,self.b = xi,xf
self.fig, self.ax = subplots()
def relativeError(self):
f = self.fun.getDerivate()
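        # Trapezoid-rule truncation error is E = -((b-a)^3 / 12) * f''(xi) for some xi in [a, b];
        # here f'' is approximated by the finite difference (f'(b) - f'(a)) / (b - a), so Ea is an
        # estimate of that truncation error (despite the method name, it is not a relative error).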
Ea = ((self.b-self.a)**3/12)*((f.evalFunction(self.b) - f.evalFunction(self.a))/(self.b-self.a))
return Ea
def graph(self):
figure()
root = self.method()
print 'AreaAprox = ',root
print 'AreaReal = ',self.fun.getAndEvalIntegral([self.a,self.b])
print 'Error = ',self.relativeError()
Ox = np.arange(self.a-5,self.b+5, 0.02)
Oy = []
for i in Ox:
Oy.append( self.fun.evalFunction(i) )
self.ax.plot(Ox, Oy, color = "blue",lw = 1,label="f(x)")
self.ax.legend(loc=2)
show()
def px(self,x):
return (self.fun.evalFunction(self.b)-self.fun.evalFunction(self.a))/(self.b-self.a)*(x-self.a) + self.fun.evalFunction(self.a)
def method(self):
I = (self.b-self.a)*((self.fun.evalFunction(self.a) + self.fun.evalFunction(self.b))/2)
self.ax.vlines(self.a,0,self.fun.evalFunction(self.a))
self.ax.vlines(self.b,0,self.fun.evalFunction(self.b))
Ox = np.arange(self.a,self.b, 0.02)
Oy = []
for i in Ox:
Oy.append(self.px(i))
self.ax.plot(Ox, Oy,lw = 2)
return I
| gpl-3.0 |
voxlol/scikit-learn | examples/cluster/plot_dict_face_patches.py | 337 | 2747 | """
Online learning of a dictionary of parts of faces
==================================================
This example uses a large dataset of faces to learn a set of 20 x 20
image patches that constitute faces.
From the programming standpoint, it is interesting because it shows how
to use the online API of the scikit-learn to process a very large
dataset by chunks. The way we proceed is that we load an image at a time
and extract randomly 50 patches from this image. Once we have accumulated
500 of these patches (using 10 images), we run the `partial_fit` method
of the online KMeans object, MiniBatchKMeans.
The verbose setting on the MiniBatchKMeans enables us to see that some
clusters are reassigned during the successive calls to
partial-fit. This is because the number of patches that they represent
has become too low, and it is better to choose a random new
cluster.
"""
print(__doc__)
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.cluster import MiniBatchKMeans
from sklearn.feature_extraction.image import extract_patches_2d
faces = datasets.fetch_olivetti_faces()
###############################################################################
# Learn the dictionary of images
print('Learning the dictionary... ')
rng = np.random.RandomState(0)
kmeans = MiniBatchKMeans(n_clusters=81, random_state=rng, verbose=True)
patch_size = (20, 20)
buffer = []
index = 1
t0 = time.time()
# The online learning part: cycle over the whole dataset 6 times
index = 0
for _ in range(6):
for img in faces.images:
data = extract_patches_2d(img, patch_size, max_patches=50,
random_state=rng)
data = np.reshape(data, (len(data), -1))
buffer.append(data)
index += 1
if index % 10 == 0:
data = np.concatenate(buffer, axis=0)
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
kmeans.partial_fit(data)
buffer = []
if index % 100 == 0:
print('Partial fit of %4i out of %i'
% (index, 6 * len(faces.images)))
dt = time.time() - t0
print('done in %.2fs.' % dt)
###############################################################################
# Plot the results
plt.figure(figsize=(4.2, 4))
for i, patch in enumerate(kmeans.cluster_centers_):
plt.subplot(9, 9, i + 1)
plt.imshow(patch.reshape(patch_size), cmap=plt.cm.gray,
interpolation='nearest')
plt.xticks(())
plt.yticks(())
plt.suptitle('Patches of faces\nTrain time %.1fs on %d patches' %
(dt, 8 * len(faces.images)), fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
plt.show()
| bsd-3-clause |
xyguo/scikit-learn | benchmarks/bench_plot_incremental_pca.py | 374 | 6430 | """
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
plt.plot(X, y, label=label, marker='o')
def benchmark(estimator, data):
gc.collect()
print("Benching %s" % estimator)
t0 = time()
estimator.fit(data)
training_time = time() - t0
data_t = estimator.transform(data)
data_r = estimator.inverse_transform(data_t)
reconstruction_error = np.mean(np.abs(data - data_r))
return {'time': training_time, 'error': reconstruction_error}
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_times['pca'], label="PCA")
plot_results(all_components, all_times['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_times['rpca'], label="RandomizedPCA")
plt.legend(loc="upper left")
plt.suptitle("Algorithm runtime vs. n_components\n \
LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_errors['pca'], label="PCA")
plot_results(all_components, all_errors['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_errors['rpca'], label="RandomizedPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. n_components\n"
"LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_times['pca'], label="PCA")
plot_results(all_batch_sizes, all_times['rpca'], label="RandomizedPCA")
plot_results(all_batch_sizes, all_times['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_errors['pca'], label="PCA")
plot_results(all_batch_sizes, all_errors['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
data.shape[1], num=5)]
batch_size = 1000
# Compare runtimes and error for fixed batch size
all_times = defaultdict(list)
all_errors = defaultdict(list)
for n_components in all_features:
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('ipca', ipca),
('rpca', rpca)]}
for k in sorted(results_dict.keys()):
all_times[k].append(results_dict[k]['time'])
all_errors[k].append(results_dict[k]['error'])
plot_feature_times(all_times, batch_size, all_features, data)
plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
data.shape[0], num=10)]
for n_components in [i.astype(int) for i in
np.linspace(data.shape[1] // 10,
data.shape[1], num=4)]:
all_times = defaultdict(list)
all_errors = defaultdict(list)
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('rpca', rpca)]}
# Create flat baselines to compare the variation over batch size
all_times['pca'].extend([results_dict['pca']['time']] *
len(batch_sizes))
all_errors['pca'].extend([results_dict['pca']['error']] *
len(batch_sizes))
all_times['rpca'].extend([results_dict['rpca']['time']] *
len(batch_sizes))
all_errors['rpca'].extend([results_dict['rpca']['error']] *
len(batch_sizes))
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=n_components,
batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('ipca',
ipca)]}
all_times['ipca'].append(results_dict['ipca']['time'])
all_errors['ipca'].append(results_dict['ipca']['error'])
plot_batch_times(all_times, n_components, batch_sizes, data)
# RandomizedPCA error is always worse (approx 100x) than other PCA
# tests
plot_batch_errors(all_errors, n_components, batch_sizes, data)
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to 5000 face images (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
| bsd-3-clause |
henrykironde/scikit-learn | examples/manifold/plot_compare_methods.py | 259 | 4031 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of the MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space; unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = plt.figure(figsize=(15, 8))
plt.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(251, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=plt.cm.Spectral)
ax.view_init(4, -72)
except:
    ax = fig.add_subplot(251)  # fall back to a 2D axes when the 3D projection is unavailable
plt.scatter(X[:, 0], X[:, 2], c=color, cmap=plt.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(252 + i)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(257)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(258)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(259)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
t0 = time()
tsne = manifold.TSNE(n_components=n_components, init='pca', random_state=0)
Y = tsne.fit_transform(X)
t1 = time()
print("t-SNE: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(2, 5, 10)
plt.scatter(Y[:, 0], Y[:, 1], c=color, cmap=plt.cm.Spectral)
plt.title("t-SNE (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
plt.axis('tight')
plt.show()
| bsd-3-clause |
adamrp/qiime | scripts/categorized_dist_scatterplot.py | 15 | 6299 | #!/usr/bin/env python
# File created on 19 Jan 2011
from __future__ import division
__author__ = "Justin Kuczynski"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Justin Kuczynski"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "Justin Kuczynski"
__email__ = "justinak@gmail.com"
from qiime.util import make_option
import os
import warnings
warnings.filterwarnings('ignore', 'Not using MPI as mpi4py not found')
from qiime.parse import parse_distmat_to_dict, parse_mapping_file,\
mapping_file_to_dict
from qiime.util import parse_command_line_parameters
import numpy
import matplotlib.pyplot as plt
from qiime.categorized_dist_scatterplot import get_avg_dists, get_sam_ids
script_info = {}
script_info[
'brief_description'] = "Create a categorized distance scatterplot representing average distances between samples, broken down by categories"
script_info[
'script_description'] = "Create a figure representing average distances between samples, broken down by categories. I call it a 'categorized distance scatterplot'. See script usage for more details. The mapping file specifies the relevant data - if you have e.g. 'N/A' values or samples you don't want included, first use filter_samples_from_otu_table.py to remove unwanted samples from the mapping file, and thus the analysis. Note that the resulting plot will include only samples in both the mapping file AND the distance matrix."
script_info['script_usage'] = [(
"Canonical Example:",
"Split samples by country. Within each country compare each child to all adults. Plot the average distance from that child to all adults, vs. the age of that child",
"python categorized_dist_scatterplot.py -m map.txt -d unifrac_distance.txt -c Country -p AgeCategory:Child -s AgeCategory:Adult -a AgeYears -o fig1.png"),
("Example 2:",
"Same as above, but compares Child with all other categories (e.g.: NA, Infant, etc.)",
"python categorized_dist_scatterplot.py -m map.txt -d unifrac_distance.txt -c Country -p AgeCategory:Child -a AgeYears -o fig1.svg")]
script_info[
    'output_description'] = "a figure and the text data underlying that figure"
script_info['required_options'] = [
make_option('-m', '--map', type='existing_filepath',
help='mapping file'),
make_option('-d', '--distance_matrix', type='existing_filepath',
help='distance matrix'),
make_option('-p', '--primary_state', type='string',
help="Samples matching this state will be plotted. E.g.: AgeCategory:Child . See qiime's filter_samples_from_otu_table.py for more syntax options"),
make_option('-a', '--axis_category', type='string',
help='this will form the horizontal axis of the figure, e.g.: AgeYears . Must be numbers'),
make_option('-o', '--output_path', type='new_dirpath',
                help='output figure, filename extension determines format. E.g.: "fig1.png" or similar. A "fig1.txt" or similar will also be created with the data underlying the figure'),
]
script_info['optional_options'] = [
make_option('-c', '--colorby', type='string',
help='samples will first be separated by this column of the mapping file. They will be colored by this column of the mapping file, and all comparisons will be done only among samples with the same value in this column. e.g.: Country. You may omit -c, and the samples will not be separated'),
make_option('-s', '--secondary_state', type='string',
                help='all samples matching the primary state will be compared to samples matching this secondary state. E.g.: AgeCategory:Adult'),
]
script_info['version'] = __version__
def main():
option_parser, opts, args =\
parse_command_line_parameters(**script_info)
map_data, map_header, map_comments = parse_mapping_file(
open(opts.map, 'U'))
map_dict = mapping_file_to_dict(map_data, map_header)
distdict = parse_distmat_to_dict(open(opts.distance_matrix, 'U'))
if opts.colorby is None:
colorby_cats = [None]
else:
colorby_idx = map_header.index(opts.colorby)
colorby_cats = list(set([map_data[i][colorby_idx] for
i in range(len(map_data))]))
textfilename = os.path.splitext(opts.output_path)[0] + '.txt'
text_fh = open(textfilename, 'w')
text_fh.write(opts.axis_category + '\tdistance\tSampleID' + '\n')
colorby_cats.sort()
plt.figure()
for cat_num, cat in enumerate(colorby_cats):
# collect the primary and secondary samples within this category
state1_samids, state2_samids = get_sam_ids(map_data, map_header,
opts.colorby, cat, opts.primary_state, opts.secondary_state)
state1_samids =\
list(set(state1_samids).intersection(set(distdict.keys())))
state2_samids =\
list(set(state2_samids).intersection(set(distdict.keys())))
if state1_samids == [] or state2_samids == [] or \
(len(state1_samids) == 1 and state1_samids == state2_samids):
raise RuntimeError("one category of samples didn't have any valid" +
" distances. try eliminating samples from -p or -s, or changing" +
" your mapping file with filter_samples_from_otu_table.py")
# go through dmtx
state1_avg_dists = get_avg_dists(
state1_samids,
state2_samids,
distdict)
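        # one averaged distance per primary sample: entry i is (presumably) the mean
        # distance from state1_samids[i] to all samples in state2_samids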
# plot
xvals = [float(map_dict[sam][opts.axis_category]) for
sam in state1_samids]
try:
color = plt.cm.jet(cat_num / (len(colorby_cats) - 1))
except ZeroDivisionError: # only one cat
color = 'b'
plt.scatter(xvals, state1_avg_dists, edgecolors=color, alpha=.5,
facecolors='none')
plt.xlabel(opts.axis_category)
plt.ylabel('average distance')
lines = [str(xvals[i]) + '\t' + str(state1_avg_dists[i]) +
'\t' + state1_samids[i] + '\n' for i in range(len(xvals))]
text_fh.writelines(lines)
if opts.colorby is not None:
plt.legend(colorby_cats)
plt.savefig(opts.output_path)
if __name__ == "__main__":
main()
| gpl-2.0 |
bsipocz/scikit-image | doc/examples/plot_label.py | 23 | 1557 | """
===================
Label image regions
===================
This example shows how to segment an image with image labelling. The following
steps are applied:
1. Thresholding with automatic Otsu method
2. Close small holes with binary closing
3. Remove artifacts touching image border
4. Measure image regions to filter small objects
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from skimage import data
from skimage.filters import threshold_otsu
from skimage.segmentation import clear_border
from skimage.measure import label
from skimage.morphology import closing, square
from skimage.measure import regionprops
from skimage.color import label2rgb
image = data.coins()[50:-50, 50:-50]
# apply threshold
thresh = threshold_otsu(image)
bw = closing(image > thresh, square(3))
# remove artifacts connected to image border
cleared = bw.copy()
clear_border(cleared)
# label image regions
label_image = label(cleared)
borders = np.logical_xor(bw, cleared)
label_image[borders] = -1
image_label_overlay = label2rgb(label_image, image=image)
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
ax.imshow(image_label_overlay)
for region in regionprops(label_image):
# skip small images
if region.area < 100:
continue
# draw rectangle around segmented coins
minr, minc, maxr, maxc = region.bbox
rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
fill=False, edgecolor='red', linewidth=2)
ax.add_patch(rect)
plt.show()
| bsd-3-clause |
AnasGhrab/scikit-learn | examples/plot_digits_pipe.py | 250 | 1809 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Pipelining: chaining a PCA and a logistic regression
=========================================================
The PCA does an unsupervised dimensionality reduction, while the logistic
regression does the prediction.
We use a GridSearchCV to set the dimensionality of the PCA
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, decomposition, datasets
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
logistic = linear_model.LogisticRegression()
pca = decomposition.PCA()
pipe = Pipeline(steps=[('pca', pca), ('logistic', logistic)])
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
###############################################################################
# Plot the PCA spectrum
pca.fit(X_digits)
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
###############################################################################
# Prediction
n_components = [20, 40, 64]
Cs = np.logspace(-4, 4, 3)
#Parameters of pipelines can be set using ‘__’ separated parameter names:
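# e.g. 'pca__n_components' targets the n_components parameter of the step named 'pca',
# and 'logistic__C' targets C on the step named 'logistic' in the Pipeline defined above.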
estimator = GridSearchCV(pipe,
dict(pca__n_components=n_components,
logistic__C=Cs))
estimator.fit(X_digits, y_digits)
plt.axvline(estimator.best_estimator_.named_steps['pca'].n_components,
linestyle=':', label='n_components chosen')
plt.legend(prop=dict(size=12))
plt.show()
| bsd-3-clause |
ml-lab/pylearn2 | pylearn2/models/tests/test_s3c_inference.py | 4 | 14275 | from pylearn2.models.s3c import S3C
from pylearn2.models.s3c import E_Step_Scan
from pylearn2.models.s3c import Grad_M_Step
from pylearn2.models.s3c import E_Step
from theano import function
import numpy as np
import theano.tensor as T
from theano import config
#from pylearn2.utils import serial
import warnings
def broadcast(mat, shape_0):
rval = mat
if mat.shape[0] != shape_0:
assert mat.shape[0] == 1
rval = np.zeros((shape_0, mat.shape[1]),dtype=mat.dtype)
for i in xrange(shape_0):
rval[i,:] = mat[0,:]
return rval
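# e.g. broadcast(np.ones((1, 4)), 3) returns a (3, 4) array with the single row repeated.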
class Test_S3C_Inference:
def setUp(self):
# Temporarily change config.floatX to float64, as s3c inference
# tests currently fail due to numerical issues for float32.
self.prev_floatX = config.floatX
config.floatX = 'float64'
def tearDown(self):
# Restore previous value of floatX
config.floatX = self.prev_floatX
def __init__(self):
""" gets a small batch of data
sets up an S3C model
"""
# We also have to change the value of config.floatX in __init__.
self.prev_floatX = config.floatX
config.floatX = 'float64'
try:
self.tol = 1e-5
#dataset = serial.load('${PYLEARN2_DATA_PATH}/stl10/stl10_patches/data.pkl')
#X = dataset.get_batch_design(1000)
#X = X[:,0:5]
X = np.random.RandomState([1,2,3]).randn(1000,5)
X -= X.mean()
X /= X.std()
m, D = X.shape
N = 5
#don't give the model an e_step or learning rate so it won't spend years compiling a learn_func
self.model = S3C(nvis = D,
nhid = N,
irange = .1,
init_bias_hid = 0.,
init_B = 3.,
min_B = 1e-8,
max_B = 1000.,
init_alpha = 1., min_alpha = 1e-8, max_alpha = 1000.,
init_mu = 1., e_step = None,
m_step = Grad_M_Step(),
min_bias_hid = -1e30, max_bias_hid = 1e30,
)
self.model.make_pseudoparams()
self.h_new_coeff_schedule = [.1, .2, .3, .4, .5, .6, .7, .8, .9, 1. ]
self.e_step = E_Step_Scan(h_new_coeff_schedule = self.h_new_coeff_schedule)
self.e_step.register_model(self.model)
self.X = X
self.N = N
self.m = m
finally:
config.floatX = self.prev_floatX
def test_match_unrolled(self):
""" tests that inference with scan matches result using unrolled loops """
unrolled_e_step = E_Step(h_new_coeff_schedule = self.h_new_coeff_schedule)
unrolled_e_step.register_model(self.model)
V = T.matrix()
scan_result = self.e_step.infer(V)
unrolled_result = unrolled_e_step.infer(V)
outputs = []
for key in scan_result:
outputs.append(scan_result[key])
outputs.append(unrolled_result[key])
f = function([V], outputs)
outputs = f(self.X)
assert len(outputs) % 2 == 0
for i in xrange(0,len(outputs),2):
assert np.allclose(outputs[i],outputs[i+1])
def test_grad_s(self):
"tests that the gradients with respect to s_i are 0 after doing a mean field update of s_i "
model = self.model
e_step = self.e_step
X = self.X
assert X.shape[0] == self.m
model.test_batch_size = X.shape[0]
init_H = e_step.init_H_hat(V = X)
init_Mu1 = e_step.init_S_hat(V = X)
prev_setting = config.compute_test_value
config.compute_test_value= 'off'
H, Mu1 = function([], outputs=[init_H, init_Mu1])()
config.compute_test_value = prev_setting
H = broadcast(H, self.m)
Mu1 = broadcast(Mu1, self.m)
H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))
Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))
H_var = T.matrix(name='H_var')
H_var.tag.test_value = H
Mu1_var = T.matrix(name='Mu1_var')
Mu1_var.tag.test_value = Mu1
idx = T.iscalar()
idx.tag.test_value = 0
S = e_step.infer_S_hat(V = X, H_hat = H_var, S_hat = Mu1_var)
s_idx = S[:,idx]
s_i_func = function([H_var,Mu1_var,idx],s_idx)
sigma0 = 1. / model.alpha
Sigma1 = e_step.infer_var_s1_hat()
mu0 = T.zeros_like(model.mu)
#by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1
# (they don't affect the outcome of this test and some of them are intractable )
trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \
model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0, var_s1_hat = Sigma1)
grad_Mu1 = T.grad(trunc_kl.sum(), Mu1_var)
grad_Mu1_idx = grad_Mu1[:,idx]
grad_func = function([H_var, Mu1_var, idx], grad_Mu1_idx)
for i in xrange(self.N):
Mu1[:,i] = s_i_func(H, Mu1, i)
g = grad_func(H,Mu1,i)
assert not np.any(np.isnan(g))
g_abs_max = np.abs(g).max()
if g_abs_max > self.tol:
raise Exception('after mean field step, gradient of kl divergence wrt mean field parameter should be 0, but here the max magnitude of a gradient element is '+str(g_abs_max)+' after updating s_'+str(i))
def test_value_s(self):
"tests that the value of the kl divergence decreases with each update to s_i "
model = self.model
e_step = self.e_step
X = self.X
assert X.shape[0] == self.m
init_H = e_step.init_H_hat(V = X)
init_Mu1 = e_step.init_S_hat(V = X)
prev_setting = config.compute_test_value
config.compute_test_value= 'off'
H, Mu1 = function([], outputs=[init_H, init_Mu1])()
config.compute_test_value = prev_setting
H = broadcast(H, self.m)
Mu1 = broadcast(Mu1, self.m)
H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))
Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))
H_var = T.matrix(name='H_var')
H_var.tag.test_value = H
Mu1_var = T.matrix(name='Mu1_var')
Mu1_var.tag.test_value = Mu1
idx = T.iscalar()
idx.tag.test_value = 0
S = e_step.infer_S_hat( V = X, H_hat = H_var, S_hat = Mu1_var)
s_idx = S[:,idx]
s_i_func = function([H_var,Mu1_var,idx],s_idx)
sigma0 = 1. / model.alpha
Sigma1 = e_step.infer_var_s1_hat()
mu0 = T.zeros_like(model.mu)
#by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1
# (they don't affect the outcome of this test and some of them are intractable )
trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \
model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0, var_s1_hat = Sigma1)
trunc_kl_func = function([H_var, Mu1_var], trunc_kl)
for i in xrange(self.N):
prev_kl = trunc_kl_func(H,Mu1)
Mu1[:,i] = s_i_func(H, Mu1, i)
new_kl = trunc_kl_func(H,Mu1)
increase = new_kl - prev_kl
mx = increase.max()
if mx > 1e-3:
raise Exception('after mean field step in s, kl divergence should decrease, but some elements increased by as much as '+str(mx)+' after updating s_'+str(i))
def test_grad_h(self):
"tests that the gradients with respect to h_i are 0 after doing a mean field update of h_i "
model = self.model
e_step = self.e_step
X = self.X
assert X.shape[0] == self.m
init_H = e_step.init_H_hat(V = X)
init_Mu1 = e_step.init_S_hat(V = X)
prev_setting = config.compute_test_value
config.compute_test_value= 'off'
H, Mu1 = function([], outputs=[init_H, init_Mu1])()
config.compute_test_value = prev_setting
H = broadcast(H, self.m)
Mu1 = broadcast(Mu1, self.m)
H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))
Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))
H_var = T.matrix(name='H_var')
H_var.tag.test_value = H
Mu1_var = T.matrix(name='Mu1_var')
Mu1_var.tag.test_value = Mu1
idx = T.iscalar()
idx.tag.test_value = 0
new_H = e_step.infer_H_hat(V = X, H_hat = H_var, S_hat = Mu1_var)
h_idx = new_H[:,idx]
updates_func = function([H_var,Mu1_var,idx], h_idx)
sigma0 = 1. / model.alpha
Sigma1 = e_step.infer_var_s1_hat()
mu0 = T.zeros_like(model.mu)
#by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1
# (they don't affect the outcome of this test and some of them are intractable )
trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \
model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0,
var_s1_hat = Sigma1)
grad_H = T.grad(trunc_kl.sum(), H_var)
assert len(grad_H.type.broadcastable) == 2
#from theano.printing import min_informative_str
#print min_informative_str(grad_H)
#grad_H = Print('grad_H')(grad_H)
#grad_H_idx = grad_H[:,idx]
grad_func = function([H_var, Mu1_var], grad_H)
failed = False
for i in xrange(self.N):
rval = updates_func(H, Mu1, i)
H[:,i] = rval
g = grad_func(H,Mu1)[:,i]
assert not np.any(np.isnan(g))
g_abs_max = np.abs(g).max()
if g_abs_max > self.tol:
#print "new values of H"
#print H[:,i]
#print "gradient on new values of H"
#print g
failed = True
print 'iteration ',i
#print 'max value of new H: ',H[:,i].max()
#print 'H for failing g: '
failing_h = H[np.abs(g) > self.tol, i]
#print failing_h
#from matplotlib import pyplot as plt
#plt.scatter(H[:,i],g)
#plt.show()
#ignore failures extremely close to h=1
high_mask = failing_h > .001
low_mask = failing_h < .999
mask = high_mask * low_mask
print 'masked failures: ',mask.shape[0],' err ',g_abs_max
if mask.sum() > 0:
print 'failing h passing the range mask'
print failing_h[ mask.astype(bool) ]
raise Exception('after mean field step, gradient of kl divergence'
' wrt freshly updated variational parameter should be 0, '
'but here the max magnitude of a gradient element is '
+str(g_abs_max)+' after updating h_'+str(i))
#assert not failed
def test_value_h(self):
"tests that the value of the kl divergence decreases with each update to h_i "
model = self.model
e_step = self.e_step
X = self.X
assert X.shape[0] == self.m
init_H = e_step.init_H_hat(V = X)
init_Mu1 = e_step.init_S_hat(V = X)
prev_setting = config.compute_test_value
config.compute_test_value= 'off'
H, Mu1 = function([], outputs=[init_H, init_Mu1])()
config.compute_test_value = prev_setting
H = broadcast(H, self.m)
Mu1 = broadcast(Mu1, self.m)
H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))
Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))
H_var = T.matrix(name='H_var')
H_var.tag.test_value = H
Mu1_var = T.matrix(name='Mu1_var')
Mu1_var.tag.test_value = Mu1
idx = T.iscalar()
idx.tag.test_value = 0
newH = e_step.infer_H_hat(V = X, H_hat = H_var, S_hat = Mu1_var)
h_idx = newH[:,idx]
h_i_func = function([H_var,Mu1_var,idx],h_idx)
sigma0 = 1. / model.alpha
Sigma1 = e_step.infer_var_s1_hat()
mu0 = T.zeros_like(model.mu)
#by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1
# (they don't affect the outcome of this test and some of them are intractable )
trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \
model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0, var_s1_hat = Sigma1)
trunc_kl_func = function([H_var, Mu1_var], trunc_kl)
for i in xrange(self.N):
prev_kl = trunc_kl_func(H,Mu1)
H[:,i] = h_i_func(H, Mu1, i)
#we don't update mu, the whole point of the split e step is we don't have to
new_kl = trunc_kl_func(H,Mu1)
increase = new_kl - prev_kl
print 'failures after iteration ',i,': ',(increase > self.tol).sum()
mx = increase.max()
if mx > 1e-4:
print 'increase amounts of failing examples:'
print increase[increase > self.tol]
print 'failing H:'
print H[increase > self.tol,:]
print 'failing Mu1:'
print Mu1[increase > self.tol,:]
print 'failing V:'
print X[increase > self.tol,:]
raise Exception('after mean field step in h, kl divergence should decrease, but some elements increased by as much as '+str(mx)+' after updating h_'+str(i))
if __name__ == '__main__':
obj = Test_S3C_Inference()
#obj.test_grad_h()
#obj.test_grad_s()
#obj.test_value_s()
obj.test_value_h()
| bsd-3-clause |
miloharper/neural-network-animation | matplotlib/tests/test_colors.py | 9 | 9307 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from nose.tools import assert_raises, assert_equal
import numpy as np
from numpy.testing.utils import assert_array_equal, assert_array_almost_equal
import matplotlib.colors as mcolors
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from matplotlib.testing.decorators import image_comparison, cleanup
def test_colormap_endian():
"""
Github issue #1005: a bug in putmask caused erroneous
mapping of 1.0 when input from a non-native-byteorder
array.
"""
cmap = cm.get_cmap("jet")
# Test under, over, and invalid along with values 0 and 1.
a = [-0.5, 0, 0.5, 1, 1.5, np.nan]
for dt in ["f2", "f4", "f8"]:
anative = np.ma.masked_invalid(np.array(a, dtype=dt))
aforeign = anative.byteswap().newbyteorder()
#print(anative.dtype.isnative, aforeign.dtype.isnative)
assert_array_equal(cmap(anative), cmap(aforeign))
def test_BoundaryNorm():
"""
Github issue #1258: interpolation was failing with numpy
1.7 pre-release.
"""
# TODO: expand this into a more general test of BoundaryNorm.
boundaries = [0, 1.1, 2.2]
vals = [-1, 0, 2, 2.2, 4]
expected = [-1, 0, 2, 3, 3]
# ncolors != len(boundaries) - 1 triggers interpolation
ncolors = len(boundaries)
bn = mcolors.BoundaryNorm(boundaries, ncolors)
assert_array_equal(bn(vals), expected)
def test_LogNorm():
"""
LogNorm ignored clip, now it has the same
behavior as Normalize, e.g., values > vmax are bigger than 1
without clip, with clip they are 1.
"""
ln = mcolors.LogNorm(clip=True, vmax=5)
assert_array_equal(ln([1, 6]), [0, 1.0])
def test_PowerNorm():
a = np.array([0, 0.5, 1, 1.5], dtype=np.float)
pnorm = mcolors.PowerNorm(1)
norm = mcolors.Normalize()
assert_array_almost_equal(norm(a), pnorm(a))
a = np.array([-0.5, 0, 2, 4, 8], dtype=np.float)
expected = [0, 0, 1/16, 1/4, 1]
pnorm = mcolors.PowerNorm(2, vmin=0, vmax=8)
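    # PowerNorm(2, vmin=0, vmax=8) maps x to ((x - 0) / 8) ** 2, with values below vmin sent
    # to 0, hence 2 -> 1/16, 4 -> 1/4, 8 -> 1, matching `expected` above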
assert_array_almost_equal(pnorm(a), expected)
assert_equal(pnorm(a[0]), expected[0])
assert_equal(pnorm(a[2]), expected[2])
assert_array_almost_equal(a[1:], pnorm.inverse(pnorm(a))[1:])
# Clip = True
a = np.array([-0.5, 0, 1, 8, 16], dtype=np.float)
expected = [0, 0, 0, 1, 1]
pnorm = mcolors.PowerNorm(2, vmin=2, vmax=8, clip=True)
assert_array_almost_equal(pnorm(a), expected)
assert_equal(pnorm(a[0]), expected[0])
assert_equal(pnorm(a[-1]), expected[-1])
# Clip = True at call time
a = np.array([-0.5, 0, 1, 8, 16], dtype=np.float)
expected = [0, 0, 0, 1, 1]
pnorm = mcolors.PowerNorm(2, vmin=2, vmax=8, clip=False)
assert_array_almost_equal(pnorm(a, clip=True), expected)
assert_equal(pnorm(a[0], clip=True), expected[0])
assert_equal(pnorm(a[-1], clip=True), expected[-1])
def test_Normalize():
norm = mcolors.Normalize()
vals = np.arange(-10, 10, 1, dtype=np.float)
_inverse_tester(norm, vals)
_scalar_tester(norm, vals)
_mask_tester(norm, vals)
def test_SymLogNorm():
"""
Test SymLogNorm behavior
"""
norm = mcolors.SymLogNorm(3, vmax=5, linscale=1.2)
vals = np.array([-30, -1, 2, 6], dtype=np.float)
normed_vals = norm(vals)
expected = [0., 0.53980074, 0.826991, 1.02758204]
assert_array_almost_equal(normed_vals, expected)
_inverse_tester(norm, vals)
_scalar_tester(norm, vals)
_mask_tester(norm, vals)
# Ensure that specifying vmin returns the same result as above
norm = mcolors.SymLogNorm(3, vmin=-30, vmax=5, linscale=1.2)
normed_vals = norm(vals)
assert_array_almost_equal(normed_vals, expected)
def _inverse_tester(norm_instance, vals):
"""
Checks if the inverse of the given normalization is working.
"""
assert_array_almost_equal(norm_instance.inverse(norm_instance(vals)), vals)
def _scalar_tester(norm_instance, vals):
"""
Checks if scalars and arrays are handled the same way.
Tests only for float.
"""
scalar_result = [norm_instance(float(v)) for v in vals]
assert_array_almost_equal(scalar_result, norm_instance(vals))
def _mask_tester(norm_instance, vals):
"""
Checks mask handling
"""
masked_array = np.ma.array(vals)
masked_array[0] = np.ma.masked
assert_array_equal(masked_array.mask, norm_instance(masked_array).mask)
@image_comparison(baseline_images=['levels_and_colors'],
extensions=['png'])
def test_cmap_and_norm_from_levels_and_colors():
data = np.linspace(-2, 4, 49).reshape(7, 7)
levels = [-1, 2, 2.5, 3]
colors = ['red', 'green', 'blue', 'yellow', 'black']
extend = 'both'
cmap, norm = mcolors.from_levels_and_colors(levels, colors, extend=extend)
ax = plt.axes()
m = plt.pcolormesh(data, cmap=cmap, norm=norm)
plt.colorbar(m)
# Hide the axes labels (but not the colorbar ones, as they are useful)
for lab in ax.get_xticklabels() + ax.get_yticklabels():
lab.set_visible(False)
def test_cmap_and_norm_from_levels_and_colors2():
levels = [-1, 2, 2.5, 3]
colors = ['red', (0, 1, 0), 'blue', (0.5, 0.5, 0.5), (0.0, 0.0, 0.0, 1.0)]
clr = mcolors.colorConverter.to_rgba_array(colors)
bad = (0.1, 0.1, 0.1, 0.1)
no_color = (0.0, 0.0, 0.0, 0.0)
masked_value = 'masked_value'
# Define the test values which are of interest.
# Note: levels are lev[i] <= v < lev[i+1]
tests = [('both', None, {-2: clr[0],
-1: clr[1],
2: clr[2],
2.25: clr[2],
3: clr[4],
3.5: clr[4],
masked_value: bad}),
('min', -1, {-2: clr[0],
-1: clr[1],
2: clr[2],
2.25: clr[2],
3: no_color,
3.5: no_color,
masked_value: bad}),
('max', -1, {-2: no_color,
-1: clr[0],
2: clr[1],
2.25: clr[1],
3: clr[3],
3.5: clr[3],
masked_value: bad}),
('neither', -2, {-2: no_color,
-1: clr[0],
2: clr[1],
2.25: clr[1],
3: no_color,
3.5: no_color,
masked_value: bad}),
]
for extend, i1, cases in tests:
cmap, norm = mcolors.from_levels_and_colors(levels, colors[0:i1],
extend=extend)
cmap.set_bad(bad)
for d_val, expected_color in cases.items():
if d_val == masked_value:
d_val = np.ma.array([1], mask=True)
else:
d_val = [d_val]
assert_array_equal(expected_color, cmap(norm(d_val))[0],
                               'With extend={0!r} and data '
'value={1!r}'.format(extend, d_val))
assert_raises(ValueError, mcolors.from_levels_and_colors, levels, colors)
def test_rgb_hsv_round_trip():
for a_shape in [(500, 500, 3), (500, 3), (1, 3), (3,)]:
np.random.seed(0)
tt = np.random.random(a_shape)
assert_array_almost_equal(tt,
mcolors.hsv_to_rgb(mcolors.rgb_to_hsv(tt)))
assert_array_almost_equal(tt,
mcolors.rgb_to_hsv(mcolors.hsv_to_rgb(tt)))
@cleanup
def test_autoscale_masked():
# Test for #2336. Previously fully masked data would trigger a ValueError.
data = np.ma.masked_all((12, 20))
plt.pcolor(data)
plt.draw()
def test_colors_no_float():
# Gray must be a string to distinguish 3-4 grays from RGB or RGBA.
def gray_from_float_rgb():
return mcolors.colorConverter.to_rgb(0.4)
def gray_from_float_rgba():
return mcolors.colorConverter.to_rgba(0.4)
assert_raises(ValueError, gray_from_float_rgb)
assert_raises(ValueError, gray_from_float_rgba)
def test_light_source_shading_color_range():
# see also
#http://matplotlib.org/examples/pylab_examples/shading_example.html
from matplotlib.colors import LightSource
from matplotlib.colors import Normalize
refinput = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
norm = Normalize(vmin=0, vmax=50)
ls = LightSource(azdeg=0, altdeg=65)
testoutput = ls.shade(refinput, plt.cm.jet, norm=norm)
refoutput = np.array([
[[0., 0., 0.58912656, 1.],
[0., 0., 0.67825312, 1.],
[0., 0., 0.76737968, 1.],
[0., 0., 0.85650624, 1.]],
[[0., 0., 0.9456328, 1.],
[0., 0., 1., 1.],
[0., 0.04901961, 1., 1.],
[0., 0.12745098, 1., 1.]],
[[0., 0.22156863, 1., 1.],
[0., 0.3, 1., 1.],
[0., 0.37843137, 1., 1.],
[0., 0.45686275, 1., 1.]]
])
assert_array_almost_equal(refoutput, testoutput)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
Leotrinos/agpy | agpy/showspec.py | 6 | 51670 | """
showspec is my homegrown spectrum plotter, meant to somewhat follow STARLINK's
SPLAT and have functionality similar to GAIA, but with an emphasis on producing
publication-quality plots (which, while splat may do, it does unreproducibly)
.. todo::
-add spectrum arithmetic tools
(as is, you can use numpy.interp with sp.vind and sp.spectrum pretty
easily)
-implement other fitters
-e.g., NH3 hyperfine, Voigt
-move to object-oriented pylab/pyplot implementation (for bulk / speedup work)
-allow for non-plotting fitting work (curious... I've never needed that yet)
-Equivalent Width measurement without gaussian fitting
-probably should be part of the baseline code
-write documentation other people can read
12/21/2011 - ALL of the above to-do list IS DONE! It's now hosted at <http://pyspeckit.bitbucket.org>
"""
import math
import pylab
from pylab import *
for k,v in pylab.__dict__.iteritems():
if hasattr(v,'__module__'):
if v.__module__ is None:
locals()[k].__module__ = 'pylab'
import matplotlib
from agpy.mpfit import mpfit
from collapse_gaussfit import *
from ratosexagesimal import *
import pyfits
import gaussfitter
import numpy
from numpy import isnan
from mad import MAD,nanmedian
def steppify(arr,isX=False,interval=0,sign=+1.0):
"""
*support function*
Converts an array to double-length for step plotting
"""
if isX and interval==0:
interval = numpy.abs(arr[1]-arr[0]) / 2.0
newarr = pylab.array(zip(arr-sign*interval,arr+sign*interval)).ravel()
return newarr
class SpecPlotter:
"""
SpecPlotter class. Takes in a spectrum or data cube, plotting properties,
and a velocity axis determination function. Look at splat_1d for a wrapper
that might actually be useful.
Whew, needs more documentation
"""
def __init__(self, cube, axis=None, xtol=None, ytol=None, vconv=lambda
x: x, xtora=lambda x: x, ytodec=lambda x: x, specname=None,
dv=None, color='k', hdr=None, errspec=None, maskspec=None,
fig=None, fignum=1, clear=True, title=None, xunits='km/s',
erralpha=0.2, ivconv=None, autorefresh=True, reffreq=None,
gaiafignum=0, gaiafig=None, clickid=None, **kwargs ):
self.vconv = vconv
self.xtora = xtora
self.ytodec = ytodec
self.cube = cube # where(numpy.isnan(cube),0,cube)
if len(self.cube.shape) > 1:
self.spectrum = cube[:,0,0] # spectrum is what's plotted; cube is the "raw data"
else:
self.spectrum = cube # spectrum is what's plotted; cube is the "raw data"
self.specname=specname
self.dv=dv
self.reffreq=reffreq
self.scale=1.0
self.units='K'
self.xunits=xunits
self.voff=0.0
self.offset=0.0
self.continuum=0.0
self.errspec = errspec
self.erralpha=erralpha
self.plotcolor=color
self.specfit = Specfit(self)
self.fitspec = self.specfit
self.baseline = Baseline(self)
#self.fft = FFT(self)
#self.psd = self.fft.psd
self.vmin=None
self.vmax=None
self.title=title
self.ivconv=ivconv
self.autorefresh=autorefresh
self.spectrumplot=None
self.errorplot=None
self.gaiafignum = gaiafignum
self.gaiafig = gaiafig
self.clickid = clickid
self.plotkwargs = kwargs
if maskspec is not None:
self.maskspec = maskspec
else:
self.maskspec = zeros(self.cube.shape)
self.linecollections =[]
self.texts =[]
if hdr: self.header = hdr
# figure out where to put the plot
if fig is None and axis is None:
fig=figure(fignum)
if clear: fig.clf()
self.axis = fig.gca()
elif fig is None and axis is None:
self.axis = pylab.gca()
elif fig is not None and axis is None:
if clear: fig.clf()
self.axis = fig.gca()
elif fig is None and axis is not None:
self.axis = axis
else: # if figure and axis are both set, just use axis
self.axis = axis
if clear: self.axis.clear()
def __call__(self, event):
"""
Connects map cube to specplotter...
"""
if event.inaxes:
clickX = event.xdata
clickY = event.ydata
tb = get_current_fig_manager().toolbar
#if ((self.axis is None) or (self.axis==event.inaxes)) and tb.mode=='':
if event.button==1 and tb.mode=='':
print "OverPlotting spectrum from point %i,%i" % (clickX,clickY)
self.plotspec(clickY,clickX,button=event.button,cube=True)
elif event.button==2:
print "Plotting spectrum from point %i,%i" % (clickX,clickY)
self.plotspec(clickY,clickX,button=event.button,cube=True,clear=True)
elif event.button==3:
print "Disconnecting GAIA-like tool"
self.gaiafig.canvas.mpl_disconnect(self.clickid)
else:
print "Call failed for some reason: "
print "event: ",event
def plotspec(self, i=0, j=0, cube=False, title=None,
clear=False, color=None, continuum=None,
axis=None, offset=None, scale=None, voff=None, vmin=None,
vmax=None, units=None, xunits=None, erralpha=None, plotpix=False,
errstyle='fill', autorefresh=None, button=None, **kwargs):
"""
Plot a spectrum
Originally written to plot spectra from data cubes, hence the i,j parameter
to specify the location in the cube
Now, cube defaults to False, but you can still pass in a data cube.
Inputs:
title, color, kwargs - semi-obvious plot-related commands
axis - You can pass in a Matplotlib axis instance and it will plot on that
clear - Clear the axis before plotting?
continuum - if you've already subtracted out a continuum, you can add
it back in (only if it is a constant offset). It will be included in
the spectrum
offset - Like continuum, but ONLY for plotting purposes. Moves the
plot vertically but is NOT added to the .spectrum values
scale - multiplicative factor to scale the data by (NOT for plotting
purposes; modifies spectrum)
voff - Shift the spectrum on the velocity axis by this amount
vmin,vmax - only plot within this range
(note that these keywords passed to splat_1d MAY crop the spectrum)
units - units of the data. At the moment, no conversions are done
xunits - units of the X axis. Can affect other procedures, like showlines,
and some unit conversion (Hz to GHz) is done
erralpha - Transparency of the errorbars if plotted
errstyle - style of errorbars if plotted
plotpix - if set, will plot against a pixel (channel) axis instead of a
physical axis
autorefresh - automatically update the plot when fitting gaussians, labeling,
etc?
"""
if kwargs.has_key('fignum'): kwargs.pop('fignum') # HACK because I want __init__ to accept different kwargs
if kwargs.has_key('fig'): kwargs.pop('fig') # is there a better workaround?
if scale is not None: self.scale = scale
if units is not None: self.units = units
if xunits is not None: self.xunits= xunits
if voff is not None: self.voff = voff
if offset is not None: self.offset= offset
if continuum is not None: self.continuum= continuum
if color is not None: self.plotcolor=color
if erralpha is not None: self.erralpha= erralpha
if vmax is not None: self.vmax = vmax
if vmin is not None: self.vmin = vmin
if title is not None: self.title = title
if autorefresh is not None: self.autorefresh = autorefresh
if axis is None: axis=self.axis # allow spectrum to be plotted on other axis
if clear: axis.clear()
if plotpix:
self.vind = arange(self.cube.shape[0])
else:
self.vind = self.vconv(arange(self.cube.shape[0])) + self.voff
if kwargs.has_key('fignum'): kwargs.pop('fignum')
if kwargs.has_key('linewidth'):
linewidth = kwargs.pop('linewidth')
else:
linewidth=0.5
if cube or len(self.cube.shape) == 3:
self.spectrum = self.cube[:,i,j]*self.scale+self.continuum-self.baseline.basespec
self.spectrumplot = axis.plot(self.vind,self.spectrum+self.offset,color=self.plotcolor,
linestyle='steps-mid',linewidth=linewidth,
**kwargs)
else:
if self.maskspec.sum() > 0:
nanmask = where(self.maskspec,numpy.nan,1)
self.spectrum = self.cube*self.scale*nanmask+self.continuum-self.baseline.basespec
self.spectrumplot = axis.plot(self.vind,self.spectrum+self.offset,color=self.plotcolor,
linestyle='steps-mid',linewidth=linewidth,
**kwargs)
else:
self.spectrum = self.cube*self.scale+self.continuum-self.baseline.basespec
self.spectrumplot = axis.plot(self.vind,self.spectrum+self.offset,color=self.plotcolor,
linestyle='steps-mid',linewidth=linewidth,
**kwargs)
if self.errspec is not None:
if errstyle == 'fill':
self.errorplot = [axis.fill_between(steppify(self.vind,isX=True,sign=sign(self.dv)),
steppify(self.spectrum+self.offset-self.errspec*self.scale),
steppify(self.spectrum+self.offset+self.errspec*self.scale),
facecolor=self.plotcolor, alpha=self.erralpha, **kwargs)]
elif errstyle == 'bars':
self.errorplot = axis.errorbar(self.vind, self.spectrum+self.offset,
yerr=self.errspec*self.scale, ecolor=self.plotcolor, fmt=None,
**kwargs)
if vmin is not None: xlo = self.vmin
else: xlo=self.vind.min()
if vmax is not None: xhi = self.vmax
else: xhi=self.vind.max()
axis.set_xlim(xlo,xhi)
if self.title is not None:
axis.set_title(self.title)
elif self.xtora and self.ytodec:
axis.set_title("Spectrum at %s %s" %
(ratos(self.xtora(i)),dectos(self.ytodec(j))))
elif self.specname:
axis.set_title("Spectrum of %s" % self.specname)
if isinstance(self.xunits,str):
axis.set_xlabel(self.xunits)
else:
axis.set_xlabel("V$_{LSR}$ (km s$^{-1}$)")
self.xunits = 'km/s'
if units in ['Ta*','Tastar','K']:
axis.set_ylabel("$T_A^*$ (K)")
elif units == 'mJy':
axis.set_ylabel("$S_\\nu$ (mJy)")
elif units == 'Jy':
axis.set_ylabel("$S_\\nu$ (Jy)")
else:
axis.set_ylabel(self.units)
if self.autorefresh: self.refresh()
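# Hedged usage sketch (values are assumed, not taken from the original code):
# with an existing SpecPlotter instance ``sp``, a typical re-plot could be
# >>> sp.plotspec(vmin=-50, vmax=50, units='K', errstyle='fill', clear=True)
# which redraws the stored spectrum on sp.axis, limits the displayed velocity
# range, and fills the error band when sp.errspec was provided.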
def save(self,fname,**kwargs):
"""
Save the current spectrum (useful for saving baselined data)
"""
newfile = pyfits.PrimaryHDU(data=self.cube,header=self.header)
newfile.writeto(fname,**kwargs)
def savefig(self,fname,bbox_inches='tight',**kwargs):
"""
Simple wrapper of matplotlib's savefig.
"""
self.axis.figure.savefig(fname,bbox_inches=bbox_inches,**kwargs)
def showlines(self,linefreqs,linenames,ctype='freq',cunit='hz',yscale=0.8,vofflines=0.0,
voffunit='km/s',**kwargs):
"""
Overplot vertical lines and labels at the frequencies (or velocities) of each line
yscale - fraction of maximum at which to label
"""
self.clearlines()
if ctype != 'freq':
print "Sorry, non-frequency units not implemented yet."
return
speedoflight=2.99792458e5
if self.reffreq and self.xunits in ('km/s','m/s'):
linefreqs = -(array(linefreqs)-self.reffreq)/self.reffreq * speedoflight
if 'hz' in cunit or 'Hz' in cunit:
linefreqs *= (1.0 + vofflines / speedoflight)
else:
linefreqs += vofflines
ymax = (self.spectrum[self.spectrum==self.spectrum]).max()
for lf,ln in zip(linefreqs,linenames):
if lf < self.vind.max() and lf > self.vind.min():
self.linecollections.append(vlines(lf,0,ymax,**kwargs))
self.texts.append(text(lf,ymax*yscale,ln,rotation='vertical',**kwargs))
if self.autorefresh: self.refresh()
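# Hedged example (line frequencies and names are assumed values): assuming
# sp.reffreq is set and the x axis is in km/s, two labelled lines shifted by a
# bulk offset of +5 km/s could be overplotted at 70% of the spectrum peak with
# >>> sp.showlines([23.6944955e9, 23.7226336e9], ['NH3(1,1)', 'NH3(2,2)'],
# ...              cunit='hz', yscale=0.7, vofflines=5.0, color='b')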
def clearlines(self):
if len(self.texts) > 0:
for T in self.texts:
if T in self.axis.texts:
self.axis.texts.remove(T)
if len(self.linecollections) > 0:
for LC in self.linecollections:
if LC in self.axis.collections:
self.axis.collections.remove(LC)
def refresh(self):
self.axis.figure.canvas.draw()
class FFT:
def __init__(self,specplotter,fignum=3,axis=None, color='k'):
self.specplotter=specplotter
if axis is None:
self.fignum=fignum
self.figure=figure(self.fignum)
self.axis=gca()
else:
self.axis=axis
self.figure=self.axis.figure
self.fignum=None
#self.axis.clear()
self.color=color
self.fftplot=None
self.setspec()
self.setshift()
self.clear()
def __call__(self,psd=False,shift=True):
self.setspec()
if psd:
self.psd(shift=shift)
else:
self.fft(shift=shift)
def fft(self,shift=True,logplot=False,**kwargs):
self.clear()
self.setshift(shift)
if logplot: self.axis.set_yscale('log')
else: self.axis.set_yscale('linear')
self.fftspec = fft(self.spectofft)
self.realfft = self.fftspec.real
self.imagfft = self.fftspec.imag
self.fftplot = self.axis.plot(self.shiftfunc(self.realfft),
drawstyle='steps-mid',color=self.color,**kwargs)
self.refresh()
def psd(self,logplot=True,shift=True,**kwargs):
self.clear()
if logplot: self.axis.set_yscale('log')
else: self.axis.set_yscale('linear')
self.setshift(shift)
self.psdspec = fft(self.spectofft) * fft(self.spectofft[::-1])
self.psdreal = abs(self.psdspec)
self.fftplot = self.axis.plot(self.shiftfunc(self.psdreal),
drawstyle='steps-mid',color=self.color,**kwargs)
if logplot: self.axis.set_yscale('log')
else: self.axis.set_yscale('linear')
self.refresh()
def setshift(self,shift=True):
if shift: self.shiftfunc = fftshift
else: self.shiftfunc = lambda x: x
def setspec(self):
self.spectofft = copy(self.specplotter.spectrum)
OKmask = (self.spectofft==self.spectofft)
self.spectofft[(True-OKmask)] = 0
def clear(self):
if self.fftplot is not None:
for p in self.fftplot:
p.set_visible(False)
if p in self.axis.lines: self.axis.lines.remove(p)
self.axis.clear()
self.refresh()
def refresh(self):
self.axis.figure.canvas.draw()
class PSD(FFT):
def __call__(self,shift=True):
self.setspec()
self.setshift(shift)
self.clear()
self.psd()
self.refresh()
class Baseline:
def __init__(self,specplotter):
self.baselinepars = None
self.order = None
self.basespec = zeros(specplotter.spectrum.shape[0])
self.excludemask = zeros(specplotter.spectrum.shape[0],dtype='bool')
self.OKmask = ones(specplotter.spectrum.shape[0],dtype='bool')
self.specplotter = specplotter
self.blleg = None
self.click = 0
self.nclicks_b1 = 0
self.nclicks_b2 = 0
self.fitregion=[]
self.excludevelo = []
self.excludepix = []
def __call__(self, order=1, annotate=False, excludefit=False, save=True,
exclude=None, exclusionlevel=0.01,
interactive=False, **kwargs):
"""
Fit and remove a polynomial from the spectrum.
It will be saved in the variable "self.basespec"
and the fit coefficients will be saved in "self.baselinepars" (the polynomial order in "self.order")
function baseline(spectrum,xarr=None,xmin=None,xmax=None,order=1,quiet=True,exclude=None):
Subtract a baseline from a spectrum
If xmin,xmax are not specified, defaults to ignoring first and last 10% of spectrum
exclude is a set of start/end indices to ignore when baseline fitting
(ignored by setting error to infinite in fitting procedure)
excludefit creates a mask based on the fitted gaussian model (assuming
that it has a zero-height) using an exclusion level of (exclusionlevel)
* the smallest gaussian peak that was fit
"basespec" is added back to the spectrum before fitting so you can run this
procedure multiple times without losing information
"""
specfit = self.specplotter.specfit
self.order = order
fitp = zeros(self.order+1)
self.spectofit = self.specplotter.spectrum+self.basespec
self.OKmask = (self.spectofit==self.spectofit)
if exclude == 'interactive' or interactive:
self.excludemask[:] = True
self.excludevelo = []
self.excludepix = []
self.click = self.specplotter.axis.figure.canvas.mpl_connect('button_press_event',self.selectregion)
else:
if excludefit and specfit.modelpars is not None:
#vlo = self.specplotter.specfit.modelpars[1] - 2*self.specplotter.specfit.modelpars[2]
#vhi = self.specplotter.specfit.modelpars[1] + 2*self.specplotter.specfit.modelpars[2]
#exclude = [argmin(abs(self.specplotter.vind-vlo)),argmin(abs(self.specplotter.vind-vhi))]
specfit.fullsizemodel() # make sure the spectrum is the right size
self.excludemask = abs(specfit.model) > exclusionlevel*abs(min(specfit.modelpars[0::3]))
else:
self.excludemask[:] = False
self.dofit(exclude=exclude,annotate=annotate,**kwargs)
if save: self.savefit()
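# Hedged usage sketch (velocity limits are assumed): the Baseline object is
# normally reached through the plotter as ``sp.baseline``; a second-order fit
# that excludes a line between -10 and +10 km/s could look like
# >>> sp.baseline(order=2, exclude=[-10, 10], annotate=True)
# which mirrors the call made by splat_1d further down in this module.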
def dofit(self, exclude=None, excludeunits='velo', annotate=False,
**kwargs):
"""
Do the baseline fitting and save and plot the results.
Can specify a region to exclude using velocity units or pixel units
"""
if exclude is not None and excludeunits in ['velo','km/s']:
if len(exclude) % 2 == 0:
self.excludevelo = exclude
self.excludepix = []
for vl,vu in zip(exclude[::2],exclude[1::2]):
xl = argmin(abs(self.specplotter.vind-vl))
xu = argmin(abs(self.specplotter.vind-vu))
if xl > xu: xl,xu=xu,xl
self.excludemask[xl:xu] = True
self.excludepix += [xl,xu]
elif excludeunits in ['pix','pixel','chan','channel']:
if len(exclude) % 2 == 0:
self.excludepix = []
for xl,xu in zip(exclude[::2],exclude[1::2]):
if xl > xu: xl,xu=xu,xl
self.excludemask[xl:xu] = True
self.excludepix += [xl,xu]
self.specplotter.spectrum, self.baselinepars = baseline(
self.spectofit,
xarr=self.specplotter.vind,
order=self.order, exclude=None,
mask=(True-self.OKmask)+self.excludemask,
**kwargs)
self.basespec = poly1d(self.baselinepars)(self.specplotter.vind)
if self.specplotter.spectrumplot is not None:
[self.specplotter.axis.lines.remove(p) for p in self.specplotter.spectrumplot]
if self.specplotter.errorplot is not None:
[self.specplotter.axis.collections.remove(p) for p in self.specplotter.errorplot if isinstance(p,matplotlib.collections.PolyCollection)]
[self.specplotter.axis.lines.remove(p) for p in self.specplotter.errorplot if isinstance(p,matplotlib.lines.Line2D)]
self.specplotter.plotspec(**self.specplotter.plotkwargs)
self.specplotter.axis.set_ylim(
abs(self.specplotter.spectrum[self.OKmask].min())*1.1*sign(self.specplotter.spectrum[self.OKmask].min()),
abs(self.specplotter.spectrum[self.OKmask].max())*1.1*sign(self.specplotter.spectrum[self.OKmask].max()))
if annotate: self.annotate() # refreshes automatically
elif self.specplotter.autorefresh: self.specplotter.refresh()
def selectregion(self,event,annotate=False):
"""
select regions for baseline fitting
"""
if event.button == 1:
if self.nclicks_b1 == 0:
self.bx1 = argmin(abs(event.xdata-self.specplotter.vind))
self.excludevelo += [self.specplotter.vind]
self.excludepix += [self.bx1]
self.nclicks_b1 += 1
elif self.nclicks_b1 == 1:
self.bx2 = argmin(abs(event.xdata-self.specplotter.vind))
self.nclicks_b1 -= 1
if self.bx1 > self.bx2: self.bx1,self.bx2 = self.bx2,self.bx1
self.fitregion += self.specplotter.axis.plot(
self.specplotter.vind[self.bx1:self.bx2],
self.specplotter.spectrum[self.bx1:self.bx2]+self.specplotter.offset,
drawstyle='steps-mid',
color='g',alpha=0.5)
self.specplotter.refresh()
self.excludemask[self.bx1:self.bx2] = False
self.excludevelo += [self.specplotter.vind]
self.excludepix += [self.bx2]
if event.button in [2,3]:
disconnect(self.click)
self.dofit(exclude=None,annotate=annotate)
for p in self.fitregion:
p.set_visible(False)
self.specplotter.axis.lines.remove(p)
self.fitregion=[] # I should be able to just remove from the list... but it breaks the loop...
self.specplotter.refresh()
def annotate(self,loc='upper left'):
bltext = "bl: $y=$"+"".join(["$%+6.3gx^{%i}$" % (f,self.order-i)
for i,f in enumerate(self.baselinepars)])
#self.blleg = text(xloc,yloc ,bltext,transform = self.specplotter.axis.transAxes)
self.clearlegend()
pl = matplotlib.collections.CircleCollection([0],edgecolors=['k'])
self.blleg = self.specplotter.axis.legend(
(pl,),
(bltext,),loc=loc,markerscale=0.001,
borderpad=0.1, handlelength=0.1, handletextpad=0.1
)
self.specplotter.axis.add_artist(self.blleg)
if self.specplotter.autorefresh: self.specplotter.refresh()
def clearlegend(self):
if self.blleg is not None:
self.blleg.set_visible(False)
if self.blleg in self.specplotter.axis.artists:
self.specplotter.axis.artists.remove(self.blleg)
if self.specplotter.autorefresh: self.specplotter.refresh()
def savefit(self):
if self.baselinepars is not None:
for ii,p in enumerate(self.baselinepars):
self.specplotter.header.update('BLCOEF%0.2i' % (ii),p,comment="Baseline power-law best-fit coefficient x^%i" % (self.order-ii-1))
class Specfit:
def __init__(self,specplotter):
self.model = None
self.modelpars = None
self.modelerrs = None
self.modelplot = None
self.guessplot = []
self.fitregion = []
self.ngauss = 0
self.nclicks_b1 = 0
self.nclicks_b2 = 0
self.gx1 = 0
self.gx2 = specplotter.spectrum.shape[0]
self.guesses = []
self.click = 0
self.fitkwargs = {}
self.auto = False
self.autoannotate = True
self.specplotter = specplotter
self.gaussleg=None
self.residuals=None
self.setfitspec()
#self.seterrspec()
def __call__(self, interactive=False, usemoments=True, fitcolor='r',
multifit=False, guesses=None, annotate=True, save=True,
**kwargs):
"""
Fit gaussians to a spectrum
guesses = [height,amplitude,center,width]
"""
self.fitcolor = fitcolor
self.clear()
self.ngauss = 0
self.fitkwargs = kwargs
if interactive:
print "Left-click twice to select a fitting range, then middle-click twice to select a peak and width"
self.nclicks_b1 = 0
self.nclicks_b2 = 0
self.guesses = []
self.click = self.specplotter.axis.figure.canvas.mpl_connect('button_press_event',self.makeguess)
self.autoannotate = annotate
elif multifit:
if guesses is None:
print "You must input guesses when using multifit. Also, baseline first!"
else:
self.guesses = guesses
self.multifit()
self.autoannotate = annotate
else:
#print "Non-interactive, 1D fit with automatic guessing"
if self.specplotter.baseline.order is None:
self.specplotter.baseline.order=0
self.onedfit(usemoments=usemoments,annotate=annotate,**kwargs)
else:
self.onedfit(usemoments=usemoments,annotate=annotate,
vheight=False,height=0.0,**kwargs)
if self.specplotter.autorefresh: self.specplotter.refresh()
if save: self.savefit()
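# Hedged usage sketch (guess values are assumed): after baselining, a
# two-component fit with [amplitude, center, width] triplets could be run as
# >>> sp.specfit(multifit=True, guesses=[1.0, -5.0, 2.0, 0.5, 8.0, 3.0])
# while ``sp.specfit(interactive=True)`` starts the click-to-fit workflow
# described in the print statement above.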
def seterrspec(self,usestd=None,useresiduals=True):
if self.specplotter.errspec is not None and not usestd:
self.errspec = self.specplotter.errspec
elif self.residuals is not None and useresiduals:
self.errspec = ones(self.spectofit.shape[0]) * self.residuals.std()
else: self.errspec = ones(self.spectofit.shape[0]) * self.spectofit.std()
def setfitspec(self):
self.spectofit = copy(self.specplotter.spectrum)
OKmask = (self.spectofit==self.spectofit)
self.spectofit[(True-OKmask)] = 0
self.seterrspec()
self.errspec[(True-OKmask)] = 1e10
def multifit(self):
self.ngauss = len(self.guesses)/3
self.setfitspec()
if self.fitkwargs.has_key('negamp'): self.fitkwargs.pop('negamp')
mpp,model,mpperr,chi2 = gaussfitter.multigaussfit(
self.specplotter.vind[self.gx1:self.gx2],
self.spectofit[self.gx1:self.gx2],
err=self.errspec[self.gx1:self.gx2],
ngauss=self.ngauss,
params=self.guesses,
**self.fitkwargs)
self.chi2 = chi2
self.dof = self.gx2-self.gx1-self.ngauss*3
self.model = model
self.modelpars = mpp.tolist()
self.modelerrs = mpperr.tolist()
self.modelplot = self.specplotter.axis.plot(
self.specplotter.vind[self.gx1:self.gx2],
self.model+self.specplotter.offset, color=self.fitcolor, linewidth=0.5)
self.residuals = self.spectofit[self.gx1:self.gx2] - self.model
if self.autoannotate:
self.annotate()
def onedfit(self, usemoments=True, annotate=True, vheight=True, height=0, negamp=None,**kwargs):
self.ngauss = 1
self.auto = True
self.setfitspec()
if usemoments: # this can be done within gaussfit but I want to save them
self.guesses = gaussfitter.onedmoments(
self.specplotter.vind[self.gx1:self.gx2],
self.spectofit[self.gx1:self.gx2],
vheight=vheight,negamp=negamp,**kwargs)
if vheight is False: self.guesses = [height]+self.guesses
else:
if negamp: self.guesses = [height,-1,0,1]
else: self.guesses = [height,1,0,1]
mpp,model,mpperr,chi2 = gaussfitter.onedgaussfit(
self.specplotter.vind[self.gx1:self.gx2],
self.spectofit[self.gx1:self.gx2],
err=self.errspec[self.gx1:self.gx2],
vheight=vheight,
params=self.guesses,
**self.fitkwargs)
self.chi2 = chi2
self.dof = self.gx2-self.gx1-self.ngauss*3-vheight
if vheight:
self.specplotter.baseline.baselinepars = mpp[:1] # first item in list form
self.model = model - mpp[0]
else: self.model = model
self.residuals = self.spectofit[self.gx1:self.gx2] - self.model
self.modelpars = mpp[1:].tolist()
self.modelerrs = mpperr[1:].tolist()
self.modelplot = self.specplotter.axis.plot(
self.specplotter.vind[self.gx1:self.gx2],
self.model+self.specplotter.offset, color=self.fitcolor, linewidth=0.5)
if annotate:
self.annotate()
if vheight: self.specplotter.baseline.annotate()
def fullsizemodel(self):
"""
If the gaussian was fit to a sub-region of the spectrum,
expand it (with zeros) to fill the spectrum. You can
always recover the original by:
origmodel = model[gx1:gx2]
"""
if self.model.shape != self.specplotter.spectrum.shape:
temp = zeros(self.specplotter.spectrum.shape)
temp[self.gx1:self.gx2] = self.model
self.model = temp
self.residuals = self.spectofit - self.model
def plotresiduals(self,fig=None,axis=None,clear=True,**kwargs):
"""
Plot residuals of the fit. Specify a figure or
axis; defaults to figure(2).
kwargs are passed to matplotlib plot
"""
if axis is None:
fig=figure(2)
self.residualaxis = gca()
if clear: self.residualaxis.clear()
else:
self.residualaxis = axis
if clear: self.residualaxis.clear()
self.residualplot = self.residualaxis.plot(self.specplotter.vind[self.gx1:self.gx2],
self.residuals,drawstyle='steps-mid',
linewidth=0.5, color='k', **kwargs)
if self.specplotter.vmin is not None and self.specplotter.vmax is not None:
self.residualaxis.set_xlim(self.specplotter.vmin,self.specplotter.vmax)
self.residualaxis.figure.canvas.draw()
def annotate(self,loc='upper right'):
#text(xloc,yloc ,"c=%g" % self.modelpars[1],transform = self.specplotter.axis.transAxes)
#text(xloc,yloc-0.05,"w=%g" % self.modelpars[2],transform = self.specplotter.axis.transAxes)
#text(xloc,yloc-0.10,"a=%g" % self.modelpars[0],transform = self.specplotter.axis.transAxes)
self.clearlegend()
pl = matplotlib.collections.CircleCollection([0],edgecolors=['k'])
self.gaussleg = self.specplotter.axis.legend(
tuple([pl]*3*self.ngauss),
tuple(flatten(
[("c%i=%6.4g $\\pm$ %6.4g" % (jj,self.modelpars[1+jj*3],self.modelerrs[1+jj*3]),
"w%i=%6.4g $\\pm$ %6.4g" % (jj,self.modelpars[2+jj*3],self.modelerrs[2+jj*3]),
"a%i=%6.4g $\\pm$ %6.4g" % (jj,self.modelpars[0+jj*3],self.modelerrs[0+jj*3]))
for jj in range(self.ngauss)])),
loc=loc,markerscale=0.01,
borderpad=0.1, handlelength=0.1, handletextpad=0.1
)
self.gaussleg.draggable(True)
self.specplotter.axis.add_artist(self.gaussleg)
if self.specplotter.autorefresh: self.specplotter.refresh()
def selectregion(self,event):
if self.nclicks_b1 == 0:
self.gx1 = argmin(abs(event.xdata-self.specplotter.vind))
self.nclicks_b1 += 1
elif self.nclicks_b1 == 1:
self.gx2 = argmin(abs(event.xdata-self.specplotter.vind))
self.nclicks_b1 -= 1
if self.gx1 > self.gx2: self.gx1,self.gx2 = self.gx2,self.gx1
if abs(self.gx1-self.gx2) > 3: # can't fit w/ fewer data than pars
self.fitregion = self.specplotter.axis.plot(
self.specplotter.vind[self.gx1:self.gx2],
self.specplotter.spectrum[self.gx1:self.gx2]+self.specplotter.offset,
drawstyle='steps-mid',
color='c')
if self.guesses == []:
self.guesses = gaussfitter.onedmoments(
self.specplotter.vind[self.gx1:self.gx2],
self.spectofit[self.gx1:self.gx2],
vheight=0)
self.ngauss = 1
self.auto = True
else:
print "Fitting region is too small (channels %i:%i). Try again." % (self.gx1,self.gx2)
def guesspeakwidth(self,event):
if self.nclicks_b2 % 2 == 0:
if self.auto:
self.guesses[:2] = [event.ydata,event.xdata]
else:
self.guesses += [event.ydata,event.xdata,1]
self.ngauss += 1
self.nclicks_b2 += 1
self.guessplot += [self.specplotter.axis.scatter(event.xdata,event.ydata,marker='x',c='r')]
elif self.nclicks_b2 % 2 == 1:
self.guesses[-1] = abs(event.xdata-self.guesses[-2])
self.nclicks_b2 += 1
self.guessplot += self.specplotter.axis.plot([event.xdata,
2*self.guesses[-2]-event.xdata],[event.ydata]*2,
color='r')
if self.auto:
self.auto = False
if self.nclicks_b2 / 2 > self.ngauss:
print "There have been %i middle-clicks but there are only %i gaussians" % (self.nclicks_b2,self.ngauss)
self.ngauss += 1
def clear(self,legend=True):
if self.modelplot is not None:
for p in self.modelplot:
p.set_visible(False)
if legend: self.clearlegend()
def makeguess(self,event):
if event.button == 1:
self.selectregion(event)
elif event.button == 2:
self.guesspeakwidth(event)
elif event.button == 3:
disconnect(self.click)
if self.ngauss > 0:
print len(self.guesses)/3," Guesses: ",self.guesses," X channel range: ",self.gx1,self.gx2
if len(self.guesses) % 3 == 0:
self.multifit()
for p in self.guessplot + self.fitregion:
p.set_visible(False)
else:
print "error, wrong # of pars"
if self.specplotter.autorefresh: self.specplotter.refresh()
def clearlegend(self):
if self.gaussleg is not None:
self.gaussleg.set_visible(False)
if self.gaussleg in self.specplotter.axis.artists:
self.specplotter.axis.artists.remove(self.gaussleg)
if self.specplotter.autorefresh: self.specplotter.refresh()
def savefit(self):
if self.modelpars is not None:
for ii,p in enumerate(self.modelpars):
if ii % 3 == 0: self.specplotter.header.update('AMP%1i' % (ii/3),p,comment="Gaussian best fit amplitude #%i" % (ii/3))
if ii % 3 == 1: self.specplotter.header.update('CEN%1i' % (ii/3),p,comment="Gaussian best fit center #%i" % (ii/3))
if ii % 3 == 2: self.specplotter.header.update('WID%1i' % (ii/3),p,comment="Gaussian best fit width #%i" % (ii/3))
def mapplot(plane,cube,vconv=lambda x: x,xtora=lambda x: x,ytodec=lambda x: x, gaiafignum=0, specfignum=1):
gaiafig = figure(gaiafignum)
gaiafig.clf()
gaiaax = gaiafig.add_subplot(111)
gaiaax.imshow(plane)
sp = SpecPlotter(cube, vconv=vconv, xtora=xtora, ytodec=ytodec,
gaiafignum=gaiafignum, fignum=specfignum, gaiafig=gaiafig)
sp.clickid = gaiafig.canvas.mpl_connect('button_press_event',sp)
#connect('button_press_event',sp)
def splat_3d(filename,xi=0,yi=0,vmin=None,vmax=None,button=1,dobaseline=False,exclude=None,
smooth=None,smoothto=None,smoothtype='gaussian',order=1,savepre=None,**kwargs):
"""
Inputs:
vmin,vmax - range over which to baseline and plot
exclude - (internal) range to exclude from baseline fit
"""
dv,v0,p3,hdr,cube,xtora,ytodec,vconv,xunits,conversion_factor,units = open_3d(filename)
if units is None: units="UNITS"
if xunits is None: xunits="km/s"
if conversion_factor == 0 or conversion_factor is None: conversion_factor=1.0
sp = splat_1d(vpars=[dv, v0, p3], hdr=hdr, spec=cube[:, yi, xi],
xtora=xtora, ytodec=ytodec, vconv=vconv, units=units,
conversion_factor=conversion_factor, xunits=xunits, **kwargs)
sp.cube = cube
return sp
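# Hedged example (file name and pixel indices are assumed): plot the spectrum
# at cube pixel (xi, yi), cropping the velocity axis to +/- 30 km/s.
# >>> sp = splat_3d('mycube.fits', xi=12, yi=34, vmin=-30, vmax=30)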
def gaia(filename,estimator='max',axis=0):
f = pyfits.open(filename)
hdr = f[0].header
cube = f[0].data
dv,v0,p3 = hdr.get('CD3_3'),hdr.get('CRVAL3'),hdr.get('CRPIX3')
dr,r0,p1 = hdr.get('CD1_1'),hdr.get('CRVAL1'),hdr.get('CRPIX1')
dd,d0,p2 = hdr.get('CD2_2'),hdr.get('CRVAL2'),hdr.get('CRPIX2')
if dv is None: dv = hdr.get('CDELT3')
if dr is None: dr = hdr.get('CDELT1')
if dd is None: dd = hdr.get('CDELT2')
xtora = lambda x: (x-p1+1)*dr+r0 # convert pixel coordinates to RA/Dec/Velocity
ytodec = lambda y: (y-p2+1)*dd+d0
vconv = lambda v: (v-p3+1)*dv+v0
if axis > 0:
cube = cube.swapaxes(0,axis)
if estimator == 'max':
p = where(isnan(cube),0,cube).max(axis=0)
elif estimator == 'int':
p = where(isnan(cube),0,cube).sum(axis=0) * dv
elif estimator == 'intdivmax':
cut = MAD(cube.ravel()) + nanmedian(cube.ravel())
if cut < 0:
cut = 0
m = where(isnan(cube),0,cube).max(axis=0)
i = where(isnan(cube),0,cube).sum(axis=0) * dv
p = where(i<0,0,i)/where(m<=cut,numpy.inf,m)
elif estimator[-5:] == ".fits":
p = pyfits.open(estimator)[0].data
mapplot(p,cube,vconv,xtora,ytodec)
def baseline_file(filename,outfilename,vmin=None,vmax=None,order=1,crop=False):
f = pyfits.open(filename)
hdr = f[0].header
cube = f[0].data.squeeze()
dv,v0,p3 = hdr.get('CD3_3'),hdr.get('CRVAL3'),hdr.get('CRPIX3')
dr,r0,p1 = hdr.get('CD1_1'),hdr.get('CRVAL1'),hdr.get('CRPIX1')
dd,d0,p2 = hdr.get('CD2_2'),hdr.get('CRVAL2'),hdr.get('CRPIX2')
if dv is None: dv = hdr.get('CDELT3')
if dr is None: dr = hdr.get('CDELT1')
if dd is None: dd = hdr.get('CDELT2')
vconv = lambda v: (v-p3+1)*dv+v0
varr = vconv(arange(cube.shape[-1]))
if vmin is None: argvmin = None
else: argvmin = argmin(abs(varr-vmin))
if vmax is None: argvmax = None
else: argvmax = argmin(abs(varr-vmax))
bspec,bfit = baseline(cube,vmin=argvmin,vmax=argvmax,order=order)
def baseline(spectrum,xarr=None,xmin='default',xmax='default',order=1,quiet=True,exclude=None,
mask=None):
"""
Subtract a baseline from a spectrum
If xmin,xmax are not specified, defaults to ignoring first and last 10% of spectrum
*unless* order > 1, in which case ignoring the ends tends to cause strange effects
exclude is a set of start/end indices to ignore when baseline fitting
(ignored by setting error to infinite in fitting procedure)
"""
if xmin == 'default':
if order <= 1: xmin = floor( spectrum.shape[-1]*0.1 )
else: xmin = 0
elif xmin is None:
xmin = 0
if xmax == 'default':
if order <= 1: xmax = ceil( spectrum.shape[-1]*0.9 )
else: xmax = spectrum.shape[-1]
elif xmax is None:
xmax = spectrum.shape[-1]
pguess = [1]*(order+1)
if xarr is None:
xarr = indices(spectrum.shape).squeeze()
subxarr = xarr[xmin:xmax]
def mpfitfun(data,err):
def f(p,fjac=None): return [0,numpy.ravel((poly1d(p)(subxarr)-data)/err)]
return f
err = ones(spectrum.shape)
if exclude is not None:
err[exclude[0]:exclude[1]] = 1e10
if mask is not None:
if mask.dtype.name != 'bool': mask = mask.astype('bool')
err[mask] = 1e10
spectrum[mask] = 0
if (spectrum!=spectrum).sum() > 0:
print "There is an error in baseline: some values are NaN"
import pdb; pdb.set_trace()
mp = mpfit(mpfitfun(spectrum[xmin:xmax],err[xmin:xmax]),xall=pguess,quiet=quiet)
fitp = mp.params
bestfit = poly1d(fitp)(xarr).squeeze()
return (spectrum-bestfit),fitp
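# Hedged worked example (synthetic data; the numbers are assumed): subtract a
# linear baseline while masking the channels of a known line.
# >>> chan = numpy.arange(512)
# >>> spec = 0.01*chan + 5.0*numpy.exp(-(chan-256)**2/(2*10.0**2))
# >>> sub, coeffs = baseline(spec, order=1, exclude=[226, 286])
# ``sub`` is the baseline-subtracted spectrum and ``coeffs`` are the poly1d
# coefficients (highest power first), so the fitted baseline is poly1d(coeffs)(chan).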
def open_3d(filename):
f = pyfits.open(filename)
hdr = f[0].header
cube = f[0].data
if len(cube.shape) == 4: cube=cube[0,:,:,:]
#cube = reshape(cube.mean(axis=2).mean(axis=1),[cube.shape[0],1,1])
dv,v0,p3 = hdr.get('CD3_3'),hdr.get('CRVAL3'),hdr.get('CRPIX3')
dr,r0,p1 = hdr.get('CD1_1'),hdr.get('CRVAL1'),hdr.get('CRPIX1')
dd,d0,p2 = hdr.get('CD2_2'),hdr.get('CRVAL2'),hdr.get('CRPIX2')
if dv is None: dv = hdr.get('CDELT3')
if dr is None: dr = hdr.get('CDELT1')
if dd is None: dd = hdr.get('CDELT2')
xtora = lambda x: (x-p1+1)*dr+r0 # convert pixel coordinates to RA/Dec/Velocity
ytodec = lambda y: (y-p2+1)*dd+d0
vconv = lambda v: (v-p3+1)*dv+v0
if hdr.get('CUNIT3') in ['m/s','M/S']:
conversion_factor = 1000.0
xunits = 'km/s' # change to km/s because you're converting units
else:
xunits = hdr.get('CUNIT3')
if xunits in ("hz","Hz"):
print "Converting from Hz to GHz"
xunits = "GHz"
conversion_factor = 1.0e9
else:
conversion_factor = 1.0
units = hdr.get('BUNIT')
return dv,v0,p3,hdr,cube,xtora,ytodec,vconv,xunits,conversion_factor,units
def open_1d(filename,specnum=0,wcstype='',errspecnum=None,maskspecnum=None):
"""
Grabs all the relevant pieces of a 1d spectrum for plotting
wcstype is the suffix on the WCS type to get to velocity/frequency/whatever
"""
f = pyfits.open(filename)
hdr = f[0].header
spec = f[0].data
errspec = None
maskspec = None
if hdr.get('NAXIS') == 2:
if errspecnum is not None:
errspec = spec[errspecnum,:]
if maskspecnum is not None:
maskspec = spec[maskspecnum,:]
if isinstance(specnum,list):
spec = spec[specnum,:].mean(axis=0)
elif isinstance(specnum,int):
spec = spec[specnum,:]
else:
raise TypeError("Specnum is of wrong type (not a list of integers or an integer). Type: %s" %
str(type(specnum)))
elif hdr.get('NAXIS') > 2:
raise ValueError("Too many axes for open_1d (splat_1d) - use cube instead")
if hdr.get('CD1_1'+wcstype):
dv,v0,p3 = hdr['CD1_1'+wcstype],hdr['CRVAL1'+wcstype],hdr['CRPIX1'+wcstype]
else:
dv,v0,p3 = hdr['CDELT1'+wcstype],hdr['CRVAL1'+wcstype],hdr['CRPIX1'+wcstype]
if hdr.get('OBJECT'):
specname = hdr.get('OBJECT')
elif hdr.get('GLON') and hdr.get('GLAT'):
specname = "%s %s" % (hdr.get('GLON'),hdr.get('GLAT'))
else:
specname = filename.rstrip(".fits")
if hdr.get('CUNIT1'+wcstype) in ['m/s','M/S']:
conversion_factor = 1000.0
xunits = 'km/s' # change to km/s because you're converting units
else:
xunits = hdr.get('CUNIT1'+wcstype)
if xunits in ("hz","Hz"):
print "Converting from Hz to GHz"
xunits = "GHz"
conversion_factor = 1.0e9
else:
conversion_factor = 1.0
vconv = lambda v: ((v-p3+1)*dv+v0)/conversion_factor
xtora=None
ytodec=None
units = hdr.get('BUNIT').strip()
if hdr.get('CTYPE1'+wcstype):
xtype = hdr.get('CTYPE1'+wcstype)
else:
xtype = 'VLSR'
if hdr.get('REFFREQ'+wcstype):
reffreq = hdr.get('REFFREQ'+wcstype)
else:
reffreq = None
return dv,v0,p3,conversion_factor,hdr,spec,vconv,xtora,ytodec,specname,units,xunits,errspec,maskspec,reffreq
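# Hedged usage sketch (file name assumed): the long return tuple is normally
# unpacked exactly as splat_1d does below, e.g.
# >>> (dv, v0, p3, conversion_factor, hdr, spec, vconv, xtora, ytodec,
# ...  specname, units, xunits, errspec, maskspec, reffreq) = open_1d('spec.fits')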
def splat_1d(filename=None,vmin=None,vmax=None,button=1,dobaseline=False,
exclude=None,smooth=None,order=1,savepre=None,vcrop=True,
vconv=None,vpars=None,hdr=None,spec=None,xtora=None,ytodec=None,
specname=None,quiet=True,specnum=0,errspecnum=None,wcstype='',
offset=0.0, continuum=0.0, annotatebaseline=False, plotspectrum=True,
smoothto=None, xunits=None, units=None, conversion_factor=None,
smoothtype='gaussian',convmode='valid',maskspecnum=None,**kwargs):
"""
Wrapper for specplotter creation. Works nicely with 1D spectra with well-defined
FITS headers (i.e., CRVAL1, CRPIX1, CDELT1, and optionally CUNIT1 and CTYPE1)
This documentation needs to be updated a lot... I implemented a lot of features
without documenting them, which was a mistake
Inputs:
vmin,vmax - range over which to baseline and plot
exclude - (internal) range to exclude from baseline fit
vcrop - will vmin/vmax crop out data, or just set the plot limits?
"""
if (vpars and vconv and hdr and spec is not None and xtora and ytodec
and units and xunits and conversion_factor):
dv,v0,p3 = vpars
errspec = None
maskspec = None
reffreq = None
if units is None and kwargs.has_key('units'): units = kwargs.pop('units')
else:
dv,v0,p3,conversion_factor,hdr,spec,vconv,xtora,ytodec,specname_file,units,xunits,errspec,maskspec,reffreq = \
open_1d(filename,specnum=specnum,wcstype=wcstype,errspecnum=errspecnum,maskspecnum=maskspecnum)
if specname is None: specname=specname_file
if units is None and kwargs.has_key('units'): units = kwargs.pop('units')
if type(continuum)==type('str'):
if hdr.get(continuum) is not None:
continuum = hdr.get(continuum)
else:
raise ValueError("Continuum specified but none present.")
varr = vconv(arange(spec.shape[0]))
if vmin is None or vcrop==False: argvmin = 0
else:
argvmin = argmin(abs(varr-vmin))
if dv > 0:
hdr.update('CRPIX1'+wcstype,p3-argvmin)
if vmax is None or vcrop==False: argvmax = spec.shape[0]
else:
argvmax = argmin(abs(varr-vmax))
if dv < 0:
hdr.update('CRPIX1'+wcstype,p3-argvmax)
if argvmin > argvmax:
argvmin,argvmax = argvmax,argvmin
#if exclude is not None: exclude = exclude[::-1]
elif argvmin == argvmax:
raise Exception("Error: no data in velocity range %g:%g for source %s."
% (vmin,vmax,filename))
# these lines were meant to automatically put "exclude" into velocity
# units; this is now done in the baseline code
#if exclude is not None:
# exclude[0] = argmin(abs(varr-exclude[0]))
# exclude[1] = argmin(abs(varr-exclude[1]))
# exclude = array(exclude) - argvmin
vconv = lambda v: ((v-p3+argvmin+1)*dv+v0) / conversion_factor
ivconv = lambda V: p3-1-argvmin+(V*conversion_factor-v0)/dv
specplot = spec[argvmin:argvmax]
if errspec is not None: errspec=errspec[argvmin:argvmax]
if maskspec is not None: maskspec=maskspec[argvmin:argvmax]
if smoothto:
smooth = abs(smoothto/dv)
if smooth:
roundsmooth = round(smooth) # can only downsample by integers
# change fitter first
if smoothtype == 'hanning':
specplot = convolve(specplot,hanning(2+roundsmooth)/hanning(2+roundsmooth).sum(),convmode)[::roundsmooth]
kernsize = smooth
ones_sameshape = zeros(smooth+2)
ones_sameshape[1:-1] = 1
elif smoothtype == 'boxcar':
specplot = convolve(specplot,ones(roundsmooth)/float(roundsmooth),convmode)[::roundsmooth]
kernsize = roundsmooth
ones_sameshape = ones(roundsmooth)
elif smoothtype == 'gaussian':
speclen = specplot.shape[0]
xkern = linspace(-1*smooth,smooth,smooth*3)
kernel = exp(-xkern**2/(2*(smooth/sqrt(8*log(2)))**2))
kernel /= kernel.sum()
kernsize = len(kernel)
specplot = convolve(specplot,kernel,convmode)[::roundsmooth]
ones_sameshape = zeros(roundsmooth*3)
ones_sameshape[roundsmooth:-roundsmooth] = 1
if errspec is not None:
errspec = sqrt(convolve(errspec**2,ones_sameshape,convmode)[::roundsmooth]) / float(roundsmooth)
if maskspec is not None:
maskspec = array(convolve(maskspec,ones_sameshape,convmode)[::roundsmooth],dtype='bool')
if maskspec.shape != specplot.shape: import pdb; pdb.set_trace()
# this bit of code may also make sense, but I'm shifting the center pixel instead
# b/c it's easier (?) to deal with velocity range
#v0 += (abs(dv)*smooth - abs(dv))/2.0 # pixel center moves by half the original pixel size
dv *= roundsmooth
if convmode == 'same':
newrefpix = (p3-argvmin)/roundsmooth
elif convmode == 'full':
newrefpix = (p3-0.5-argvmin+kernsize/2.0)/roundsmooth
elif convmode == 'valid':
newrefpix = (p3-0.5-argvmin-kernsize/2.0)/roundsmooth
# this was resolved by advanced guess-and-check
# but also, sort of makes sense: FITS refers to the *center* of a pixel. You want to
# shift 1/2 pixel to the right so that the first pixel goes from 0 to 1
vconv = lambda v: ((v-newrefpix)*dv+v0)/conversion_factor
ivconv = lambda V: newrefpix+(V*conversion_factor-v0)/dv
hdr.update('CRPIX1'+wcstype,newrefpix+1)
hdr.update('CDELT1'+wcstype,dv)
sp = SpecPlotter(specplot, vconv=vconv, xtora=xtora, ytodec=ytodec,
specname=specname, dv=dv/conversion_factor, hdr=hdr, reffreq=reffreq,
errspec=errspec, maskspec=maskspec, xunits=xunits, **kwargs)
if plotspectrum:
sp.plotspec(button=button, cube=False, vmin=vmin, vmax=vmax,
units=units, offset=offset, continuum=continuum,
**kwargs)
if dobaseline:
sp.baseline(exclude=exclude,order=order,quiet=quiet,annotate=annotatebaseline)
if plotspectrum: sp.refresh()
if hdr.get('GLON') and hdr.get('GLAT'):
sp.glon = hdr.get('GLON')
sp.glat = hdr.get('GLAT')
if savepre is not None:
glon,glat = sp.glon,sp.glat
if glat < 0: pm=""
else: pm = "+"
savename = savepre + "G%07.3f%0s%07.3f_" % (glon,pm,glat) + hdr.get('MOLECULE').replace(' ','') + hdr.get('TRANSITI').replace(' ','')
savefig(savename+'.png')
return sp
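# Hedged usage sketch (file name, limits and smoothing values are assumed):
# >>> sp = splat_1d('spectrum.fits', vmin=-50, vmax=50, dobaseline=True,
# ...               order=2, smoothto=0.5, smoothtype='boxcar')
# The returned SpecPlotter exposes .specfit and .baseline for the interactive
# fitting shown earlier in this module.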
def splat_tspec(filename,specnum=0,**kwargs):
"""
Same as splat_1d for tspec data
"""
tdata = pyfits.getdata(filename)
theader = pyfits.getheader(filename)
if len(tdata.shape) == 3:
tdata = tdata[specnum,:,:]
wavelength = tdata[0,:]
spectrum = tdata[1,:]
error = tdata[2,:]
vconv = lambda x: wavelength[x]
ivconv = lambda x: argmin(abs(wavelength-x))
specname='TSPEC'
dv = median(wavelength[1:] - wavelength[:-1])
sp = SpecPlotter(spectrum,vconv=vconv,specname=specname,dv=dv,hdr=theader)
sp.plotspec(cube=False,units=theader.get('YUNITS'),xunits=theader.get('XUNITS'),**kwargs)
return sp
| mit |
lazywei/scikit-learn | examples/decomposition/plot_kernel_pca.py | 353 | 2011 | """
==========
Kernel PCA
==========
This example shows that Kernel PCA is able to find a projection of the data
that makes data linearly separable.
"""
print(__doc__)
# Authors: Mathieu Blondel
# Andreas Mueller
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, KernelPCA
from sklearn.datasets import make_circles
np.random.seed(0)
X, y = make_circles(n_samples=400, factor=.3, noise=.05)
kpca = KernelPCA(kernel="rbf", fit_inverse_transform=True, gamma=10)
X_kpca = kpca.fit_transform(X)
X_back = kpca.inverse_transform(X_kpca)
pca = PCA()
X_pca = pca.fit_transform(X)
# Plot results
plt.figure()
plt.subplot(2, 2, 1, aspect='equal')
plt.title("Original space")
reds = y == 0
blues = y == 1
plt.plot(X[reds, 0], X[reds, 1], "ro")
plt.plot(X[blues, 0], X[blues, 1], "bo")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
X1, X2 = np.meshgrid(np.linspace(-1.5, 1.5, 50), np.linspace(-1.5, 1.5, 50))
X_grid = np.array([np.ravel(X1), np.ravel(X2)]).T
# projection on the first principal component (in the phi space)
Z_grid = kpca.transform(X_grid)[:, 0].reshape(X1.shape)
plt.contour(X1, X2, Z_grid, colors='grey', linewidths=1, origin='lower')
plt.subplot(2, 2, 2, aspect='equal')
plt.plot(X_pca[reds, 0], X_pca[reds, 1], "ro")
plt.plot(X_pca[blues, 0], X_pca[blues, 1], "bo")
plt.title("Projection by PCA")
plt.xlabel("1st principal component")
plt.ylabel("2nd component")
plt.subplot(2, 2, 3, aspect='equal')
plt.plot(X_kpca[reds, 0], X_kpca[reds, 1], "ro")
plt.plot(X_kpca[blues, 0], X_kpca[blues, 1], "bo")
plt.title("Projection by KPCA")
plt.xlabel("1st principal component in space induced by $\phi$")
plt.ylabel("2nd component")
plt.subplot(2, 2, 4, aspect='equal')
plt.plot(X_back[reds, 0], X_back[reds, 1], "ro")
plt.plot(X_back[blues, 0], X_back[blues, 1], "bo")
plt.title("Original space after inverse transform")
plt.xlabel("$x_1$")
plt.ylabel("$x_2$")
plt.subplots_adjust(0.02, 0.10, 0.98, 0.94, 0.04, 0.35)
plt.show()
| bsd-3-clause |
bachiraoun/fullrmc | Constraints/StructureFactorConstraints.py | 1 | 64342 | """
StructureFactorConstraints contains classes for all constraints related to experimental static structure factor functions.
.. inheritance-diagram:: fullrmc.Constraints.StructureFactorConstraints
:parts: 1
"""
# standard libraries imports
from __future__ import print_function
import itertools, re
# external libraries imports
import numpy as np
from pdbparser.Utilities.Database import is_element_property, get_element_property
from pdbparser.Utilities.Collection import get_normalized_weighting
# fullrmc imports
from ..Globals import INT_TYPE, FLOAT_TYPE, PI, PRECISION, LOGGER
from ..Globals import str, long, unicode, bytes, basestring, range, xrange, maxint
from ..Core.Collection import is_number, is_integer, get_path
from ..Core.Collection import reset_if_collected_out_of_date, get_real_elements_weight
from ..Core.Collection import get_caller_frames
from ..Core.Constraint import Constraint, ExperimentalConstraint
from ..Core.pairs_histograms import multiple_pairs_histograms_coords, full_pairs_histograms_coords
class StructureFactorConstraint(ExperimentalConstraint):
"""
Controls the Structure Factor noted as S(Q) and also called
total-scattering structure function or Static Structure Factor.
S(Q) is a dimensionless quantity and normalized such that the average
value :math:`<S(Q)>=1`.
It is worth mentioning that S(Q) is nothing other than the normalized and
corrected powder diffraction pattern, provided all experimental artefacts are removed.
The computation of S(Q) is done through an inverse Sine Fourier transform
of the computed pair distribution function G(r).
.. math::
S(Q) = 1+ \\frac{1}{Q} \\int_{0}^{\\infty} G(r) sin(Qr) dr
From an atomistic model and histogram point of view, G(r) is computed as
follows:
.. math::
G(r) = 4 \\pi r (\\rho_{r} - \\rho_{0})
= 4 \\pi \\rho_{0} r (g(r)-1)
= \\frac{R(r)}{r} - 4 \\pi \\rho_{0}
g(r) is calculated after binning all pair atomic distances into
weighted histograms as follows:
.. math::
g(r) = \\sum \\limits_{i,j}^{N} w_{i,j} \\frac{\\rho_{i,j}(r)}{\\rho_{0}}
= \\sum \\limits_{i,j}^{N} w_{i,j} \\frac{n_{i,j}(r) / v(r)}{N_{i,j} / V}
Where:\n
:math:`Q` is the momentum transfer. \n
:math:`r` is the distance between two atoms. \n
:math:`\\rho_{i,j}(r)` is the pair density function of atoms i and j. \n
:math:`\\rho_{0}` is the average number density of the system. \n
:math:`w_{i,j}` is the relative weighting of atom types i and j. \n
:math:`R(r)` is the radial distribution function (rdf). \n
:math:`N` is the total number of atoms. \n
:math:`V` is the volume of the system. \n
:math:`n_{i,j}(r)` is the number of atoms i neighbouring j at a distance r. \n
:math:`v(r)` is the annulus volume at distance r and of thickness dr. \n
:math:`N_{i,j}` is the total number of atoms i and j in the system. \n
+----------------------------------------------------------------------+
|.. figure:: reduced_structure_factor_constraint_plot_method.png |
| :width: 530px |
| :height: 400px |
| :align: left |
| |
| Reduced structure factor of memory shape Nickel-Titanium alloy. |
+----------------------------------------------------------------------+
:Parameters:
#. experimentalData (numpy.ndarray, string): Experimental data as
numpy.ndarray or string path to load data using numpy.loadtxt
method.
#. dataWeights (None, numpy.ndarray): Weights array of the same number
of points of experimentalData used in the constraint's standard
error computation. Therefore particular fitting emphasis can be
put on different data points that might be considered as more or less
important in order to get a reasonable and plausible model.\n
If None is given, all data points are considered of the same
importance in the computation of the constraint's standard error.\n
If numpy.ndarray is given, all weights must be positive and all
zero-weighted data points won't contribute to the total
constraint's standard error. At least a single weight point is
required to be non-zero and the weights array will be automatically
scaled upon setting such that the sum of all the weights
is equal to the number of data points.
#. weighting (string): The elements weighting scheme. It must be any
atomic attribute (atomicNumber, neutronCohb, neutronIncohb,
neutronCohXs, neutronIncohXs, atomicWeight, covalentRadius) defined
in pdbparser database. In case of xrays or neutrons experimental
weights, one can simply set weighting to 'xrays' or 'neutrons'
and the value will be automatically adjusted to respectively
'atomicNumber' and 'neutronCohb'. If attribute values are
missing in the pdbparser database, atomic weights must be
given in atomsWeight dictionary argument.
#. atomsWeight (None, dict): Atoms weight dictionary where keys are
atoms element and values are custom weights. If None is given
or partially given, missing elements weighting will be fully set
using the given weighting scheme.
#. rmin (None, number): The minimum distance value to compute G(r)
histogram. If None is given, rmin is computed as
:math:`2 \\pi / Q_{max}`.
#. rmax (None, number): The maximum distance value to compute G(r)
histogram. If None is given, rmax is computed as
:math:`2 \\pi / dQ`.
#. dr (None, number): The distance bin value to compute G(r)
histogram. If None is given, bin is computed as
:math:`2 \\pi / (Q_{max}-Q_{min})`.
#. scaleFactor (number): A normalization scale factor used to normalize
the computed data to the experimental ones.
#. adjustScaleFactor (list, tuple): Used to adjust fit or guess
the best scale factor during stochastic engine runtime.
It must be a list of exactly three entries.\n
#. The frequency in number of generated moves of finding the best
scale factor. If 0 frequency is given, it means that the scale
factor is fixed.
#. The minimum allowed scale factor value.
#. The maximum allowed scale factor value.
#. windowFunction (None, numpy.ndarray): The window function to
convolute with the computed pair distribution function of the
system prior to comparing it with the experimental data. In
general, the experimental pair distribution function G(r) shows
artificial wrinkles; the main reason, among others, is that
G(r) is computed by applying a sine Fourier transform to the
experimental structure factor S(q). Therefore window function is
used to best imitate the numerical artefacts in the experimental
data.
#. limits (None, tuple, list): The distance limits to compute the
histograms. If None is given, the limits will be automatically
set to the min and max distance of the experimental data.
Otherwise, a tuple of exactly two items where the first is the
minimum distance or None and the second is the maximum distance
or None.
**NB**: If adjustScaleFactor first item (frequency) is 0, the scale factor
will remain untouched and the limits minimum and maximum won't be checked.
.. code-block:: python
# import fullrmc modules
from fullrmc.Engine import Engine
from fullrmc.Constraints.StructureFactorConstraints import StructureFactorConstraint
# create engine
ENGINE = Engine(path='my_engine.rmc')
# set pdb file
ENGINE.set_pdb('system.pdb')
# create and add constraint
SFC = StructureFactorConstraint(experimentalData="sq.dat", weighting="atomicNumber")
ENGINE.add_constraints(SFC)
"""
def __init__(self, experimentalData, dataWeights=None,
weighting="atomicNumber", atomsWeight=None,
rmin=None, rmax=None, dr=None,
scaleFactor=1.0, adjustScaleFactor=(0, 0.8, 1.2),
windowFunction=None, limits=None):
# initialize variables
self.__experimentalQValues = None
self.__experimentalSF = None
self.__rmin = None
self.__rmax = None
self.__dr = None
self.__minimumDistance = None
self.__maximumDistance = None
self.__bin = None
self.__shellCenters = None
self.__histogramSize = None
self.__shellVolumes = None
self.__Gr2SqMatrix = None
# initialize constraint
super(StructureFactorConstraint, self).__init__( experimentalData=experimentalData, dataWeights=dataWeights, scaleFactor=scaleFactor, adjustScaleFactor=adjustScaleFactor)
# set atomsWeight
self.set_atoms_weight(atomsWeight)
# set elements weighting
self.set_weighting(weighting)
self.__set_weighting_scheme()
# set window function
self.set_window_function(windowFunction)
# set r parameters
self.set_rmin(rmin)
self.set_rmax(rmax)
self.set_dr(dr)
# set frame data
FRAME_DATA = [d for d in self.FRAME_DATA]
FRAME_DATA.extend(['_StructureFactorConstraint__experimentalQValues',
'_StructureFactorConstraint__experimentalSF',
'_StructureFactorConstraint__elementsPairs',
'_StructureFactorConstraint__weightingScheme',
'_StructureFactorConstraint__atomsWeight',
'_StructureFactorConstraint__qmin',
'_StructureFactorConstraint__qmax',
'_StructureFactorConstraint__rmin',
'_StructureFactorConstraint__rmax',
'_StructureFactorConstraint__dr',
'_StructureFactorConstraint__minimumDistance',
'_StructureFactorConstraint__maximumDistance',
'_StructureFactorConstraint__bin',
'_StructureFactorConstraint__shellCenters',
'_StructureFactorConstraint__histogramSize',
'_StructureFactorConstraint__shellVolumes',
'_StructureFactorConstraint__Gr2SqMatrix',
'_StructureFactorConstraint__windowFunction',
'_elementsWeight',] )
RUNTIME_DATA = [d for d in self.RUNTIME_DATA]
RUNTIME_DATA.extend( [] )
object.__setattr__(self, 'FRAME_DATA', tuple(FRAME_DATA) )
object.__setattr__(self, 'RUNTIME_DATA', tuple(RUNTIME_DATA) )
def _codify_update__(self, name='constraint', addDependencies=True):
dependencies = []
code = []
if addDependencies:
code.extend(dependencies)
dw = self.dataWeights
if dw is not None:
dw = list(dw)
code.append("dw = {dw}".format(dw=dw))
wf = self.windowFunction
if isinstance(wf, np.ndarray):
code.append("wf = np.array({wf})".format(wf=list(wf)))
else:
code.append("wf = {wf}".format(wf=wf))
code.append("{name}.set_used({val})".format(name=name, val=self.used))
code.append("{name}.set_scale_factor({val})".format(name=name, val=self.scaleFactor))
code.append("{name}.set_adjust_scale_factor({val})".format(name=name, val=self.adjustScaleFactor))
code.append("{name}.set_data_weights(dw)".format(name=name))
code.append("{name}.set_atoms_weight({val})".format(name=name, val=self.atomsWeight))
code.append("{name}.set_window_function(wf)".format(name=name))
code.append("{name}.set_rmin({val})".format(name=name, val=self.rmin))
code.append("{name}.set_rmax({val})".format(name=name, val=self.rmax))
code.append("{name}.set_dr({val})".format(name=name, val=self.dr))
code.append("{name}.set_limits({val})".format(name=name, val=self.limits))
# return
return dependencies, '\n'.join(code)
def _codify__(self, engine, name='constraint', addDependencies=True):
assert isinstance(name, basestring), LOGGER.error("name must be a string")
assert re.match('[a-zA-Z_][a-zA-Z0-9_]*$', name) is not None, LOGGER.error("given name '%s' can't be used as a variable name"%name)
klass = self.__class__.__name__
dependencies = ['import numpy as np','from fullrmc.Constraints import StructureFactorConstraints']
code = []
if addDependencies:
code.extend(dependencies)
x = list(self.experimentalData[:,0])
y = list(self.experimentalData[:,1])
code.append("x = {x}".format(x=x))
code.append("y = {y}".format(y=y))
code.append("d = np.transpose([x,y]).astype(np.float32)")
dw = self.dataWeights
if dw is not None:
dw = list(dw)
code.append("dw = {dw}".format(dw=dw))
wf = self.windowFunction
if isinstance(wf, np.ndarray):
code.append("wf = np.array({wf})".format(wf=list(wf)))
else:
code.append("wf = {wf}".format(wf=wf))
code.append("{name} = {klass}s.{klass}\
(experimentalData=d, dataWeights=dw, weighting='{weighting}', atomsWeight={atomsWeight}, \
rmin={rmin}, rmax={rmax}, dr={dr}, scaleFactor={scaleFactor}, adjustScaleFactor={adjustScaleFactor}, \
windowFunction=wf, limits={limits})".format(name=name, klass=klass,
weighting=self.weighting, atomsWeight=self.atomsWeight, rmin=self.rmin,
rmax=self.rmax, dr=self.dr, scaleFactor=self.scaleFactor,
adjustScaleFactor=self.adjustScaleFactor, limits=self.limits))
code.append("{engine}.add_constraints([{name}])".format(engine=engine, name=name))
# return
return dependencies, '\n'.join(code)
#def __getstate__(self):
# # make sure that __Gr2SqMatrix is not pickled but saved to the disk as None
# state = super(StructureFactorConstraint, self).__getstate__()
# state["_StructureFactorConstraint__Gr2SqMatrix"] = None
# return state
#
#def __setstate__(self, state):
# # make sure to regenerate G(r) to S(q) matrix at loading time
# self.__dict__.update( state )
# self.__set_Gr_2_Sq_matrix()
#
def __set_Gr_2_Sq_matrix(self):
if self.__experimentalQValues is None or self.__shellCenters is None:
self.__Gr2SqMatrix = None
else:
Qs = self.__experimentalQValues
Rs = self.__shellCenters
dr = self.__shellCenters[1]-self.__shellCenters[0]
qr = Rs.reshape((-1,1))*(np.ones((len(Rs),1), dtype=FLOAT_TYPE)*Qs)
sinqr = np.sin(qr)
sinqr_q = sinqr/Qs
self.__Gr2SqMatrix = dr*sinqr_q
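# Note added for clarity (not in the original source): the matrix built above
# discretizes the sine Fourier transform quoted in the class docstring,
# S(Q) = 1 + (1/Q) * integral of G(r)*sin(Q*r) dr, so that a model structure
# factor can later be evaluated as 1 + numpy.dot(G, self.__Gr2SqMatrix) for a
# G(r) sampled at the shell centers.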
def __set_weighting_scheme(self):
if self.engine is not None:
self.__elementsPairs = sorted(itertools.combinations_with_replacement(self.engine.elements,2))
#elementsWeight = dict([(el,float(get_element_property(el,self.__weighting))) for el in self.engine.elements])
#self._elementsWeight = dict([(el,self.__atomsWeight.get(el, float(get_element_property(el,self.__weighting)))) for el in self.engine.elements])
self._elementsWeight = get_real_elements_weight(elements=self.engine.elements, weightsDict=self.__atomsWeight, weighting=self.__weighting)
self.__weightingScheme = get_normalized_weighting(numbers=self.engine.numberOfAtomsPerElement, weights=self._elementsWeight)
for k in self.__weightingScheme:
self.__weightingScheme[k] = FLOAT_TYPE(self.__weightingScheme[k])
else:
self.__elementsPairs = None
self.__weightingScheme = None
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__elementsPairs' : self.__elementsPairs,
'_StructureFactorConstraint__weightingScheme': self.__weightingScheme})
def __set_histogram(self):
if self.__minimumDistance is None or self.__maximumDistance is None or self.__bin is None:
self.__shellCenters = None
self.__histogramSize = None
self.__shellVolumes = None
else:
# compute edges
if self.engine is not None and self.rmax is None:
minHalfBox = np.min( [np.linalg.norm(v)/2. for v in self.engine.basisVectors])
self.__edges = np.arange(self.__minimumDistance,minHalfBox, self.__bin).astype(FLOAT_TYPE)
else:
self.__edges = np.arange(self.__minimumDistance, self.__maximumDistance+self.__bin, self.__bin).astype(FLOAT_TYPE)
# adjust rmin and rmax
self.__minimumDistance = self.__edges[0]
self.__maximumDistance = self.__edges[-1]
# compute shellCenters
self.__shellCenters = (self.__edges[0:-1]+self.__edges[1:])/FLOAT_TYPE(2.)
# set histogram size
self.__histogramSize = INT_TYPE( len(self.__edges)-1 )
# set shell centers and volumes
self.__shellVolumes = FLOAT_TYPE(4.0/3.)*PI*((self.__edges[1:])**3 - self.__edges[0:-1]**3)
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__minimumDistance': self.__minimumDistance,
'_StructureFactorConstraint__maximumDistance': self.__maximumDistance,
'_StructureFactorConstraint__shellCenters' : self.__shellCenters,
'_StructureFactorConstraint__histogramSize' : self.__histogramSize,
'_StructureFactorConstraint__shellVolumes' : self.__shellVolumes})
# reset constraint
self.reset_constraint()
# reset sq matrix
self.__set_Gr_2_Sq_matrix()
def _on_collector_reset(self):
pass
@property
def rmin(self):
""" Histogram minimum distance. """
return self.__rmin
@property
def rmax(self):
""" Histogram maximum distance. """
return self.__rmax
@property
def dr(self):
""" Histogram bin size."""
return self.__dr
@property
def bin(self):
""" Computed histogram distance bin size."""
return self.__bin
@property
def minimumDistance(self):
""" Computed histogram minimum distance. """
return self.__minimumDistance
@property
def maximumDistance(self):
""" Computed histogram maximum distance. """
return self.__maximumDistance
@property
def qmin(self):
""" Experimental data reciprocal distances minimum. """
return self.__qmin
@property
def qmax(self):
""" Experimental data reciprocal distances maximum. """
return self.__qmax
@property
def dq(self):
""" Experimental data reciprocal distances bin size. """
return self.__experimentalQValues[1]-self.__experimentalQValues[0]
@property
def experimentalQValues(self):
""" Experimental data used q values. """
return self.__experimentalQValues
@property
def histogramSize(self):
""" Histogram size"""
return self.__histogramSize
@property
def shellCenters(self):
""" Shells center array"""
return self.__shellCenters
@property
def shellVolumes(self):
""" Shells volume array"""
return self.__shellVolumes
@property
def experimentalSF(self):
""" Experimental Structure Factor or S(q)"""
return self.__experimentalSF
@property
def elementsPairs(self):
""" Elements pairs """
return self.__elementsPairs
@property
def atomsWeight(self):
"""Custom atoms weight"""
return self.__atomsWeight
@property
def weighting(self):
""" Elements weighting definition. """
return self.__weighting
@property
def weightingScheme(self):
""" Elements weighting scheme. """
return self.__weightingScheme
@property
def windowFunction(self):
""" Convolution window function. """
return self.__windowFunction
@property
def Gr2SqMatrix(self):
""" G(r) to S(q) transformation matrix."""
return self.__Gr2SqMatrix
@property
def _experimentalX(self):
"""For internal use only to interface
ExperimentalConstraint.get_constraints_properties"""
return self.__experimentalQValues
@property
def _experimentalY(self):
"""For internal use only to interface
ExperimentalConstraint.get_constraints_properties"""
return self.__experimentalSF
@property
def _modelX(self):
"""For internal use only to interface
ExperimentalConstraint.get_constraints_properties"""
return self.__experimentalQValues
def listen(self, message, argument=None):
"""
Listens to any message sent from the Broadcaster.
:Parameters:
#. message (object): Any python object to send to constraint's
listen method.
#. argument (object): Any type of argument to pass to the
listeners.
"""
if message in ("engine set","update pdb","update molecules indexes","update elements indexes","update names indexes"):
self.__set_weighting_scheme()
# reset histogram
if self.engine is not None:
self.__set_histogram()
self.reset_constraint() # ADDED 2017-JAN-08
elif message in("update boundary conditions",):
self.reset_constraint()
def set_rmin(self, rmin):
"""
Set rmin value.
:Parameters:
#. rmin (None, number): The minimum distance value to compute G(r)
histogram. If None is given, rmin is computed as
:math:`2 \\pi / Q_{max}`.
"""
if rmin is None:
minimumDistance = FLOAT_TYPE( 2.*PI/self.__qmax )
else:
assert is_number(rmin), LOGGER.error("rmin must be None or a number")
minimumDistance = FLOAT_TYPE(rmin)
if self.__maximumDistance is not None:
assert minimumDistance<self.__maximumDistance, LOGGER.error("rmin must be smaller than rmax %s"%self.__maximumDistance)
self.__rmin = rmin
self.__minimumDistance = minimumDistance
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__rmin': self.__rmin,
'_StructureFactorConstraint__minimumDistance': self.__minimumDistance})
# reset histogram
self.__set_histogram()
def set_rmax(self, rmax):
"""
Set rmax value.
:Parameters:
#. rmax (None, number): The maximum distance value to compute G(r)
histogram. If None is given, rmax is computed as
:math:`2 \\pi / dQ`.
"""
if rmax is None:
dq = self.__experimentalQValues[1]-self.__experimentalQValues[0]
maximumDistance = FLOAT_TYPE( 2.*PI/dq )
else:
assert is_number(rmax), LOGGER.error("rmax must be None or a number")
maximumDistance = FLOAT_TYPE(rmax)
if self.__minimumDistance is not None:
assert maximumDistance>self.__minimumDistance, LOGGER.error("rmax must be bigger than rmin %s"%self.__minimumDistance)
self.__rmax = rmax
self.__maximumDistance = maximumDistance
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__rmax': self.__rmax,
'_StructureFactorConstraint__maximumDistance': self.__maximumDistance})
# reset histogram
self.__set_histogram()
def set_dr(self, dr):
"""
Set dr value.
:Parameters:
#. dr (None, number): The distance bin value to compute G(r)
histogram. If None is given, bin is computed as
:math:`2 \\pi / Q_{max}`.
"""
if dr is None:
bin = 2.*PI/self.__qmax
rbin = round(bin,1)
if rbin>bin:
rbin -= 0.1
bin = FLOAT_TYPE( rbin )
else:
assert is_number(dr), LOGGER.error("dr must be None or a number")
bin = FLOAT_TYPE(dr)
self.__dr = dr
self.__bin = bin
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__dr': self.__dr,
'_StructureFactorConstraint__bin': self.__bin})
# reset histogram
self.__set_histogram()
def set_weighting(self, weighting):
"""
Set elements weighting. It must be a valid entry of pdbparser atom's
database.
:Parameters:
#. weighting (string): The elements weighting scheme. It must be
any atomic attribute (atomicNumber, neutronCohb, neutronIncohb,
neutronCohXs, neutronIncohXs, atomicWeight, covalentRadius)
defined in pdbparser database. In case of xrays or neutrons
experimental weights, one can simply set weighting to 'xrays'
or 'neutrons' and the value will be automatically adjusted to
respectively 'atomicNumber' and 'neutronCohb'. If attribute
values are missing in the pdbparser database, atomic weights
must be given in atomsWeight dictionary argument.
"""
if weighting.lower() in ["xrays","x-rays","xray","x-ray"]:
LOGGER.fixed("'%s' weighting is set to atomicNumber"%weighting)
weighting = "atomicNumber"
elif weighting.lower() in ["neutron","neutrons"]:
LOGGER.fixed("'%s' weighting is set to neutronCohb"%weighting)
weighting = "neutronCohb"
assert is_element_property(weighting),LOGGER.error( "weighting is not a valid pdbparser atoms database entry")
assert weighting != "atomicFormFactor", LOGGER.error("atomicFormFactor weighting is not allowed")
self.__weighting = weighting
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__weighting': self.__weighting})
def set_atoms_weight(self, atomsWeight):
"""
Set custom atoms weight. This is the way to set atoms weights
different from the given weighting scheme.
:Parameters:
#. atomsWeight (None, dict): Atoms weight dictionary where keys are
atoms element and values are custom weights. If None is given
or partially given, missing elements weights will be set using the
given weighting scheme.
"""
if atomsWeight is None:
AW = {}
else:
assert isinstance(atomsWeight, dict),LOGGER.error("atomsWeight must be None or a dictionary")
AW = {}
for k in atomsWeight:
assert isinstance(k, basestring),LOGGER.error("atomsWeight keys must be strings")
try:
val = float(atomsWeight[k])
except:
raise LOGGER.error( "atomsWeight values must be numerical")
AW[k]=val
# set atomsWeight
self.__atomsWeight = AW
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__atomsWeight': self.__atomsWeight})
def set_window_function(self, windowFunction):
"""
Set convolution window function.
:Parameters:
#. windowFunction (None, numpy.ndarray): The window function to
convolute with the computed pair distribution function of the
system prior to comparing it with the experimental data. In
general, the experimental pair distribution function G(r) shows
artificial wrinkles; the main reason, among others, is that
G(r) is computed by applying a sine Fourier transform to the
experimental structure factor S(q). Therefore a window function is
used to best imitate the numerical artefacts in the experimental
data.
"""
if windowFunction is not None:
assert isinstance(windowFunction, np.ndarray), LOGGER.error("windowFunction must be a numpy.ndarray")
assert windowFunction.dtype.type is FLOAT_TYPE, LOGGER.error("windowFunction type must be %s"%FLOAT_TYPE)
assert len(windowFunction.shape) == 1, LOGGER.error("windowFunction must be of dimension 1")
assert len(windowFunction) <= self.experimentalData.shape[0], LOGGER.error("windowFunction length must be smaller than experimental data")
# normalize window function
windowFunction /= np.sum(windowFunction)
# check window size
# set windowFunction
self.__windowFunction = windowFunction
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__windowFunction': self.__windowFunction})
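# Illustrative sketch (not part of the original API): a window function is
# just a 1D numpy array of FLOAT_TYPE no longer than the experimental data,
# e.g. a normalized Lanczos-like window built as
#     window = np.sinc(np.linspace(-1, 1, 15)).astype(FLOAT_TYPE)
#     constraint.set_window_function(window)
# where `constraint` is a hypothetical StructureFactorConstraint instance;
# set_window_function normalizes the array to unit sum before storing it.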
def set_experimental_data(self, experimentalData):
"""
Set constraint's experimental data.
:Parameters:
#. experimentalData (numpy.ndarray, string): The experimental
data as numpy.ndarray or string path to load data using
numpy.loadtxt function.
"""
# get experimental data
super(StructureFactorConstraint, self).set_experimental_data(experimentalData=experimentalData)
# set limits
self.set_limits(self.limits)
def set_limits(self, limits):
"""
Set the reciprocal distance limits (qmin, qmax).
:Parameters:
#. limits (None, tuple, list): Distance limits to bound
experimental data and compute histograms.
If None is given, the limits will be automatically set to
min and max reciprocal distance recorded in experimental data.
If given, a tuple of minimum reciprocal distance (qmin) or None
and maximum reciprocal distance (qmax) or None should be given.
"""
self._ExperimentalConstraint__set_limits(limits)
# set qvalues
self.__experimentalQValues = self.experimentalData[self.limitsIndexStart:self.limitsIndexEnd+1,0].astype(FLOAT_TYPE)
self.__experimentalSF = self.experimentalData[self.limitsIndexStart:self.limitsIndexEnd+1,1].astype(FLOAT_TYPE)
# set qmin and qmax
self.__qmin = self.__experimentalQValues[0]
self.__qmax = self.__experimentalQValues[-1]
assert self.__qmin>0, LOGGER.error("qmin must be bigger than 0. Experimental null q values are ambiguous. Try setting limits.")
# dump to repository
self._dump_to_repository({'_StructureFactorConstraint__experimentalQValues': self.__experimentalQValues,
'_StructureFactorConstraint__experimentalSF' : self.__experimentalSF,
'_StructureFactorConstraint__qmin' : self.__qmin,
'_StructureFactorConstraint__qmax' : self.__qmax})
# set used dataWeights
self._set_used_data_weights(limitsIndexStart=self.limitsIndexStart, limitsIndexEnd=self.limitsIndexEnd)
# reset constraint
self.reset_constraint()
# reset sq matrix
self.__set_Gr_2_Sq_matrix()
def update_standard_error(self):
""" Compute and set constraint's standardError."""
# set standardError
totalSQ = self.get_constraint_value()["total_no_window"]
self.set_standard_error(self.compute_standard_error(modelData = totalSQ))
def check_experimental_data(self, experimentalData):
"""
Check whether experimental data is correct.
:Parameters:
#. experimentalData (object): The experimental data to check.
:Returns:
#. result (boolean): Whether it is correct or not.
#. message (str): Checking message that explains what's wrong
with the given data.
"""
if not isinstance(experimentalData, np.ndarray):
return False, "experimentalData must be a numpy.ndarray"
if experimentalData.dtype.type is not FLOAT_TYPE:
return False, "experimentalData type must be %s"%FLOAT_TYPE
if len(experimentalData.shape) !=2:
return False, "experimentalData must be of dimension 2"
if experimentalData.shape[1] !=2:
return False, "experimentalData must have only 2 columns"
# check distances order
inOrder = (np.array(sorted(experimentalData[:,0]), dtype=FLOAT_TYPE)-experimentalData[:,0])<=PRECISION
if not np.all(inOrder):
return False, "experimentalData distances are not sorted in order"
if experimentalData[0][0]<0:
return False, "experimentalData distances min value is found negative"
# data format is correct
return True, ""
def compute_standard_error(self, modelData):
"""
Compute the standard error (StdErr) as the sum of squared deviations
between model computed data and the experimental ones.
.. math::
StdErr = \\sum \\limits_{i}^{N} W_{i}(Y(X_{i})-F(X_{i}))^{2}
Where:\n
:math:`N` is the total number of experimental data points. \n
:math:`W_{i}` is the data point weight. It becomes equivalent to 1 when dataWeights is set to None. \n
:math:`Y(X_{i})` is the experimental data point :math:`X_{i}`. \n
:math:`F(X_{i})` is the computed from the model data :math:`X_{i}`. \n
:Parameters:
#. modelData (numpy.ndarray): The data to compare with the
experimental one and compute the squared deviation.
:Returns:
#. standardError (number): The calculated constraint's
standardError.
"""
# compute difference
diff = self.__experimentalSF-modelData
# return standard error
if self._usedDataWeights is None:
return np.add.reduce((diff)**2)
else:
return np.add.reduce(self._usedDataWeights*((diff)**2))
def _get_Sq_from_Gr(self, Gr):
return np.sum(Gr.reshape((-1,1))*self.__Gr2SqMatrix, axis=0)+1
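# The one-liner above evaluates the discretized sine Fourier transform as a
# matrix product: Gr (one value per r shell) is broadcast against the
# Gr2SqMatrix columns and summed over the r-shell axis, yielding S(q) on the
# experimental q grid. The trailing +1 shifts the normalization so that
# S(q) -> 1 at large q; the ReducedStructureFactorConstraint subclass
# overrides this method without the shift since its S(q) is normalized to 0.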
def _apply_scale_factor(self, Sq, scaleFactor):
if scaleFactor != 1:
Sq = scaleFactor*(Sq-1) + 1
return Sq
def __get_total_Sq(self, data, rho0):
"""This method is created just to speed up the computation of
the total Sq upon fitting."""
Gr = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
for pair in self.__elementsPairs:
# get weighting scheme
wij = self.__weightingScheme.get(pair[0]+"-"+pair[1], None)
if wij is None:
wij = self.__weightingScheme[pair[1]+"-"+pair[0]]
# get number of atoms per element
ni = self.engine.numberOfAtomsPerElement[pair[0]]
nj = self.engine.numberOfAtomsPerElement[pair[1]]
# get index of element
idi = self.engine.elements.index(pair[0])
idj = self.engine.elements.index(pair[1])
# get Nij
if idi == idj:
Nij = ni*(ni-1)/2.0
Dij = Nij/self.engine.volume
nij = data["intra"][idi,idj,:]+data["inter"][idi,idj,:]
Gr += wij*nij/Dij
else:
Nij = ni*nj
Dij = Nij/self.engine.volume
nij = data["intra"][idi,idj,:]+data["intra"][idj,idi,:] + data["inter"][idi,idj,:]+data["inter"][idj,idi,:]
Gr += wij*nij/Dij
# Divide by shell volumes
Gr /= self.shellVolumes
# compute total G(r)
#rho0 = (self.engine.numberOfAtoms/self.engine.volume).astype(FLOAT_TYPE)
Gr = (FLOAT_TYPE(4.)*PI*self.__shellCenters*rho0)*(Gr-1)
# Compute S(q) from G(r)
Sq = self._get_Sq_from_Gr(Gr)
# Multiply by scale factor
self._fittedScaleFactor = self.get_adjusted_scale_factor(self.__experimentalSF, Sq, self._usedDataWeights)
# apply scale factor
Sq = self._apply_scale_factor(Sq, self._fittedScaleFactor)
# apply multiframe prior and weight
Sq = self._apply_multiframe_prior(Sq)
# convolve total with window function
if self.__windowFunction is not None:
Sq = np.convolve(Sq, self.__windowFunction, 'same')
return Sq
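# Summary of the pipeline implemented above: pair histograms -> g(r) per
# element pair weighted by the weighting scheme -> total G(r) via
# 4*pi*r*rho0*(g(r)-1) -> S(q) through the G(r)-to-S(q) matrix -> scale
# factor, multiframe prior and optional window convolution. The same
# sequence is repeated in _get_constraint_value below, which additionally
# keeps the per-pair intra/inter partials.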
def get_adjusted_scale_factor(self, experimentalData, modelData, dataWeights):
"""Overload to reduce S(q) prior to fitting scale factor.
S(q) -> 1 at high q and this will create a wrong scale factor.
Overloading could be avoided but it is kept for performance reasons.
"""
SF = self.scaleFactor
# check to update scaleFactor
if self.adjustScaleFactorFrequency:
if not self.engine.accepted%self.adjustScaleFactorFrequency:
SF = self.fit_scale_factor(experimentalData-1, modelData-1, dataWeights)
return SF
def _get_constraint_value(self, data, applyMultiframePrior=True):
# http://erice2011.docking.org/upload/Other/Billinge_PDF/03-ReadingMaterial/BillingePDF2011.pdf page 6
#import time
#startTime = time.clock()
output = {}
for pair in self.__elementsPairs:
output["sf_intra_%s-%s" % pair] = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
output["sf_inter_%s-%s" % pair] = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
output["sf_total_%s-%s" % pair] = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
gr = np.zeros(self.__histogramSize, dtype=FLOAT_TYPE)
for pair in self.__elementsPairs:
# get weighting scheme
wij = self.__weightingScheme.get(pair[0]+"-"+pair[1], None)
if wij is None:
wij = self.__weightingScheme[pair[1]+"-"+pair[0]]
# get number of atoms per element
ni = self.engine.numberOfAtomsPerElement[pair[0]]
nj = self.engine.numberOfAtomsPerElement[pair[1]]
# get index of element
idi = self.engine.elements.index(pair[0])
idj = self.engine.elements.index(pair[1])
# get Nij
if idi == idj:
Nij = ni*(ni-1)/2.0
output["sf_intra_%s-%s" % pair] += data["intra"][idi,idj,:]
output["sf_inter_%s-%s" % pair] += data["inter"][idi,idj,:]
else:
Nij = ni*nj
output["sf_intra_%s-%s" % pair] += data["intra"][idi,idj,:] + data["intra"][idj,idi,:]
output["sf_inter_%s-%s" % pair] += data["inter"][idi,idj,:] + data["inter"][idj,idi,:]
# compute g(r)
nij = output["sf_intra_%s-%s" % pair] + output["sf_inter_%s-%s" % pair]
dij = nij/self.__shellVolumes
Dij = Nij/self.engine.volume
gr += wij*dij/Dij
# calculate intensityFactor
intensityFactor = (self.engine.volume*wij)/(Nij*self.__shellVolumes)
# divide by factor
output["sf_intra_%s-%s" % pair] *= intensityFactor
output["sf_inter_%s-%s" % pair] *= intensityFactor
output["sf_total_%s-%s" % pair] = output["sf_intra_%s-%s" % pair] + output["sf_inter_%s-%s" % pair]
# Compute S(q) from G(r)
output["sf_intra_%s-%s" % pair] = self._get_Sq_from_Gr(output["sf_intra_%s-%s" % pair])
output["sf_inter_%s-%s" % pair] = self._get_Sq_from_Gr(output["sf_inter_%s-%s" % pair])
output["sf_total_%s-%s" % pair] = self._get_Sq_from_Gr(output["sf_total_%s-%s" % pair])
# compute total G(r)
rho0 = (self.engine.numberOfAtoms/self.engine.volume).astype(FLOAT_TYPE)
Gr = (FLOAT_TYPE(4.)*PI*self.__shellCenters*rho0) * (gr-1)
# Compute S(q) from G(r)
Sq = self._get_Sq_from_Gr(Gr)
# multiply by scale factor
output["total_no_window"] = self._apply_scale_factor(Sq, self._fittedScaleFactor)
# apply multiframe prior and weight
if applyMultiframePrior:
output["total_no_window"] = self._apply_multiframe_prior(output["total_no_window"])
# convolve total with window function
if self.__windowFunction is not None:
output["total"] = np.convolve(output["total_no_window"], self.__windowFunction, 'same').astype(FLOAT_TYPE)
else:
output["total"] = output["total_no_window"]
return output
def get_constraint_value(self, applyMultiframePrior=True):
"""
Compute all partial Structure Factor (SQs).
:Parameters:
#. applyMultiframePrior (boolean): Whether to apply subframe weight
and prior to the total. This will only have an effect when the used
frame is a subframe and when subframe weight and prior are
defined.
:Returns:
#. SQs (dictionary): The SQs dictionary, where keys are the
element wise intra and inter molecular SQs and values are
the computed SQs.
"""
if self.data is None:
LOGGER.warn("data must be computed first using 'compute_data' method.")
return {}
return self._get_constraint_value(self.data, applyMultiframePrior=applyMultiframePrior)
def get_constraint_original_value(self):
"""
Compute all partial Pair Distribution Functions (PDFs).
:Returns:
#. PDFs (dictionary): The PDFs dictionary, where keys are the
element wise intra and inter molecular PDFs and values are the
computed PDFs.
"""
if self.originalData is None:
LOGGER.warn("originalData must be computed first using 'compute_data' method.")
return {}
return self._get_constraint_value(self.originalData)
@reset_if_collected_out_of_date
def compute_data(self, update=True):
""" Compute constraint's data.
:Parameters:
#. update (boolean): whether to update constraint data and
standard error with new computation. If data is computed and
updated by another thread or process while the stochastic
engine is running, this might lead to a state alteration of
the constraint, which will lead to no additional accepted
moves in the run.
:Returns:
#. data (dict): constraint data dictionary
#. standardError (float): constraint standard error
"""
intra,inter = full_pairs_histograms_coords( boxCoords = self.engine.boxCoordinates,
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex,
elementIndex = self.engine.elementsIndex,
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
ncores = self.engine._runtime_ncores )
# create data and compute standard error
data = {"intra":intra, "inter":inter}
totalSQ = self.__get_total_Sq(data, rho0=self.engine.numberDensity)
stdError = self.compute_standard_error(modelData = totalSQ)
# update
if update:
self.set_data(data)
self.set_active_atoms_data_before_move(None)
self.set_active_atoms_data_after_move(None)
self.set_standard_error(stdError)
# set original data
if self.originalData is None:
self._set_original_data(self.data)
# return
return data, stdError
def compute_before_move(self, realIndexes, relativeIndexes):
"""
Compute constraint before move is executed
:Parameters:
#. realIndexes (numpy.ndarray): Not used here.
#. relativeIndexes (numpy.ndarray): Group atoms relative index
the move will be applied to.
"""
intraM,interM = multiple_pairs_histograms_coords( indexes = relativeIndexes,
boxCoords = self.engine.boxCoordinates,
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex,
elementIndex = self.engine.elementsIndex,
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
allAtoms = True,
ncores = self.engine._runtime_ncores )
intraF,interF = full_pairs_histograms_coords( boxCoords = self.engine.boxCoordinates[relativeIndexes],
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex[relativeIndexes],
elementIndex = self.engine.elementsIndex[relativeIndexes],
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
ncores = self.engine._runtime_ncores )
self.set_active_atoms_data_before_move( {"intra":intraM-intraF, "inter":interM-interF} )
self.set_active_atoms_data_after_move(None)
def compute_after_move(self, realIndexes, relativeIndexes, movedBoxCoordinates):
"""
Compute constraint after move is executed
:Parameters:
#. realIndexes (numpy.ndarray): Not used here.
#. relativeIndexes (numpy.ndarray): Group atoms relative index
the move will be applied to.
#. movedBoxCoordinates (numpy.ndarray): The moved atoms new coordinates.
"""
# change coordinates temporarily
boxData = np.array(self.engine.boxCoordinates[relativeIndexes], dtype=FLOAT_TYPE)
self.engine.boxCoordinates[relativeIndexes] = movedBoxCoordinates
# calculate pair distribution function
intraM,interM = multiple_pairs_histograms_coords( indexes = relativeIndexes,
boxCoords = self.engine.boxCoordinates,
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex,
elementIndex = self.engine.elementsIndex,
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
allAtoms = True,
ncores = self.engine._runtime_ncores )
intraF,interF = full_pairs_histograms_coords( boxCoords = self.engine.boxCoordinates[relativeIndexes],
basis = self.engine.basisVectors,
isPBC = self.engine.isPBC,
moleculeIndex = self.engine.moleculesIndex[relativeIndexes],
elementIndex = self.engine.elementsIndex[relativeIndexes],
numberOfElements = self.engine.numberOfElements,
minDistance = self.__minimumDistance,
maxDistance = self.__maximumDistance,
histSize = self.__histogramSize,
bin = self.__bin,
ncores = self.engine._runtime_ncores )
# set active atoms data
self.set_active_atoms_data_after_move( {"intra":intraM-intraF, "inter":interM-interF} )
# reset coordinates
self.engine.boxCoordinates[relativeIndexes] = boxData
# compute standardError after move
dataIntra = self.data["intra"]-self.activeAtomsDataBeforeMove["intra"]+self.activeAtomsDataAfterMove["intra"]
dataInter = self.data["inter"]-self.activeAtomsDataBeforeMove["inter"]+self.activeAtomsDataAfterMove["inter"]
totalSQ = self.__get_total_Sq({"intra":dataIntra, "inter":dataInter}, rho0=self.engine.numberDensity)
self.set_after_move_standard_error( self.compute_standard_error(modelData = totalSQ) )
# increment tried
self.increment_tried()
def accept_move(self, realIndexes, relativeIndexes):
"""
Accept move
:Parameters:
#. realIndexes (numpy.ndarray): Not used here.
#. relativeIndexes (numpy.ndarray): Not used here.
"""
dataIntra = self.data["intra"]-self.activeAtomsDataBeforeMove["intra"]+self.activeAtomsDataAfterMove["intra"]
dataInter = self.data["inter"]-self.activeAtomsDataBeforeMove["inter"]+self.activeAtomsDataAfterMove["inter"]
# change permanently _data
self.set_data( {"intra":dataIntra, "inter":dataInter} )
# reset activeAtoms data
self.set_active_atoms_data_before_move(None)
self.set_active_atoms_data_after_move(None)
# update standardError
self.set_standard_error( self.afterMoveStandardError )
self.set_after_move_standard_error( None )
# set new scale factor
self._set_fitted_scale_factor_value(self._fittedScaleFactor)
# increment accepted
self.increment_accepted()
def reject_move(self, realIndexes, relativeIndexes):
"""
Reject move
:Parameters:
#. realIndexes (numpy.ndarray): Not used here.
#. relativeIndexes (numpy.ndarray): Not used here.
"""
# reset activeAtoms data
self.set_active_atoms_data_before_move(None)
self.set_active_atoms_data_after_move(None)
# update standardError
self.set_after_move_standard_error( None )
def compute_as_if_amputated(self, realIndex, relativeIndex):
"""
Compute and return constraint's data and standard error as if
given atom is amputated.
:Parameters:
#. realIndex (numpy.ndarray): Atom's index as a numpy array
of a single element.
#. relativeIndex (numpy.ndarray): Atom's relative index as a
numpy array of a single element.
"""
# compute data
self.compute_before_move(realIndexes=realIndex, relativeIndexes=relativeIndex)
dataIntra = self.data["intra"]-self.activeAtomsDataBeforeMove["intra"]
dataInter = self.data["inter"]-self.activeAtomsDataBeforeMove["inter"]
data = {"intra":dataIntra, "inter":dataInter}
# temporarily adjust self.__weightingScheme
weightingScheme = self.__weightingScheme
relativeIndex = relativeIndex[0]
selectedElement = self.engine.allElements[relativeIndex]
self.engine.numberOfAtomsPerElement[selectedElement] -= 1
self.__weightingScheme = get_normalized_weighting(numbers=self.engine.numberOfAtomsPerElement, weights=self._elementsWeight )
for k in self.__weightingScheme:
self.__weightingScheme[k] = FLOAT_TYPE(self.__weightingScheme[k])
## END OF ADDED 08 FEB 2017
# compute standard error
if not self.engine._RT_moveGenerator.allowFittingScaleFactor:
SF = self.adjustScaleFactorFrequency
self._set_adjust_scale_factor_frequency(0)
rho0 = ((self.engine.numberOfAtoms-1)/self.engine.volume).astype(FLOAT_TYPE)
totalSQ = self.__get_total_Sq(data, rho0=rho0)
standardError = self.compute_standard_error(modelData = totalSQ)
if not self.engine._RT_moveGenerator.allowFittingScaleFactor:
self._set_adjust_scale_factor_frequency(SF)
# reset activeAtoms data
self.set_active_atoms_data_before_move(None)
# set amputation
self.set_amputation_data( {'data':data, 'weightingScheme':self.__weightingScheme} )
# compute standard error
self.set_amputation_standard_error( standardError )
# reset weightingScheme and number of atoms per element
self.__weightingScheme = weightingScheme
self.engine.numberOfAtomsPerElement[selectedElement] += 1
def accept_amputation(self, realIndex, relativeIndex):
"""
Accept amputated atom and set constraint's data and standard error accordingly.
:Parameters:
#. realIndex (numpy.ndarray): Not used here.
#. relativeIndex (numpy.ndarray): Not used here.
"""
#self.set_data( self.amputationData ) ## COMMENTED 08 FEB 2017
self.set_data( self.amputationData['data'] )
self.__weightingScheme = self.amputationData['weightingScheme']
self.set_standard_error( self.amputationStandardError )
self.set_amputation_data( None )
self.set_amputation_standard_error( None )
# set new scale factor
self._set_fitted_scale_factor_value(self._fittedScaleFactor)
def reject_amputation(self, realIndex, relativeIndex):
"""
Reject amputated atom and set constraint's data and standard
error accordingly.
:Parameters:
#. realIndex (numpy.ndarray): Not used here.
#. relativeIndex (numpy.ndarray): Not used here.
"""
self.set_amputation_data( None )
self.set_amputation_standard_error( None )
def _on_collector_collect_atom(self, realIndex):
pass
def _on_collector_release_atom(self, realIndex):
pass
def _constraint_copy_needs_lut(self):
return {'_StructureFactorConstraint__elementsPairs' :'_StructureFactorConstraint__elementsPairs',
'_StructureFactorConstraint__histogramSize' :'_StructureFactorConstraint__histogramSize',
'_StructureFactorConstraint__weightingScheme' :'_StructureFactorConstraint__weightingScheme',
'_StructureFactorConstraint__shellVolumes' :'_StructureFactorConstraint__shellVolumes',
'_StructureFactorConstraint__shellCenters' :'_StructureFactorConstraint__shellCenters',
'_StructureFactorConstraint__windowFunction' :'_StructureFactorConstraint__windowFunction',
'_StructureFactorConstraint__experimentalQValues' :'_StructureFactorConstraint__experimentalQValues',
'_StructureFactorConstraint__experimentalSF' :'_StructureFactorConstraint__experimentalSF',
'_StructureFactorConstraint__Gr2SqMatrix' :'_StructureFactorConstraint__Gr2SqMatrix',
'_StructureFactorConstraint__minimumDistance' :'_StructureFactorConstraint__minimumDistance',
'_StructureFactorConstraint__maximumDistance' :'_StructureFactorConstraint__maximumDistance',
'_StructureFactorConstraint__bin' :'_StructureFactorConstraint__bin',
'_ExperimentalConstraint__scaleFactor' :'_ExperimentalConstraint__scaleFactor',
'_ExperimentalConstraint__dataWeights' :'_ExperimentalConstraint__dataWeights',
'_ExperimentalConstraint__multiframePrior' :'_ExperimentalConstraint__multiframePrior',
'_ExperimentalConstraint__multiframeWeight' :'_ExperimentalConstraint__multiframeWeight',
'_ExperimentalConstraint__limits' :'_ExperimentalConstraint__limits',
'_ExperimentalConstraint__limitsIndexStart' :'_ExperimentalConstraint__limitsIndexStart',
'_ExperimentalConstraint__limitsIndexEnd' :'_ExperimentalConstraint__limitsIndexEnd',
'_Constraint__used' :'_Constraint__used',
'_Constraint__data' :'_Constraint__data',
'_Constraint__state' :'_Constraint__state',
'_Constraint__standardError' :'_Constraint__standardError',
'_fittedScaleFactor' :'_fittedScaleFactor',
'_usedDataWeights' :'_usedDataWeights',
'_Engine__state' :'_Engine__state',
'_Engine__boxCoordinates' :'_Engine__boxCoordinates',
'_Engine__basisVectors' :'_Engine__basisVectors',
'_Engine__isPBC' :'_Engine__isPBC',
'_Engine__moleculesIndex' :'_Engine__moleculesIndex',
'_Engine__elementsIndex' :'_Engine__elementsIndex',
'_Engine__numberOfAtomsPerElement' :'_Engine__numberOfAtomsPerElement',
'_Engine__elements' :'_Engine__elements',
'_Engine__numberDensity' :'_Engine__numberDensity',
'_Engine__volume' :'_Engine__volume',
'_Engine__realCoordinates' :'_Engine__realCoordinates',
'_atomsCollector' :'_atomsCollector',
('engine','_atomsCollector') :'_atomsCollector',
}
def plot(self, xlabelParams={'xlabel':'$Q(\\AA^{-1})$', 'size':10},
ylabelParams={'ylabel':'$S(Q)$', 'size':10},
**kwargs):
"""
Alias to ExperimentalConstraint.plot with additional parameters
:Additional/Adjusted Parameters:
#. xlabelParams (None, dict): modified matplotlib.axes.Axes.set_xlabel
parameters.
#. ylabelParams (None, dict): modified matplotlib.axes.Axes.set_ylabel
parameters.
"""
return super(StructureFactorConstraint, self).plot(xlabelParams= xlabelParams,
ylabelParams= ylabelParams,
**kwargs)
class ReducedStructureFactorConstraint(StructureFactorConstraint):
"""
The Reduced Structure Factor, which we will also denote S(Q),
is exactly the same quantity as the Structure Factor but with
the slight difference that it is normalized to 0 rather than 1,
and therefore :math:`<S(Q)>=0`.
The computation of S(Q) is done through a sine inverse Fourier transform
of the computed pair distribution function, denoted G(r).
.. math::
S(Q) = \\frac{1}{Q} \\int_{0}^{\\infty} G(r) sin(Qr) dr
The only reason the Reduced Structure Factor is implemented is that
many experimental data are treated in this form, and it is convenient
not to manipulate the experimental data every time.
"""
def _get_Sq_from_Gr(self, Gr):
return np.sum(Gr.reshape((-1,1))*self.Gr2SqMatrix, axis=0)
def _apply_scale_factor(self, Sq, scaleFactor):
if scaleFactor != 1:
Sq = scaleFactor*Sq
return Sq
def get_adjusted_scale_factor(self, experimentalData, modelData, dataWeights):
""" dummy overload that does exactly the same thing
"""
SF = self.scaleFactor
# check to update scaleFactor
if self.adjustScaleFactorFrequency:
if not self.engine.accepted%self.adjustScaleFactorFrequency:
SF = self.fit_scale_factor(experimentalData, modelData, dataWeights)
return SF
def plot(self, xlabelParams={'xlabel':'$Q(\\AA^{-1})$', 'size':10},
ylabelParams={'ylabel':'$S(Q)-1$', 'size':10},
**kwargs):
"""
Alias to ExperimentalConstraint.plot with additional parameters
:Additional/Adjusted Parameters:
#. xlabelParams (None, dict): modified matplotlib.axes.Axes.set_xlabel
parameters.
#. ylabelParams (None, dict): modified matplotlib.axes.Axes.set_ylabel
parameters.
"""
return super(StructureFactorConstraint, self).plot(xlabelParams= xlabelParams,
ylabelParams= ylabelParams,
**kwargs)
| agpl-3.0 |
asdf123101/HDPG1D | hdpg1d/adaptation.py | 1 | 8070 | import numpy as np
from numpy import concatenate as cat
from scipy.sparse import csr_matrix
import scipy.sparse.linalg as spla
from copy import copy
import matplotlib.pyplot as plt
import warnings
from .preprocess import shape, discretization, boundaryCondition
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
# suppress the deprecation warning
warnings.filterwarnings("ignore", ".*GUI is implemented.*")
class hdpg1d(object):
"""
1D HDG solver
"""
def __init__(self, coeff):
self.numEle = coeff.numEle
self.numBasisFuncs = coeff.pOrder + 1
self.coeff = coeff
self.mesh = np.linspace(0, 1, self.numEle + 1)
self.enrichOrder = 1
self.primalSoln = None
self.adjointSoln = None
self.estErrorList = [[], []]
self.trueErrorList = [[], []]
def separateSoln(self, soln):
"""Separate gradState (q and u), stateFace from the given soln"""
gradState, stateFace = np.split(
soln, [len(soln) - self.numEle + 1])
return gradState, stateFace
def plotState(self, counter):
"""Plot solution u with smooth higher oredr quadrature"""
stateSmooth = np.array([])
stateNode = np.zeros(self.numEle + 1)
xSmooth = np.array([])
gradState, _ = self.separateSoln(self.primalSoln)
halfLenState = int(len(gradState) / 2)
state = gradState[halfLenState:2 * halfLenState]
# quadrature rule
gorder = 10 * self.numBasisFuncs
xi, wi = np.polynomial.legendre.leggauss(gorder)
shp, shpx = shape(xi, self.numBasisFuncs)
for j in range(1, self.numEle + 1):
xSmooth = np.hstack((xSmooth, (self.mesh[(j - 1)] + self.mesh[j]) / 2 + (
self.mesh[j] - self.mesh[j - 1]) / 2 * xi))
stateSmooth = np.hstack(
(stateSmooth, shp.T.dot(state[(j - 1) * self.numBasisFuncs:j * self.numBasisFuncs])))
stateNode[j - 1] = state[(j - 1) * self.numBasisFuncs]
stateNode[-1] = state[-1]
plt.figure(1)
plt.plot(xSmooth, stateSmooth, '-', color='C3')
plt.plot(self.mesh, stateNode, 'C3.')
plt.xlabel('$x$', fontsize=17)
plt.ylabel('$u$', fontsize=17)
# plt.axis([-0.05, 1.05, 0, 1.3])
plt.grid()
plt.pause(5e-1)
def meshAdapt(self, index):
"""Given the index list, adapt the mesh"""
inValue = np.zeros(len(index))
for i in np.arange(len(index)):
inValue[i] = (self.mesh[index[i]] +
self.mesh[index[i] - 1]) / 2
self.mesh = np.sort(np.insert(self.mesh, 0, inValue))
def solvePrimal(self):
"""Solve the primal problem"""
if 'matLocal' in locals():
# if matLocal exists,
# only change the mesh instead of initializing again
matLocal.mesh = self.mesh
else:
matLocal = discretization(self.coeff, self.mesh)
matGroup = matLocal.matGroup()
A, B, _, C, D, E, F, G, H, L, R = matGroup
# solve by exploiting the local global separation
K = -cat((C.T, G), axis=1)\
.dot(np.linalg.inv(np.bmat([[A, -B], [B.T, D]]))
.dot(cat((C, E)))) + H
sK = csr_matrix(K)
F_hat = np.array([L]).T - cat((C.T, G), axis=1)\
.dot(np.linalg.inv(np.bmat([[A, -B], [B.T, D]])))\
.dot(np.array([cat((R, F))]).T)
def invRHS(vec):
"""Construct preconditioner"""
matVec = spla.spsolve(sK, vec)
return matVec
n = len(F_hat)
preconditioner = spla.LinearOperator((n, n), invRHS)
stateFace = spla.gmres(sK, F_hat, M=preconditioner)[0]
# stateFace = np.linalg.solve(K, F_hat)
gradState = np.linalg.inv(np.asarray(np.bmat([[A, -B], [B.T, D]]))).dot(
cat((R, F)) - cat((C, E)).dot(stateFace))
self.primalSoln = cat((gradState, stateFace))
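# The solve above uses static condensation: the element-local block
# [[A, -B], [B.T, D]] acting on (q, u) is eliminated first, leaving the
# condensed face system K*stateFace = F_hat with
# K = H - [C.T G] [[A, -B], [B.T, D]]^-1 [C; E]. Since GMRES is
# preconditioned with a sparse direct solve of K itself, it effectively acts
# as a direct solver here; (q, u) are then recovered element by element.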
def solveAdjoint(self):
"""Solve the adjoint problem"""
# solve in the enriched space
_coeff = copy(self.coeff)
_coeff.pOrder = _coeff.pOrder + 1
if 'matAdjoint' in locals():
matAdjoint.mesh = self.mesh
else:
matAdjoint = discretization(_coeff, self.mesh)
matGroup = matAdjoint.matGroup()
A, B, _, C, D, E, F, G, H, L, R = matGroup
# add adjoint LHS conditions
F = np.zeros(len(F))
R[-1] = -boundaryCondition('adjoint')[1]
# assemble global matrix LHS
LHS = np.bmat([[A, -B, C],
[B.T, D, E],
[C.T, G, H]])
sLHS = csr_matrix(LHS)
RHS = cat((R, F, L))
# solve in one shot using GMRES
def invRHS(vec):
"""Construct preconditioner"""
matVec = spla.spsolve(sLHS, vec)
return matVec
n = len(RHS)
preconditioner = spla.LinearOperator((n, n), invRHS)
soln = spla.gmres(sLHS, RHS, M=preconditioner)[0]
# soln = np.linalg.solve(LHS.T, RHS)
self.adjointSoln = soln
def DWResidual(self):
if 'matResidual' in locals():
matResidual.mesh = self.mesh
else:
matResidual = discretization(
self.coeff, self.mesh, self.enrichOrder)
matGroup = matResidual.matGroup()
A, B, BonQ, C, D, E, F, G, H, L, R = matGroup
LHS = np.bmat([[A, -B, C],
[BonQ, D, E]])
RHS = cat((R, F))
residual = np.zeros(self.numEle)
numEnrich = self.numBasisFuncs + self.enrichOrder
adjointGradState, adjointStateFace = self.separateSoln(
self.adjointSoln)
for i in np.arange(self.numEle):
primalResidual = (LHS.dot(self.primalSoln) - RHS).A1
uLength = self.numEle * numEnrich
stepLength = i * numEnrich
uDWR = primalResidual[stepLength:stepLength + numEnrich].dot(
(1 - adjointGradState)[stepLength:stepLength + numEnrich])
qDWR = primalResidual[uLength + stepLength:uLength +
stepLength + numEnrich]\
.dot((1 - adjointGradState)[uLength + stepLength:uLength +
stepLength + numEnrich])
residual[i] = uDWR + qDWR
# sort residual index
residualIndex = np.argsort(np.abs(residual))
# select top \theta% elements with the largest error
theta = 0.15
refineIndex = residualIndex[
int(self.numEle * (1 - theta)):len(residual)] + 1
return np.abs(np.sum(residual)), refineIndex
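# Dual-weighted residual (DWR) marking: the primal residual of the enriched
# discretization is weighted by (1 - adjoint solution) separately for the u
# and q blocks of each element, elements are ranked by |residual|, and the
# indices of the top theta = 15% (offset by one to match meshAdapt's element
# numbering) are returned for bisection. The absolute value of the summed
# residuals serves as the global error estimate driving the adaptive loop.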
def adaptive(self):
TOL = self.coeff.TOL
estError = 10
nodeCount = 0
maxCount = self.coeff.MAXIT
while estError > TOL and nodeCount < maxCount:
# solve
self.solvePrimal()
self.solveAdjoint()
# plot the solution at certain counter
if nodeCount in [0, 4, 9, 19, maxCount]:
plt.clf()
self.plotState(nodeCount)
# record error
self.trueErrorList[0].append(self.numEle)
self.trueErrorList[1].append(
self.primalSoln[self.numEle * self.numBasisFuncs - 1])
estError, index = self.DWResidual()
self.estErrorList[0].append(self.numEle)
self.estErrorList[1].append(estError)
# adapt
index = index.tolist()
self.meshAdapt(index)
self.numEle = self.numEle + len(index)
nodeCount += 1
print("Iteration {}. Estimated target function error {:.3e}."
.format(nodeCount, estError))
if nodeCount == maxCount:
print("Max iteration number is reached "
"while the convergence criterion is not satisfied.\n"
"Check the problem statement or "
"raise the max iteration number, then try again.\n")
| mit |
apdjustino/DRCOG_Urbansim | src/opus_gui/results_manager/run/indicator_framework/visualizer/visualizers/matplotlib_lorenzcurve.py | 1 | 10890 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
import os, re, sys, time, traceback
from copy import copy
from opus_gui.results_manager.run.indicator_framework.visualizer.visualizers.abstract_visualization import Visualization
from opus_core.logger import logger
from numpy import array, arange
from numpy import ones, zeros, hstack, vstack
from numpy import trapz, trim_zeros
from pylab import subplot, plot, show
from pylab import xlabel, ylabel, title, text
from pylab import MultipleLocator, FormatStrFormatter
from pylab import savefig, clf, close
class LorenzCurve(Visualization):
def __init__(self, source_data, dataset_name,
attribute = None,
years = None, operation = None, name = None, scale = None,
storage_location = None):
Visualization.__init__(self, source_data, dataset_name, [attribute],
years, operation, name,
storage_location)
self._values = None
self._ginicoeff = None
def is_single_year_indicator_image_type(self):
return True
def get_file_extension(self):
return 'png'
def get_visualization_shorthand(self):
return 'lorenzcurve'
def get_additional_metadata(self):
return {}
def _create_indicator(self, year):
"""Create a Lorenz Curve for the given indicator,
save it to the cache directory's 'indicators' sub-directory.
"""
attribute_short = self.get_attribute_alias(attribute = self.attributes[0],
year = year)
title = attribute_short + ' ' + str(year)
if self.run_description is not None:
title += '\n' + self.run_description
# Do calculation
# Make fresh copy with dtype float64 to avoid overflows
self._values = array(self._get_indicator(year, wrap = False).astype('float64'))
self._compute_lorenz()
file_path = self.get_file_path(year = year)
self._plot(attribute_short, file_path );
return file_path
def _compute_lorenz(self ):
''' Do the lorenz curve computation and save the result in the corresponding
class variables
'''
self._values.sort()
#remove 0 values from array
self._values = trim_zeros(self._values,'f')
num_values = self._values.size
F = arange(1, num_values + 1, 1, "float64")/num_values
L = self._values.cumsum(dtype="float64")/sum(self._values)
# Add (0, 0) as the first point for completeness (e.g. plotting)
origin = array([[0], [0]])
self._values = vstack((F, L))
self._values = hstack((origin, self._values))
# This is the simple form of (0.5 - integral) / 0.5
self._ginicoeff = 1 - 2 * trapz(self._values[1], self._values[0])
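# Worked example (mirrors the unit tests below): for incomes [1, 1, 2, 3, 4, 5]
# the cumulative population shares F are [0, 1/6, 2/6, 3/6, 4/6, 5/6, 1] and
# the cumulative income shares L are [0, 1/16, 2/16, 4/16, 7/16, 11/16, 1],
# giving a Gini coefficient of 1 - 2*trapz(L, F) = 0.3125.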
def _plot(self, attribute_name, file_path=None ):
clf() # Clear existing plot
a = self._values[0] * 100
b = self._values[1] * 100
ax = subplot(111)
plot(a, a, 'k--', a, b, 'r')
ax.set_ylim([0,100])
ax.grid(color='0.5', linestyle=':', linewidth=0.5)
xlabel('population')
ylabel(attribute_name)
title('Lorenz curve')
font = {'fontname' : 'Courier',
'color' : 'r',
'fontweight' : 'bold',
'fontsize' : 11
}
box = { 'pad' : 6,
'facecolor' : 'w',
'linewidth' : 1,
'fill' : True
}
text(5, 90, 'Gini coefficient: %(gini)f' % {'gini' : self._ginicoeff}, font, color='k', bbox=box )
majorLocator = MultipleLocator(20)
majorFormatter = FormatStrFormatter('%d %%')
minorLocator = MultipleLocator(5)
ax.xaxis.set_major_locator( majorLocator )
ax.xaxis.set_major_formatter( majorFormatter)
ax.xaxis.set_minor_locator( minorLocator )
ax.yaxis.set_major_locator( majorLocator )
ax.yaxis.set_major_formatter( majorFormatter)
ax.yaxis.set_minor_locator( minorLocator )
if file_path:
savefig(file_path)
close()
else:
show()
import os
from opus_core.tests import opus_unittest
from numpy import allclose
from opus_gui.results_manager.run.indicator_framework.test_classes.abstract_indicator_test import AbstractIndicatorTest
class Tests(AbstractIndicatorTest):
def skip_test_create_indicator(self):
indicator_path = os.path.join(self.temp_cache_path, 'indicators')
self.assert_(not os.path.exists(indicator_path))
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
lorenzcurve.create(False)
self.assert_(os.path.exists(indicator_path))
self.assert_(os.path.exists(os.path.join(indicator_path, 'test__lorenzcurve__attribute__1980.png')))
def skip_test_perfect_equality(self):
"""Perfect equality is when everybody has the same amount of something"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = ones(100)
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
wanted_result = vstack((arange(0, 101) / 100., arange(0, 101) / 100.))
self.assert_(allclose(lorenzcurve._values, wanted_result))
def skip_test_perfect_inequality(self):
"""Perfect inequality is when one person has all of something"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = zeros(100)
incomes[0] = 42
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
#We strip all the zero values, so the result consists of only two values
wanted_result = [[0.,1.],[0.,1.]]
self.assert_(allclose(lorenzcurve._values, wanted_result))
def skip_test_small_lorenz(self):
"""Test case for less than 100 people"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = array([1, 1, 2, 3, 4, 5])
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
wanted_result = array(
[[ 0, 1/6., 2/6., 3/6., 4/6., 5/6., 6/6. ],
[ 0, 1/16., 2/16., 4/16., 7/16., 11/16., 16/16. ]])
self.assert_(allclose(lorenzcurve._values, wanted_result))
def skip_test_small_gini(self):
"""Test case for gini coefficient for the small case"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = array([1, 1, 2, 3, 4, 5])
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
self.assertAlmostEqual(lorenzcurve._ginicoeff, 0.3125)
def skip_test_large_lorenz(self):
"""Test case for more than 100 people"""
lorenzcurve = LorenzCurve(
source_data = self.source_data,
attribute = 'opus_core.test.attribute',
dataset_name = 'test',
years = None
)
incomes = array([731, 700, 619, 450, 419, 512, 232, 266, 131, 188,
498, 293, 935, 177, 160, 380, 538, 783, 256, 280,
731, 362, 870, 970, 674, 211, 524, 207, 513, 461,
280, 275, 410, 282, 144, 682, 573, 252, 382, 909,
719, 666, 236, 636, 628, 542, 630, 484, 629, 974,
747, 509, 281, 725, 377, 565, 495, 840, 391, 191,
929, 679, 217, 179, 336, 562, 293, 881, 271, 172,
426, 697, 293, 576, 203, 390, 522, 948, 312, 491,
531, 959, 646, 495, 306, 631, 722, 322, 876, 586,
316, 124, 796, 250, 456, 112, 661, 294, 749, 619,
134, 582, 996, 413, 421, 219, 796, 923, 832, 557])
lorenzcurve._values = incomes
lorenzcurve._compute_lorenz()
wanted_result_F = arange(0, 111) / 110.
wanted_result_L = array([ 0, 0.00202803, 0.00427335, 0.00664542, 0.00907181, 0.01167928,
0.01457647, 0.01769094, 0.02089595, 0.02413718, 0.02754138,
0.03099989, 0.0346757 , 0.03842393, 0.04224459, 0.0461739 ,
0.05013943, 0.05434035, 0.0586137 , 0.06314055, 0.06770362,
0.07233912, 0.07715569, 0.0820628 , 0.08704234, 0.09211241,
0.09718249, 0.10227067, 0.10737696, 0.11268243, 0.1179879 ,
0.12329338, 0.12861696, 0.13415782, 0.13980734, 0.14552928,
0.15135987, 0.15744396, 0.16399884, 0.17082534, 0.17770615,
0.18462318, 0.19168508, 0.19876507, 0.20618911, 0.21366748,
0.22125448, 0.2288777 , 0.23659146, 0.2447398 , 0.25299678,
0.26134429, 0.27010828, 0.27899902, 0.28796219, 0.29692536,
0.30594285, 0.31515953, 0.32443052, 0.33371962, 0.34317169,
0.35265998, 0.36227502, 0.3720168 , 0.38183102, 0.39191685,
0.40209322, 0.41232391, 0.42269945, 0.43312932, 0.44366784,
0.45427878, 0.46548727, 0.47669576, 0.48806721, 0.49945678,
0.51086445, 0.52229023, 0.53380654, 0.54550393, 0.55747293,
0.56953247, 0.58173686, 0.5940318 , 0.60638105, 0.61900192,
0.63167711, 0.64469634, 0.65776989, 0.67089777, 0.68413428,
0.6973708 , 0.71089704, 0.72445949, 0.7386376 , 0.7530511 ,
0.7674646 , 0.78252997, 0.79774019, 0.81349364, 0.82935574,
0.84530837, 0.86176801, 0.87848115, 0.89530294, 0.91223337,
0.9293992 , 0.94676421, 0.9643284 , 0.98196502, 1. ])
self.assert_(allclose(lorenzcurve._values, vstack((wanted_result_F, wanted_result_L))))
if __name__ == '__main__':
try:
import matplotlib
except:
print 'could not import matplotlib'
else:
opus_unittest.main()
| agpl-3.0 |
IshankGulati/scikit-learn | benchmarks/bench_plot_svd.py | 72 | 2914 | """Benchmarks of Singular Value Decomposition (Exact and Approximate)
The data is mostly low rank but is a fat infinite tail.
"""
import gc
from time import time
import numpy as np
from collections import defaultdict
import six
from scipy.linalg import svd
from sklearn.utils.extmath import randomized_svd
from sklearn.datasets.samples_generator import make_low_rank_matrix
def compute_bench(samples_range, features_range, n_iter=3, rank=50):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
X = make_low_rank_matrix(n_samples, n_features,
effective_rank=rank,
tail_strength=0.2)
gc.collect()
print("benchmarking scipy svd: ")
tstart = time()
svd(X, full_matrices=False)
results['scipy svd'].append(time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=0")
tstart = time()
randomized_svd(X, rank, n_iter=0)
results['scikit-learn randomized_svd (n_iter=0)'].append(
time() - tstart)
gc.collect()
print("benchmarking scikit-learn randomized_svd: n_iter=%d "
% n_iter)
tstart = time()
randomized_svd(X, rank, n_iter=n_iter)
results['scikit-learn randomized_svd (n_iter=%d)'
% n_iter].append(time() - tstart)
return results
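# The benchmark above compares the exact LAPACK SVD against randomized_svd
# truncated at `rank` components: n_iter=0 is a plain random-projection
# sketch, while extra power iterations (n_iter=3 here) spend additional
# passes over the data to sharpen accuracy when the spectrum has a fat tail,
# as produced by make_low_rank_matrix with tail_strength=0.2.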
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(2, 1000, 4).astype(np.int)
features_range = np.linspace(2, 1000, 4).astype(np.int)
results = compute_bench(samples_range, features_range)
label = 'scikit-learn singular value decomposition benchmark results'
fig = plt.figure(label)
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbg', sorted(six.iteritems(results))):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.legend()
plt.show()
| bsd-3-clause |
yancz1989/cancer | utilities.py | 1 | 4491 | import SimpleITK as sitk
import numpy as np
import csv
import os
import json
from PIL import Image
import matplotlib.pyplot as plt
from cv2 import imread, imwrite
def load_itk_image(filename):
itkimage = sitk.ReadImage(filename)
numpyImage = sitk.GetArrayFromImage(itkimage)
numpyOrigin = np.array(list(reversed(itkimage.GetOrigin())))
numpySpacing = np.array(list(reversed(itkimage.GetSpacing())))
return numpyImage, numpyOrigin, numpySpacing
def readCSV(filename):
lines = []
with open(filename, "rb") as f:
csvreader = csv.reader(f)
for line in csvreader:
lines.append(line)
return lines
def voxel_2_world(voxel_coord, itkimage):
world_coord = list(reversed(
itkimage.TransformContinuousIndexToPhysicalPoint(list(reversed(voxel_coord)))))
return world_coord
def voxelCoordToWorld(voxelCoord, origin, spacing):
stretchedVoxelCoord = voxelCoord * spacing
worldCoord = stretchedVoxelCoord + origin
return worldCoord
def worldToVoxelCoord(worldCoord, origin, spacing):
stretchedVoxelCoord = np.absolute(worldCoord - origin)
voxelCoord = stretchedVoxelCoord / spacing
return voxelCoord
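# voxelCoordToWorld and worldToVoxelCoord are intended as inverses of each
# other: world = voxel*spacing + origin and voxel = |world - origin|/spacing.
# For points inside the scan volume worldCoord >= origin (voxel indices and
# spacings are non-negative), so the absolute value is effectively a no-op.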
def normalizePlanes(npzarray):
maxHU = 400.
minHU = -1000.
npzarray = (npzarray - minHU) / (maxHU - minHU)
npzarray[npzarray > 1] = 1.
npzarray[npzarray < 0] = 0.
return npzarray
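# normalizePlanes linearly maps Hounsfield units from [-1000, 400] to [0, 1]
# and clips everything outside; e.g. water at 0 HU maps to 1000/1400 ~ 0.71,
# air (-1000 HU) to 0.0, and dense bone (> 400 HU) saturates at 1.0.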
def readFileNameMap(map_filename):
file_map = {}
with open(map_filename) as map_file:
file_name_list = json.load(map_file)
for it in file_name_list:
file_map[it['ID_name']] = it['long_name']
return file_map
def parse_image_file(filename):
cols = filename.split("-")
subset = cols[0]
key = cols[1]
z_axis = int(cols[2])
return key, subset[:subset.index('/')], z_axis
def filterBoxes(boxes, threshold):
filtered_boxes = []
for box in boxes:
if box[4] >= threshold:
filtered_boxes.append(box)
return filtered_boxes
def readResultMap(result_filename, file_map, threshold):
result_map = {}
with open(result_filename) as result_file:
result_list = json.load(result_file)
for it in result_list:
filename = it['file']
key = file_map[filename]
key = os.path.splitext(key)[0]
boxes = it['box']
boxes = filterBoxes(boxes, threshold)
if not result_map.get(key):
result_map[key] = []
cols = filename.split('_')
index = int(cols[2])
result_map[key].append((index, boxes))
for key, val in result_map.iteritems():
val.sort()
return result_map
def readImageMap(filename):
lines = readCSV(filename)
result = {}
for line in lines[1:]:
worldCoord = np.asarray(
[float(line[3]), float(line[2]), float(line[1])])
radius = float(line[4]) / 2.0 + 1.0
if not result.get(line[0]):
result[line[0]] = []
result[line[0]].append((worldCoord, radius))
return result
def trans(boxes, H, confs, thr = -1.0):
gw = H['grid_width']
gh = H['grid_height']
cell_pix_size = H['region_size']
rnnl = H['rnn_len']
ncls = H['num_classes']
boxes = np.reshape(boxes, (-1, gh, gw, rnnl, 4))
confs = np.reshape(confs, (-1, gh, gw, rnnl, ncls))
ret = []
for i in range(rnnl):
for y in range(gh):
for x in range(gw):
if np.max(confs[0, y, x, i, 1:]) > thr:
box = boxes[0, y, x, i, :]
abs_cx = int(box[0]) + cell_pix_size/2 + cell_pix_size * x
abs_cy = int(box[1]) + cell_pix_size/2 + cell_pix_size * y
w = box[2]
h = box[3]
ret.append([abs_cx, abs_cy, w, h, np.max(confs[0, y, x, i, 1: ])])
return np.array(ret)
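# trans decodes the detector output back to pixel space: boxes/confidences
# are reshaped to the (grid_height, grid_width, rnn_len) cell layout, each
# cell's (x, y) offset is converted to an absolute centre using the cell
# pixel size, and only candidates whose best non-background confidence
# exceeds `thr` are kept as [cx, cy, w, h, score] rows.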
def split(meta_root, samples):
np.random.seed(2012310818)
l = len(samples)
idxes = np.random.permutation(np.arange(l))
train = [samples[i] for i in idxes[0 : int(l * 0.7)]]
vals = [samples[i] for i in idxes[int(l * 0.7) : ]]
with open(meta_root + 'train.json', 'w') as g:
json.dump(train, g)
with open(meta_root + 'vals.json', 'w') as g:
json.dump(vals, g)
def writeCSV(filename, lines):
with open(filename, "wb") as f:
csvwriter = csv.writer(f)
csvwriter.writerows(lines)
def tryFloat(value):
try:
value = float(value)
except:
value = value
return value
def getColumn(lines, columnid, elementType=''):
column = []
for line in lines:
try:
value = line[columnid]
except:
continue
if elementType == 'float':
value = tryFloat(value)
column.append(value)
return column
def mkdir(d):
if not os.path.exists(d):
os.mkdir(d)
| mit |
mjudsp/Tsallis | examples/tree/plot_tree_regression.py | 95 | 1516 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with decision tree.
The :ref:`decision trees <tree>` is
used to fit a sine curve with addition noisy observation. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn overly fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
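# Every 5th of the 80 targets (16 points) is perturbed with uniform noise of
# amplitude up to 1.5, which the deeper tree below ends up fitting.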
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="darkorange", label="data")
plt.plot(X_test, y_1, color="cornflowerblue", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, color="yellowgreen", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
legacysurvey/pipeline | py/obiwan/decals_sim_randoms.py | 2 | 11774 | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import os
import pickle
def add_scatter(ax,x,y,c='b',m='o',lab='',s=80,drawln=False,alpha=1):
ax.scatter(x,y, s=s, lw=2.,facecolors='none',edgecolors=c, marker=m,label=lab,alpha=alpha)
if drawln: ax.plot(x,y, c=c,ls='-')
def draw_unit_sphere(ramin=243.,ramax=246.,dcmin=7.,dcmax=10.,Nran=216000,seed=2015):
'''# from https://github.com/desihub/imaginglss/master/scripts/imglss-mpi-make-random.py'''
rng = np.random.RandomState(seed)
u1,u2= rng.uniform(size=(2, Nran))
#
cmin = np.sin(dcmin*np.pi/180)
cmax = np.sin(dcmax*np.pi/180)
#
RA = ramin + u1*(ramax-ramin)
DEC = 90-np.arccos(cmin+u2*(cmax-cmin))*180./np.pi
return RA,DEC
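# Sampling sin(DEC) uniformly between sin(dcmin) and sin(dcmax) (the arccos
# construction above) makes the points uniform in area on the sphere;
# sampling DEC itself uniformly would over-populate high declinations
# relative to their sky area.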
class QuickRandoms(object):
'''Draw randomly from unit sphere
Example:
qran= QuickRandoms(ramin=243.,ramax=246.,dcmin=7.,dcmax=10.,Nran=216000)
qran.get_randoms()
# save and plot
qran.save_randoms()
qran.plot(xlim=(244.,244.1),ylim=(8.,8.1)) #,xlim=(244.,244.+10./360),ylim=(8.,8.+10./360.))
'''
def __init__(self,ramin=243.,ramax=246.,dcmin=7.,dcmax=10.,Nran=216000):
self.ramin=ramin
self.ramax=ramax
self.dcmin=dcmin
self.dcmax=dcmax
self.Nran=Nran
def get_randoms(self, fn='quick_randoms.pickle'):
if os.path.exists(fn):
ra,dec= self.read_randoms()
else:
ra,dec=draw_unit_sphere(ramin=self.ramin,ramax=self.ramax,\
dcmin=self.dcmin,dcmax=self.dcmax,Nran=self.Nran)
self.ra,self.dec=ra,dec
def save_randoms(self,fn='quick_randoms.pickle'):
if not os.path.exists(fn):
fout=open(fn, 'w')
pickle.dump((self.ra,self.dec),fout)
fout.close()
print("Wrote randoms to: %s" % fn)
else:
print("WARNING: %s exists, not overwritting it" % fn)
def read_randoms(self,fn='quick_randoms.pickle'):
print("Reading randoms from %s" % fn)
fobj=open(fn, 'r')
ra,dec= pickle.load(fobj)
fobj.close()
return ra,dec
def plot(self,xlim=None,ylim=None,text=''):
fig,ax=plt.subplots()
add_scatter(ax,self.ra,self.dec,c='b',m='.',alpha=0.5)
ax.set_xlabel('RA')
ax.set_ylabel('DEC')
if xlim is not None and ylim is not None:
ax.set_xlim(xlim)
ax.set_ylim(ylim)
text='_xlim%0.5f_%.5f_ylim%.5f_%.5f' % (xlim[0],xlim[1],ylim[0],ylim[1])
plt.savefig("quick_randoms%s.png" % text)
plt.close()
class DesiRandoms(object):
'''Draw randomly from unit sphere & provide 2 masks:
mask1: inbricks -- indices where ra,dec pts are in LegacySurvey bricks
mask2: inimages -- union with inbricks and where we have legacy survey imaging data at these ra,dec pts
Example:
ran= DesiRandoms(ramin=243.,ramax=246.,dcmin=7.,dcmax=10.,Nran=216000)
ran.get_randoms()
# save randoms if file not exist and plot
ran.save_randoms()
ran.plot(xlim=(244.,244.1),ylim=(8.,8.1)) #,xlim=(244.,244.+10./360),ylim=(8.,8.+10./360.))
'''
def __init__(self,ramin=243.,ramax=246.,dcmin=7.,dcmax=10.,Nran=216000):
self.ramin=ramin
self.ramax=ramax
self.dcmin=dcmin
self.dcmax=dcmax
self.Nran=Nran
def get_randoms(self,fn='desi_randoms.pickle'):
if os.path.exists(fn):
self.ra,self.dec,self.i_inbricks,self.i_inimages= self.read_randoms()
else:
self.ra,self.dec,self.i_inbricks,self.i_inimages= self.make_randoms()
def save_randoms(self,fn='desi_randoms.pickle'):
if not os.path.exists(fn):
fout=open(fn, 'w')
pickle.dump((self.ra,self.dec,self.i_inbricks,self.i_inimages),fout)
fout.close()
print("Wrote: %s" % fn)
else:
print "WARNING: not saving randoms b/c file already exists: %s" % fn
def make_randoms(self):
'''Nran -- # of randoms'''
import imaginglss
from imaginglss.model import dataproduct
from imaginglss.model.datarelease import contains
import h5py
print "Creating %d Randoms" % self.Nran
# dr2 cache
decals = imaginglss.DECALS('/project/projectdirs/desi/users/burleigh/dr3_testdir_for_bb/imaginglss/dr2.conf.py')
foot= decals.datarelease.create_footprint((self.ramin,self.ramax,self.dcmin,self.dcmax))
print('Total sq.deg. covered by Bricks= ',foot.area)
# Sample full ra,dec box
ra,dec=draw_unit_sphere(ramin=self.ramin,ramax=self.ramax,\
dcmin=self.dcmin,dcmax=self.dcmax,Nran=self.Nran)
#randoms = np.empty(len(ra), dtype=dataproduct.RandomCatalogue)
#randoms['RA'] = ra
#randoms['DEC'] = dec
# Get indices of inbrick points, copied from def filter()
coord= np.array((ra,dec))
bid = foot.brickindex.query_internal(coord)
i_inbricks = contains(foot._covered_brickids, bid)
i_inbricks = np.where(i_inbricks)[0]
print('Number Density in bricks= ',len(ra)/foot.area)
# Union of inbricks and have imaging data, evaluate depths at ra,dec
coord= coord[:, i_inbricks]
cat_lim = decals.datarelease.read_depths(coord, 'grz')
depth= cat_lim['DECAM_DEPTH'] ** -0.5 / cat_lim['DECAM_MW_TRANSMISSION']
nanmask= np.isnan(depth)
nanmask=np.all(nanmask[:,[1,2,4]],axis=1) # shape (Nran,)
i_inimages= i_inbricks[nanmask == False]
print('ra.size=%d, i_inbricks.size=%d, i_inimages.size=%d' % (ra.size, i_inbricks.size, i_inimages.size))
# We are not using Yu's randoms dtype=dataproduct.RandomCatalogue
#randoms['INTRINSIC_NOISELEVEL'][:, :6] = (cat_lim['DECAM_DEPTH'] ** -0.5 / cat_lim['DECAM_MW_TRANSMISSION'])
#randoms['INTRINSIC_NOISELEVEL'][:, 6:] = 0
#nanmask = np.isnan(randoms['INTRINSIC_NOISELEVEL'])
#randoms['INTRINSIC_NOISELEVEL'][nanmask] = np.inf
print('Total sq.deg. where have imaging data approx.= ',foot.area*(len(ra[i_inimages]))/len(ra))
print('Number Density for sources where have images= ',len(ra[i_inimages])/foot.area)
# save ra,dec,mask to file
#with h5py.File('eboss_randoms.hdf5', 'w') as ff:
# ds = ff.create_dataset('_HEADER', shape=(0,))
# ds.attrs['FootPrintArea'] = decals.datarelease.footprint.area
# ds.attrs['NumberDensity'] = 1.0 * len(randoms) / decals.datarelease.footprint.area
# for column in randoms.dtype.names:
# ds = ff.create_dataset(column, data=randoms[column])
# ds = ff.create_dataset('nanmask', data=nanmask)
return ra,dec, i_inbricks,i_inimages
def plot(self,name='desirandoms.png'):
fig,ax=plt.subplots(1,3,sharey=True,sharex=True,figsize=(15,5))
add_scatter(ax[0],self.ra, self.dec, c='b',m='o')
add_scatter(ax[1],self.ra[self.i_inbricks], self.dec[self.i_inbricks], c='b',m='.')
add_scatter(ax[2],self.ra[self.i_inimages], self.dec[self.i_inimages], c='b',m='.')
for i,title in zip(range(3),['All','in Bricks','in Images']):
ti=ax[i].set_title(title)
xlab=ax[i].set_xlabel('ra')
ax[i].set_ylim((self.dec.min(),self.dec.max()))
ax[i].set_xlim((self.ra.min(),self.ra.max()))
ylab=ax[0].set_ylabel('dec')
plt.savefig(name, bbox_extra_artists=[ti,xlab,ylab], bbox_inches='tight',dpi=150)
plt.close()
print "wrote: %s" % name
class Angular_Correlator(object):
'''Compute w(theta) from observed ra,dec and random ra,dec
    uses the Landy-Szalay estimator: (DD - 2*DR + RR) / RR
two numerical methods: 1) Yu Feng's kdcount, 2) astroML
Example:
ac= Angular_Correlator(gal_ra,gal_dec,ran_ra,ran_dec)
ac.compute()
ac.plot()
'''
def __init__(self,gal_ra,gal_dec,ran_ra,ran_dec,ncores=1):
self.gal_ra=gal_ra
self.gal_dec=gal_dec
self.ran_ra=ran_ra
self.ran_dec=ran_dec
self.ncores=ncores
def compute(self):
self.theta,self.w={},{}
for key in ['astroML','yu']:
self.theta[key],self.w[key]= self.get_angular_corr(whos=key)
self.plot()
def get_angular_corr(self,whos='yu'):
if whos == 'yu': return self.ac_yu()
elif whos == 'astroML': return self.ac_astroML()
else: raise ValueError()
def ac_astroML(self):
'''from two_point_angular() in astroML/correlation.py'''
from astroML.correlation import two_point,ra_dec_to_xyz,angular_dist_to_euclidean_dist
# 3d project
data = np.asarray(ra_dec_to_xyz(self.gal_ra, self.gal_dec), order='F').T
data_R = np.asarray(ra_dec_to_xyz(self.ran_ra, self.ran_dec), order='F').T
# convert spherical bins to cartesian bins
bins = 10 ** np.linspace(np.log10(1. / 60.), np.log10(6), 16)
bins_transform = angular_dist_to_euclidean_dist(bins)
w= two_point(data, bins_transform, method='landy-szalay',data_R=data_R)
bin_centers = 0.5 * (bins[1:] + bins[:-1])
return bin_centers, w
def ac_yu(self):
from kdcount import correlate
from kdcount import sphere
abin = sphere.AngularBinning(np.logspace(-4, -2.6, 10))
D = sphere.points(self.gal_ra, self.gal_dec)
R = sphere.points(self.ran_ra, self.ran_dec) #weights=wt_array
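        # Landy-Szalay estimator: w = (DD - 2*r*DR + r**2*RR) / (r**2*RR),
        # where r = D.norm / R.norm rescales the pair counts for the different
        # sizes of the data and random catalogues.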
DD = correlate.paircount(D, D, abin, np=self.ncores)
DR = correlate.paircount(D, R, abin, np=self.ncores)
RR = correlate.paircount(R, R, abin, np=self.ncores)
r = D.norm / R.norm
w= (DD.sum1 - 2 * r * DR.sum1 + r ** 2 * RR.sum1) / (r ** 2 * RR.sum1)
return abin.angular_centers,w
def plot(self,name='wtheta.png'):
fig,ax=plt.subplots()
for key,col,mark in zip(['yu','astroML'],['g','b'],['o']*2):
print "%s: theta,w" % key,self.theta[key],self.w[key]
add_scatter(ax,self.theta[key], self.w[key], c=col,m=mark,lab=key,alpha=0.5)
t = np.array([0.01, 10])
plt.plot(t, 10 * (t / 0.01) ** -0.8, ':k', lw=1)
ax.legend(loc='upper right',scatterpoints=1)
xlab=ax.set_xlabel(r'$\theta$ (deg)')
ylab=ax.set_ylabel(r'$\hat{w}(\theta)$')
ax.set_xscale('log')
ax.set_yscale('log')
plt.savefig(name, bbox_extra_artists=[xlab,ylab], bbox_inches='tight',dpi=150)
plt.close()
print "wrote: %s" % name
def ac_unit_test():
'''angular correlation func unit test'''
qran= QuickRandoms(ramin=243.,ramax=246.,dcmin=7.,dcmax=10.,Nran=216000)
qran.get_randoms()
# subset
index= np.all((qran.ra >= 244.,qran.ra <= 244.5,\
qran.dec >= 8.,qran.dec <= 8.5),axis=0)
ra,dec= qran.ra[index],qran.dec[index]
# use these as Ducks for DesiRandoms
ran= DesiRandoms()
ran.ra,ran.dec= ra,dec
index= np.all((ran.ra >= 244.,ran.ra <= 244.25),axis=0)
ran.i_inbricks= np.where(index)[0]
index= np.all((index,ran.dec >= 8.1,ran.dec <= 8.4),axis=0)
ran.i_inimages= np.where(index)[0]
ran.plot()
# wtheta
ac= Angular_Correlator(ran.ra[ran.i_inimages],ran.dec[ran.i_inimages],ran.ra,ran.dec)
ac.compute()
ac.plot()
    print('finished unit_test')
if __name__ == '__main__':
#ac_unit_test()
#Nran=int(2.4e3*9.)
#ran= DesiRandoms(ramin=243.,ramax=246.,dcmin=7.,dcmax=10.,Nran=216000)
Nran=int(2.4e3*1.e2)
ran= DesiRandoms(ramin=120.,ramax=130.,dcmin=20.,dcmax=30.,Nran=Nran)
ran.get_randoms()
# save randoms if file not exist and plot
ran.save_randoms(fn='desi_randoms_qual.pickle')
ran.plot()
| gpl-2.0 |
joshuamorton/calc_three_proj | plot.py | 1 | 1969 | from matplotlib import pyplot as plt
import numpy as np
import iterative
import pascal
import power
plt.style.use('ggplot')
qr = []
lu = []
for i in range(2, 13):
q = pascal.solve_qr_b(pascal.pascal_matrix(i), pascal.harmonic_vector(i))
l = pascal.solve_lu_b(pascal.pascal_matrix(i), pascal.harmonic_vector(i))
qr.append(q)
lu.append(l)
plt.subplot(1, 1, 1)
x = range(2, 13)
y = [i[1] for i in qr]
z = [i[2] for i in qr]
plt.plot(x, y, color='blue') # error from householder
plt.plot(x, z, color='green') # solution error of qr
plt.yscale('log')
plt.savefig('./qr_err.png')
y = [i[1] for i in lu]
z = [i[2] for i in lu]
plt.clf()
plt.plot(x, y, color='blue')
plt.plot(x, z, color='green')
plt.yscale('log')
plt.savefig('./lu_err.png')
plt.clf()
jacobi, gs = iterative.generate_data()
j_vals = [i[1] for i in jacobi]
g_vals = [i[1] for i in gs]
jacobi_approx = sum(j_vals) / len(j_vals) # 2c
gs_approx = sum(g_vals) / len(g_vals)
print("Averages, jacobi then gauss-seidel, then iterations")
print(jacobi_approx)
print(gs_approx)
print(float(sum(j[2] for j in jacobi))/sum(g[2] for g in gs))
exact = np.array([9.0/190, 28.0/475, 33.0/475]).reshape(3,1)
errs_jacobi = [pascal.norm_inf(j-exact) for j in j_vals]
errs_gs = [pascal.norm_inf(g-exact) for g in g_vals]
plt.plot([j[2] for j in jacobi], errs_jacobi, 'ko', [g[2] for g in gs], errs_gs, 'bo')
plt.savefig('./iterative_err.png')
plt.clf()
powers = power.generate_data()
ds = [p[0] for p in powers if p[0] is not None]
ts = [p[1] for p in powers if p[1] is not None]
tis = [p[2] for p in powers if p[2] is not None]
maxs = [p[3] for p in powers if p[3] is not None]
mins = [p[4] for p in powers if p[4] is not None]
big = max(maxs)
small = max(mins)
maxs = [float(m)/big for m in maxs]
mins = [float(m)/small for m in mins]
plt.scatter(ds, ts, c=maxs)
plt.savefig('./power_mat.png')
plt.clf()
plt.scatter([1.0/d for d in ds], tis, c=mins)
plt.savefig('./power_inv.png')
plt.clf() | mit |
jayflo/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
hitszxp/scikit-learn | examples/linear_model/plot_lasso_coordinate_descent_path.py | 254 | 2639 | """
=====================
Lasso and Elastic Net
=====================
Lasso and elastic net (L1 and L2 penalisation) implemented using a
coordinate descent.
The coefficients can be forced to be positive.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, enet_path
from sklearn import datasets
diabetes = datasets.load_diabetes()
X = diabetes.data
y = diabetes.target
X /= X.std(axis=0) # Standardize data (easier to set the l1_ratio parameter)
# Compute paths
eps = 5e-3 # the smaller it is the longer is the path
print("Computing regularization path using the lasso...")
alphas_lasso, coefs_lasso, _ = lasso_path(X, y, eps, fit_intercept=False)
print("Computing regularization path using the positive lasso...")
alphas_positive_lasso, coefs_positive_lasso, _ = lasso_path(
X, y, eps, positive=True, fit_intercept=False)
print("Computing regularization path using the elastic net...")
alphas_enet, coefs_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, fit_intercept=False)
print("Computing regularization path using the positve elastic net...")
alphas_positive_enet, coefs_positive_enet, _ = enet_path(
X, y, eps=eps, l1_ratio=0.8, positive=True, fit_intercept=False)
# Display results
plt.figure(1)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_enet), coefs_enet.T, linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and Elastic-Net Paths')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'Elastic-Net'), loc='lower left')
plt.axis('tight')
plt.figure(2)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_lasso), coefs_lasso.T)
l2 = plt.plot(-np.log10(alphas_positive_lasso), coefs_positive_lasso.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Lasso and positive Lasso')
plt.legend((l1[-1], l2[-1]), ('Lasso', 'positive Lasso'), loc='lower left')
plt.axis('tight')
plt.figure(3)
ax = plt.gca()
ax.set_color_cycle(2 * ['b', 'r', 'g', 'c', 'k'])
l1 = plt.plot(-np.log10(alphas_enet), coefs_enet.T)
l2 = plt.plot(-np.log10(alphas_positive_enet), coefs_positive_enet.T,
linestyle='--')
plt.xlabel('-Log(alpha)')
plt.ylabel('coefficients')
plt.title('Elastic-Net and positive Elastic-Net')
plt.legend((l1[-1], l2[-1]), ('Elastic-Net', 'positive Elastic-Net'),
loc='lower left')
plt.axis('tight')
plt.show()
| bsd-3-clause |
partofthething/laserComm | laserComm/receiver.py | 1 | 2975 | '''
receiver runs the ADC and photoresistor to receive an input signal.
USes MCP3008 ADC via the hardware SPI interface.
Connections are:
MCP3008 VDD -> 3.3V (red)
MCP3008 VREF -> 3.3V (red)
MCP3008 AGND -> GND (orange)
MCP3008 CLK -> SCLK (yellow)
MCP3008 DOUT -> MISO (green)
MCP3008 DIN -> MOSI (yellow)
MCP3008 CS -> CE0 (red)
MCP3008 DGND -> GND (orange)
The photoresistor goes from 4 kOhms (dark) to like 90 Ohms. (flashlight).
Output is 1024*Vin/Vref.
Build a voltage divider with like a 200 Ohm resistor in series w/ the photoR and measure
Vout between them. I put photoresistor between vout and ground.
The signal is intended to be processed using signal_processor
'''
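# Worked example of the scaling above (illustrative, assuming Vref = 3.3 V):
# a raw reading of 512 corresponds to Vin ~= 512*3.3/1024 ~= 1.65 V, i.e. the
# photoresistor and the series resistor drop roughly equal voltages.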
import time
import numpy
import matplotlib
matplotlib.use('Agg') # works headless (e.g. on Raspberry Pi)
import matplotlib.pyplot as plt
try:
import spidev
except ImportError:
print('no spidev')
GAP = 0.001
class ADC(object):
"""
The Analog-to-digital converter
"""
def __init__(self):
self.adc = None
def __enter__(self):
self.adc = spidev.SpiDev()
self.adc.open(0, 0)
def __exit__(self, exc_type, exc_value, traceback):
self.adc.close()
def read(self, input_number):
"""
read SPI data from MCP3008 chip
There are 8 possible channels (0 through 7)
Will return value between 0 and 1023
"""
if ((input_number > 7) or (input_number < 0)):
return -1
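        # MCP3008 single-ended read: byte 1 is the start bit, byte 2 carries
        # SGL/DIFF=1 plus the 3-bit channel number in its high nibble, and
        # byte 3 just clocks out the reply; the 10-bit result arrives in the
        # low 2 bits of r[1] followed by all 8 bits of r[2].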
r = self.adc.xfer2([1, (8 + input_number) << 4, 0])
adcValue = ((r[1] & 3) << 8) + r[2]
return adcValue
class Receiver(object):
"""
Stream processor that uses adc
"""
@property
def times(self):
return numpy.linspace(0, 10, len(self.vals))
def receive(self, adc):
self.vals = []
        # receive for 30 seconds (matches the 30.0 s limit in the loop below)
print('Receiving')
start = time.time()
while time.time() - start < 30.0:
self.vals.append(adc.read(0))
time.sleep(GAP / 10)
def plot(self, fname='adc.pdf'):
print('Plotting')
t = self.times
plt.figure(figsize=(12, 10))
plt.plot(t, self.vals, '-')
plt.xlabel('Time (s)')
plt.ylabel('ADC signal')
plt.title('ADC Signal Trace')
plt.grid(color='0.7')
if fname:
plt.savefig(fname)
def save(self, fname='adc.txt'):
"""
Save results to file
"""
print('Saving')
with open(fname, 'w') as f:
f.writelines(['{0:04d}\n'.format(vi) for vi in self.vals])
def load(self, fname='adc.txt'):
print('Loading')
with open(fname) as f:
vals = f.readlines()
self.vals = [float(vi) for vi in vals]
if __name__ == '__main__':
adc = ADC()
receiver = Receiver()
with adc:
vals = receiver.receive(adc)
receiver.plot()
receiver.save()
| mit |
oreilly-japan/deep-learning-from-scratch | ch07/apply_filter.py | 4 | 1634 | # coding: utf-8
import sys, os
sys.path.append(os.pardir)  # Setting to import files from the parent directory
import numpy as np
import matplotlib.pyplot as plt
from simple_convnet import SimpleConvNet
from matplotlib.image import imread
from common.layers import Convolution
def filter_show(filters, nx=4, show_num=16):
"""
c.f. https://gist.github.com/aidiary/07d530d5e08011832b12#file-draw_weight-py
"""
FN, C, FH, FW = filters.shape
ny = int(np.ceil(show_num / nx))
fig = plt.figure()
fig.subplots_adjust(left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
for i in range(show_num):
ax = fig.add_subplot(4, 4, i+1, xticks=[], yticks=[])
ax.imshow(filters[i, 0], cmap=plt.cm.gray_r, interpolation='nearest')
network = SimpleConvNet(input_dim=(1,28,28),
conv_param = {'filter_num':30, 'filter_size':5, 'pad':0, 'stride':1},
hidden_size=100, output_size=10, weight_init_std=0.01)
# Weights after training
network.load_params("params.pkl")
filter_show(network.params['W1'], 16)
img = imread('../dataset/lena_gray.png')
img = img.reshape(1, 1, *img.shape)
fig = plt.figure()
w_idx = 1
for i in range(16):
w = network.params['W1'][i]
b = 0 # network.params['b1'][i]
w = w.reshape(1, *w.shape)
#b = b.reshape(1, *b.shape)
conv_layer = Convolution(w, b)
out = conv_layer.forward(img)
out = out.reshape(out.shape[2], out.shape[3])
ax = fig.add_subplot(4, 4, i+1, xticks=[], yticks=[])
ax.imshow(out, cmap=plt.cm.gray_r, interpolation='nearest')
plt.show() | mit |
jrleja/bsfh | prospect/io/read_results.py | 3 | 20576 | import sys, os
from copy import deepcopy
import warnings
import pickle, json
import numpy as np
try:
import h5py
except:
pass
try:
from sedpy.observate import load_filters
except:
pass
"""Convenience functions for reading and reconstructing results from a fitting
run, including reconstruction of the model for making posterior samples
"""
__all__ = ["results_from", "emcee_restarter",
"get_sps", "get_model",
"traceplot", "subcorner",
"compare_paramfile"]
def unpick(pickled):
"""create a serialized object that can go into hdf5 in py2 and py3, and can be read by both
"""
try:
obj = pickle.loads(pickled, encoding='bytes')
except(TypeError):
obj = pickle.loads(pickled)
return obj
def results_from(filename, model_file=None, dangerous=True, **kwargs):
"""Read a results file with stored model and MCMC chains.
:param filename:
Name and path to the file holding the results. If ``filename`` ends in
"h5" then it is assumed that this is an HDF5 file, otherwise it is
assumed to be a pickle.
:param dangerous: (default, True)
If True, use the stored paramfile text to import the parameter file and
reconstitute the model object. This executes code in the stored
paramfile text during import, and is therefore dangerous.
:returns results:
A dictionary of various results including:
+ `"chain"` - Samples from the posterior probability (ndarray).
+ `"lnprobability"` - The posterior probability of each sample.
+ `"weights"` - The weight of each sample, if `dynesty` was used.
+ `"theta_labels"` - List of strings describing free parameters.
+ `"bestfit"` - The prediction of the data for the posterior sample with
the highest `"lnprobability"`, as a dictionary.
+ `"run_params"` - A dictionary of arguments supplied to prospector at
the time of the fit.
+ `"paramfile_text"` - Text of the file used to run prospector, string
:returns obs:
The obs dictionary
:returns model:
The models.SedModel() object, if it could be regenerated from the stored
`"paramfile_text"`. Otherwise, `None`.
"""
# Read the basic chain, parameter, and run_params info
if filename.split('.')[-1] == 'h5':
res = read_hdf5(filename, **kwargs)
if "_mcmc.h5" in filename:
mf_default = filename.replace('_mcmc.h5', '_model')
else:
mf_default = "x"
else:
with open(filename, 'rb') as rf:
res = pickle.load(rf)
mf_default = filename.replace('_mcmc', '_model')
# Now try to read the model object itself from a pickle
if model_file is None:
mname = mf_default
else:
mname = model_file
param_file = (res['run_params'].get('param_file', ''),
res.get("paramfile_text", ''))
model, powell_results = read_model(mname, param_file=param_file,
dangerous=dangerous, **kwargs)
if dangerous:
try:
model = get_model(res)
except:
model = None
res['model'] = model
if powell_results is not None:
res["powell_results"] = powell_results
return res, res["obs"], model
def emcee_restarter(restart_from="", niter=32, **kwargs):
"""Get the obs, model, and sps objects from a previous run, as well as the
run_params and initial positions (which are determined from the end of the
last run, and inserted into the run_params dictionary)
:param restart_from:
Name of the file to restart the sampling from. An error is raised if
this does not include an emcee style chain of shape (nwalker, niter,
ndim)
:param niter: (default: 32)
        Number of additional iterations to do (added to run_params)
:returns obs:
The `obs` dictionary used in the last run.
:returns model:
The model object used in the last run.
:returns sps:
The `sps` object used in the last run.
:returns noise:
A tuple of (None, None), since it is assumed the noise model in the
last run was trivial.
:returns run_params:
A dictionary of parameters controlling the operation. This is the same
as used in the last run, but with the "niter" key changed, and a new
"initial_positions" key that gives the ending positions of the emcee
walkers from the last run. The filename from which the run is
restarted is also stored in the "restart_from" key.
"""
result, obs, model = results_from(restart_from)
noise = (None, None)
# check for emcee style outputs
is_emcee = (len(result["chain"].shape) == 3) & (result["chain"].shape[0] > 1)
msg = "Result file {} does not have a chain of the proper shape."
assert is_emcee, msg.format(restart_from)
sps = get_sps(result)
run_params = deepcopy(result["run_params"])
run_params["niter"] = niter
run_params["restart_from"] = restart_from
initial_positions = result["chain"][:, -1, :]
run_params["initial_positions"] = initial_positions
return obs, model, sps, noise, run_params
def read_model(model_file, param_file=('', ''), dangerous=False, **extras):
"""Read the model pickle. This can be difficult if there are user defined
functions that have to be loaded dynamically. In that case, import the
string version of the paramfile and *then* try to unpickle the model
object.
:param model_file:
String, name and path to the model pickle.
:param dangerous: (default: False)
If True, try to import the given paramfile.
:param param_file:
2-element tuple. The first element is the name of the paramfile, which
will be used to set the name of the imported module. The second
element is the param_file contents as a string. The code in this
string will be imported.
"""
model = powell_results = None
if os.path.exists(model_file):
try:
with open(model_file, 'rb') as mf:
mod = pickle.load(mf)
except(AttributeError):
# Here one can deal with module and class names that changed
with open(model_file, 'rb') as mf:
mod = load(mf)
except(ImportError, KeyError):
# here we load the parameter file as a module using the stored
# source string. Obviously this is dangerous as it will execute
# whatever is in the stored source string. But it can be used to
            # recover functions (especially dependency functions) that are user
# defined
path, filename = os.path.split(param_file[0])
modname = filename.replace('.py', '')
if dangerous:
user_module = import_module_from_string(param_file[1], modname)
with open(model_file, 'rb') as mf:
mod = pickle.load(mf)
model = mod['model']
for k, v in list(model.theta_index.items()):
if type(v) is tuple:
model.theta_index[k] = slice(*v)
powell_results = mod['powell']
return model, powell_results
def read_hdf5(filename, **extras):
"""Read an HDF5 file (with a specific format) into a dictionary of results.
This HDF5 file is assumed to have the groups ``sampling`` and ``obs`` which
respectively contain the sampling chain and the observational data used in
the inference.
All attributes of these groups as well as top-level attributes are loaded
into the top-level of the dictionary using ``json.loads``, and therefore
must have been written with ``json.dumps``. This should probably use
JSONDecoders, but who has time to learn that.
:param filename:
Name of the HDF5 file.
"""
groups = {"sampling": {}, "obs": {},
"bestfit": {}, "optimization": {}}
res = {}
with h5py.File(filename, "r") as hf:
# loop over the groups
for group, d in groups.items():
# check the group exists
if group not in hf:
continue
# read the arrays in that group into the dictionary for that group
for k, v in hf[group].items():
d[k] = np.array(v)
# unserialize the attributes and put them in the dictionary
for k, v in hf[group].attrs.items():
try:
d[k] = json.loads(v)
except:
try:
d[k] = unpick(v)
except:
d[k] = v
# do top-level attributes.
for k, v in hf.attrs.items():
try:
res[k] = json.loads(v)
except:
try:
res[k] = unpick(v)
except:
res[k] = v
res.update(groups['sampling'])
res["bestfit"] = groups["bestfit"]
res["optimization"] = groups["optimization"]
res['obs'] = groups['obs']
try:
res['obs']['filters'] = load_filters([str(f) for f in res['obs']['filters']])
except:
pass
try:
res['rstate'] = unpick(res['rstate'])
except:
pass
#try:
# mp = [names_to_functions(p.copy()) for p in res['model_params']]
# res['model_params'] = mp
#except:
# pass
return res
def read_pickles(filename, **kwargs):
"""Alias for backwards compatability. Calls `results_from()`.
"""
return results_from(filename, **kwargs)
def get_sps(res):
"""This gets exactly the SPS object used in the fiting (modulo any
changes to FSPS itself).
It (scarily) imports the paramfile (stored as text in the results
dictionary) as a module and then uses the `load_sps` method defined in the
paramfile module.
:param res:
A results dictionary (the output of `results_from()`)
:returns sps:
An sps object (i.e. from prospect.sources)
"""
import os
param_file = (res['run_params'].get('param_file', ''),
res.get("paramfile_text", ''))
path, filename = os.path.split(param_file[0])
modname = filename.replace('.py', '')
user_module = import_module_from_string(param_file[1], modname)
try:
sps = user_module.load_sps(**res['run_params'])
except(AttributeError):
sps = user_module.build_sps(**res['run_params'])
# Now check that the SSP libraries are consistent
flib = res['run_params'].get('sps_libraries', None)
try:
rlib = sps.ssp.libraries
except(AttributeError):
rlib = None
if (flib is None) or (rlib is None):
warnings.warn("Could not check SSP library versions.")
else:
liberr = ("The FSPS libraries used in fitting({}) are not the "
"same as the FSPS libraries that you are using now ({})".format(flib, rlib))
# If fitting and reading in are happening in different python versions,
# ensure string comparison doesn't throw error:
        if type(flib[0]) == bytes:
flib = [i.decode() for i in flib]
        if type(rlib[0]) == bytes:
rlib = [i.decode() for i in rlib]
assert (flib[0] == rlib[0]) and (flib[1] == rlib[1]), liberr
return sps
def get_model(res):
"""This gets exactly the model object used in the fiting.
It (scarily) imports the paramfile (stored as text in the results
dictionary) as a module and then uses the `load_model` method defined in the
paramfile module, with `run_params` dictionary passed to it.
:param res:
A results dictionary (the output of `results_from()`)
:returns model:
A prospect.models.SedModel object
"""
import os
param_file = (res['run_params'].get('param_file', ''),
res.get("paramfile_text", ''))
path, filename = os.path.split(param_file[0])
modname = filename.replace('.py', '')
user_module = import_module_from_string(param_file[1], modname)
try:
model = user_module.load_model(**res['run_params'])
except(AttributeError):
model = user_module.build_model(**res['run_params'])
return model
def import_module_from_string(source, name, add_to_sys_modules=True):
"""Well this seems dangerous.
"""
import imp
user_module = imp.new_module(name)
exec(source, user_module.__dict__)
if add_to_sys_modules:
sys.modules[name] = user_module
return user_module
def traceplot(results, showpars=None, start=0, chains=slice(None),
figsize=None, truths=None, **plot_kwargs):
"""Plot the evolution of each parameter value with iteration #, for each
walker in the chain.
:param results:
A Prospector results dictionary, usually the output of
``results_from('resultfile')``.
:param showpars: (optional)
A list of strings of the parameters to show. Defaults to all
parameters in the ``"theta_labels"`` key of the ``sample_results``
dictionary.
:param chains:
If results are from an ensemble sampler, setting `chain` to an integer
array of walker indices will cause only those walkers to be used in
        generating the plot. Useful to keep the plot from getting too cluttered.
:param start: (optional, default: 0)
Integer giving the iteration number from which to start plotting.
:param **plot_kwargs:
Extra keywords are passed to the
``matplotlib.axes._subplots.AxesSubplot.plot()`` method.
:returns tracefig:
A multipaneled Figure object that shows the evolution of walker
positions in the parameters given by ``showpars``, as well as
ln(posterior probability)
"""
import matplotlib.pyplot as pl
# Get parameter names
try:
parnames = np.array(results['theta_labels'])
except(KeyError):
parnames = np.array(results['model'].theta_labels())
# Restrict to desired parameters
if showpars is not None:
ind_show = np.array([p in showpars for p in parnames], dtype=bool)
parnames = parnames[ind_show]
else:
ind_show = slice(None)
# Get the arrays we need (trace, lnp, wghts)
trace = results['chain'][..., ind_show]
if trace.ndim == 2:
trace = trace[None, :]
trace = trace[chains, start:, :]
lnp = np.atleast_2d(results['lnprobability'])[chains, start:]
wghts = results.get('weights', None)
if wghts is not None:
wghts = wghts[start:]
nwalk = trace.shape[0]
# Set up plot windows
ndim = len(parnames) + 1
nx = int(np.floor(np.sqrt(ndim)))
ny = int(np.ceil(ndim * 1.0 / nx))
sz = np.array([nx, ny])
factor = 3.0 # size of one side of one panel
lbdim = 0.2 * factor # size of left/bottom margin
trdim = 0.2 * factor # size of top/right margin
whspace = 0.05 * factor # w/hspace size
plotdim = factor * sz + factor * (sz - 1) * whspace
dim = lbdim + plotdim + trdim
if figsize is None:
fig, axes = pl.subplots(nx, ny, figsize=(dim[1], dim[0]), sharex=True)
else:
fig, axes = pl.subplots(nx, ny, figsize=figsize, sharex=True)
axes = np.atleast_2d(axes)
#lb = lbdim / dim
#tr = (lbdim + plotdim) / dim
#fig.subplots_adjust(left=lb[1], bottom=lb[0], right=tr[1], top=tr[0],
# wspace=whspace, hspace=whspace)
# Sequentially plot the chains in each parameter
for i in range(ndim - 1):
ax = axes.flat[i]
for j in range(nwalk):
ax.plot(trace[j, :, i], **plot_kwargs)
ax.set_title(parnames[i], y=1.02)
# Plot lnprob
ax = axes.flat[-1]
for j in range(nwalk):
ax.plot(lnp[j, :], **plot_kwargs)
ax.set_title('lnP', y=1.02)
[ax.set_xlabel("iteration") for ax in axes[-1, :]]
#[ax.set_xticklabels('') for ax in axes[:-1, :].flat]
if truths is not None:
for i, t in enumerate(truths[ind_show]):
axes.flat[i].axhline(t, color='k', linestyle=':')
pl.tight_layout()
return fig
def param_evol(results, **kwargs):
"""Backwards compatability
"""
return traceplot(results, **kwargs)
def subcorner(results, showpars=None, truths=None,
start=0, thin=1, chains=slice(None),
logify=["mass", "tau"], **kwargs):
"""Make a triangle plot of the (thinned, latter) samples of the posterior
parameter space. Optionally make the plot only for a supplied subset of
the parameters.
:param showpars: (optional)
List of string names of parameters to include in the corner plot.
:param truths: (optional)
List of truth values for the chosen parameters.
:param start: (optional, default: 0)
The iteration number to start with when drawing samples to plot.
:param thin: (optional, default: 1)
The thinning of each chain to perform when drawing samples to plot.
:param chains: (optional)
If results are from an ensemble sampler, setting `chain` to an integer
array of walker indices will cause only those walkers to be used in
        generating the plot. Useful for removing stuck walkers.
:param kwargs:
Remaining keywords are passed to the ``corner`` plotting package.
:param logify:
A list of parameter names to plot in `log10(parameter)` instead of
`parameter`
"""
try:
import corner as triangle
except(ImportError):
import triangle
except:
raise ImportError("Please install the `corner` package.")
# pull out the parameter names and flatten the thinned chains
# Get parameter names
try:
parnames = np.array(results['theta_labels'], dtype='U20')
except(KeyError):
parnames = np.array(results['model'].theta_labels())
# Restrict to desired parameters
if showpars is not None:
ind_show = np.array([parnames.tolist().index(p) for p in showpars])
parnames = parnames[ind_show]
else:
ind_show = slice(None)
# Get the arrays we need (trace, wghts)
trace = results['chain'][..., ind_show]
if trace.ndim == 2:
trace = trace[None, :]
trace = trace[chains, start::thin, :]
wghts = results.get('weights', None)
if wghts is not None:
wghts = wghts[start::thin]
samples = trace.reshape(trace.shape[0] * trace.shape[1], trace.shape[2])
# logify some parameters
xx = samples.copy()
if truths is not None:
xx_truth = np.array(truths).copy()
else:
xx_truth = None
for p in logify:
if p in parnames:
idx = parnames.tolist().index(p)
xx[:, idx] = np.log10(xx[:, idx])
parnames[idx] = "log({})".format(parnames[idx])
if truths is not None:
xx_truth[idx] = np.log10(xx_truth[idx])
# mess with corner defaults
corner_kwargs = {"plot_datapoints": False, "plot_density": False,
"fill_contours": True, "show_titles": True}
corner_kwargs.update(kwargs)
fig = triangle.corner(xx, labels=parnames, truths=xx_truth,
quantiles=[0.16, 0.5, 0.84], weights=wghts, **corner_kwargs)
return fig
def subtriangle(results, **kwargs):
"""Backwards compatability
"""
return subcorner(results, **kwargs)
def compare_paramfile(res, filename):
"""Compare the runtime parameter file text stored in the `res` dictionary
to the text of some existing file with fully qualified path `filename`.
"""
from pprint import pprint
from difflib import unified_diff
a = res["paramfile_text"]
aa = a.split('\n')
with open(filename, "r") as f:
b = json.dumps(f.read())
bbl = json.loads(b)
bb = bbl.split('\n')
pprint([l for l in unified_diff(aa, bb)])
def names_to_functions(p):
"""Replace names of functions (or pickles of objects) in a parameter
description with the actual functions (or pickles).
"""
from importlib import import_module
for k, v in list(p.items()):
try:
m = import_module(v[1])
f = m.__dict__[v[0]]
except:
try:
f = pickle.loads(v)
except:
f = v
p[k] = f
return p
| mit |
AOtools/soapy | doc/source/conf.py | 4 | 13074 | # -*- coding: utf-8 -*-
#
# Soapy documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 28 11:49:44 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
from mock import Mock as MagicMock
# Add soapy to path
SOAPY_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../..')
sys.path.append(SOAPY_DIR)
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['pyfftw', 'ipython','pyfits', 'PyQt5','IPython.qt.console.rich_ipython_widget',
'IPython.qt.inprocess',
'matplotlib.backends.backend_qt5agg','sip', 'pyqtgraph','pylab', 'OpenGL',
'matplotlib.figure',
'IPython.qt.console.rich_ipython_widget',
'scipy.ndimage','scipy.optimize', 'scipy.lib.blas.fblas','scipy.fftpack','scipy.interpolate','scipy', 'scipy.signal',
'scipy.ndimage.interpolation', 'scipy.special', 'numba'
]
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
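# Mocking the modules above lets ``import soapy`` (and hence autodoc) succeed
# on a docs-only machine such as Read the Docs, where the compiled, GUI and
# FFT dependencies are not installed.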
import soapy
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
#'sphinx.ext.napoleon'
'sphinxcontrib.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Soapy'
copyright = u'2015, Andrew Reeves'
author = u'Andrew Reeves'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = soapy.__version__[1:4]
# The full version, including alpha/beta/rc tags.
release = soapy.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
def setup(app):
app.add_stylesheet("theme_overrides.css")
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Soapydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Soapy.tex', u'Soapy Documentation',
u'Andrew Reeves', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'soapy', u'Soapy Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Soapy', u'Soapy Documentation',
author, 'Soapy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = { 'python': ('https://docs.python.org/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
'matplotlib': ('http://matplotlib.sourceforge.net/', None)}
| gpl-3.0 |
akshaybabloo/Car-ND | Project_5/laneline.py | 1 | 21371 | import cv2
import glob
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from sklearn.metrics import mean_squared_error
x_cor = 9 #Number of corners to find
y_cor = 6
# Prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((y_cor*x_cor,3), np.float32)
objp[:,:2] = np.mgrid[0:x_cor, 0:y_cor].T.reshape(-1,2)
def camera_cal():
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
images = glob.glob('camera_cal/calibration*.jpg') # Make a list of paths to calibration images
# Step through the list and search for chessboard corners
corners_not_found = [] #Calibration images in which opencv failed to find corners
for idx, fname in enumerate(images):
img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # Convert to grayscale
ret, corners = cv2.findChessboardCorners(gray, (x_cor,y_cor), None) # Find the chessboard corners
# If found, add object points, image points
if ret == True:
objpoints.append(objp)
imgpoints.append(corners)
else:
corners_not_found.append(fname)
    print('Corners were found on', str(len(imgpoints)), 'out of', str(len(images)), 'it is', str(len(imgpoints)*100.0/len(images)), '% of calibration images')
img_size = (img.shape[1], img.shape[0])
# Do camera calibration given object points and image points
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size,None,None)
return mtx, dist
mtx, dist = camera_cal()
def undistort(img):
return cv2.undistort(img, mtx, dist, None, mtx)
def eq_Hist(img): # Histogram normalization
img[:, :, 0] = cv2.equalizeHist(img[:, :, 0])
img[:, :, 1] = cv2.equalizeHist(img[:, :, 1])
img[:, :, 2] = cv2.equalizeHist(img[:, :, 2])
return img
# Sobel
def sobel_img(img, thresh_min = 25, thresh_max = 255, sobel_kernel = 11):
sobelx = np.absolute(cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
sobely = np.absolute(cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
scaled_sobelx = np.uint16(255*sobelx/np.max(sobelx))
scaled_sobely = np.uint16(255*sobely/np.max(sobely))
sobel_sum = scaled_sobelx+0.2*scaled_sobely
scaled_sobel_sum = np.uint8(255*sobel_sum/np.max(sobel_sum))
sum_binary = np.zeros_like(scaled_sobel_sum)
sum_binary[(scaled_sobel_sum >= thresh_min) & (scaled_sobel_sum <= thresh_max)] = 1
return sum_binary
# Sobel magnitude
def sobel_mag_img(img, thresh_min = 25, thresh_max = 255, sobel_kernel = 11):
sobelx = np.absolute(cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
sobely = np.absolute(cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
gradmag = np.sqrt(sobelx**2 + sobely**2)
scaled_gradmag = np.uint8(255*gradmag/np.max(gradmag))
gradmag_binary = np.zeros_like(scaled_gradmag)
gradmag_binary[(scaled_gradmag >= thresh_min) & (scaled_gradmag <= thresh_max)] = 1
return gradmag_binary
# Sobel direction
def sobel_dir_img(img, thresh_min = 0.0, thresh_max = 1.5, sobel_kernel = 11):
sobelx = np.absolute(cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
sobely = np.absolute(cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
graddir = np.arctan2(sobely, sobelx)
graddir_binary = np.zeros_like(graddir)
graddir_binary[(graddir >= thresh_min) & (graddir <= thresh_max)] = 1
return graddir_binary
# Binary red channel threshold
def red_thres(img, thresh_min = 25, thresh_max = 255):
red = img[:,:,2]
red_binary = np.zeros_like(red)
red_binary[(red >= thresh_min) & (red <= thresh_max)] = 1
return red_binary
# Binary saturation channel threshold
def s_thres(img, thresh_min = 25, thresh_max = 255):
hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
s_channel = hls[:,:,2]
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel > thresh_min) & (s_channel <= thresh_max)] = 1
return s_binary
# Return saturation channel
def s_hls(img):
hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
return hls[:,:,2]
IMAGE_H = 223
IMAGE_W = 1280
# Sharpen image
def sharpen_img(img):
gb = cv2.GaussianBlur(img, (5,5), 20.0)
return cv2.addWeighted(img, 2, gb, -1, 0)
# Compute linear image transformation img*s+m
def lin_img(img,s=1.0,m=0.0):
img2=cv2.multiply(img, np.array([s]))
return cv2.add(img2, np.array([m]))
# Change image contrast; s>1 - increase
def contr_img(img, s=1.0):
m=127.0*(1.0-s)
return lin_img(img, s, m)
# Create perspective image transformation matrices
def create_M():
src = np.float32([[0, 673], [1207, 673], [0, 450], [1280, 450]])
dst = np.float32([[569, 223], [711, 223], [0, 0], [1280, 0]])
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
return M, Minv
# Main image transformation routine to get a warped image
def transform(img, M):
undist = undistort(img)
img_size = (1280, 223)
warped = cv2.warpPerspective(undist, M, img_size)
warped = sharpen_img(warped)
warped = contr_img(warped, 1.1)
return warped
# Show original and warped image side by side
def show_warped(img, M):
f, (plot1, plot2) = plt.subplots(1, 2, figsize=(9, 3))
plot1.imshow(cv2.cvtColor(undistort(img), cv2.COLOR_BGR2RGB))
plot1.set_title('Undistorted', fontsize=20)
plot2.imshow(cv2.cvtColor(transform(img, M), cv2.COLOR_BGR2RGB))
plot2.set_title('Warped', fontsize=20)
# Show one image
def show_img(img):
if len(img.shape)==3:
plt.figure()
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
else:
plt.figure()
plt.imshow(img, cmap='gray')
M, Minv = create_M()
#Calculate coefficients of the polynomial in y+h coordinates, i.e. f(y) -> f(y+h)
def pol_shift(pol, h):
    pol_ord = len(pol)-1 # Determine the degree of the polynomial
if pol_ord == 3:
pol0 = pol[0]
pol1 = pol[1] + 3.0*pol[0]*h
pol2 = pol[2] + 3.0*pol[0]*h*h + 2.0*pol[1]*h
pol3 = pol[3] + pol[0]*h*h*h + pol[1]*h*h + pol[2]*h
return(np.array([pol0, pol1, pol2, pol3]))
if pol_ord == 2:
pol0 = pol[0]
pol1 = pol[1] + 2.0*pol[0]*h
pol2 = pol[2] + pol[0]*h*h+pol[1]*h
return(np.array([pol0, pol1, pol2]))
if pol_ord == 1:
pol0 = pol[0]
pol1 = pol[1] + pol[0]*h
return(np.array([pol0, pol1]))
# Calculate the derivative of a polynomial pol at a point x
def pol_d(pol, x):
pol_ord = len(pol)-1
if pol_ord == 3:
return 3.0*pol[0]*x*x+2.0*pol[1]*x+pol[2]
if pol_ord == 2:
return 2.0*pol[0]*x+pol[1]
if pol_ord == 1:
return pol[0]#*np.ones(len(np.array(x)))
# Calculate the second derivative of a polynomial pol at a point x
def pol_dd(pol, x):
pol_ord = len(pol)-1
if pol_ord == 3:
return 6.0*pol[0]*x+2.0*pol[1]
if pol_ord == 2:
return 2.0*pol[0]
if pol_ord == 1:
return 0.0
# Calculate a polynomial value at a point x
def pol_calc(pol, x):
pol_f = np.poly1d(pol)
return(pol_f(x))
xm_in_px = 3.675 / 85 # Lane width (12 ft in m) is ~85 px on image
ym_in_px = 3.048 / 24 # Dashed line length (10 ft in m) is ~24 px on image
def px_to_m(px):
return xm_in_px*px
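# Worked example of the scale above: the ~85 px lane width maps back to
# px_to_m(85) = 3.675 m, and lane_offset() below reports the car's offset
# from the lane centre in the same metre units.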
# Calculate offset from the lane center
def lane_offset(left, right):
offset = 1280/2.0-(pol_calc(left, 1.0)+ pol_calc(right, 1.0))/2.0
return px_to_m(offset)
# Calculate radius of curvature
MAX_RADIUS = 10000
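# Radius of curvature of the lane polynomial x(y):
#     R = (1 + x'(y)**2)**1.5 / |x''(y)|
# evaluated after rescaling pixels to metres so that R comes out in metres;
# near-straight lines are capped at MAX_RADIUS.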
def r_curv(pol, y):
    if len(pol) == 2: # If the polynomial is a linear function
return MAX_RADIUS
else:
y_pol = np.linspace(0, 1, num=EQUID_POINTS)
x_pol = pol_calc(pol, y_pol)*xm_in_px
y_pol = y_pol*IMAGE_H*ym_in_px
pol = np.polyfit(y_pol, x_pol, len(pol)-1)
d_y = pol_d(pol, y)
dd_y = pol_dd(pol, y)
r = ((np.sqrt(1+d_y**2))**3)/abs(dd_y)
if r > MAX_RADIUS:
r = MAX_RADIUS
return r
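# Note: r_curv evaluates the usual radius-of-curvature formula
#   R = (1 + (dx/dy)**2)**1.5 / |d2x/dy2|
# after refitting the lane polynomial in metres (using xm_in_px and ym_in_px),
# and clips the result at MAX_RADIUS so a nearly straight lane does not
# produce an arbitrarily large radius.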
def lane_curv(left, right):
l = r_curv(left, 1.0)
r = r_curv(right, 1.0)
if l < MAX_RADIUS and r < MAX_RADIUS:
return (r_curv(left, 1.0)+r_curv(right, 1.0))/2.0
else:
if l < MAX_RADIUS:
return l
if r < MAX_RADIUS:
return r
return MAX_RADIUS
#Calculate approximated equidistant to a parabola
EQUID_POINTS = 25 # Number of points to use for the equidistant approximation
def equidistant(pol, d, max_l = 1, plot = False):
y_pol = np.linspace(0, max_l, num=EQUID_POINTS)
x_pol = pol_calc(pol, y_pol)
    y_pol *= IMAGE_H # Convert y coordinates back to [0..223] scale
x_m = []
y_m = []
k_m = []
for i in range(len(x_pol)-1):
        x_m.append((x_pol[i+1]-x_pol[i])/2.0+x_pol[i]) # Calculate midpoint positions between given points
y_m.append((y_pol[i+1]-y_pol[i])/2.0+y_pol[i])
if x_pol[i+1] == x_pol[i]:
            k_m.append(1e8) # A very big number
else:
k_m.append(-(y_pol[i+1]-y_pol[i])/(x_pol[i+1]-x_pol[i])) # Slope of perpendicular lines
x_m = np.array(x_m)
y_m = np.array(y_m)
k_m = np.array(k_m)
#Calculate equidistant points
y_eq = d*np.sqrt(1.0/(1+k_m**2))
x_eq = np.zeros_like(y_eq)
if d >= 0:
for i in range(len(x_m)):
if k_m[i] < 0:
y_eq[i] = y_m[i]-abs(y_eq[i])
else:
y_eq[i] = y_m[i]+abs(y_eq[i])
x_eq[i] = (x_m[i]-k_m[i]*y_m[i])+k_m[i]*y_eq[i]
else:
for i in range(len(x_m)):
if k_m[i] < 0:
y_eq[i] = y_m[i]+abs(y_eq[i])
else:
y_eq[i] = y_m[i]-abs(y_eq[i])
x_eq[i] = (x_m[i]-k_m[i]*y_m[i])+k_m[i]*y_eq[i]
y_eq /= IMAGE_H # Convert y coordinates back to [0..1] scale
y_pol /= IMAGE_H
y_m /= IMAGE_H
    pol_eq = np.polyfit(y_eq, x_eq, len(pol)-1) # Fit equidistant with a polynomial
if plot:
plt.plot(x_pol, y_pol, color='red', linewidth=1, label = 'Original line') #Original line
plt.plot(x_eq, y_eq, color='green', linewidth=1, label = 'Equidistant') #Equidistant
plt.plot(pol_calc(pol_eq, y_pol), y_pol, color='blue',
linewidth=1, label = 'Approximation') #Approximation
plt.legend()
for i in range(len(x_m)):
plt.plot([x_m[i],x_eq[i]], [y_m[i],y_eq[i]], color='black', linewidth=1) #Draw connection lines
plt.savefig('readme_img/equid.jpg')
return pol_eq
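# Usage sketch (hypothetical coefficients, for illustration only): build an
# approximate right-line polynomial by offsetting a left-line fit by ~90 px,
# which is how the video pipeline below recovers a missing line.
#   left_fit = np.array([10.0, -20.0, 600.0])    # x = f(y), y in [0..1]
#   right_fit_guess = equidistant(left_fit, 90.0, max_l=1.0)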
DEV_POL = 2 # Max mean squared error of the approximation
MSE_DEV = 1.1 # Minimum mean squared error ratio to consider higher order of the polynomial
def best_pol_ord(x, y):
pol1 = np.polyfit(y,x,1)
pred1 = pol_calc(pol1, y)
mse1 = mean_squared_error(x, pred1)
if mse1 < DEV_POL:
return pol1, mse1
pol2 = np.polyfit(y,x,2)
pred2 = pol_calc(pol2, y)
mse2 = mean_squared_error(x, pred2)
if mse2 < DEV_POL or mse1/mse2 < MSE_DEV:
return pol2, mse2
else:
pol3 = np.polyfit(y,x,3)
pred3 = pol_calc(pol3, y)
mse3 = mean_squared_error(x, pred3)
if mse2/mse3 < MSE_DEV:
return pol2, mse2
else:
return pol3, mse3
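# Usage sketch (synthetic points, for illustration only): best_pol_ord returns
# the first-order fit when its MSE is already below DEV_POL, and otherwise tries
# higher orders, stopping when a further increase in order gives no meaningful
# improvement (MSE ratio below MSE_DEV).
#   y = np.linspace(0.0, 1.0, 20)
#   x = 600.0 + 40.0*y + np.random.normal(0.0, 0.5, 20)  # roughly linear points
#   fit, mse = best_pol_ord(x, y)   # a first-order fit is expected here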
# Smooth polynomial functions of different degrees
def smooth_dif_ord(pol_p, x, y, new_ord):
x_p = pol_calc(pol_p, y)
x_new = (x+x_p)/2.0
return np.polyfit(y, x_new, new_ord)
# Calculate threshold for the left line
def thres_l_calc(sens):
thres = -0.0045*sens**2+1.7581*sens-115.0
if thres < 25*(382.0-sens)/382.0+5:
thres = 25*(382.0-sens)/382.0+5
return thres
# Calculate threshold for the right line
def thres_r_calc(sens):
thres = -0.0411*sens**2+9.1708*sens-430.0
if sens<210:
if thres < sens/6:
thres = sens/6
else:
if thres < 20:
thres = 20
return thres
WINDOW_SIZE = 15 # Half of the sensor span
DEV = 7 # Maximum of the point deviation from the sensor center
SPEED = 2 / IMAGE_H # Pixels shift per frame
POL_ORD = 2 # Default polynomial order
RANGE = 0.0 # Fraction of the image to skip
def find(img, left=True, p_ord=POL_ORD, pol = np.zeros(POL_ORD+1), max_n = 0):
x_pos = []
y_pos = []
    max_l = img.shape[0] # Number of rows in the image
for i in range(max_l-int(max_l*RANGE)):
y = max_l-i #Line number
y_01 = y / float(max_l) #y in [0..1] scale
        if abs(pol[-1]) > 0: # If it is not a still image or the first video frame
if y_01 >= max_n + SPEED: # If we can use pol to find center of the virtual sensor from the previous frame
cent = int(pol_calc(pol, y_01-SPEED))
if y == max_l:
if left:
cent = 605
else:
cent = 690
            else: # Extend the polynomial tangentially
k = pol_d(pol, max_n)
b = pol_calc(pol, max_n)-k*max_n
cent = int(k*y_01+b)
if cent > 1280-WINDOW_SIZE:
cent = 1280-WINDOW_SIZE
if cent < WINDOW_SIZE:
cent = WINDOW_SIZE
else: #If it is a still image
if len(x_pos) > 0: # If there are some points detected
                cent = x_pos[-1] # Use the previous point as the sensor center
else: #Initial guess on line position
if left:
cent = 605
else:
cent = 690
if left: #Subsample image
sens = 0.5*s_hls(img[max_l-1-i:max_l-i,cent-WINDOW_SIZE:cent+WINDOW_SIZE,:])\
+img[max_l-1-i:max_l-i,cent-WINDOW_SIZE:cent+WINDOW_SIZE,2]
else:
sens = img[max_l-1-i:max_l-i,cent-WINDOW_SIZE:cent+WINDOW_SIZE,2]
        if len(sens[0,:]) < WINDOW_SIZE: # If we are out of the image
break
x_max = max(sens[0,:]) #Find maximal value on the sensor
sens_mean = np.mean(sens[0,:])
# Get threshold
if left:
loc_thres = thres_l_calc(sens_mean)
loc_dev = DEV
else:
loc_thres = thres_r_calc(sens_mean)
loc_dev = DEV
if len(x_pos) == 0:
loc_dev = WINDOW_SIZE
if (x_max-sens_mean) > loc_thres and (x_max>100 or left):
if left:
x = list(reversed(sens[0,:])).index(x_max)
x = cent+WINDOW_SIZE-x
else:
x = list(sens[0,:]).index(x_max)
x = cent-WINDOW_SIZE+x
            if x-1 < 569.0*y_01 or x+1 > 569.0*y_01+711 or np.count_nonzero(sens[0,:]) < WINDOW_SIZE: # If the sensor touches the black triangle
break # We are done
            if abs(pol[-1]) < 1e-4: # If no polynomial was provided
x_pos.append(x)
y_pos.append(y_01)
else:
if abs(x-cent) < loc_dev:#*14.206*r_curv(pol, max_l)**-0.2869:
x_pos.append(x)
y_pos.append(y_01)
if len(x_pos) > 1:
return x_pos, y_pos
else:
return [0], [0.0]
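# How find() works, in brief: it scans the warped image row by row from the
# bottom, centres a small "virtual sensor" (2*WINDOW_SIZE px wide) on the
# expected line position (seeded by the previous frame's polynomial when one
# is supplied), and keeps the brightest pixel in the window whenever it rises
# above the sensitivity-dependent threshold from thres_l_calc/thres_r_calc.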
RANGE = 0.0
def get_lane(img, plot=False):
warp = transform(img, M)
img = undistort(img)
ploty = np.linspace(0, 1, num=warp.shape[0])
x2, y2 = find(warp)
x, y = find(warp, False)
right_fitx = pol_calc(best_pol_ord(x,y)[0], ploty)
left_fitx = pol_calc(best_pol_ord(x2,y2)[0], ploty)
y2 = np.int16(np.array(y2)*223.0) # Convert into [0..223] scale
y = np.int16(np.array(y)*223.0)
if plot:
for i in range(len(x)): # Plot points
cv2.circle(warp, (x[i], y[i]), 1, (255,50,255))
for i in range(len(x2)):
cv2.circle(warp, (x2[i], y2[i]), 1, (255,50,250))
show_img(warp)
plt.axis('off')
plt.plot(left_fitx, ploty*IMAGE_H, color='green', linewidth=1)
plt.plot(right_fitx, ploty*IMAGE_H, color='green', linewidth=1)
cv2.imwrite('img.jpg', warp)
return img, left_fitx, right_fitx, ploty*IMAGE_H
def draw_lane_img_p(img_path):
return cv2.imread(img_path)
def draw_lane(img, video=False):
if video:
img, left_fitx, right_fitx, ploty, left, right = get_lane_video(img)
else:
img, left_fitx, right_fitx, ploty = get_lane(img, False)
warp_zero = np.zeros((IMAGE_H,IMAGE_W)).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = cv2.warpPerspective(color_warp, Minv, (img.shape[1], img.shape[0]))
# Combine the result with the original image
result = cv2.addWeighted(img, 1.0, newwarp, 0.6, 0)
if video:
# Add text information on the frame
font = cv2.FONT_HERSHEY_SIMPLEX
text_pos = 'Pos of the car: '+str(np.round(lane_offset(left, right),2))+ ' m'
radius = np.round(lane_curv(left, right),2)
if radius >= MAX_RADIUS:
radius = 'Inf'
else:
radius = str(radius)
text_rad = 'Radius: '+radius+ ' m'
cv2.putText(result,text_pos,(10,25), font, 1,(255,255,255),2)
cv2.putText(result,text_rad,(10,75), font, 1,(255,255,255),2)
return(result)
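# Single-image usage sketch (hypothetical file name, for illustration only):
#   annotated = draw_lane(draw_lane_img_p('test_images/test1.jpg'))
#   show_img(annotated)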
right_fit_p = np.zeros(POL_ORD+1)
left_fit_p = np.zeros(POL_ORD+1)
r_len = 0
l_len = 0
lane_w_p = 90
MIN = 60 # Minimal line separation (in px)
MAX = 95 # Maximal line separation (in px)
MIN_POINTS = 10 #Minimal points to consider a line
MAX_N = 5 # Maximal frames without line detected to use previous frame
n_count = 0 # Frame counter
r_n = 0 # Number of frames with unsuccessful line detection
l_n = 0
def get_lane_video(img):
global right_fit_p, left_fit_p, r_len, l_len, n_count, r_n, l_n
sw = False
warp = transform(img, M)
img = undistort(img)
if l_n < MAX_N and n_count > 0:
x, y = find(warp, pol = left_fit_p, max_n = l_len)
else:
x, y = find(warp)
if len(x) > MIN_POINTS:
left_fit, mse_l = best_pol_ord(x,y)
if mse_l > DEV_POL*9 and n_count > 0:
left_fit = left_fit_p
l_n += 1
else:
l_n /= 2
else:
left_fit = left_fit_p
l_n += 1
if r_n < MAX_N and n_count > 0:
x2, y2 = find(warp, False, pol = right_fit_p, max_n = r_len)
else:
x2, y2 = find(warp, False)
if len(x2) > MIN_POINTS:
right_fit, mse_r = best_pol_ord(x2, y2)
if mse_r > DEV_POL*9 and n_count > 0:
right_fit = right_fit_p
r_n += 1
else:
r_n /= 2
else:
right_fit = right_fit_p
r_n += 1
if n_count > 0: # if not the first video frame
# Apply filter
        if len(left_fit_p) == len(left_fit): # If the new and previous polynomials have the same order
left_fit = pol_shift(left_fit_p, -SPEED)*(1.0-len(x)/((1.0-RANGE)*IMAGE_H))+left_fit*(len(x)/((1.0-RANGE)*IMAGE_H))
else:
left_fit = smooth_dif_ord(left_fit_p, x, y, len(left_fit)-1)
l_len = y[-1]
if len(right_fit_p) == len(right_fit):
right_fit = pol_shift(right_fit_p, -SPEED)*(1.0-len(x2)/((1.0-RANGE)*IMAGE_H))+right_fit*(len(x2)/((1.0-RANGE)*IMAGE_H))
else:
right_fit = smooth_dif_ord(right_fit_p, x2, y2, len(right_fit)-1)
r_len = y2[-1]
if len(x) > MIN_POINTS and len(x2) <= MIN_POINTS: # If we have only left line
lane_w = pol_calc(right_fit_p, 1.0)-pol_calc(left_fit_p, 1.0)
right_fit = smooth_dif_ord(right_fit_p, pol_calc(equidistant(left_fit, lane_w, max_l=l_len), y),
y, len(left_fit)-1)
r_len = l_len
r_n /=2
if len(x2) > MIN_POINTS and len(x) <= MIN_POINTS: # If we have only right line
lane_w = pol_calc(right_fit_p, 1.0)-pol_calc(left_fit_p, 1.0)
#print(lane_w)
left_fit = smooth_dif_ord(left_fit_p, pol_calc(equidistant(right_fit, -lane_w, max_l=r_len), y2),
y2, len(right_fit)-1)
l_len = r_len
l_n /=2
if (l_n < MAX_N and r_n < MAX_N):
max_y = max(RANGE, l_len, r_len)
else:
max_y = 1.0#max(RANGE, l_len, r_len)
sw = True
d1 = pol_calc(right_fit, 1.0)-pol_calc(left_fit, 1.0)
dm = pol_calc(right_fit, max_y)-pol_calc(left_fit, max_y)
if (d1 > MAX or d1 < 60 or dm < 0):
left_fit = left_fit_p
right_fit = right_fit_p
l_n += 1
r_n += 1
ploty = np.linspace(max_y, 1, num=IMAGE_H)
left_fitx = pol_calc(left_fit, ploty)
right_fitx = pol_calc(right_fit, ploty)
right_fit_p = np.copy(right_fit)
left_fit_p = np.copy(left_fit)
n_count += 1
return img, left_fitx, right_fitx, ploty*223.0, left_fit, right_fit
def init_params(ran):
global right_fit_p, left_fit_p, n_count, RANGE, MIN_POINTS
right_fit_p = np.zeros(POL_ORD+1)
left_fit_p = np.zeros(POL_ORD+1)
n_count = 0
RANGE = ran
MIN_POINTS = 25-15*ran
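# Hedged video-pipeline sketch (assumes plain OpenCV I/O; the original project
# may feed frames from a different video library):
#   init_params(0.0)
#   cap = cv2.VideoCapture('project_video.mp4')   # hypothetical input file
#   ok, frame = cap.read()
#   while ok:
#       annotated = draw_lane(frame, video=True)
#       ok, frame = cap.read()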
| mit |
trankmichael/scipy | doc/source/tutorial/stats/plots/kde_plot3.py | 132 | 1229 | import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
np.random.seed(12456)
x1 = np.random.normal(size=200) # random data, normal distribution
xs = np.linspace(x1.min()-1, x1.max()+1, 200)
kde1 = stats.gaussian_kde(x1)
kde2 = stats.gaussian_kde(x1, bw_method='silverman')
fig = plt.figure(figsize=(8, 6))
ax1 = fig.add_subplot(211)
ax1.plot(x1, np.zeros(x1.shape), 'b+', ms=12) # rug plot
ax1.plot(xs, kde1(xs), 'k-', label="Scott's Rule")
ax1.plot(xs, kde2(xs), 'b-', label="Silverman's Rule")
ax1.plot(xs, stats.norm.pdf(xs), 'r--', label="True PDF")
ax1.set_xlabel('x')
ax1.set_ylabel('Density')
ax1.set_title("Normal (top) and Student's T$_{df=5}$ (bottom) distributions")
ax1.legend(loc=1)
x2 = stats.t.rvs(5, size=200) # random data, T distribution
xs = np.linspace(x2.min() - 1, x2.max() + 1, 200)
kde3 = stats.gaussian_kde(x2)
kde4 = stats.gaussian_kde(x2, bw_method='silverman')
ax2 = fig.add_subplot(212)
ax2.plot(x2, np.zeros(x2.shape), 'b+', ms=12) # rug plot
ax2.plot(xs, kde3(xs), 'k-', label="Scott's Rule")
ax2.plot(xs, kde4(xs), 'b-', label="Silverman's Rule")
ax2.plot(xs, stats.t.pdf(xs, 5), 'r--', label="True PDF")
ax2.set_xlabel('x')
ax2.set_ylabel('Density')
plt.show()
| bsd-3-clause |
CalebBell/ht | ht/conv_free_immersed.py | 1 | 58245 | # -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from __future__ import division
from math import exp, log
__all__ = ['Nu_vertical_plate_Churchill',
'Nu_free_vertical_plate',
'Nu_free_vertical_plate_methods',
'Nu_horizontal_plate_McAdams',
'Nu_horizontal_plate_VDI',
'Nu_horizontal_plate_Rohsenow',
'Nu_free_horizontal_plate',
'Nu_free_horizontal_plate_methods',
'Nu_sphere_Churchill',
'Nu_vertical_cylinder_Griffiths_Davis_Morgan',
'Nu_vertical_cylinder_Jakob_Linke_Morgan',
'Nu_vertical_cylinder_Carne_Morgan',
'Nu_vertical_cylinder_Eigenson_Morgan',
'Nu_vertical_cylinder_Touloukian_Morgan',
'Nu_vertical_cylinder_McAdams_Weiss_Saunders',
'Nu_vertical_cylinder_Kreith_Eckert',
'Nu_vertical_cylinder_Hanesian_Kalish_Morgan',
'Nu_vertical_cylinder_Al_Arabi_Khamis',
'Nu_vertical_cylinder_Popiel_Churchill',
'Nu_vertical_cylinder',
'Nu_vertical_cylinder_methods',
'Nu_horizontal_cylinder_Churchill_Chu',
'Nu_horizontal_cylinder_Kuehn_Goldstein',
'Nu_horizontal_cylinder_Morgan',
'Nu_horizontal_cylinder',
'Nu_horizontal_cylinder_methods',
'Nu_coil_Xin_Ebadian']
def Nu_vertical_plate_Churchill(Pr, Gr):
r'''Calculates Nusselt number for natural convection around a vertical
plate according to the Churchill-Chu [1]_ correlation, also presented in
[2]_. Plate must be isothermal; an alternate expression exists for constant
heat flux.
.. math::
Nu_{L}=\left[0.825+\frac{0.387Ra_{L}^{1/6}}
{[1+(0.492/Pr)^{9/16}]^{8/27}}\right]^2
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
Returns
-------
Nu : float
Nusselt number with respect to height, [-]
Notes
-----
Although transition from laminar to turbulent is discrete in reality, this
equation provides a smooth transition in value from laminar to turbulent.
Checked with the original source.
Can be applied to vertical cylinders as well, subject to the criteria below:
.. math::
\frac{D}{L}\ge \frac{35}{Gr_L^{1/4}}
Examples
--------
From [2]_, Example 9.2, matches:
>>> Nu_vertical_plate_Churchill(0.69, 2.63E9)
147.16185223770603
References
----------
.. [1] Churchill, Stuart W., and Humbert H. S. Chu. "Correlating Equations
for Laminar and Turbulent Free Convection from a Vertical Plate."
International Journal of Heat and Mass Transfer 18, no. 11
(November 1, 1975): 1323-29. doi:10.1016/0017-9310(75)90243-4.
.. [2] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and
David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ:
Wiley, 2011.
'''
Ra = Pr*Gr
term = (0.825 + (0.387*Ra**(1/6.)*(1.0 + (Pr/0.492)**(-0.5625))**(-8.0/27.0)))
return term*term
Nu_free_vertical_plate_all_methods = ["Churchill"]
def Nu_free_vertical_plate_methods(Pr, Gr, H=None, W=None, check_ranges=True):
r'''This function returns a list of methods for calculating heat transfer
    coefficient for external free convection from a vertical plate.
Requires at a minimum a fluid's Prandtl number `Pr`, and the Grashof
number `Gr` for the system fluid (which require T and P to obtain).
`L` and `W` are not used by any correlations presently, but are included
for future support.
Parameters
----------
Pr : float
Prandtl number with respect to fluid properties [-]
Gr : float
Grashof number with respect to fluid properties and plate - fluid
temperature difference [-]
H : float, optional
Height of vertical plate, [m]
W : float, optional
Width of the vertical plate, [m]
check_ranges : bool, optional
Whether or not to return only correlations suitable for the provided
data, [-]
Returns
-------
methods : list[str]
List of methods which can be used to calculate `Nu` with the given
inputs, [-]
Examples
--------
>>> Nu_free_vertical_plate_methods(0.69, 2.63E9)
['Churchill']
'''
return Nu_free_vertical_plate_all_methods
def Nu_free_vertical_plate(Pr, Gr, buoyancy=None, H=None, W=None, Method=None):
r'''This function calculates the heat transfer coefficient for external
    free convection from a vertical plate.
Requires at a minimum a fluid's Prandtl number `Pr`, and the Grashof
number `Gr` for the system fluid (which require T and P to obtain).
`L` and `W` are not used by any correlations presently, but are included
for future support.
If no correlation's name is provided as `Method`, the 'Churchill'
correlation is selected.
Parameters
----------
Pr : float
Prandtl number with respect to fluid properties [-]
Gr : float
Grashof number with respect to fluid properties and plate - fluid
temperature difference [-]
buoyancy : bool, optional
Whether or not the plate's free convection is buoyancy assisted (hot
plate) or not, [-]
H : float, optional
Height of vertical plate, [m]
W : float, optional
Width of the vertical plate, [m]
Returns
-------
Nu : float
Nusselt number with respect to plate height, [-]
Other Parameters
----------------
Method : string, optional
A string of the function name to use;
one of ('Churchill', ).
Examples
--------
Turbulent example
>>> Nu_free_vertical_plate(0.69, 2.63E9, False)
147.16185223770603
'''
if Method is None:
Method2 = 'Churchill'
else:
Method2 = Method
if Method2 == 'Churchill':
return Nu_vertical_plate_Churchill(Pr, Gr)
else:
raise ValueError("Correlation name not recognized; see the "
"documentation for the available options.")
def Nu_horizontal_plate_McAdams(Pr, Gr, buoyancy=True):
r'''Calculates the Nusselt number for natural convection above a horizontal
plate according to the McAdams [1]_ correlations. The plate must be
isothermal. Four different equations are used, two each for laminar and
turbulent; the two sets of correlations are required because if the plate
is hot, buoyancy lifts the fluid off the plate and enhances free convection
whereas if the plate is cold, the cold fluid above it settles on it and
decreases the free convection.
Parameters
----------
Pr : float
Prandtl number with respect to fluid properties [-]
Gr : float
Grashof number with respect to fluid properties and plate - fluid
temperature difference [-]
buoyancy : bool, optional
Whether or not the plate's free convection is buoyancy assisted (hot
plate) or not, [-]
Returns
-------
Nu : float
Nusselt number with respect to length, [-]
Notes
-----
Examples
--------
>>> Nu_horizontal_plate_McAdams(5.54, 3.21e8, buoyancy=True)
181.73121274384457
>>> Nu_horizontal_plate_McAdams(5.54, 3.21e8, buoyancy=False)
55.44564799362829
>>> Nu_horizontal_plate_McAdams(.01, 3.21e8, buoyancy=True)
22.857041558492334
>>> Nu_horizontal_plate_McAdams(.01, 3.21e8, buoyancy=False)
11.428520779246167
References
----------
.. [1] McAdams, William Henry. Heat Transmission. 3E. Malabar, Fla:
Krieger Pub Co, 1985.
'''
Ra = Pr*Gr
if buoyancy:
if Ra <= 1E7:
Nu = .54*Ra**0.25
else:
Nu = 0.15*Ra**(1.0/3.0)
else:
if Ra <= 1E10:
Nu = .27*Ra**0.25
else:
Nu = .15*Ra**(1.0/3.0)
return Nu
def Nu_horizontal_plate_VDI(Pr, Gr, buoyancy=True):
r'''Calculates the Nusselt number for natural convection above a horizontal
plate according to the VDI [1]_ correlations. The plate must be
isothermal. Three different equations are used, one each for laminar and
turbulent for the heat transfer happening at upper surface case and one for
the case of heat transfer happening at the lower surface. The lower surface
    correlation is recommended for the laminar flow regime.
The two different sets of correlations are required because if the plate
is hot, buoyancy lifts the fluid off the plate and enhances free convection
whereas if the plate is cold, the cold fluid above it settles on it and
decreases the free convection.
Parameters
----------
Pr : float
Prandtl number with respect to fluid properties [-]
Gr : float
Grashof number with respect to fluid properties and plate - fluid
temperature difference [-]
buoyancy : bool, optional
Whether or not the plate's free convection is buoyancy assisted (hot
plate) or not, [-]
Returns
-------
Nu : float
Nusselt number with respect to length, [-]
Notes
-----
The characteristic length suggested for use is as follows, with `a` and
`b` being the length and width of the plate.
.. math::
L = \frac{ab}{2(a+b)}
The buoyancy enhanced cases are from [2]_; the other is said to be from
[3]_, although the equations there not quite the same and do not include
the Prandtl number correction.
Examples
--------
>>> Nu_horizontal_plate_VDI(5.54, 3.21e8, buoyancy=True)
203.89681224927565
>>> Nu_horizontal_plate_VDI(5.54, 3.21e8, buoyancy=False)
39.16864971535617
References
----------
.. [1] Gesellschaft, V. D. I., ed. VDI Heat Atlas. 2nd ed. 2010 edition.
Berlin ; New York: Springer, 2010.
.. [2] Stewartson, Keith. "On the Free Convection from a Horizontal Plate."
Zeitschrift Für Angewandte Mathematik Und Physik ZAMP 9, no. 3
(September 1, 1958): 276-82. https://doi.org/10.1007/BF02033031.
.. [3] Schlunder, Ernst U, and International Center for Heat and Mass
Transfer. Heat Exchanger Design Handbook. Washington:
Hemisphere Pub. Corp., 1987.
'''
Ra = Pr*Gr
if buoyancy:
f2 = (1.0 + (0.322/Pr)**(0.55))**(20.0/11.0)
if Ra*f2 < 7e4:
return 0.766*(Ra*f2)**0.2
else:
return 0.15*(Ra*f2)**(1.0/3.0)
else:
f1 = (1.0 + (0.492/Pr)**(9.0/16.0))**(-16.0/9.0)
return 0.6*(Ra*f1)**0.2
def Nu_horizontal_plate_Rohsenow(Pr, Gr, buoyancy=True):
r'''Calculates the Nusselt number for natural convection above a horizontal
plate according to the Rohsenow, Hartnett, and Cho (1998) [1]_ correlations.
The plate must be isothermal. Three different equations are used, one each
for laminar and turbulent for the heat transfer happening at upper surface
case and one for the case of heat transfer happening at the lower surface.
    The lower surface correlation is recommended for the laminar flow regime.
The two different sets of correlations are required because if the plate
is hot, buoyancy lifts the fluid off the plate and enhances free convection
whereas if the plate is cold, the cold fluid above it settles on it and
decreases the free convection.
Parameters
----------
Pr : float
Prandtl number with respect to fluid properties [-]
Gr : float
Grashof number with respect to fluid properties and plate - fluid
temperature difference [-]
buoyancy : bool, optional
Whether or not the plate's free convection is buoyancy assisted (hot
plate) or not, [-]
Returns
-------
Nu : float
Nusselt number with respect to length, [-]
Notes
-----
The characteristic length suggested for use is as follows, with `a` and
`b` being the length and width of the plate.
.. math::
L = \frac{ab}{2(a+b)}
Examples
--------
>>> Nu_horizontal_plate_Rohsenow(5.54, 3.21e8, buoyancy=True)
175.91054716322836
>>> Nu_horizontal_plate_Rohsenow(5.54, 3.21e8, buoyancy=False)
35.95799244863986
References
----------
.. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
'''
Ra = Pr*Gr
if buoyancy:
C_tU = 0.14*((1.0 + 0.01707*Pr)/(1.0 + 0.01*Pr))
C_tV = 0.13*Pr**0.22/(1.0 + 0.61*Pr**0.81)**0.42
        t1 = 1.0 # Ah/A # Ratio of heated to non-heated area
        t2 = 0.0 # Lf*P/A # Lf is the vertical distance between the lowest and highest points of the body
        # P is the perimeter, A is the area
Cl = (0.0972 - (0.0157 + 0.462*C_tV)*t1
+ (0.615*C_tV - 0.0548 - 6e-6*Pr)*t2)
Nu_T = 0.835*Cl*Ra**0.25 # average Cl
Nu_l = 1.4/(log(1.0 + 1.4/Nu_T))
Nu_t = C_tU*Ra**(1.0/3.0)
m = 10.0
Nu = ((Nu_l)**m + Nu_t**m)**(1.0/m)
return Nu
else:
# No friction/C term
Nu_T = 0.527*Ra**0.2/(1.0 + (1.9/Pr)**0.9)**(2.0/9.0)
Nu_l = 2.5/(log(1.0 + 2.5/Nu_T))
return Nu_l
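# A small sketch (not part of the library API) of the characteristic length
# recommended in the VDI and Rohsenow docstrings above, L = a*b/(2*(a+b)) with
# `a` and `b` the plate length and width; `a`, `b`, `Pr`, and `Gr` here are
# assumed inputs, with `Gr` evaluated using that characteristic length:
#   L_char = a*b/(2.0*(a + b))
#   Nu = Nu_horizontal_plate_VDI(Pr, Gr, buoyancy=True)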
conv_free_horizontal_plate_all_methods = {
'McAdams': (Nu_horizontal_plate_McAdams, ('Pr', 'Gr', 'buoyancy')),
'VDI': (Nu_horizontal_plate_VDI, ('Pr', 'Gr', 'buoyancy')),
'Rohsenow': (Nu_horizontal_plate_Rohsenow, ('Pr', 'Gr', 'buoyancy')),
}
Nu_free_horizontal_plate_all_methods = ["VDI", "McAdams", "Rohsenow"]
def Nu_free_horizontal_plate_methods(Pr, Gr, buoyancy, L=None, W=None,
check_ranges=True):
r'''This function returns a list of methods for calculating heat transfer
    coefficient for external free convection from a horizontal plate.
Requires at a minimum a fluid's Prandtl number `Pr`, and the Grashof
number `Gr` for the system fluid, temperatures, and geometry.
`L` and `W` are not used by any correlations presently, but are included
for future support.
Parameters
----------
Pr : float
Prandtl number with respect to fluid properties [-]
Gr : float
Grashof number with respect to fluid properties and plate - fluid
temperature difference [-]
buoyancy : bool, optional
Whether or not the plate's free convection is buoyancy assisted (hot
plate) or not, [-]
L : float, optional
Length of horizontal plate, [m]
W : float, optional
Width of the horizontal plate, [m]
check_ranges : bool, optional
Whether or not to return only correlations suitable for the provided
data, [-]
Returns
-------
methods : list[str]
List of methods which can be used to calculate `Nu` with the given
inputs, [-]
Examples
--------
>>> Nu_free_horizontal_plate_methods(0.69, 2.63E9, True)
['VDI', 'McAdams', 'Rohsenow']
'''
return Nu_free_horizontal_plate_all_methods
def Nu_free_horizontal_plate(Pr, Gr, buoyancy, L=None, W=None,
Method=None):
r'''This function calculates the heat transfer coefficient for external
free convection from a horizontal plate.
Requires at a minimum a fluid's Prandtl number `Pr`, and the Grashof
number `Gr` for the system fluid, temperatures, and geometry.
`L` and `W` are not used by any correlations presently, but are included
for future support.
If no correlation's name is provided as `Method`, the 'VDI' correlation is
selected.
Parameters
----------
Pr : float
Prandtl number with respect to fluid properties [-]
Gr : float
Grashof number with respect to fluid properties and plate - fluid
temperature difference [-]
buoyancy : bool, optional
Whether or not the plate's free convection is buoyancy assisted (hot
plate) or not, [-]
L : float, optional
Length of horizontal plate, [m]
W : float, optional
Width of the horizontal plate, [m]
Returns
-------
Nu : float
Nusselt number with respect to plate length, [-]
Other Parameters
----------------
Method : string, optional
A string of the function name to use, as in the dictionary
conv_free_horizontal_plate_methods
Examples
--------
Turbulent example
>>> Nu_free_horizontal_plate(5.54, 3.21e8, buoyancy=True)
203.89681224927565
>>> Nu_free_horizontal_plate(5.54, 3.21e8, buoyancy=True, Method='McAdams')
181.73121274384457
'''
if Method is None:
Method2 = "VDI"
else:
Method2 = Method
if Method2 == 'VDI':
return Nu_horizontal_plate_VDI(Pr=Pr, Gr=Gr, buoyancy=buoyancy)
if Method2 == 'McAdams':
return Nu_horizontal_plate_McAdams(Pr=Pr, Gr=Gr, buoyancy=buoyancy)
if Method2 == 'Rohsenow':
return Nu_horizontal_plate_Rohsenow(Pr=Pr, Gr=Gr, buoyancy=buoyancy)
else:
raise ValueError("Correlation name not recognized; see the "
"documentation for the available options.")
def Nu_sphere_Churchill(Pr, Gr):
r'''Calculates Nusselt number for natural convection around a sphere
according to the Churchill [1]_ correlation. Sphere must be isothermal.
.. math::
Nu_D=2+\frac{0.589Ra_D^{1/4}} {\left[1+(0.469/Pr)^{9/16}\right]^{4/9}}
\cdot\left\{1 + \frac{7.44\times 10^{-8}Ra}
{[1+(0.469/Pr)^{9/16}]^{16/9}}\right\}^{1/12}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Although transition from laminar to turbulent is discrete in reality, this
equation provides a smooth transition in value from laminar to turbulent.
Checked with the original source.
Good for Ra < 1E13. Limit of Nu is 2 at low Grashof numbers.
Examples
--------
>>> Nu_sphere_Churchill(.7, 1E7)
25.670869440317578
References
----------
.. [1] Schlunder, Ernst U, and International Center for Heat and Mass
Transfer. Heat Exchanger Design Handbook. Washington:
Hemisphere Pub. Corp., 1987.
'''
Ra = Pr*Gr
Nu = 2 + (0.589*Ra**0.25/(1 + (0.469/Pr)**(9/16.))**(4/9.)*(
1 + 7.44E-8*Ra/(1 + (0.469/Pr)**(9/16.))**(16/9.))**(1/12.))
return Nu
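# A minimal sketch (illustrative only) of turning the Nusselt number into a
# heat transfer coefficient via the definition Nu = h*D/k; `k` (fluid thermal
# conductivity, W/m/K) and `D` (sphere diameter, m) are assumed inputs:
#   h = Nu_sphere_Churchill(Pr=0.7, Gr=1E7)*k/D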
### Vertical cylinders
def Nu_vertical_cylinder_Griffiths_Davis_Morgan(Pr, Gr, turbulent=None):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ correlated by [2]_, as
presented in [3]_ and [4]_.
.. math::
Nu_H = 0.67 Ra_H^{0.25},\; 10^{7} < Ra < 10^{9}
.. math::
Nu_H = 0.0782 Ra_H^{0.357}, \; 10^{9} < Ra < 10^{11}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Cylinder of diameter 17.43 cm, length from 4.65 to 263.5 cm. Air as fluid.
Transition between ranges is not smooth.
If outside of range, no warning is given.
Examples
--------
>>> Nu_vertical_cylinder_Griffiths_Davis_Morgan(.7, 2E10)
327.6230596100138
References
----------
.. [1] Griffiths, Ezer, A. H. Davis, and Great Britain. The Transmission of
Heat by Radiation and Convection. London: H. M. Stationery off., 1922.
.. [2] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [3] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [4] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
if turbulent or (Ra > 1E9 and turbulent is None):
Nu = 0.0782*Ra**0.357
else:
Nu = 0.67*Ra**0.25
return Nu
def Nu_vertical_cylinder_Jakob_Linke_Morgan(Pr, Gr, turbulent=None):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ correlated by [2]_, as
presented in [3]_ and [4]_.
.. math::
Nu_H = 0.555 Ra_H^{0.25},\; 10^{4} < Ra < 10^{8}
.. math::
Nu_H = 0.129 Ra_H^{1/3},\; 10^{8} < Ra < 10^{12}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Cylinder of diameter 3.5 cm, length from L/D = 4.3. Air as fluid.
Transition between ranges is not smooth.
If outside of range, no warning is given. Results are presented rounded in
[4]_, and the second range is not shown in [3]_.
Examples
--------
>>> Nu_vertical_cylinder_Jakob_Linke_Morgan(.7, 2E10)
310.90835207860454
References
----------
.. [1] Jakob, M., and Linke, W., Warmeubergang beim Verdampfen von
Flussigkeiten an senkrechten und waagerechten Flaschen, Phys. Z.,
vol. 36, pp. 267-280, 1935.
.. [2] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [3] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [4] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
if turbulent or (Ra > 1E8 and turbulent is None):
Nu = 0.129*Ra**(1/3.)
else:
Nu = 0.555*Ra**0.25
return Nu
def Nu_vertical_cylinder_Carne_Morgan(Pr, Gr, turbulent=None):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ correlated by [2]_, as
presented in [3]_ and [4]_.
.. math::
Nu_H = 1.07 Ra_H^{0.28},\; 2\times 10^{6} < Ra < 2\times 10^{8}
.. math::
Nu_H = 0.152 Ra_H^{0.38},\; 2\times 10^{8} < Ra < 2\times 10^{11}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Cylinder of diameters 0.475 cm to 7.62 cm, L/D from 8 to 127. Isothermal
boundary condition was assumed, but not verified. Transition between ranges
is not smooth. If outside of range, no warning is given. The higher range
of [1]_ is not shown in [3]_, and the formula for the first is actually for
the second in [3]_.
Examples
--------
>>> Nu_vertical_cylinder_Carne_Morgan(.7, 2E8)
204.31470629065677
References
----------
.. [1] J. B. Carne. "LIX. Heat Loss by Natural Convection from Vertical
Cylinders." The London, Edinburgh, and Dublin Philosophical Magazine and
Journal of Science 24, no. 162 (October 1, 1937): 634-53.
doi:10.1080/14786443708565140.
.. [2] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [3] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [4] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
if turbulent or (Ra > 2E8 and turbulent is None):
return 0.152*Ra**0.38
else:
return 1.07*Ra**0.28
def Nu_vertical_cylinder_Eigenson_Morgan(Pr, Gr, turbulent=None):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ correlated by [2]_,
presented in [3]_ and in more detail in [4]_.
.. math::
        Nu_H = 0.48 Ra_H^{0.25},\; Ra < 10^{9}
.. math::
Nu_H = 51.5 + 0.0000726 Ra_H^{0.63},\; 10^{9} < Ra < 1.69 \times 10^{10}
.. math::
Nu_H = 0.148 Ra_H^{1/3} - 127.6 ,\; 1.69 \times 10^{10} < Ra
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Author presents results as appropriate for both flat plates and cylinders.
Height of 2.5 m with diameters of 2.4, 7.55, 15, 35, and 50 mm. Another
experiment of diameter 58 mm and length of 6.5 m was considered.
    Cylinder of diameters 0.475 cm to 7.62 cm, L/D from 8 to 127. Transition
between ranges is not smooth. If outside of range, no warning is given.
Formulas are presented similarly in [3]_ and [4]_, but only [4]_ shows
the transition formula.
Examples
--------
>>> Nu_vertical_cylinder_Eigenson_Morgan(0.7, 2E10)
230.55946525499715
References
----------
.. [1] Eigenson L (1940). Les lois gouvernant la transmission de la chaleur
aux gaz biatomiques par les parois des cylindres verticaux dans le cas
de convection naturelle. Dokl Akad Nauk SSSR 26:440-444
.. [2] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [3] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [4] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
if turbulent or (Ra > 1.69E10 and turbulent is None):
return 0.148*Ra**(1/3.) - 127.6
elif 1E9 < Ra < 1.69E10 and turbulent is not False:
return 51.5 + 0.0000726*Ra**0.63
else:
return 0.48*Ra**0.25
def Nu_vertical_cylinder_Touloukian_Morgan(Pr, Gr, turbulent=None):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ correlated by [2]_, as
presented in [3]_ and [4]_.
.. math::
Nu_H = 0.726 Ra_H^{0.25},\; 2\times 10^{8} < Ra < 4\times 10^{10}
.. math::
Nu_H = 0.0674 (Gr_H Pr^{1.29})^{1/3},\; 4\times 10^{10} < Ra < 9\times 10^{11}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
    Cylinder of diameter 2.75 inch, with heights of 6, 18, and 36.25 inch.
Temperature was controlled via multiple separately controlled heating
sections. Fluids were water and ethylene-glycol. Transition between ranges
is not smooth. If outside of range, no warning is given. [2]_, [3]_, and
[4]_ are in complete agreement about this formulation.
Examples
--------
>>> Nu_vertical_cylinder_Touloukian_Morgan(.7, 2E10)
249.72879961097854
References
----------
.. [1] Touloukian, Y. S, George A Hawkins, and Max Jakob. Heat Transfer by
Free Convection from Heated Vertical Surfaces to Liquids.
Trans. ASME 70, 13-18 (1948).
.. [2] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [3] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [4] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
if turbulent or (Ra > 4E10 and turbulent is None):
return 0.0674*(Gr*Pr**1.29)**(1/3.)
else:
return 0.726*Ra**0.25
def Nu_vertical_cylinder_McAdams_Weiss_Saunders(Pr, Gr, turbulent=None):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ and [2]_ correlated by
[3]_, as presented in [4]_, [5]_, and [6]_.
.. math::
Nu_H = 0.59 Ra_H^{0.25},\; 10^{4} < Ra < 10^{9}
.. math::
        Nu_H = 0.13 Ra_H^{1/3},\; 10^{9} < Ra < 10^{12}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Transition between ranges is not smooth. If outside of range, no warning is
given. For ranges under 10^4, a graph is provided, not included here.
Examples
--------
>>> Nu_vertical_cylinder_McAdams_Weiss_Saunders(.7, 2E10)
313.31849434277973
References
----------
.. [1] Weise, Rudolf. "Warmeubergang durch freie Konvektion an
quadratischen Platten." Forschung auf dem Gebiet des Ingenieurwesens
A 6, no. 6 (November 1935): 281-92. doi:10.1007/BF02592565.
.. [2] Saunders, O. A. "The Effect of Pressure Upon Natural Convection in
Air." Proceedings of the Royal Society of London A: Mathematical,
Physical and Engineering Sciences 157, no. 891 (November 2, 1936):
278-91. doi:10.1098/rspa.1936.0194.
.. [3] McAdams, William Henry. Heat Transmission. 3E. Malabar, Fla:
Krieger Pub Co, 1985.
.. [4] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [5] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [6] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
if turbulent or (Ra > 1E9 and turbulent is None):
return 0.13*Ra**(1/3.)
else:
return 0.59*Ra**0.25
def Nu_vertical_cylinder_Kreith_Eckert(Pr, Gr, turbulent=None):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ correlated by
[2]_, also as presented in [3]_, [4]_, and [5]_.
.. math::
Nu_H = 0.555 Ra_H^{0.25},\; 10^{5} < Ra < 10^{9}
.. math::
Nu_H = 0.021 Ra_H^{0.4},\; 10^{9} < Ra < 10^{12}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
Transition between ranges is not smooth. If outside of range, no warning is
given.
Examples
--------
>>> Nu_vertical_cylinder_Kreith_Eckert(.7, 2E10)
240.25393473033196
References
----------
.. [1] Eckert, E. R. G., Thomas W. Jackson, and United States. Analysis of
Turbulent Free-Convection Boundary Layer on Flat Plate. National
Advisory Committee for Aeronautics, no. 2207. Washington, D.C.: National
Advisoty Committee for Aeronautics, 1950.
.. [2] Kreith, Frank, Raj Manglik, and Mark Bohn. Principles of Heat
Transfer. Cengage, 2010.
.. [3] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [4] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [5] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
if turbulent or (Ra > 1E9 and turbulent is None):
return 0.021*Ra**0.4
else:
return 0.555*Ra**0.25
def Nu_vertical_cylinder_Hanesian_Kalish_Morgan(Pr, Gr):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to the results of [1]_ correlated by
[2]_, also as presented in [3]_ and [4]_.
.. math::
Nu_H = 0.48 Ra_H^{0.23},\; 10^{6} < Ra < 10^{8}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number [-]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
For air and fluoro-carbons. If outside of range, no warning is given.
Laminar range only!
Examples
--------
>>> Nu_vertical_cylinder_Hanesian_Kalish_Morgan(.7, 1E7)
18.014150492696604
References
----------
.. [1] Hanesian, D. and Kalish, R. "Heat Transfer by Natural Convection
with Fluorocarbon Gases." IEEE Transactions on Parts, Materials and
Packaging 6, no. 4 (December 1970): 147-148.
doi:10.1109/TPMP.1970.1136270.
.. [2] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [3] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [4] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
return 0.48*Ra**0.23
### Vertical cylinders, more complex correlations
def Nu_vertical_cylinder_Al_Arabi_Khamis(Pr, Gr, L, D, turbulent=None):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to [1]_, also as presented in [2]_ and [3]_.
.. math::
Nu_H = 2.9Ra_H^{0.25}/Gr_D^{1/12},\; 9.88 \times 10^7 \le Ra_H \le 2.7\times10^{9}
.. math::
Nu_H = 0.47 Ra_H^{0.333}/Gr_D^{1/12},\; 2.7 \times 10^9 \le Ra_H \le 2.95\times10^{10}
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number with respect to cylinder height [-]
L : float
Length of vertical cylinder, [m]
D : float
Diameter of cylinder, [m]
turbulent : bool or None, optional
Whether or not to force the correlation to return the turbulent
result; will return the laminar regime if False; leave as None for
automatic selection, [-]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
For air. Local Nusselt number results also given in [1]_. D from 12.75 to
51 mm; H from 300 to 2000 mm. Temperature kept constant by steam condensing.
If outside of range, no warning is given. Applies for range of:
.. math::
1.08 \times 10^4 \le Gr_D \le 6.9 \times 10^5
Examples
--------
>>> Nu_vertical_cylinder_Al_Arabi_Khamis(.71, 2E10, 10, 1)
280.39793209114765
References
----------
.. [1] Al-Arabi, M., and M. Khamis. "Natural Convection Heat Transfer from
Inclined Cylinders." International Journal of Heat and Mass Transfer 25,
no. 1 (January 1982): 3-15. doi:10.1016/0017-9310(82)90229-0.
.. [2] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [3] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Gr_D = Gr/L**3*D**3
Ra = Pr*Gr
if turbulent or (Ra > 2.6E9 and turbulent is None):
return 0.47*Ra**(1/3.)*Gr_D**(-1/12.)
else:
return 2.9*Ra**0.25*Gr_D**(-1/12.)
def Nu_vertical_cylinder_Popiel_Churchill(Pr, Gr, L, D):
r'''Calculates Nusselt number for natural convection around a vertical
isothermal cylinder according to [1]_, also presented in [2]_.
.. math::
\frac{Nu}{Nu_{L,fp}} = 1 + B\left[32^{0.5}Gr_L^{-0.25}\frac{L}{D}\right]^C
.. math::
B = 0.0571322 + 0.20305 Pr^{-0.43}
.. math::
C = 0.9165 - 0.0043Pr^{0.5} + 0.01333\ln Pr + 0.0004809/Pr
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number with respect to cylinder height [-]
L : float
Length of vertical cylinder, [m]
D : float
Diameter of cylinder, [m]
Returns
-------
Nu : float
Nusselt number, [-]
Notes
-----
For 0.01 < Pr < 100. Requires a vertical flat plate correlation.
Both [2], [3] present a power of 2 instead of 0.5 on the 32 in the equation,
but the original has the correct form.
Examples
--------
>>> Nu_vertical_cylinder_Popiel_Churchill(0.7, 1E10, 2.5, 1)
228.89790055149896
References
----------
.. [1] Popiel, C. O., J. Wojtkowiak, and K. Bober. "Laminar Free Convective
Heat Transfer from Isothermal Vertical Slender Cylinder." Experimental
Thermal and Fluid Science 32, no. 2 (November 2007): 607-613.
doi:10.1016/j.expthermflusci.2007.07.003.
.. [2] Popiel, Czeslaw O. "Free Convection Heat Transfer from Vertical
Slender Cylinders: A Review." Heat Transfer Engineering 29, no. 6
(June 1, 2008): 521-36. doi:10.1080/01457630801891557.
.. [3] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
B = 0.0571322 + 0.20305*Pr**-0.43
C = 0.9165 - 0.0043*Pr**0.5 + 0.01333*log(Pr) + 0.0004809/Pr
Nu_fp = Nu_vertical_plate_Churchill(Pr, Gr)
return Nu_fp*(1 + B*(32**0.5*Gr**-0.25*L/D)**C)
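# Illustration (values from the example above): because B and the bracketed
# slenderness term are positive, the correction always increases the flat
# plate result, e.g.
#   Nu_vertical_cylinder_Popiel_Churchill(0.7, 1E10, 2.5, 1)
# exceeds Nu_vertical_plate_Churchill(0.7, 1E10).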
# Nice Name : (function_call, does_turbulent, does_laminar, transition_Ra, is_only_Pr_Gr)
vertical_cylinder_correlations = {
'Churchill Vertical Plate': (Nu_vertical_plate_Churchill, True, True, None, True),
'Griffiths, Davis, & Morgan': (Nu_vertical_cylinder_Griffiths_Davis_Morgan, True, True, 1.00E+009, True),
'Jakob, Linke, & Morgan': (Nu_vertical_cylinder_Jakob_Linke_Morgan, True, True, 1.00E+008, True),
'Carne & Morgan': (Nu_vertical_cylinder_Carne_Morgan, True, True, 2.00E+008, True),
'Eigenson & Morgan': (Nu_vertical_cylinder_Eigenson_Morgan, True, True, 6.90E+011, True),
'Touloukian & Morgan': (Nu_vertical_cylinder_Touloukian_Morgan, True, True, 4.00E+010, True),
'McAdams, Weiss & Saunders': (Nu_vertical_cylinder_McAdams_Weiss_Saunders, True, True, 1.00E+009, True),
'Kreith & Eckert': (Nu_vertical_cylinder_Kreith_Eckert, True, True, 1.00E+009, True),
'Hanesian, Kalish & Morgan': (Nu_vertical_cylinder_Hanesian_Kalish_Morgan, False, True, 1.00E+008, True),
'Al-Arabi & Khamis': (Nu_vertical_cylinder_Al_Arabi_Khamis, True, True, 2.60E+009, False),
'Popiel & Churchill': (Nu_vertical_cylinder_Popiel_Churchill, False, True, 1.00E+009, False),
}
def Nu_vertical_cylinder_methods(Pr, Gr, L=None, D=None, check_ranges=True):
    r'''This function returns a list of correlation names for free convection
to a vertical cylinder.
    The preferred functions are 'Popiel & Churchill' for fully defined geometries,
and 'McAdams, Weiss & Saunders' otherwise.
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number with respect to cylinder height [-]
L : float, optional
Length of vertical cylinder, [m]
D : float, optional
Diameter of cylinder, [m]
check_ranges : bool, optional
Whether or not to return only correlations suitable for the provided
data, [-]
Returns
-------
methods : list[str]
List of methods which can be used to calculate `Nu` with the given
inputs
Examples
--------
>>> Nu_vertical_cylinder_methods(0.72, 1E7)[0]
'McAdams, Weiss & Saunders'
'''
if L is None or D is None:
return ['McAdams, Weiss & Saunders', 'Churchill Vertical Plate',
'Griffiths, Davis, & Morgan', 'Jakob, Linke, & Morgan', 'Carne & Morgan',
'Eigenson & Morgan', 'Touloukian & Morgan', 'Kreith & Eckert', 'Hanesian, Kalish & Morgan']
else:
return ['Popiel & Churchill', 'Churchill Vertical Plate', 'Griffiths, Davis, & Morgan',
'Jakob, Linke, & Morgan', 'Carne & Morgan', 'Eigenson & Morgan', 'Touloukian & Morgan',
'McAdams, Weiss & Saunders', 'Kreith & Eckert', 'Hanesian, Kalish & Morgan',
'Al-Arabi & Khamis']
def Nu_vertical_cylinder(Pr, Gr, L=None, D=None, Method=None):
r'''This function handles choosing which vertical cylinder free convection
correlation is used. Generally this is used by a helper class, but can be
used directly. Will automatically select the correlation to use if none is
provided; returns None if insufficient information is provided.
Preferred functions are 'Popiel & Churchill' for fully defined geometries,
and 'McAdams, Weiss & Saunders' otherwise.
Examples
--------
>>> Nu_vertical_cylinder(0.72, 1E7)
30.562236756513943
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number with respect to cylinder height [-]
L : float, optional
Length of vertical cylinder, [m]
D : float, optional
Diameter of cylinder, [m]
Returns
-------
Nu : float
Nusselt number, [-]
Other Parameters
----------------
Method : string, optional
A string of the function name to use, as in the dictionary
vertical_cylinder_correlations
'''
if Method is None:
if L is None or D is None:
Method2 = 'McAdams, Weiss & Saunders'
else:
Method2 = 'Popiel & Churchill'
else:
Method2 = Method
if Method2 == 'Churchill Vertical Plate':
return Nu_vertical_plate_Churchill(Pr=Pr, Gr=Gr)
elif Method2 == 'Griffiths, Davis, & Morgan':
return Nu_vertical_cylinder_Griffiths_Davis_Morgan(Pr=Pr, Gr=Gr)
elif Method2 == 'Jakob, Linke, & Morgan':
return Nu_vertical_cylinder_Jakob_Linke_Morgan(Pr=Pr, Gr=Gr)
elif Method2 == 'Carne & Morgan':
return Nu_vertical_cylinder_Carne_Morgan(Pr=Pr, Gr=Gr)
elif Method2 == 'Eigenson & Morgan':
return Nu_vertical_cylinder_Eigenson_Morgan(Pr=Pr, Gr=Gr)
elif Method2 == 'Touloukian & Morgan':
return Nu_vertical_cylinder_Touloukian_Morgan(Pr=Pr, Gr=Gr)
elif Method2 == 'McAdams, Weiss & Saunders':
return Nu_vertical_cylinder_McAdams_Weiss_Saunders(Pr=Pr, Gr=Gr)
elif Method2 == 'Kreith & Eckert':
return Nu_vertical_cylinder_Kreith_Eckert(Pr=Pr, Gr=Gr)
elif Method2 == 'Hanesian, Kalish & Morgan':
return Nu_vertical_cylinder_Hanesian_Kalish_Morgan(Pr=Pr, Gr=Gr)
elif Method2 == 'Al-Arabi & Khamis':
return Nu_vertical_cylinder_Al_Arabi_Khamis(Pr=Pr, Gr=Gr, L=L, D=D)
elif Method2 == 'Popiel & Churchill':
return Nu_vertical_cylinder_Popiel_Churchill(Pr=Pr, Gr=Gr, L=L, D=D)
else:
raise ValueError("Correlation name not recognized; see the "
"documentation for the available options.")
#import matplotlib.pyplot as plt
#import numpy as np
##L, D = 1.5, 0.1
#Pr, Gr = 0.72, 1E8
#methods = Nu_vertical_cylinder_methods(Pr, Gr)
#Grs = np.logspace(2, 12, 10000)
#
#for method in methods:
# Nus = [Nu_vertical_cylinder(Pr=Pr, Gr=i, Method=method) for i in Grs]
# plt.loglog(Grs, Nus, label=method)
#plt.legend()
#plt.show()
### Horizontal Cylinders
def Nu_horizontal_cylinder_Churchill_Chu(Pr, Gr):
r'''Calculates Nusselt number for natural convection around a horizontal
cylinder according to the Churchill-Chu [1]_ correlation, also presented in
[2]_. Cylinder must be isothermal; an alternate expression exists for
constant heat flux.
.. math::
Nu_{D}=\left[0.60+\frac{0.387Ra_{D}^{1/6}}
{[1+(0.559/Pr)^{9/16}]^{8/27}}\right]^2
Parameters
----------
Pr : float
Prandtl number [-]
Gr : float
Grashof number with respect to cylinder diameter, [-]
Returns
-------
Nu : float
Nusselt number with respect to cylinder diameter, [-]
Notes
-----
Although transition from laminar to turbulent is discrete in reality, this
equation provides a smooth transition in value from laminar to turbulent.
Checked with the original source, which has its powers unsimplified but
is equivalent.
[1]_ recommends 1E-5 as the lower limit for Ra, but no upper limit. [2]_
suggests an upper limit of 1E12.
Examples
--------
From [2]_, Example 9.2, matches:
>>> Nu_horizontal_cylinder_Churchill_Chu(0.69, 2.63E9)
139.13493970073597
References
----------
.. [1] Churchill, Stuart W., and Humbert H. S. Chu. "Correlating Equations
for Laminar and Turbulent Free Convection from a Horizontal Cylinder."
International Journal of Heat and Mass Transfer 18, no. 9
(September 1975): 1049-53. doi:10.1016/0017-9310(75)90222-7.
.. [2] Bergman, Theodore L., Adrienne S. Lavine, Frank P. Incropera, and
David P. DeWitt. Introduction to Heat Transfer. 6E. Hoboken, NJ:
Wiley, 2011.
'''
Ra = Pr*Gr
return (0.6 + 0.387*Ra**(1/6.)/(1. + (0.559/Pr)**(9/16.))**(8/27.))**2
def Nu_horizontal_cylinder_Kuehn_Goldstein(Pr, Gr):
r'''Calculates Nusselt number for natural convection around a horizontal
cylinder according to the Kuehn-Goldstein [1]_ correlation, also shown in
[2]_. Cylinder must be isothermal.
.. math::
\frac{2}{Nu_D} = \ln\left[1 + \frac{2}{\left[\left\{0.518Ra_D^{0.25}
\left[1 + \left(\frac{0.559}{Pr}\right)^{3/5}\right]^{-5/12}
\right\}^{15} + (0.1Ra_D^{1/3})^{15}\right]^{1/15}}\right]
Parameters
----------
Pr : float
Prandtl number with respect to film temperature [-]
Gr : float
Grashof number with respect to cylinder diameter, [-]
Returns
-------
Nu : float
Nusselt number with respect to cylinder diameter, [-]
Notes
-----
[1]_ suggests this expression is valid for all cases except low-Pr fluids.
[2]_ suggests no restrictions.
Examples
--------
>>> Nu_horizontal_cylinder_Kuehn_Goldstein(0.69, 2.63E9)
122.99323525628186
References
----------
.. [1] Kuehn, T. H., and R. J. Goldstein. "Correlating Equations for
Natural Convection Heat Transfer between Horizontal Circular Cylinders."
International Journal of Heat and Mass Transfer 19, no. 10
(October 1976): 1127-34. doi:10.1016/0017-9310(76)90145-9
.. [2] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
return 2./log(1 + 2./((0.518*Ra**0.25*(1. + (0.559/Pr)**0.6)**(-5/12.))**15
+ (0.1*Ra**(1/3.))**15)**(1/15.))
def Nu_horizontal_cylinder_Morgan(Pr, Gr):
r'''Calculates Nusselt number for natural convection around a horizontal
cylinder according to the Morgan [1]_ correlations, a product of a very
large review of the literature. Sufficiently common as to be shown in [2]_.
Cylinder must be isothermal.
.. math::
Nu_D = C Ra_D^n
+----------+----------+-------+-------+
| Gr min | Gr max | C | n |
+==========+==========+=======+=======+
| 10E-10 | 10E-2 | 0.675 | 0.058 |
+----------+----------+-------+-------+
| 10E-2 | 10E2 | 1.02 | 0.148 |
+----------+----------+-------+-------+
| 10E2 | 10E4 | 0.850 | 0.188 |
+----------+----------+-------+-------+
| 10E4 | 10E7 | 0.480 | 0.250 |
+----------+----------+-------+-------+
| 10E7 | 10E12 | 0.125 | 0.333 |
+----------+----------+-------+-------+
Parameters
----------
Pr : float
Prandtl number with respect to film temperature [-]
Gr : float
Grashof number with respect to cylinder diameter, [-]
Returns
-------
Nu : float
Nusselt number with respect to cylinder diameter, [-]
Notes
-----
    Most comprehensive review to date, with a newly proposed equation.
    Discontinuous at the jumps between ranges. Blindly extrapolates outside the
    upper and lower limits without warning.
Examples
--------
>>> Nu_horizontal_cylinder_Morgan(0.69, 2.63E9)
151.3881997228419
References
----------
.. [1] Morgan, V.T., The Overall Convective Heat Transfer from Smooth
Circular Cylinders, in Advances in Heat Transfer, eds. T.F. Irvin and
J.P. Hartnett, V 11, 199-264, 1975.
.. [2] Boetcher, Sandra K. S. "Natural Convection Heat Transfer From
Vertical Cylinders." In Natural Convection from Circular Cylinders,
23-42. Springer, 2014.
'''
Ra = Pr*Gr
if Ra < 1E-2:
C, n = 0.675, 0.058
elif Ra < 1E2:
C, n = 1.02, 0.148
elif Ra < 1E4:
C, n = 0.850, 0.188
elif Ra < 1E7:
C, n = 0.480, 0.250
else:
# up to 1E12
C, n = 0.125, 0.333
return C*Ra**n
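# A hedged sketch (not part of the original source) illustrating the small jump
# in the Morgan correlation at the Ra = 1E7 range boundary; the Pr value is
# arbitrary and chosen only for illustration:
#Pr = 0.7
#below = Nu_horizontal_cylinder_Morgan(Pr, 0.999E7/Pr)  # Ra just below 1E7
#above = Nu_horizontal_cylinder_Morgan(Pr, 1.001E7/Pr)  # Ra just above 1E7
#print(below, above)  # the values differ slightly because C and n change discontinuously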
horizontal_cylinder_correlations = {
'Churchill-Chu': (Nu_horizontal_cylinder_Churchill_Chu),
'Kuehn & Goldstein': (Nu_horizontal_cylinder_Kuehn_Goldstein),
'Morgan': (Nu_horizontal_cylinder_Morgan)
}
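# A hedged sketch of how the dictionary above could be used to dispatch a
# correlation by name instead of the explicit if/elif chain further below;
# the helper name here is hypothetical, not part of the module:
#def _nu_horizontal_by_name(name, Pr, Gr):
#    try:
#        return horizontal_cylinder_correlations[name](Pr=Pr, Gr=Gr)
#    except KeyError:
#        raise ValueError("Correlation name not recognized; see the "
#                         "documentation for the available options.")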
def Nu_horizontal_cylinder_methods(Pr, Gr, check_ranges=True):
    r'''This function returns a list of correlation names for free convection
    to a horizontal cylinder.
Preferred functions are 'Morgan' when discontinuous results are acceptable
and 'Churchill-Chu' otherwise.
Parameters
----------
Pr : float
Prandtl number with respect to film temperature [-]
Gr : float
Grashof number with respect to cylinder diameter, [-]
check_ranges : bool, optional
Whether or not to return only correlations suitable for the provided
data, [-]
Returns
-------
methods : list[str]
List of methods which can be used to calculate `Nu` with the given
inputs
Examples
--------
>>> Nu_horizontal_cylinder_methods(0.72, 1E7)[0]
'Morgan'
'''
return ['Morgan', 'Churchill-Chu', 'Kuehn & Goldstein']
def Nu_horizontal_cylinder(Pr, Gr, Method=None):
r'''This function handles choosing which horizontal cylinder free convection
correlation is used. Generally this is used by a helper class, but can be
used directly. Will automatically select the correlation to use if none is
    provided; raises an error if the correlation name is not recognized.
Preferred functions are 'Morgan' when discontinuous results are acceptable
and 'Churchill-Chu' otherwise.
Parameters
----------
Pr : float
Prandtl number with respect to film temperature [-]
Gr : float
Grashof number with respect to cylinder diameter, [-]
Returns
-------
Nu : float
Nusselt number with respect to cylinder diameter, [-]
Other Parameters
----------------
Method : string, optional
A string of the function name to use, as in the dictionary
horizontal_cylinder_correlations
Notes
-----
All fluid properties should be evaluated at the film temperature, the
average between the outer surface temperature of the solid, and the fluid
temperature far away from the heat transfer interface - normally the same
as the temperature before any cooling or heating occurs.
.. math::
T_f = (T_{\text{surface}} + T_\infty)/2
Examples
--------
>>> Nu_horizontal_cylinder(0.72, 1E7)
24.864192615468973
'''
if Method is None:
Method2 = 'Morgan'
else:
Method2 = Method
if Method2 == 'Churchill-Chu':
return Nu_horizontal_cylinder_Churchill_Chu(Pr=Pr, Gr=Gr)
elif Method2 == 'Kuehn & Goldstein':
return Nu_horizontal_cylinder_Kuehn_Goldstein(Pr=Pr, Gr=Gr)
elif Method2 == 'Morgan':
return Nu_horizontal_cylinder_Morgan(Pr=Pr, Gr=Gr)
else:
raise ValueError("Correlation name not recognized; see the "
"documentation for the available options.")
#import matplotlib.pyplot as plt
#import numpy as np
#Pr, Gr = 0.72, 1E8
#methods = Nu_horizontal_cylinder_methods(Pr, Gr)
#Grs = np.logspace(-2, 2.5, 10000)
#
#for method in methods:
# Nus = [Nu_horizontal_cylinder(Pr=Pr, Gr=i, Method=method) for i in Grs]
# plt.semilogx(Grs, Nus, label=method)
#plt.legend()
#plt.show()
def Nu_coil_Xin_Ebadian(Pr, Gr, horizontal=False):
r'''Calculates Nusselt number for natural convection around a vertical
or horizontal helical coil suspended in a fluid without
forced convection.
For horizontal cases:
.. math::
Nu_D = 0.318 Ra_D^{0.293},\; 5 \times {10}^{3} < Ra < 1 \times {10}^5
For vertical cases:
.. math::
Nu_D = 0.290 Ra_D^{0.293},\; 5 \times {10}^{3} < Ra < 1 \times {10}^5
Parameters
----------
Pr : float
Prandtl number calculated with the film temperature -
wall and temperature very far from the coil average, [-]
Gr : float
Grashof number calculated with the film temperature -
wall and temperature very far from the coil average,
and using the outer diameter of the coil [-]
horizontal : bool, optional
Whether the coil is horizontal or vertical, [-]
Returns
-------
Nu : float
Nusselt number using the outer diameter of the coil
and the film temperature, [-]
Notes
-----
This correlation is also reviewed in [2]_.
Examples
--------
>>> Nu_coil_Xin_Ebadian(0.7, 2E4, horizontal=False)
4.755689726250451
>>> Nu_coil_Xin_Ebadian(0.7, 2E4, horizontal=True)
5.2148597687849785
References
----------
.. [1] Xin, R. C., and M. A. Ebadian. "Natural Convection Heat Transfer
from Helicoidal Pipes." Journal of Thermophysics and Heat Transfer 10,
no. 2 (1996): 297-302.
.. [2] Prabhanjan, Devanahalli G., Timothy J. Rennie, and G. S. Vijaya
Raghavan. "Natural Convection Heat Transfer from Helical Coiled Tubes."
International Journal of Thermal Sciences 43, no. 4 (April 1, 2004):
359-65.
'''
Ra = Pr*Gr
if horizontal:
return 0.318*Ra**0.293
else:
return 0.290*Ra**0.293 | mit |
pbvarga1/qimage2ndarray | qimage2ndarray/__init__.py | 1 | 14973 | import sys as _sys
import numpy as _np
from .dynqt import QtGui as _qt
from .dynqt import qt as _qt_driver
if _qt_driver.name() == 'PythonQt':
from .qimageview import QImage2ndarray as _temp
_qimageview = _temp.qimageview
else:
from .qimageview_python import qimageview as _qimageview
__version__ = "1.5"
if _sys.byteorder == 'little':
_bgra = (0, 1, 2, 3)
else:
_bgra = (3, 2, 1, 0)
_bgra_fields = {'b': (_np.uint8, _bgra[0], 'blue'),
'g': (_np.uint8, _bgra[1], 'green'),
'r': (_np.uint8, _bgra[2], 'red'),
'a': (_np.uint8, _bgra[3], 'alpha')}
bgra_dtype = _np.dtype(_bgra_fields)
"""Complex dtype offering the named fields 'r','g','b', and 'a' and
corresponding long names, conforming to QImage_'s 32-bit memory layout."""
try:
_basestring = basestring
except NameError:
# 'basestring' undefined, must be Python 3
_basestring = str
def _qimage_or_filename_view(qimage):
if isinstance(qimage, _basestring):
qimage = _qt.QImage(qimage)
return _qimageview(qimage)
def raw_view(qimage):
"""Returns raw 2D view of the given QImage_'s memory. The result
will be a 2-dimensional numpy.ndarray with an appropriately sized
    integral dtype. (This function is not intended to be used
directly, but used internally by the other -- more convenient --
view creation functions.)
:param qimage: image whose memory shall be accessed via NumPy
:type qimage: QImage_
:rtype: numpy.ndarray_ with shape (height, width)"""
return _qimage_or_filename_view(qimage)
def byte_view(qimage, byteorder = 'little'):
"""Returns raw 3D view of the given QImage_'s memory. This will
always be a 3-dimensional numpy.ndarray with dtype numpy.uint8.
Note that for 32-bit images, the last dimension will be in the
[B,G,R,A] order (if little endian) due to QImage_'s memory layout
(the alpha channel will be present for Format_RGB32 images, too).
For 8-bit (indexed) images, the array will still be 3-dimensional,
i.e. shape will be (height, width, 1).
The order of channels in the last axis depends on the `byteorder`,
which defaults to 'little', i.e. BGRA order. You may set the
argument `byteorder` to 'big' to get ARGB, or use None which means
sys.byteorder here, i.e. return native order for the machine the
code is running on.
For your convenience, `qimage` may also be a filename, see
`Loading and Saving Images`_ in the documentation.
:param qimage: image whose memory shall be accessed via NumPy
:type qimage: QImage_
:param byteorder: specify order of channels in last axis
:rtype: numpy.ndarray_ with shape (height, width, 1 or 4) and dtype uint8"""
raw = _qimage_or_filename_view(qimage)
result = raw.view(_np.uint8).reshape(raw.shape + (-1, ))
if byteorder and byteorder != _sys.byteorder:
result = result[...,::-1]
return result
def rgb_view(qimage, byteorder = 'big'):
"""Returns RGB view of a given 32-bit color QImage_'s memory.
Similarly to byte_view(), the result is a 3D numpy.uint8 array,
but reduced to the rgb dimensions (without alpha), and reordered
(using negative strides in the last dimension) to have the usual
[R,G,B] order. The image must have 32 bit pixel size, i.e. be
RGB32, ARGB32, or ARGB32_Premultiplied. (Note that in the latter
case, the values are of course premultiplied with alpha.)
The order of channels in the last axis depends on the `byteorder`,
which defaults to 'big', i.e. RGB order. You may set the argument
`byteorder` to 'little' to get BGR, or use None which means
sys.byteorder here, i.e. return native order for the machine the
code is running on.
For your convenience, `qimage` may also be a filename, see
`Loading and Saving Images`_ in the documentation.
:param qimage: image whose memory shall be accessed via NumPy
:type qimage: QImage_ with 32-bit pixel type
:param byteorder: specify order of channels in last axis
:rtype: numpy.ndarray_ with shape (height, width, 3) and dtype uint8"""
if byteorder is None:
byteorder = _sys.byteorder
bytes = byte_view(qimage, byteorder)
if bytes.shape[2] != 4:
raise ValueError("For rgb_view, the image must have 32 bit pixel size (use RGB32, ARGB32, or ARGB32_Premultiplied)")
if byteorder == 'little':
return bytes[...,:3] # strip A off BGRA
else:
return bytes[...,1:] # strip A off ARGB
def alpha_view(qimage):
"""Returns alpha view of a given 32-bit color QImage_'s memory.
The result is a 2D numpy.uint8 array, equivalent to
byte_view(qimage)[...,3]. The image must have 32 bit pixel size,
i.e. be RGB32, ARGB32, or ARGB32_Premultiplied. Note that it is
not enforced that the given qimage has a format that actually
*uses* the alpha channel -- for Format_RGB32, the alpha channel
usually contains 255 everywhere.
For your convenience, `qimage` may also be a filename, see
`Loading and Saving Images`_ in the documentation.
:param qimage: image whose memory shall be accessed via NumPy
:type qimage: QImage_ with 32-bit pixel type
:rtype: numpy.ndarray_ with shape (height, width) and dtype uint8"""
bytes = byte_view(qimage, byteorder = None)
if bytes.shape[2] != 4:
raise ValueError("For alpha_view, the image must have 32 bit pixel size (use RGB32, ARGB32, or ARGB32_Premultiplied)")
return bytes[...,_bgra[3]]
def recarray_view(qimage):
"""Returns recarray_ view of a given 32-bit color QImage_'s
memory.
The result is a 2D array with a complex record dtype, offering the
named fields 'r','g','b', and 'a' and corresponding long names.
Thus, each color components can be accessed either via string
indexing or via attribute lookup (through numpy.recarray_):
For your convenience, `qimage` may also be a filename, see
`Loading and Saving Images`_ in the documentation.
>>> from PyQt4.QtGui import QImage, qRgb
>>> qimg = QImage(320, 240, QImage.Format_ARGB32)
>>> qimg.fill(qRgb(12,34,56))
>>>
>>> import qimage2ndarray
>>> v = qimage2ndarray.recarray_view(qimg)
>>>
>>> red = v["r"]
>>> red[10,10]
12
>>> pixel = v[10,10]
>>> pixel["r"]
12
>>> (v.g == v["g"]).all()
True
>>> (v.alpha == 255).all()
True
:param qimage: image whose memory shall be accessed via NumPy
:type qimage: QImage_ with 32-bit pixel type
:rtype: numpy.ndarray_ with shape (height, width) and dtype :data:`bgra_dtype`"""
raw = _qimage_or_filename_view(qimage)
if raw.itemsize != 4:
raise ValueError("For rgb_view, the image must have 32 bit pixel size (use RGB32, ARGB32, or ARGB32_Premultiplied)")
return raw.view(bgra_dtype, _np.recarray)
def _normalize255(array, normalize, clip = (0, 255)):
if normalize:
if normalize is True:
normalize = array.min(), array.max()
if clip == (0, 255):
clip = None
elif _np.isscalar(normalize):
normalize = (0, normalize)
nmin, nmax = normalize
if nmin:
array = array - nmin
if nmax != nmin:
scale = 255. / (nmax - nmin)
if scale != 1.0:
array = array * scale
if clip:
low, high = clip
_np.clip(array, low, high, array)
return array
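# A hedged sketch (illustrative values only, not part of the original module)
# of how the internal helper _normalize255 rescales and clips data:
#   _normalize255(_np.array([0., 5., 10.]), normalize=True)  ->  [0., 127.5, 255.]
#   _normalize255(_np.array([0., 5., 20.]), normalize=10)    ->  [0., 127.5, 255.]  (values above nmax clipped)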
def gray2qimage(gray, normalize = False):
"""Convert the 2D numpy array `gray` into a 8-bit, indexed QImage_
with a gray colormap. The first dimension represents the vertical
image axis.
The parameter `normalize` can be used to normalize an image's
value range to 0..255:
`normalize` = (nmin, nmax):
scale & clip image values from nmin..nmax to 0..255
`normalize` = nmax:
lets nmin default to zero, i.e. scale & clip the range 0..nmax
to 0..255
`normalize` = True:
scale image values to 0..255 (same as passing (gray.min(),
gray.max()))
If the source array `gray` contains masked values, the result will
have only 255 shades of gray, and one color map entry will be used
to make the corresponding pixels transparent.
A full alpha channel cannot be supported with indexed images;
instead, use `array2qimage` to convert into a 32-bit QImage.
:param gray: image data which should be converted (copied) into a QImage_
:type gray: 2D or 3D numpy.ndarray_ or `numpy.ma.array <masked arrays>`_
:param normalize: normalization parameter (see above, default: no value changing)
:type normalize: bool, scalar, or pair
:rtype: QImage_ with RGB32 or ARGB32 format"""
if _np.ndim(gray) != 2:
raise ValueError("gray2QImage can only convert 2D arrays" +
" (try using array2qimage)" if _np.ndim(gray) == 3 else "")
h, w = gray.shape
result = _qt.QImage(w, h, _qt.QImage.Format_Indexed8)
if not _np.ma.is_masked(gray):
for i in range(256):
result.setColor(i, _qt.qRgb(i,i,i))
_qimageview(result)[:] = _normalize255(gray, normalize)
else:
# map gray value 1 to gray value 0, in order to make room for
# transparent colormap entry:
result.setColor(0, _qt.qRgb(0,0,0))
for i in range(2, 256):
result.setColor(i-1, _qt.qRgb(i,i,i))
_qimageview(result)[:] = _normalize255(gray, normalize, clip = (1, 255)) - 1
result.setColor(255, 0)
_qimageview(result)[gray.mask] = 255
return result
def array2qimage(array, normalize = False):
"""Convert a 2D or 3D numpy array into a 32-bit QImage_. The
first dimension represents the vertical image axis; the optional
third dimension is supposed to contain 1-4 channels:
========= ===================
#channels interpretation
========= ===================
1 scalar/gray
2 scalar/gray + alpha
3 RGB
4 RGB + alpha
========= ===================
Scalar data will be converted into corresponding gray RGB triples;
if you want to convert to an (indexed) 8-bit image instead, use
`gray2qimage` (which cannot support an alpha channel though).
The parameter `normalize` can be used to normalize an image's
value range to 0..255:
`normalize` = (nmin, nmax):
scale & clip image values from nmin..nmax to 0..255
`normalize` = nmax:
lets nmin default to zero, i.e. scale & clip the range 0..nmax
to 0..255
`normalize` = True:
scale image values to 0..255 (same as passing (array.min(),
array.max()))
If `array` contains masked values, the corresponding pixels will
be transparent in the result. Thus, the result will be of
QImage.Format_ARGB32 if the input already contains an alpha
channel (i.e. has shape (H,W,4)) or if there are masked pixels,
and QImage.Format_RGB32 otherwise.
:param array: image data which should be converted (copied) into a QImage_
:type array: 2D or 3D numpy.ndarray_ or `numpy.ma.array <masked arrays>`_
:param normalize: normalization parameter (see above, default: no value changing)
:type normalize: bool, scalar, or pair
:rtype: QImage_ with RGB32 or ARGB32 format"""
if _np.ndim(array) == 2:
array = array[...,None]
elif _np.ndim(array) != 3:
raise ValueError("array2qimage can only convert 2D or 3D arrays (got %d dimensions)" % _np.ndim(array))
if array.shape[2] not in (1, 2, 3, 4):
raise ValueError("array2qimage expects the last dimension to contain exactly one (scalar/gray), two (gray+alpha), three (R,G,B), or four (R,G,B,A) channels")
h, w, channels = array.shape
hasAlpha = _np.ma.is_masked(array) or channels in (2, 4)
fmt = _qt.QImage.Format_ARGB32 if hasAlpha else _qt.QImage.Format_RGB32
result = _qt.QImage(w, h, fmt)
array = _normalize255(array, normalize)
if channels >= 3:
rgb_view(result)[:] = array[...,:3]
else:
rgb_view(result)[:] = array[...,:1] # scalar data
alpha = alpha_view(result)
if channels in (2, 4):
alpha[:] = array[...,-1]
else:
alpha[:] = 255
if _np.ma.is_masked(array):
alpha[:] *= _np.logical_not(_np.any(array.mask, axis = -1))
return result
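# A hedged round-trip sketch (not part of the original module); it assumes a
# working Qt binding is available through the dynqt wrapper used above:
#   import numpy, qimage2ndarray
#   rgb = (numpy.random.random((240, 320, 3)) * 255).astype(numpy.uint8)
#   qimg = qimage2ndarray.array2qimage(rgb)              # copies data into a QImage
#   assert (qimage2ndarray.rgb_view(qimg) == rgb).all()  # zero-copy view reads it back
#   assert (qimage2ndarray.alpha_view(qimg) == 255).all()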
def imread(filename, masked = False):
"""Convenience function that uses the QImage_ constructor to read an
image from the given file and return an `rgb_view` of the result.
This is intentionally similar to scipy.ndimage.imread (which uses
PIL), scipy.misc.imread, or matplotlib.pyplot.imread (using PIL
for non-PNGs).
For grayscale images, return 2D array (even if it comes from a 32-bit
representation; this is a consequence of the QImage API).
For images with an alpha channel, the resulting number of channels
will be 2 (grayscale+alpha) or 4 (RGB+alpha). Alternatively, one may
pass `masked = True' in order to get `numpy.ma.array <masked
arrays>`_ back. Note that only fully transparent pixels are masked
(and that masked arrays only support binary masks). The value of
`masked` is ignored when the loaded image has no alpha channel
(i.e., one would not get a masked array in that case).
This function has been added in version 1.3.
"""
qImage = _qt.QImage(filename)
isGray = qImage.isGrayscale()
if isGray and qImage.depth() == 8:
return byte_view(qImage)[...,0]
hasAlpha = qImage.hasAlphaChannel()
if hasAlpha:
targetFormat = _qt.QImage.Format_ARGB32
else:
targetFormat = _qt.QImage.Format_RGB32
if qImage.format() != targetFormat:
qImage = qImage.convertToFormat(targetFormat)
result = rgb_view(qImage)
if isGray:
result = result[...,0]
if hasAlpha:
if masked:
mask = (alpha_view(qImage) == 0)
if _np.ndim(result) == 3:
mask = _np.repeat(mask[...,None], 3, axis = 2)
result = _np.ma.masked_array(result, mask)
else:
result = _np.dstack((result, alpha_view(qImage)))
return result
def imsave(filename, image, normalize = False, format = None, quality = -1):
"""Convenience function that uses QImage.save to save an image to the
given file. This is intentionally similar to scipy.misc.imsave.
However, it supports different optional arguments:
:param normalize: see :func:`array2qimage` (which is used internally)
:param format: image filetype (e.g. 'PNG'), (default: check filename's suffix)
:param quality: see QImage.save (0 = small .. 100 = uncompressed, -1 = default compression)
:returns: boolean success, see QImage.save
This function has been added in version 1.4.
"""
qImage = array2qimage(image, normalize = normalize)
return qImage.save(filename, format, quality)
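# A hedged usage sketch for imread/imsave; the filenames below are illustrative
# only and not taken from the original project:
#   rgb = imread('photo.png')          # (H, W, 3) or (H, W, 4) array
#   ok = imsave('photo_copy.png', rgb) # returns True on success
#   gray = imread('scan.png')          # 2D array for 8-bit grayscale input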
| bsd-3-clause |
mortonjt/American-Gut | scripts/mod2_pcoa.py | 1 | 14487 | #!/usr/bin/env python
import os
import click
from matplotlib import use
use('Agg') # noqa
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from skbio import read, DistanceMatrix
from skbio.stats import isubsample
from skbio.stats.ordination import OrdinationResults
from collections import defaultdict
from collections import OrderedDict
ALPHA = 1.0
LINE_WIDTH = 0.3
LINE_WIDTH_WHITE = 2.0
LINE_WIDTH_BLACK = 1.0
@click.group()
def mod2_pcoa():
pass
@mod2_pcoa.command()
@click.option('--coords', required=True, type=click.Path(
resolve_path=True, readable=True, exists=True),
help='Coordinates file')
@click.option('--mapping_file', required=True, type=click.Path(
resolve_path=True, readable=True, exists=True),
help='Mapping file')
@click.option('--output', required=True, type=click.Path(exists=True,
writable=True, resolve_path=True), help='Output directory')
@click.option('--prefix', required=True, type=str, help='Output file prefix')
@click.option('--samples', required=False, type=str,
help='Comma separated list of samples to print')
def body_site(coords, mapping_file, output, prefix, samples):
"""Generates as many figures as samples in the coordinates file"""
o = read(coords, into=OrdinationResults)
# coordinates
c_df = pd.DataFrame(o.site, o.site_ids)
# mapping file
mf = pd.read_csv(mapping_file, '\t', converters=defaultdict(str),
index_col='#SampleID')
mf = mf.loc[o.site_ids]
if samples is None:
samples = mf.index
else:
samples = set(samples.split(',')).intersection(set(o.site_ids))
samples = mf.loc[samples].index
color_hmp_fecal = sns.color_palette('Paired', 12)[10] # light brown
color_agp_fecal = sns.color_palette('Paired', 12)[11] # dark brown
color_hmp_oral = sns.color_palette('Paired', 12)[0] # light blue
color_agp_oral = sns.color_palette('Paired', 12)[1] # dark blue
color_hmp_skin = sns.color_palette('Paired', 12)[2] # light green
color_agp_skin = sns.color_palette('Paired', 12)[3] # dark green
grp_colors = {'AGP-FECAL': color_agp_fecal,
'AGP-ORAL': color_agp_oral,
'AGP-SKIN': color_agp_skin,
'HMP-FECAL': color_hmp_fecal,
'GG-FECAL': color_hmp_fecal,
'PGP-FECAL': color_hmp_fecal,
'HMP-ORAL': color_hmp_oral,
'PGP-ORAL': color_hmp_oral,
'HMP-SKIN': color_hmp_skin,
'PGP-SKIN': color_hmp_skin}
for sample in samples:
# plot categories as 50 slices with random zorder
for grp, color in grp_colors.iteritems():
sub_coords = c_df[mf.TITLE_BODY_SITE == grp].values
for i in np.array_split(sub_coords, 50):
plt.scatter(i[:, 0], i[:, 1], color=color,
edgecolor=np.asarray(color)*0.6, lw=LINE_WIDTH,
alpha=ALPHA, zorder=np.random.rand())
# plot participant's dot
plt.scatter(c_df.loc[sample][0], c_df.loc[sample][1],
color=grp_colors[mf.loc[sample]['TITLE_BODY_SITE']],
s=270, edgecolor='w', zorder=1, lw=LINE_WIDTH_WHITE)
plt.scatter(c_df.loc[sample][0], c_df.loc[sample][1],
color=grp_colors[mf.loc[sample]['TITLE_BODY_SITE']],
s=250, edgecolor=np.asarray(
grp_colors[mf.loc[sample]['TITLE_BODY_SITE']])*0.6,
zorder=2, lw=LINE_WIDTH_BLACK)
plt.axis('off')
my_dpi = 72
figsize = (1000 / my_dpi, 1000 / my_dpi)
out_file = os.path.join(output, '.'.join([prefix, sample, 'pdf']))
plt.savefig(out_file, figsize=figsize, dpi=my_dpi)
plt.close()
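# A hedged example invocation of the body_site subcommand (the paths and sample
# IDs below are hypothetical, not taken from the original repository):
# python mod2_pcoa.py body_site --coords unweighted_unifrac_pc.txt \
#     --mapping_file mapping.txt --output figures --prefix Figure1 \
#     --samples 000001234,000005678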
@mod2_pcoa.command()
@click.option('--distmat', required=True, type=click.Path(resolve_path=True,
readable=True,
exists=True),
help='Input distance matrix to subsample nearest sample')
@click.option('--mapping_file', required=True, type=click.Path(
resolve_path=True, readable=True, exists=True),
help='Mapping file')
@click.option('--max', required=True, type=int,
help='Max number of samples per category value')
@click.option('--category', required=True, type=str,
help='The category to subsample in (likely COUNTRY)')
@click.option('--output', required=True, type=click.Path(exists=False,
writable=True, resolve_path=True), help='Output file')
def subsample_dm(distmat, mapping_file, max, category, output):
"""Subsample the distmat to max samples per category value"""
mf = pd.read_csv(mapping_file, '\t', converters=defaultdict(str),
index_col='#SampleID')
id_to_cat = dict(mf[category])
def bin_f(x):
return id_to_cat[x]
dm = read(distmat, into=DistanceMatrix)
dm = dm.filter([id for _, id in isubsample(dm.ids, max, bin_f=bin_f)])
dm.to_file(output)
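# A hedged example invocation of the subsample_dm subcommand (file names and
# the category value are hypothetical):
# python mod2_pcoa.py subsample_dm --distmat unweighted_unifrac_dm.txt \
#     --mapping_file mapping.txt --max 500 --category COUNTRY \
#     --output subsampled_dm.txt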
@mod2_pcoa.command()
@click.option('--coords', required=True, type=click.Path(resolve_path=True,
readable=True, exists=True), help='Coordinates file')
@click.option('--mapping_file', required=True, type=click.Path(
resolve_path=True, readable=True, exists=True),
help='Mapping file')
@click.option('--output', required=True, type=click.Path(exists=True,
writable=True, resolve_path=True), help='Output directory')
@click.option('--prefix', required=True, type=str, help='Output file prefix')
@click.option('--samples', required=False, type=str,
help='Comma separated list of samples to print')
@click.option('--distmat', required=True, type=click.Path(resolve_path=True,
readable=True,
exists=True),
help=('Input distance matrix to find nearest sample (if not '
                    'present in the coordinates)'))
def country(coords, mapping_file, output, prefix, samples, distmat):
"""Generates as many figures as samples in the coordinates file"""
o = read(coords, into=OrdinationResults)
o_id_lookup = set(o.site_ids)
dm = read(distmat, into=DistanceMatrix)
dm_id_lookup = {i: idx for idx, i in enumerate(dm.ids)}
coord_samples_in_dm = {idx for idx, i in enumerate(dm.ids)
if i in o_id_lookup}
# we'll be computing min values, so we need to avoid catching the diagonal
np.fill_diagonal(dm._data, np.inf)
x, y = o.site[:, 0], o.site[:, 1]
# coordinates
c_df = pd.DataFrame(o.site, o.site_ids)
# mapping file
mf = pd.read_csv(mapping_file, '\t', converters=defaultdict(str),
index_col='#SampleID')
# mf = mf.loc[o.site_ids]
if samples is None:
samples = dm.ids[:]
else:
samples = set(samples.split(',')).intersection(set(dm.ids))
samples = mf.loc[samples].index
color_Venezuela = sns.color_palette('Paired', 12)[10]
color_Malawi = sns.color_palette('Paired', 12)[1]
color_Western = sns.color_palette('Paired', 12)[4]
color_Highlight = sns.color_palette('Paired', 12)[5]
color_no_data = (0.5, 0.5, 0.5)
grp_colors = OrderedDict()
grp_colors['no_data'] = color_no_data
grp_colors['Australia'] = color_Western
grp_colors['Belgium'] = color_Western
grp_colors['Canada'] = color_Western
grp_colors['China'] = color_Western
grp_colors['Finland'] = color_Western
grp_colors['France'] = color_Western
grp_colors['Germany'] = color_Western
grp_colors['Great Britain'] = color_Western
grp_colors['Ireland'] = color_Western
grp_colors['Japan'] = color_Western
grp_colors['Netherlands'] = color_Western
grp_colors['New Zealand'] = color_Western
grp_colors['Norway'] = color_Western
grp_colors['Scotland'] = color_Western
grp_colors['Spain'] = color_Western
grp_colors['Switzerland'] = color_Western
grp_colors['Thailand'] = color_Western
grp_colors['United Arab Emirates'] = color_Western
grp_colors['United Kingdom'] = color_Western
grp_colors['United States of America'] = color_Western
grp_colors['Malawi'] = color_Malawi
grp_colors['Venezuela'] = color_Venezuela
for sample_to_plot in samples:
if sample_to_plot in o_id_lookup:
sample = sample_to_plot
else:
# find the closest sample in the distance matrix that is in the
# coordinates data
sample = None
for i in dm[dm_id_lookup[sample_to_plot]].argsort():
if i in coord_samples_in_dm:
sample = dm.ids[i]
break
# this should not ever happen
if sample is None:
raise ValueError("Unable to find a similar sample?")
        # contour plot superimposed
sns.kdeplot(x, y, cmap='bone')
sns.set_context(rc={"lines.linewidth": 0.75})
        # change participant's country's color to color_Highlight unless
# country is Venezuela or Malawi
if (mf.loc[sample_to_plot]['COUNTRY'] != 'Malawi') & (
mf.loc[sample_to_plot]['COUNTRY'] != 'Venezuela'):
grp_colors[mf.loc[sample_to_plot]['COUNTRY']] = color_Highlight
# plot each country except participant's according to colors above
for grp, color in grp_colors.iteritems():
if grp == mf.loc[sample_to_plot]['COUNTRY']:
continue
sub_coords = c_df[mf.COUNTRY == grp]
plt.scatter(sub_coords[0], sub_coords[1], color=color,
edgecolor=np.asarray(color)*0.6, lw=LINE_WIDTH,
alpha=ALPHA)
# now plot participant's country
grp = mf.loc[sample_to_plot]['COUNTRY']
color = grp_colors[grp]
sub_coords = c_df[mf.COUNTRY == grp]
plt.scatter(sub_coords[0], sub_coords[1], color=color,
edgecolor=np.asarray(color)*0.6, lw=LINE_WIDTH,
alpha=ALPHA)
# plot participant's dot
plt.scatter(c_df.loc[sample][0], c_df.loc[sample][1],
color=grp_colors[mf.loc[sample_to_plot]['COUNTRY']],
s=270, edgecolor='w', zorder=1, lw=LINE_WIDTH_WHITE)
plt.scatter(c_df.loc[sample][0], c_df.loc[sample][1],
color=grp_colors[mf.loc[sample_to_plot]['COUNTRY']],
s=250, edgecolor=np.asarray(grp_colors[
mf.loc[sample_to_plot]['COUNTRY']])*0.6,
zorder=2, lw=LINE_WIDTH_BLACK)
        # reset participant's country's color to color_Western unless country
# is Venezuela or Malawi
if (mf.loc[sample_to_plot]['COUNTRY'] != 'Malawi') & (
mf.loc[sample_to_plot]['COUNTRY'] != 'Venezuela'):
grp_colors[mf.loc[sample_to_plot]['COUNTRY']] = color_Western
plt.axis('off')
my_dpi = 72
figsize = (1000 / my_dpi, 1000 / my_dpi)
out_file = os.path.join(output, '.'.join([prefix, sample, 'pdf']))
plt.savefig(out_file, figsize=figsize, dpi=my_dpi)
plt.close()
@mod2_pcoa.command()
@click.option('--coords', required=True, type=click.Path(resolve_path=True,
readable=True, exists=True), help='Coordinates file')
@click.option('--mapping_file', required=True, type=click.Path(
resolve_path=True, readable=True, exists=True),
help='Mapping file')
@click.option('--color', required=True, type=str,
help='Metadata category to set color by')
@click.option('--output', required=True, type=click.Path(exists=True,
writable=True, resolve_path=True), help='Output directory')
@click.option('--prefix', required=True, type=str, help='Output file prefix')
@click.option('--samples', required=False, type=str,
help='Comma separated list of samples to print')
def gradient(coords, mapping_file, color, output, prefix, samples):
"""Generates as many figures as samples in the coordinates file"""
o = read(coords, into=OrdinationResults)
# coordinates
c_df = pd.DataFrame(o.site, o.site_ids)
# mapping file
mf = pd.read_csv(mapping_file, '\t', converters=defaultdict(str),
index_col='#SampleID')
mf = mf.loc[o.site_ids]
mf[color] = mf[color].convert_objects(convert_numeric=True)
if samples is None:
samples = mf.index
else:
samples = set(samples.split(',')).intersection(set(o.site_ids))
samples = mf.loc[samples].index
numeric = mf[~pd.isnull(mf[color])]
non_numeric = mf[pd.isnull(mf[color])]
color_array = plt.cm.RdBu(numeric[color]/max(numeric[color]))
for sample in samples:
# plot numeric metadata as colored gradient
ids = numeric.index
x, y = c_df.loc[ids][0], c_df.loc[ids][1]
plt.scatter(x, y, c=numeric[color], cmap=plt.get_cmap('RdBu'),
alpha=ALPHA, lw=LINE_WIDTH, edgecolor=color_array*0.6)
# plt.colorbar()
# plot non-numeric metadata as gray
ids = non_numeric.index
x, y = c_df.loc[ids][0], c_df.loc[ids][1]
plt.scatter(x, y, c='0.5', alpha=ALPHA, lw=LINE_WIDTH, edgecolor='0.3')
# plot individual's dot
try:
color_index = numeric.index.tolist().index(sample)
except ValueError:
color_index = None
if color_index is None:
_color = (0.5, 0.5, 0.5)
else:
_color = color_array[color_index]
plt.scatter(c_df.loc[sample][0], c_df.loc[sample][1],
color=_color, s=270, edgecolor='w', lw=LINE_WIDTH_WHITE)
plt.scatter(c_df.loc[sample][0], c_df.loc[sample][1],
color=_color, s=250, edgecolor=np.asarray(_color)*0.6,
lw=LINE_WIDTH_BLACK)
plt.axis('off')
my_dpi = 72
figsize = (1000 / my_dpi, 1000 / my_dpi)
out_file = os.path.join(output, '.'.join([prefix, sample, 'pdf']))
plt.savefig(out_file, figsize=figsize, dpi=my_dpi)
plt.close()
if __name__ == '__main__':
mod2_pcoa()
| bsd-3-clause |
szarvas/anc-field | examples/ie224-impulse.py | 1 | 5152 | # -*- coding: utf-8 -*-
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal, stats
import sys
sys.path.append('..')
from anc_field_py.ancfield import *
from anc_field_py.ancutil import *
def add_microphones(ancObject):
# error_mic
ancObject.AddMic([4,1.6,5.3])
# reference_front
ancObject.AddMic([4,1.6,4.5])
# reference_back
ancObject.AddMic([4,1.6,6.5])
# reference_left
ancObject.AddMic([3,1.6,5.5])
# reference_right
ancObject.AddMic([5,1.6,5.5])
# reference_bottom
ancObject.AddMic([4,0.6,5.5])
# reference_top
ancObject.AddMic([4,2.6,5.5])
return ancObject
# =========================================================================
# SIMULATION 1
# Calculating noise to microphone paths
# =========================================================================
#
# Trying to run this simulation on the CPU failed on an i7-3770: compiling the
# lrs_1.cl file fails. This may be because the scene's size is too large for
# the CPU. Compiling it for the built-in integrated GPU worked though.
#
# We create a simulation and immediately add microphones to it
anc = add_microphones(AncField('gpu', 'models/ie224'))
# noise_source
anc.AddSource([4,1.6,1.0], 5*impulse(2000, 6*32000, 32000))
anc.Visualize(1.6)
(x,y_noise) = anc.Run(4)
# Saving the impulse responses
np.savetxt('ie224-noise-to-error.dat', y_noise[0,:])
np.savetxt('ie224-noise-to-reference_front.dat', y_noise[1,:])
np.savetxt('ie224-noise-to-reference_back.dat', y_noise[2,:])
np.savetxt('ie224-noise-to-reference_left.dat', y_noise[3,:])
np.savetxt('ie224-noise-to-reference_right.dat', y_noise[4,:])
np.savetxt('ie224-noise-to-reference_bottom.dat', y_noise[5,:])
np.savetxt('ie224-noise-to-reference_top.dat', y_noise[6,:])
# =========================================================================
# SIMULATION 2
# Calculating actuator to microphone paths
# =========================================================================
#
# We create a simulation and immediately add microphones to it
anc = add_microphones(AncField('gpu', 'models/ie224'))
# actuator
anc.AddSource([4,1.6,5.5], 5*impulse(2000, 6*32000, 32000))
anc.Visualize(1.6)
(x,y_actuator) = anc.Run(4)
# Saving the impulse responses
np.savetxt('ie224-actuator-to-error.dat', y_actuator[0,:])
np.savetxt('ie224-actuator-to-reference_front.dat', y_actuator[1,:])
np.savetxt('ie224-actuator-to-reference_back.dat', y_actuator[2,:])
np.savetxt('ie224-actuator-to-reference_left.dat', y_actuator[3,:])
np.savetxt('ie224-actuator-to-reference_right.dat', y_actuator[4,:])
np.savetxt('ie224-actuator-to-reference_bottom.dat', y_actuator[5,:])
np.savetxt('ie224-actuator-to-reference_top.dat', y_actuator[6,:])
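# A hedged sketch of how the saved impulse responses could be reloaded later,
# e.g. to filter a noise recording through the noise-to-error path; the signal
# variable below is hypothetical and not produced by this script:
# h_noise_error = np.loadtxt('ie224-noise-to-error.dat')
# h_act_error = np.loadtxt('ie224-actuator-to-error.dat')
# y = signal.fftconvolve(some_noise_signal, h_noise_error)  # some_noise_signal is an assumption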
# =========================================================================
# GENERATING IMAGES FOR THE REPORT
# Calculating actuator to microphone paths
# =========================================================================
#
# Saving figures for the field simulation report
fig, ax = plt.subplots()
ax.plot(y_noise[0,:])
plt.title('ie224-noise-to-error')
fig.savefig('ie224-noise-to-error.png')
fig, ax = plt.subplots()
ax.plot(y_noise[1,:])
plt.title('ie224-noise-to-reference_front')
fig.savefig('ie224-noise-to-reference_front.png')
fig, ax = plt.subplots()
ax.plot(y_noise[2,:])
plt.title('ie224-noise-to-reference_back')
fig.savefig('ie224-noise-to-reference_back.png')
fig, ax = plt.subplots()
ax.plot(y_noise[3,:])
plt.title('ie224-noise-to-reference_left')
fig.savefig('ie224-noise-to-reference_left.png')
fig, ax = plt.subplots()
ax.plot(y_noise[4,:])
plt.title('ie224-noise-to-reference_right')
fig.savefig('ie224-noise-to-reference_right.png')
fig, ax = plt.subplots()
ax.plot(y_noise[5,:])
plt.title('ie224-noise-to-reference_bottom')
fig.savefig('ie224-noise-to-reference_bottom.png')
fig, ax = plt.subplots()
ax.plot(y_noise[6,:])
plt.title('ie224-noise-to-reference_top')
fig.savefig('ie224-noise-to-reference_top.png')
# Saving figures for the field simulation report
fig, ax = plt.subplots()
ax.plot(y_actuator[0,:])
plt.title('ie224-actuator-to-error')
fig.savefig('ie224-actuator-to-error.png')
fig, ax = plt.subplots()
ax.plot(y_actuator[1,:])
plt.title('ie224-actuator-to-reference_front')
fig.savefig('ie224-actuator-to-reference_front.png')
fig, ax = plt.subplots()
ax.plot(y_actuator[2,:])
plt.title('ie224-actuator-to-reference_back')
fig.savefig('ie224-actuator-to-reference_back.png')
fig, ax = plt.subplots()
ax.plot(y_actuator[3,:])
plt.title('ie224-actuator-to-reference_left')
fig.savefig('ie224-actuator-to-reference_left.png')
fig, ax = plt.subplots()
ax.plot(y_actuator[4,:])
plt.title('ie224-actuator-to-reference_right')
fig.savefig('ie224-actuator-to-reference_right.png')
fig, ax = plt.subplots()
ax.plot(y_actuator[5,:])
plt.title('ie224-actuator-to-reference_bottom')
fig.savefig('ie224-actuator-to-reference_bottom.png')
fig, ax = plt.subplots()
ax.plot(y_actuator[6,:])
plt.title('ie224-actuator-to-reference_top')
fig.savefig('ie224-actuator-to-reference_top.png')
| gpl-3.0 |
aerosara/thesis | notebooks_archive_10112014/pycse Examples.py | 1 | 2176 | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=3>
# Example from pycse 1
# <codecell>
# copied from http://kitchingroup.cheme.cmu.edu/blog/tag/events/
from pycse import odelay
import matplotlib.pyplot as plt
import numpy as np
def ode(Y,x):
y1, y2 = Y
dy1dx = y2
dy2dx = -y1
return [dy1dx, dy2dx]
def event1(Y, x):
y1, y2 = Y
value = y2 - (-1.0)
isterminal = True
direction = 0
return value, isterminal, direction
def event2(Y, x):
dy1dx, dy2dx = ode(Y,x)
value = dy1dx - 0.0
isterminal = False
direction = -1 # derivative is decreasing towards a maximum
return value, isterminal, direction
Y0 = [2.0, 1.0]
xspan = np.linspace(0, 5)
X, Y, XE, YE, IE = odelay(ode, Y0, xspan, events=[event1, event2])
plt.plot(X, Y)
for ie,xe,ye in zip(IE, XE, YE):
if ie == 1: #this is the second event
y1,y2 = ye
plt.plot(xe, y1, 'ro')
plt.legend(['$y_1$', '$y_2$'], loc='best')
#plt.savefig('images/odelay-mult-eq.png')
plt.show()
# <headingcell level=3>
# Example from pycse 2
# <codecell>
# copied from: http://kitchingroup.cheme.cmu.edu/pycse/pycse.html#sec-10-1-8
# 10.1.8 Stopping the integration of an ODE at some condition
from pycse import *
import numpy as np
k = 0.23
Ca0 = 2.3
def dCadt(Ca, t):
return -k * Ca**2
def stop(Ca, t):
isterminal = True
direction = 0
value = 1.0 - Ca
return value, isterminal, direction
tspan = np.linspace(0.0, 10.0)
t, CA, TE, YE, IE = odelay(dCadt, Ca0, tspan, events=[stop])
print 'At t = {0:1.2f} seconds the concentration of A is {1:1.2f} mol/L.'.format(t[-1], float(CA[-1]))
# <headingcell level=3>
# fsolve example
# <codecell>
from math import cos
def func(x):
return x + 2*cos(x) # finds where this is zero
def func2(x):
out = [x[0]*cos(x[1]) - 4]
out.append(x[1]*x[0] - x[1] - 5)
return out # finds where both elements of this array are zero
from scipy.optimize import fsolve
x0 = fsolve(func, 0.3) # initial guess
print x0
print func(x0)
#-1.02986652932
x02 = fsolve(func2, [1, 1]) # initial guesses
print x02
print func2(x02)
#[ 6.50409711 0.90841421]
| mit |
victorpoughon/master-thesis | python/outlier_analysis.py | 1 | 1365 | #!/usr/bin/env python3
import os
import os.path
import sys
import numpy as np
import matplotlib.pyplot as plt
from features_common import match_angle, base_plot
def outlier_frequency_plot(path, angles, threshold):
f, ax = base_plot()
ax.plot(100 * np.cumsum(np.abs(angles) > threshold) / angles.size)
ax.set_xlabel("Match number")
ax.set_ylabel("Outlier fraction (%)")
ax.set_ylim([0, 100])
f.savefig(path, bbox_inches='tight')
plt.close(f)
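# A hedged usage sketch (the angle values here are synthetic, for illustration):
# angles = np.random.vonmises(0, 4, size=1000)
# outlier_frequency_plot("plot_outliers_demo.pdf", angles, threshold=0.5)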
if __name__ == "__main__":
if len(sys.argv) < 2:
path = "."
else:
path = sys.argv[1]
# Produce outlier plots for all directories containing outlier_threshold.txt
for root, subdirs, files in os.walk(path):
if "matches.txt" in files:
shape = np.loadtxt(os.path.join(root, "shape.txt"))
matches = np.loadtxt(os.path.join(root, "matches.txt"), comments="#")
threshold = np.loadtxt(os.path.join(root, "outlier_threshold.txt"))
if threshold.size == 1:
print("outlier_analysis.py: " + root)
# Compute matches angles
angles = match_angle(matches, shape)
outlier_frequency_plot(os.path.join(root, "plot_outliers.pdf"), angles, threshold)
else:
print("outlier_analysis.py: " + root + " --- empty outlier_threshold.txt")
| mit |
SamHames/scikit-image | skimage/viewer/canvastools/base.py | 1 | 5472 | import numpy as np
try:
from matplotlib import lines
except ImportError:
pass
__all__ = ['CanvasToolBase', 'ToolHandles']
def _pass(*args):
pass
class CanvasToolBase(object):
"""Base canvas tool for matplotlib axes.
Parameters
----------
ax : :class:`matplotlib.axes.Axes`
Matplotlib axes where tool is displayed.
on_move : function
Function called whenever a control handle is moved.
This function must accept the end points of line as the only argument.
on_release : function
Function called whenever the control handle is released.
on_enter : function
Function called whenever the "enter" key is pressed.
useblit : bool
If True, update canvas by blitting, which is much faster than normal
redrawing (turn off for debugging purposes).
"""
def __init__(self, ax, on_move=None, on_enter=None, on_release=None,
useblit=True):
self.ax = ax
self.canvas = ax.figure.canvas
self.img_background = None
self.cids = []
self._artists = []
self.active = True
if useblit:
self.connect_event('draw_event', self._blit_on_draw_event)
self.useblit = useblit
self.callback_on_move = _pass if on_move is None else on_move
self.callback_on_enter = _pass if on_enter is None else on_enter
self.callback_on_release = _pass if on_release is None else on_release
self.connect_event('key_press_event', self._on_key_press)
def connect_event(self, event, callback):
"""Connect callback with an event.
This should be used in lieu of `figure.canvas.mpl_connect` since this
function stores call back ids for later clean up.
"""
cid = self.canvas.mpl_connect(event, callback)
self.cids.append(cid)
def disconnect_events(self):
"""Disconnect all events created by this widget."""
for c in self.cids:
self.canvas.mpl_disconnect(c)
def ignore(self, event):
"""Return True if event should be ignored.
This method (or a version of it) should be called at the beginning
of any event callback.
"""
return not self.active
def set_visible(self, val):
for artist in self._artists:
artist.set_visible(val)
def _blit_on_draw_event(self, event=None):
self.img_background = self.canvas.copy_from_bbox(self.ax.bbox)
self._draw_artists()
def _draw_artists(self):
for artist in self._artists:
self.ax.draw_artist(artist)
def remove(self):
"""Remove artists and events from axes.
Note that the naming here mimics the interface of Matplotlib artists.
"""
#TODO: For some reason, RectangleTool doesn't get properly removed
self.disconnect_events()
for a in self._artists:
a.remove()
def redraw(self):
"""Redraw image and canvas artists.
This method should be called by subclasses when artists are updated.
"""
if self.useblit and self.img_background is not None:
self.canvas.restore_region(self.img_background)
self._draw_artists()
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
def _on_key_press(self, event):
if event.key == 'enter':
self.callback_on_enter(self.geometry)
self.set_visible(False)
self.redraw()
@property
def geometry(self):
"""Geometry information that gets passed to callback functions."""
return None
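# A hedged sketch (not part of scikit-image) of the minimal surface a subclass
# of CanvasToolBase is expected to provide: create artists, store them in
# self._artists, and expose the tool's state through the `geometry` property.
# The class below is a hypothetical example, not an existing tool:
#
# class PointTool(CanvasToolBase):
#     def __init__(self, ax, **kwargs):
#         super(PointTool, self).__init__(ax, **kwargs)
#         self._point = ax.plot([0], [0], 'ro', animated=True)[0]
#         self._artists = [self._point]
#         self.connect_event('button_press_event', self._on_click)
#
#     def _on_click(self, event):
#         if self.ignore(event) or event.inaxes != self.ax:
#             return
#         self._point.set_data([event.xdata], [event.ydata])
#         self.callback_on_move(self.geometry)
#         self.redraw()
#
#     @property
#     def geometry(self):
#         return np.transpose(self._point.get_data())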
class ToolHandles(object):
"""Control handles for canvas tools.
Parameters
----------
ax : :class:`matplotlib.axes.Axes`
Matplotlib axes where tool handles are displayed.
x, y : 1D arrays
Coordinates of control handles.
marker : str
Shape of marker used to display handle. See `matplotlib.pyplot.plot`.
marker_props : dict
Additional marker properties. See :class:`matplotlib.lines.Line2D`.
"""
def __init__(self, ax, x, y, marker='o', marker_props=None):
self.ax = ax
props = dict(marker=marker, markersize=7, mfc='w', ls='none',
alpha=0.5, visible=False)
props.update(marker_props if marker_props is not None else {})
self._markers = lines.Line2D(x, y, animated=True, **props)
self.ax.add_line(self._markers)
self.artist = self._markers
@property
def x(self):
return self._markers.get_xdata()
@property
def y(self):
return self._markers.get_ydata()
def set_data(self, pts, y=None):
"""Set x and y positions of handles"""
if y is not None:
x = pts
pts = np.array([x, y])
self._markers.set_data(pts)
def set_visible(self, val):
self._markers.set_visible(val)
def set_animated(self, val):
self._markers.set_animated(val)
def draw(self):
self.ax.draw_artist(self._markers)
def closest(self, x, y):
"""Return index and pixel distance to closest index."""
pts = np.transpose((self.x, self.y))
# Transform data coordinates to pixel coordinates.
pts = self.ax.transData.transform(pts)
diff = pts - ((x, y))
dist = np.sqrt(np.sum(diff**2, axis=1))
return np.argmin(dist), np.min(dist)
| bsd-3-clause |
kaichogami/sympy | sympy/physics/quantum/state.py | 58 | 29186 | """Dirac notation for states."""
from __future__ import print_function, division
from sympy import (cacheit, conjugate, Expr, Function, integrate, oo, sqrt,
Tuple)
from sympy.core.compatibility import u, range
from sympy.printing.pretty.stringpict import stringPict
from sympy.physics.quantum.qexpr import QExpr, dispatch_method
__all__ = [
'KetBase',
'BraBase',
'StateBase',
'State',
'Ket',
'Bra',
'TimeDepState',
'TimeDepBra',
'TimeDepKet',
'Wavefunction'
]
#-----------------------------------------------------------------------------
# States, bras and kets.
#-----------------------------------------------------------------------------
# ASCII brackets
_lbracket = "<"
_rbracket = ">"
_straight_bracket = "|"
# Unicode brackets
# MATHEMATICAL ANGLE BRACKETS
_lbracket_ucode = u("\N{MATHEMATICAL LEFT ANGLE BRACKET}")
_rbracket_ucode = u("\N{MATHEMATICAL RIGHT ANGLE BRACKET}")
# LIGHT VERTICAL BAR
_straight_bracket_ucode = u("\N{LIGHT VERTICAL BAR}")
# Other options for unicode printing of <, > and | for Dirac notation.
# LEFT-POINTING ANGLE BRACKET
# _lbracket = u"\u2329"
# _rbracket = u"\u232A"
# LEFT ANGLE BRACKET
# _lbracket = u"\u3008"
# _rbracket = u"\u3009"
# VERTICAL LINE
# _straight_bracket = u"\u007C"
class StateBase(QExpr):
"""Abstract base class for general abstract states in quantum mechanics.
All other state classes defined will need to inherit from this class. It
carries the basic structure for all other states such as dual, _eval_adjoint
and label.
This is an abstract base class and you should not instantiate it directly,
instead use State.
"""
@classmethod
def _operators_to_state(self, ops, **options):
""" Returns the eigenstate instance for the passed operators.
This method should be overridden in subclasses. It will handle being
passed either an Operator instance or set of Operator instances. It
should return the corresponding state INSTANCE or simply raise a
NotImplementedError. See cartesian.py for an example.
"""
raise NotImplementedError("Cannot map operators to states in this class. Method not implemented!")
def _state_to_operators(self, op_classes, **options):
""" Returns the operators which this state instance is an eigenstate
of.
This method should be overridden in subclasses. It will be called on
state instances and be passed the operator classes that we wish to make
into instances. The state instance will then transform the classes
appropriately, or raise a NotImplementedError if it cannot return
        operator instances. See cartesian.py for examples.
"""
raise NotImplementedError(
"Cannot map this state to operators. Method not implemented!")
@property
def operators(self):
"""Return the operator(s) that this state is an eigenstate of"""
from .operatorset import state_to_operators # import internally to avoid circular import errors
return state_to_operators(self)
def _enumerate_state(self, num_states, **options):
raise NotImplementedError("Cannot enumerate this state!")
def _represent_default_basis(self, **options):
return self._represent(basis=self.operators)
#-------------------------------------------------------------------------
# Dagger/dual
#-------------------------------------------------------------------------
@property
def dual(self):
"""Return the dual state of this one."""
return self.dual_class()._new_rawargs(self.hilbert_space, *self.args)
@classmethod
def dual_class(self):
"""Return the class used to construt the dual."""
raise NotImplementedError(
'dual_class must be implemented in a subclass'
)
def _eval_adjoint(self):
"""Compute the dagger of this state using the dual."""
return self.dual
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _pretty_brackets(self, height, use_unicode=True):
# Return pretty printed brackets for the state
# Ideally, this could be done by pform.parens but it does not support the angled < and >
# Setup for unicode vs ascii
if use_unicode:
lbracket, rbracket = self.lbracket_ucode, self.rbracket_ucode
slash, bslash, vert = u('\N{BOX DRAWINGS LIGHT DIAGONAL UPPER RIGHT TO LOWER LEFT}'), \
u('\N{BOX DRAWINGS LIGHT DIAGONAL UPPER LEFT TO LOWER RIGHT}'), \
u('\N{BOX DRAWINGS LIGHT VERTICAL}')
else:
lbracket, rbracket = self.lbracket, self.rbracket
slash, bslash, vert = '/', '\\', '|'
# If height is 1, just return brackets
if height == 1:
return stringPict(lbracket), stringPict(rbracket)
# Make height even
height += (height % 2)
brackets = []
for bracket in lbracket, rbracket:
# Create left bracket
if bracket in set([_lbracket, _lbracket_ucode]):
bracket_args = [ ' ' * (height//2 - i - 1) +
slash for i in range(height // 2)]
bracket_args.extend(
[ ' ' * i + bslash for i in range(height // 2)])
# Create right bracket
elif bracket in set([_rbracket, _rbracket_ucode]):
bracket_args = [ ' ' * i + bslash for i in range(height // 2)]
bracket_args.extend([ ' ' * (
height//2 - i - 1) + slash for i in range(height // 2)])
# Create straight bracket
elif bracket in set([_straight_bracket, _straight_bracket_ucode]):
bracket_args = [vert for i in range(height)]
else:
raise ValueError(bracket)
brackets.append(
stringPict('\n'.join(bracket_args), baseline=height//2))
return brackets
def _sympystr(self, printer, *args):
contents = self._print_contents(printer, *args)
return '%s%s%s' % (self.lbracket, contents, self.rbracket)
def _pretty(self, printer, *args):
from sympy.printing.pretty.stringpict import prettyForm
# Get brackets
pform = self._print_contents_pretty(printer, *args)
lbracket, rbracket = self._pretty_brackets(
pform.height(), printer._use_unicode)
# Put together state
pform = prettyForm(*pform.left(lbracket))
pform = prettyForm(*pform.right(rbracket))
return pform
def _latex(self, printer, *args):
contents = self._print_contents_latex(printer, *args)
# The extra {} brackets are needed to get matplotlib's latex
# rendered to render this properly.
return '{%s%s%s}' % (self.lbracket_latex, contents, self.rbracket_latex)
class KetBase(StateBase):
"""Base class for Kets.
This class defines the dual property and the brackets for printing. This is
an abstract base class and you should not instantiate it directly, instead
use Ket.
"""
lbracket = _straight_bracket
rbracket = _rbracket
lbracket_ucode = _straight_bracket_ucode
rbracket_ucode = _rbracket_ucode
lbracket_latex = r'\left|'
rbracket_latex = r'\right\rangle '
@classmethod
def default_args(self):
return ("psi",)
@classmethod
def dual_class(self):
return BraBase
def __mul__(self, other):
"""KetBase*other"""
from sympy.physics.quantum.operator import OuterProduct
if isinstance(other, BraBase):
return OuterProduct(self, other)
else:
return Expr.__mul__(self, other)
def __rmul__(self, other):
"""other*KetBase"""
from sympy.physics.quantum.innerproduct import InnerProduct
if isinstance(other, BraBase):
return InnerProduct(other, self)
else:
return Expr.__rmul__(self, other)
#-------------------------------------------------------------------------
# _eval_* methods
#-------------------------------------------------------------------------
def _eval_innerproduct(self, bra, **hints):
"""Evaluate the inner product betweeen this ket and a bra.
This is called to compute <bra|ket>, where the ket is ``self``.
This method will dispatch to sub-methods having the format::
``def _eval_innerproduct_BraClass(self, **hints):``
Subclasses should define these methods (one for each BraClass) to
teach the ket how to take inner products with bras.
"""
return dispatch_method(self, '_eval_innerproduct', bra, **hints)
def _apply_operator(self, op, **options):
"""Apply an Operator to this Ket.
This method will dispatch to methods having the format::
``def _apply_operator_OperatorName(op, **options):``
Subclasses should define these methods (one for each OperatorName) to
teach the Ket how operators act on it.
Parameters
==========
op : Operator
The Operator that is acting on the Ket.
options : dict
A dict of key/value pairs that control how the operator is applied
to the Ket.
"""
return dispatch_method(self, '_apply_operator', op, **options)
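# A hedged sketch (not part of sympy) of the dispatch convention documented in
# the two methods above; the class and operator names are hypothetical:
#
#   class MyKet(Ket):
#       @classmethod
#       def dual_class(self):
#           return MyBra
#       def _eval_innerproduct_MyBra(self, bra, **hints):
#           return 1 if bra.label == self.label else 0
#       def _apply_operator_MyOperator(self, op, **options):
#           return 2*self    # pretend this ket is an eigenstate with eigenvalue 2
#
#   class MyBra(Bra):
#       @classmethod
#       def dual_class(self):
#           return MyKet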
class BraBase(StateBase):
"""Base class for Bras.
This class defines the dual property and the brackets for printing. This
is an abstract base class and you should not instantiate it directly,
instead use Bra.
"""
lbracket = _lbracket
rbracket = _straight_bracket
lbracket_ucode = _lbracket_ucode
rbracket_ucode = _straight_bracket_ucode
lbracket_latex = r'\left\langle '
rbracket_latex = r'\right|'
@classmethod
def _operators_to_state(self, ops, **options):
state = self.dual_class().operators_to_state(ops, **options)
return state.dual
def _state_to_operators(self, op_classes, **options):
return self.dual._state_to_operators(op_classes, **options)
def _enumerate_state(self, num_states, **options):
dual_states = self.dual._enumerate_state(num_states, **options)
return [x.dual for x in dual_states]
@classmethod
def default_args(self):
return self.dual_class().default_args()
@classmethod
def dual_class(self):
return KetBase
def __mul__(self, other):
"""BraBase*other"""
from sympy.physics.quantum.innerproduct import InnerProduct
if isinstance(other, KetBase):
return InnerProduct(self, other)
else:
return Expr.__mul__(self, other)
def __rmul__(self, other):
"""other*BraBase"""
from sympy.physics.quantum.operator import OuterProduct
if isinstance(other, KetBase):
return OuterProduct(other, self)
else:
return Expr.__rmul__(self, other)
def _represent(self, **options):
"""A default represent that uses the Ket's version."""
from sympy.physics.quantum.dagger import Dagger
return Dagger(self.dual._represent(**options))
class State(StateBase):
"""General abstract quantum state used as a base class for Ket and Bra."""
pass
class Ket(State, KetBase):
"""A general time-independent Ket in quantum mechanics.
Inherits from State and KetBase. This class should be used as the base
class for all physical, time-independent Kets in a system. This class
and its subclasses will be the main classes that users will use for
expressing Kets in Dirac notation [1]_.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the
ket. This will usually be its symbol or its quantum numbers. For
time-dependent state, this will include the time.
Examples
========
    Create a simple Ket and look at its properties::
>>> from sympy.physics.quantum import Ket, Bra
>>> from sympy import symbols, I
>>> k = Ket('psi')
>>> k
|psi>
>>> k.hilbert_space
H
>>> k.is_commutative
False
>>> k.label
(psi,)
    Kets know about their associated bra::
>>> k.dual
<psi|
>>> k.dual_class()
<class 'sympy.physics.quantum.state.Bra'>
Take a linear combination of two kets::
>>> k0 = Ket(0)
>>> k1 = Ket(1)
>>> 2*I*k0 - 4*k1
2*I*|0> - 4*|1>
Compound labels are passed as tuples::
>>> n, m = symbols('n,m')
>>> k = Ket(n,m)
>>> k
|nm>
References
==========
.. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
"""
@classmethod
def dual_class(self):
return Bra
class Bra(State, BraBase):
"""A general time-independent Bra in quantum mechanics.
Inherits from State and BraBase. A Bra is the dual of a Ket [1]_. This
class and its subclasses will be the main classes that users will use for
expressing Bras in Dirac notation.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the
ket. This will usually be its symbol or its quantum numbers. For
time-dependent state, this will include the time.
Examples
========
Create a simple Bra and look at its properties::
>>> from sympy.physics.quantum import Ket, Bra
>>> from sympy import symbols, I
>>> b = Bra('psi')
>>> b
<psi|
>>> b.hilbert_space
H
>>> b.is_commutative
False
    Bras know about their dual Kets::
>>> b.dual
|psi>
>>> b.dual_class()
<class 'sympy.physics.quantum.state.Ket'>
Like Kets, Bras can have compound labels and be manipulated in a similar
manner::
>>> n, m = symbols('n,m')
>>> b = Bra(n,m) - I*Bra(m,n)
>>> b
-I*<mn| + <nm|
Symbols in a Bra can be substituted using ``.subs``::
>>> b.subs(n,m)
<mm| - I*<mm|
References
==========
.. [1] http://en.wikipedia.org/wiki/Bra-ket_notation
"""
@classmethod
def dual_class(self):
return Ket
#-----------------------------------------------------------------------------
# Time dependent states, bras and kets.
#-----------------------------------------------------------------------------
class TimeDepState(StateBase):
"""Base class for a general time-dependent quantum state.
This class is used as a base class for any time-dependent state. The main
difference between this class and the time-independent state is that this
class takes a second argument that is the time in addition to the usual
label argument.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
"""
#-------------------------------------------------------------------------
# Initialization
#-------------------------------------------------------------------------
@classmethod
def default_args(self):
return ("psi", "t")
#-------------------------------------------------------------------------
# Properties
#-------------------------------------------------------------------------
@property
def label(self):
"""The label of the state."""
return self.args[:-1]
@property
def time(self):
"""The time of the state."""
return self.args[-1]
#-------------------------------------------------------------------------
# Printing
#-------------------------------------------------------------------------
def _print_time(self, printer, *args):
return printer._print(self.time, *args)
_print_time_repr = _print_time
_print_time_latex = _print_time
def _print_time_pretty(self, printer, *args):
pform = printer._print(self.time, *args)
return pform
def _print_contents(self, printer, *args):
label = self._print_label(printer, *args)
time = self._print_time(printer, *args)
return '%s;%s' % (label, time)
def _print_label_repr(self, printer, *args):
label = self._print_sequence(self.label, ',', printer, *args)
time = self._print_time_repr(printer, *args)
return '%s,%s' % (label, time)
def _print_contents_pretty(self, printer, *args):
label = self._print_label_pretty(printer, *args)
time = self._print_time_pretty(printer, *args)
return printer._print_seq((label, time), delimiter=';')
def _print_contents_latex(self, printer, *args):
label = self._print_sequence(
self.label, self._label_separator, printer, *args)
time = self._print_time_latex(printer, *args)
return '%s;%s' % (label, time)
class TimeDepKet(TimeDepState, KetBase):
"""General time-dependent Ket in quantum mechanics.
This inherits from ``TimeDepState`` and ``KetBase`` and is the main class
that should be used for Kets that vary with time. Its dual is a
``TimeDepBra``.
Parameters
==========
args : tuple
The list of numbers or parameters that uniquely specify the ket. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
Examples
========
Create a TimeDepKet and look at its attributes::
>>> from sympy.physics.quantum import TimeDepKet
>>> k = TimeDepKet('psi', 't')
>>> k
|psi;t>
>>> k.time
t
>>> k.label
(psi,)
>>> k.hilbert_space
H
TimeDepKets know about their dual bra::
>>> k.dual
<psi;t|
>>> k.dual_class()
<class 'sympy.physics.quantum.state.TimeDepBra'>
"""
@classmethod
def dual_class(self):
return TimeDepBra
class TimeDepBra(TimeDepState, BraBase):
"""General time-dependent Bra in quantum mechanics.
This inherits from TimeDepState and BraBase and is the main class that
    should be used for Bras that vary with time. Its dual is a TimeDepKet.
Parameters
==========
args : tuple
        The list of numbers or parameters that uniquely specify the bra. This
will usually be its symbol or its quantum numbers. For time-dependent
state, this will include the time as the final argument.
Examples
========
>>> from sympy.physics.quantum import TimeDepBra
>>> from sympy import symbols, I
>>> b = TimeDepBra('psi', 't')
>>> b
<psi;t|
>>> b.time
t
>>> b.label
(psi,)
>>> b.hilbert_space
H
>>> b.dual
|psi;t>
"""
@classmethod
def dual_class(self):
return TimeDepKet
class Wavefunction(Function):
"""Class for representations in continuous bases
This class takes an expression and coordinates in its constructor. It can
be used to easily calculate normalizations and probabilities.
Parameters
==========
expr : Expr
The expression representing the functional form of the w.f.
coords : Symbol or tuple
The coordinates to be integrated over, and their bounds
Examples
========
Particle in a box, specifying bounds in the more primitive way of using
Piecewise:
>>> from sympy import Symbol, Piecewise, pi, N
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x = Symbol('x', real=True)
>>> n = 1
>>> L = 1
>>> g = Piecewise((0, x < 0), (0, x > L), (sqrt(2//L)*sin(n*pi*x/L), True))
>>> f = Wavefunction(g, x)
>>> f.norm
1
>>> f.is_normalized
True
>>> p = f.prob()
>>> p(0)
0
>>> p(L)
0
>>> p(0.5)
2
>>> p(0.85*L)
2*sin(0.85*pi)**2
>>> N(p(0.85*L))
0.412214747707527
Additionally, you can specify the bounds of the function and the indices in
a more compact way:
>>> from sympy import symbols, pi, diff
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
1
>>> f(L+1)
0
>>> f(L-1)
sqrt(2)*sin(pi*n*(L - 1)/L)/sqrt(L)
>>> f(-1)
0
>>> f(0.85)
sqrt(2)*sin(0.85*pi*n/L)/sqrt(L)
>>> f(0.85, n=1, L=1)
sqrt(2)*sin(0.85*pi)
>>> f.is_commutative
False
All arguments are automatically sympified, so you can define the variables
as strings rather than symbols:
>>> expr = x**2
>>> f = Wavefunction(expr, 'x')
>>> type(f.variables[0])
<class 'sympy.core.symbol.Symbol'>
Derivatives of Wavefunctions will return Wavefunctions:
>>> diff(f, x)
Wavefunction(2*x, x)
"""
#Any passed tuples for coordinates and their bounds need to be
#converted to Tuples before Function's constructor is called, to
#avoid errors from calling is_Float in the constructor
def __new__(cls, *args, **options):
new_args = [None for i in args]
ct = 0
for arg in args:
if isinstance(arg, tuple):
new_args[ct] = Tuple(*arg)
else:
new_args[ct] = arg
ct += 1
return super(Function, cls).__new__(cls, *new_args, **options)
def __call__(self, *args, **options):
var = self.variables
if len(args) != len(var):
raise NotImplementedError(
"Incorrect number of arguments to function!")
ct = 0
#If the passed value is outside the specified bounds, return 0
for v in var:
lower, upper = self.limits[v]
#Do the comparison to limits only if the passed symbol is actually
#a symbol present in the limits;
#Had problems with a comparison of x > L
if isinstance(args[ct], Expr) and \
not (lower in args[ct].free_symbols
or upper in args[ct].free_symbols):
continue
if (args[ct] < lower) == True or (args[ct] > upper) == True:
return 0
ct += 1
expr = self.expr
#Allows user to make a call like f(2, 4, m=1, n=1)
for symbol in list(expr.free_symbols):
if str(symbol) in options.keys():
val = options[str(symbol)]
expr = expr.subs(symbol, val)
return expr.subs(zip(var, args))
def _eval_derivative(self, symbol):
expr = self.expr
deriv = expr._eval_derivative(symbol)
return Wavefunction(deriv, *self.args[1:])
def _eval_conjugate(self):
return Wavefunction(conjugate(self.expr), *self.args[1:])
def _eval_transpose(self):
return self
@property
def free_symbols(self):
return self.expr.free_symbols
@property
def is_commutative(self):
"""
Override Function's is_commutative so that order is preserved in
represented expressions
"""
return False
@classmethod
def eval(self, *args):
return None
@property
def variables(self):
"""
Return the coordinates which the wavefunction depends on
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x,y = symbols('x,y')
>>> f = Wavefunction(x*y, x, y)
>>> f.variables
(x, y)
>>> g = Wavefunction(x*y, x)
>>> g.variables
(x,)
"""
var = [g[0] if isinstance(g, Tuple) else g for g in self._args[1:]]
return tuple(var)
@property
def limits(self):
"""
        Return the limits of the coordinates which the w.f. depends on. If no
limits are specified, defaults to ``(-oo, oo)``.
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x, y = symbols('x, y')
>>> f = Wavefunction(x**2, (x, 0, 1))
>>> f.limits
{x: (0, 1)}
>>> f = Wavefunction(x**2, x)
>>> f.limits
{x: (-oo, oo)}
>>> f = Wavefunction(x**2 + y**2, x, (y, -1, 2))
>>> f.limits
{x: (-oo, oo), y: (-1, 2)}
"""
limits = [(g[1], g[2]) if isinstance(g, Tuple) else (-oo, oo)
for g in self._args[1:]]
return dict(zip(self.variables, tuple(limits)))
@property
def expr(self):
"""
Return the expression which is the functional form of the Wavefunction
Examples
========
>>> from sympy.physics.quantum.state import Wavefunction
>>> from sympy import symbols
>>> x, y = symbols('x, y')
>>> f = Wavefunction(x**2, x)
>>> f.expr
x**2
"""
return self._args[0]
@property
def is_normalized(self):
"""
Returns true if the Wavefunction is properly normalized
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.is_normalized
True
"""
return (self.norm == 1.0)
@property
@cacheit
def norm(self):
"""
Return the normalization of the specified functional form.
This function integrates over the coordinates of the Wavefunction, with
the bounds specified.
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sqrt(2/L)*sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
1
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.norm
sqrt(2)*sqrt(L)/2
"""
exp = self.expr*conjugate(self.expr)
var = self.variables
limits = self.limits
for v in var:
curr_limits = limits[v]
exp = integrate(exp, (v, curr_limits[0], curr_limits[1]))
return sqrt(exp)
def normalize(self):
"""
Return a normalized version of the Wavefunction
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x = symbols('x', real=True)
>>> L = symbols('L', positive=True)
>>> n = symbols('n', integer=True, positive=True)
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.normalize()
Wavefunction(sqrt(2)*sin(pi*n*x/L)/sqrt(L), (x, 0, L))
"""
const = self.norm
if const == oo:
raise NotImplementedError("The function is not normalizable!")
else:
return Wavefunction((const)**(-1)*self.expr, *self.args[1:])
def prob(self):
"""
        Return the squared absolute magnitude (probability density) of the w.f., `|\psi(x)|^2`
Examples
========
>>> from sympy import symbols, pi
>>> from sympy.functions import sqrt, sin
>>> from sympy.physics.quantum.state import Wavefunction
>>> x, L = symbols('x,L', real=True)
>>> n = symbols('n', integer=True)
>>> g = sin(n*pi*x/L)
>>> f = Wavefunction(g, (x, 0, L))
>>> f.prob()
Wavefunction(sin(pi*n*x/L)**2, x)
"""
return Wavefunction(self.expr*conjugate(self.expr), *self.variables)
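# Illustrative usage sketch (not part of the upstream sympy module), run only when
# this file is executed directly. It assumes the public names are importable from
# sympy.physics.quantum and sympy.physics.quantum.state, as the docstrings above
# indicate, and exercises the Bra*Ket / Ket*Bra products and Wavefunction defined here.
if __name__ == '__main__':
    from sympy import symbols, pi, sin, sqrt
    from sympy.physics.quantum import Dagger
    from sympy.physics.quantum.state import Bra, Ket, Wavefunction
    k = Ket('psi')
    b = Bra('psi')
    print(Dagger(k))   # <psi|  : Dagger maps a ket to its dual bra
    print(b*k)         # <psi|psi> : Bra*Ket builds an InnerProduct
    print(k*b)         # |psi><psi| : Ket*Bra builds an OuterProduct
    # Particle-in-a-box state, as in the Wavefunction docstring above
    x, L = symbols('x L', positive=True)
    n = symbols('n', integer=True, positive=True)
    psi = Wavefunction(sqrt(2/L)*sin(n*pi*x/L), (x, 0, L))
    print(psi.norm)          # 1, i.e. the state is normalized on (0, L)
    print(psi.prob()(L/2))   # probability density evaluated at the box midpoint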
| bsd-3-clause |
acimmarusti/isl_exercises | chap4/chap4ex13.py | 1 | 5743 | from __future__ import print_function, division
import matplotlib.pyplot as plt
import numpy as np
import scipy
import pandas as pd
import seaborn as sns
from sklearn.datasets import load_boston
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score
from pandas.tools.plotting import scatter_matrix
import statsmodels.formula.api as smf
import statsmodels.api as sm
#Load boston dataset from sklearn#
boston = load_boston()
#Columns#
#print(boston['feature_names'])
#Description#
#print(boston['DESCR'])
rawdata = pd.DataFrame(boston.data, columns=boston.feature_names)
rawdata['MEDV'] = boston.target
#Convert to NaN#
data = rawdata.replace(to_replace='None', value=np.nan).copy()
#Create the binary variable CRIM01#
data['CRIM01'] = np.where(data['CRIM'] > data['CRIM'].median(), 1, 0)
#Columns#
numcols = list(data.columns)
numcols.remove('CRIM')
#Predictor without target columns#
xcols = list(numcols)
xcols.remove('CRIM01')
#Summary (mean, stdev, range, etc)#
print('\nFull data summary')
print(data.describe())
#Correlations#
print('\nData correlations')
dcorrs = data.corr()
print(dcorrs)
#Pair plot matrix#
#sns.set()
#sns.pairplot(data[numcols], hue='CRIM01')
print('\n\n### LOGISTIC REGRESSION###')
## Logistic regression with statsmodels ##
plr_form = 'CRIM01~' + '+'.join(xcols)
prelogreg = smf.glm(formula=plr_form, data=data, family=sm.families.Binomial()).fit()
#Remove predictors with high P-values from LogReg#
logit_pvals = prelogreg.pvalues
pred_keep = list(logit_pvals[logit_pvals < 0.05].index)
pred_keep.remove('Intercept')
print('\nAfter first LogReg iteration, keeping only: ', pred_keep)
# New LogReg with only low p-value predictors#
lr_form = 'CRIM01~' + '+'.join(pred_keep)
logreg = smf.glm(formula=lr_form, data=data, family=sm.families.Binomial()).fit()
print('\nLogistic regression fit summary')
print(logreg.summary())
#Splitting the data for train/test#
X_data = data[pred_keep]
Y_data = data['CRIM01']
X_train, X_test, Y_train, Y_test = train_test_split(X_data, Y_data, test_size=0.5, random_state=42, stratify=Y_data)
# Initiate logistic regression object
logit_clf = LogisticRegression()
# Fit the model: X_train = predictor matrix, Y_train = response vector
resLogit_clf = logit_clf.fit(X_train, Y_train)
#Predicted values for the test set
Y_pred_logit = resLogit_clf.predict(X_test)
#Confusion matrix#
print("\nConfusion matrix logit:")
print(confusion_matrix(Y_test, Y_pred_logit))
#Accuracy, precision and recall#
print('\nAccuracy logit:', np.round(accuracy_score(Y_test, Y_pred_logit), 3))
print("Precision logit:", np.round(precision_score(Y_test, Y_pred_logit, pos_label=1), 3))
print("Recall logit:", np.round(recall_score(Y_test, Y_pred_logit, pos_label=1), 3))
print('\n\n### LINEAR DISCRIMINANT ANALYSIS ###')
# Initiate linear discriminant analysis object
lda_clf = LinearDiscriminantAnalysis()
# Fit the model: X_train = predictor matrix, Y_train = response vector
reslda_clf = lda_clf.fit(X_train, Y_train)
#Predicted values for the test set
Y_pred_lda = reslda_clf.predict(X_test)
#Prior probabilities#
print("\nPrior probabilities")
print(reslda_clf.classes_)
print(reslda_clf.priors_)
#Group means#
print("\nGroup means")
#print(reslda_clf.classes_)
print(reslda_clf.means_)
#Coefficients#
print("\nCoefficients")
#print(reslda_clf.classes_)
print(reslda_clf.intercept_)
print(reslda_clf.coef_)
#Confusion matrix#
print("\nConfusion matrix LDA:")
print(confusion_matrix(Y_test, Y_pred_lda))
#Accuracy, precision and recall#
print("\nAccuracy LDA:", np.round(accuracy_score(Y_test, Y_pred_lda), 3))
print("Precision LDA:", np.round(precision_score(Y_test, Y_pred_lda, pos_label=1), 3))
print("Recall LDA:", np.round(recall_score(Y_test, Y_pred_lda, pos_label=1), 3))
print('\n\n### QUADRATIC DISCRIMINANT ANALYSIS ###')
# Initiate QDA object
qda_clf = QuadraticDiscriminantAnalysis()
# Fit the model: X_train = predictor matrix, Y_train = response vector
resqda_clf = qda_clf.fit(X_train, Y_train)
#Predicted values for the test set
Y_pred_qda = resqda_clf.predict(X_test)
#Prior probabilities#
print("\nPrior probabilities")
print(resqda_clf.classes_)
print(resqda_clf.priors_)
#Group means#
print("\nGroup means")
#print(resqda_clf.classes_)
print(resqda_clf.means_)
#Confusion matrix#
print("\nConfusion matrix QDA:")
print(confusion_matrix(Y_test, Y_pred_qda))
#Accuracy, precision and recall#
print("\nAccuracy QDA:", np.round(accuracy_score(Y_test, Y_pred_qda), 3))
print("Precision QDA:", np.round(precision_score(Y_test, Y_pred_qda, pos_label=1), 3))
print("Recall QDA:", np.round(recall_score(Y_test, Y_pred_qda, pos_label=1), 3))
print('\n\n### K NEAREST NEIGHBORS ###')
#K value#
kval = 15
print('\nUsing k = ' + str(kval))
# Initiate KNN object
knn_clf = KNeighborsClassifier(n_neighbors=kval)
# Fit the model: X_train = predictor matrix, Y_train = response vector
resknn_clf = knn_clf.fit(X_train, Y_train)
#Predicted values for the test set
Y_pred_knn = resknn_clf.predict(X_test)
#Confusion matrix#
print("\nConfusion matrix KNN:")
print(confusion_matrix(Y_test, Y_pred_knn))
#Accuracy, precision and recall#
print("\nAccuracy KNN:", np.round(accuracy_score(Y_test, Y_pred_knn), 3))
print("Precision KNN:", np.round(precision_score(Y_test, Y_pred_knn, pos_label=1), 3))
print("Recall KNN:", np.round(recall_score(Y_test, Y_pred_knn, pos_label=1), 3))
#plt.show()
| gpl-3.0 |
hlin117/statsmodels | statsmodels/examples/ex_kernel_regression_dgp.py | 34 | 1202 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 06 09:50:54 2013
Author: Josef Perktold
"""
from __future__ import print_function
if __name__ == '__main__':
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.nonparametric.api import KernelReg
import statsmodels.sandbox.nonparametric.dgp_examples as dgp
seed = np.random.randint(999999)
seed = 430973
print(seed)
np.random.seed(seed)
funcs = [dgp.UnivariateFanGijbels1(),
dgp.UnivariateFanGijbels2(),
dgp.UnivariateFanGijbels1EU(),
#dgp.UnivariateFanGijbels2(distr_x=stats.uniform(-2, 4))
dgp.UnivariateFunc1()
]
res = []
fig = plt.figure()
for i,func in enumerate(funcs):
#f = func()
f = func
model = KernelReg(endog=[f.y], exog=[f.x], reg_type='ll',
var_type='c', bw='cv_ls')
mean, mfx = model.fit()
ax = fig.add_subplot(2, 2, i+1)
f.plot(ax=ax)
ax.plot(f.x, mean, color='r', lw=2, label='est. mean')
ax.legend(loc='upper left')
res.append((model, mean, mfx))
fig.suptitle('Kernel Regression')
fig.show()
| bsd-3-clause |
Adai0808/BuildingMachineLearningSystemsWithPython | ch07/predict10k_en.py | 22 | 2428 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import numpy as np
from sklearn.datasets import load_svmlight_file
from sklearn.cross_validation import KFold
from sklearn.linear_model import ElasticNetCV, ElasticNet
from sklearn.metrics import mean_squared_error, r2_score
from matplotlib import pyplot as plt
data, target = load_svmlight_file('data/E2006.train')
# Edit the lines below if you want to switch method:
# from sklearn.linear_model import Lasso
# met = Lasso(alpha=0.1)
met = ElasticNet(alpha=0.1)
kf = KFold(len(target), n_folds=5)
pred = np.zeros_like(target)
for train, test in kf:
met.fit(data[train], target[train])
pred[test] = met.predict(data[test])
print('[EN 0.1] RMSE on testing (5 fold), {:.2}'.format(np.sqrt(mean_squared_error(target, pred))))
print('[EN 0.1] R2 on testing (5 fold), {:.2}'.format(r2_score(target, pred)))
print('')
# Construct an ElasticNetCV object (use all available CPUs)
met = ElasticNetCV(n_jobs=-1)
kf = KFold(len(target), n_folds=5)
pred = np.zeros_like(target)
for train, test in kf:
met.fit(data[train], target[train])
pred[test] = met.predict(data[test])
print('[EN CV] RMSE on testing (5 fold), {:.2}'.format(np.sqrt(mean_squared_error(target, pred))))
print('[EN CV] R2 on testing (5 fold), {:.2}'.format(r2_score(target, pred)))
print('')
met.fit(data, target)
pred = met.predict(data)
print('[EN CV] RMSE on training, {:.2}'.format(np.sqrt(mean_squared_error(target, pred))))
print('[EN CV] R2 on training, {:.2}'.format(r2_score(target, pred)))
# Construct an ElasticNetCV object (use all available CPUs)
met = ElasticNetCV(n_jobs=-1, l1_ratio=[.01, .05, .25, .5, .75, .95, .99])
kf = KFold(len(target), n_folds=5)
pred = np.zeros_like(target)
for train, test in kf:
met.fit(data[train], target[train])
pred[test] = met.predict(data[test])
print('[EN CV l1_ratio] RMSE on testing (5 fold), {:.2}'.format(np.sqrt(mean_squared_error(target, pred))))
print('[EN CV l1_ratio] R2 on testing (5 fold), {:.2}'.format(r2_score(target, pred)))
print('')
fig, ax = plt.subplots()
y = target
ax.scatter(y, pred, c='k')
ax.plot([-5,-1], [-5,-1], 'r-', lw=2)
ax.set_xlabel('Actual value')
ax.set_ylabel('Predicted value')
fig.savefig('Figure_10k_scatter_EN_l1_ratio.png')
| mit |
rexshihaoren/scikit-learn | examples/model_selection/plot_validation_curve.py | 229 | 1823 | """
==========================
Plotting Validation Curves
==========================
In this plot you can see the training scores and validation scores of an SVM
for different values of the kernel parameter gamma. For very low values of
gamma, you can see that both the training score and the validation score are
low. This is called underfitting. Medium values of gamma will result in high
values for both scores, i.e. the classifier is performing fairly well. If gamma
is too high, the classifier will overfit, which means that the training score
is good but the validation score is poor.
"""
print(__doc__)
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import load_digits
from sklearn.svm import SVC
from sklearn.learning_curve import validation_curve
digits = load_digits()
X, y = digits.data, digits.target
param_range = np.logspace(-6, -1, 5)
train_scores, test_scores = validation_curve(
SVC(), X, y, param_name="gamma", param_range=param_range,
cv=10, scoring="accuracy", n_jobs=1)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.title("Validation Curve with SVM")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
plt.semilogx(param_range, train_scores_mean, label="Training score", color="r")
plt.fill_between(param_range, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.2, color="r")
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="g")
plt.fill_between(param_range, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.2, color="g")
plt.legend(loc="best")
plt.show()
| bsd-3-clause |
dean0x7d/pybinding | pybinding/support/collections.py | 1 | 3143 | import numpy as np
from matplotlib.collections import Collection
from matplotlib.artist import allow_rasterization
# noinspection PyAbstractClass
class CircleCollection(Collection):
"""Custom circle collection
The default matplotlib `CircleCollection` creates circles based on their
area in screen units. This class uses the radius in data units. It behaves
like a much faster version of a `PatchCollection` of `Circle`.
The implementation is similar to `EllipseCollection`.
"""
def __init__(self, radius, **kwargs):
super().__init__(**kwargs)
from matplotlib import path, transforms
self.radius = np.atleast_1d(radius)
self._paths = [path.Path.unit_circle()]
self.set_transform(transforms.IdentityTransform())
self._transforms = np.empty((0, 3, 3))
def _set_transforms(self):
ax = self.axes
self._transforms = np.zeros((self.radius.size, 3, 3))
self._transforms[:, 0, 0] = self.radius * ax.bbox.width / ax.viewLim.width
self._transforms[:, 1, 1] = self.radius * ax.bbox.height / ax.viewLim.height
self._transforms[:, 2, 2] = 1
@allow_rasterization
def draw(self, renderer):
self._set_transforms()
super().draw(renderer)
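# Illustrative usage sketch (not part of the original pybinding module): the
# CircleCollection above interprets radii in *data* units, so the circles rescale
# with the axes limits. It assumes a matplotlib version whose `Collection` base
# class accepts the `offsets`/`transOffset` keywords.
def _circle_collection_demo(num=50, radius=0.03):
    import matplotlib.pyplot as plt
    ax = plt.gca()
    positions = np.random.rand(num, 2)  # circle centers in data coordinates
    circles = CircleCollection(np.full(num, radius), offsets=positions,
                               transOffset=ax.transData, facecolors='blue',
                               edgecolors='none', alpha=0.5)
    ax.add_collection(circles)
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.set_aspect('equal')
    plt.show()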
class Circle3DCollection(CircleCollection):
def __init__(self, radius, zs=0, zdir='z', depthshade=True, **kwargs):
super().__init__(radius, **kwargs)
self._depthshade = depthshade
self.set_3d_properties(zs, zdir)
self._A0 = self._A
def set_array(self, array):
self._A0 = array
super().set_array(array)
def set_3d_properties(self, zs, zdir):
# Force the collection to initialize the face and edgecolors
# just in case it is a scalarmappable with a colormap.
self.update_scalarmappable()
offsets = self.get_offsets()
if len(offsets) > 0:
xs, ys = list(zip(*offsets))
else:
xs = []
ys = []
from mpl_toolkits.mplot3d.art3d import juggle_axes
self._offsets3d = juggle_axes(xs, ys, np.atleast_1d(zs), zdir)
self._facecolor3d = self.get_facecolor()
self._edgecolor3d = self.get_edgecolor()
def do_3d_projection(self, renderer):
from mpl_toolkits.mplot3d import proj3d
from mpl_toolkits.mplot3d.art3d import zalpha
from matplotlib import colors as mcolors
# transform and sort in z direction
v = np.array(proj3d.proj_transform_clip(*self._offsets3d, M=renderer.M)[:3])
idx = v[2].argsort()[::-1]
vzs = v[2, idx]
self.set_offsets(v[:2, idx].transpose())
super().set_array(self._A0[idx])
fcs = zalpha(self._facecolor3d, vzs) if self._depthshade else self._facecolor3d
fcs = mcolors.colorConverter.to_rgba_array(fcs, self._alpha)
self.set_facecolors(fcs)
ecs = zalpha(self._edgecolor3d, vzs) if self._depthshade else self._edgecolor3d
ecs = mcolors.colorConverter.to_rgba_array(ecs, self._alpha)
self.set_edgecolors(ecs)
return min(vzs) if vzs.size > 0 else np.nan
| bsd-2-clause |
annahs/atmos_research | LEO_2D_histos_from_db.py | 1 | 3992 | import sys
import os
import datetime
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
from pprint import pprint
import sqlite3
import calendar
from datetime import datetime
#id INTEGER PRIMARY KEY AUTOINCREMENT,
#sp2b_file TEXT,
#file_index INT,
#instr TEXT,
#instr_locn TEXT,
#particle_type TEXT,
#particle_dia FLOAT,
#unix_ts_utc FLOAT,
#actual_scat_amp FLOAT,
#actual_peak_pos INT,
#FF_scat_amp FLOAT,
#FF_peak_pos INT,
#FF_gauss_width FLOAT,
#zeroX_to_peak FLOAT,
#LF_scat_amp FLOAT,
#incand_amp FLOAT,
#lag_time_fit_to_incand FLOAT,
#LF_baseline_pct_diff FLOAT,
#rBC_mass_fg FLOAT,
#coat_thickness_nm FLOAT,
#zero_crossing_posn FLOAT,
#UNIQUE (sp2b_file, file_index, instr)
#connect to database
conn = sqlite3.connect('C:/projects/dbs/SP2_data.db')
c = conn.cursor()
instrument = 'UBCSP2'
instrument_locn = 'WHI'
type_particle = 'incand'
start_date = datetime.strptime('20120401','%Y%m%d')
end_date = datetime.strptime('20120531','%Y%m%d')
lookup_file = 'C:/Users/Sarah Hanna/Documents/Data/WHI long term record/coatings/lookup_tables/coating_lookup_table_WHI_2012_UBCSP2.lupckl'
rBC_density = 1.8
incand_sat = 3750
LF_max = 45000 #above this is unreasonable
lookup = open(lookup_file, 'r')
lookup_table = pickle.load(lookup)
lookup.close()
min_rBC_mass = 1.63#120 2.6-#140 3.86-#160nm 0.25
max_rBC_mass = 2.6#140 3.86-160 5.5-#180nm 10.05
VED_min = 65
VED_max = 220
scat_lim = 100
begin_data = calendar.timegm(start_date.timetuple())
end_data = calendar.timegm(end_date.timetuple())
data = []
particles=0
no_scat=0
no_scat_110 =0
fit_failure=0
early_evap=0
early_evap_110=0
flat_fit=0
LF_high=0
for row in c.execute('''SELECT rBC_mass_fg, coat_thickness_nm, unix_ts_utc, LF_scat_amp, LF_baseline_pct_diff, sp2b_file, file_index, instr,actual_scat_amp
FROM SP2_coating_analysis
WHERE instr=? and instr_locn=? and particle_type=? and rBC_mass_fg>=? and rBC_mass_fg<? and unix_ts_utc>=? and unix_ts_utc<?''',
(instrument,instrument_locn,type_particle, min_rBC_mass, max_rBC_mass, begin_data,end_data)):
particles+=1
rBC_mass = row[0]
coat_thickness = row[1]
event_time = datetime.utcfromtimestamp(row[2])
LEO_amp = row[3]
LF_baseline_pctdiff = row[4]
file = row[5]
index = row[6]
instrt = row[7]
meas_scat_amp = row[8]
rBC_VED = (((rBC_mass/(10**15*rBC_density))*6/3.14159)**(1/3.0))*10**7 #VED in nm with 10^15fg/g and 10^7nm/cm
if meas_scat_amp < 6:
no_scat +=1
if rBC_VED > scat_lim:
no_scat_110+=1
data.append([rBC_VED,coat_thickness])
if LEO_amp == 0.0 and LF_baseline_pctdiff == None and meas_scat_amp >= 6:
early_evap +=1
if rBC_VED > scat_lim:
early_evap_110 +=1
if LEO_amp == -2:
early_evap +=1
if rBC_VED > scat_lim:
early_evap_110 +=1
if LEO_amp == -1:
fit_failure +=1
if LEO_amp == 0.0 and LF_baseline_pctdiff != None:
flat_fit +=1
if LEO_amp > LF_max:
LF_high +=1
if LEO_amp > 0:
data.append([rBC_VED,coat_thickness])
print '# of particles', particles
print 'no_scat', no_scat
print 'no_scat_110', no_scat_110
print 'fit_failure', fit_failure
print 'early_evap', early_evap
print 'early_evap_110', early_evap_110
print 'flat_fit', flat_fit
print 'LF_high', LF_high
evap_pct = (early_evap)*100.0/particles
evap_pct_110 = (early_evap_110)*100.0/particles
no_scat_pct = (no_scat)*100.0/particles
no_scat_pct_110 = no_scat_110*100./particles
print evap_pct, evap_pct_110, no_scat_pct,no_scat_pct_110
rBC_VEDs = [row[0] for row in data]
coatings = [row[1] for row in data]
median_coat = np.median (coatings)
print 'median coating',median_coat
#####hexbin coat vs core###
fig = plt.figure()
ax = fig.add_subplot(111)
#x_limits = [0,250]
#y_limits = [0,250]
#h = plt.hexbin(rBC_VEDs, coatings, cmap=cm.jet,gridsize = 50, mincnt=1)
hist = plt.hist(coatings, bins=50)
plt.ylabel('frequency')
plt.xlabel('Coating Thickness (nm)')
#cb = plt.colorbar()
#cb.set_label('frequency')
plt.show()
| mit |
bert9bert/statsmodels | statsmodels/graphics/tests/test_correlation.py | 31 | 1112 | import numpy as np
from numpy.testing import dec
from statsmodels.graphics.correlation import plot_corr, plot_corr_grid
from statsmodels.datasets import randhie
try:
import matplotlib.pyplot as plt
have_matplotlib = True
except ImportError:
have_matplotlib = False
@dec.skipif(not have_matplotlib)
def test_plot_corr():
hie_data = randhie.load_pandas()
corr_matrix = np.corrcoef(hie_data.data.values.T)
fig = plot_corr(corr_matrix, xnames=hie_data.names)
plt.close(fig)
fig = plot_corr(corr_matrix, xnames=[], ynames=hie_data.names)
plt.close(fig)
fig = plot_corr(corr_matrix, normcolor=True, title='', cmap='jet')
plt.close(fig)
@dec.skipif(not have_matplotlib)
def test_plot_corr_grid():
hie_data = randhie.load_pandas()
corr_matrix = np.corrcoef(hie_data.data.values.T)
fig = plot_corr_grid([corr_matrix] * 2, xnames=hie_data.names)
plt.close(fig)
fig = plot_corr_grid([corr_matrix] * 5, xnames=[], ynames=hie_data.names)
plt.close(fig)
fig = plot_corr_grid([corr_matrix] * 3, normcolor=True, titles='', cmap='jet')
plt.close(fig)
| bsd-3-clause |
jakevdp/bokeh | bokeh/mplexporter/renderers/base.py | 44 | 14355 | import warnings
import itertools
from contextlib import contextmanager
import numpy as np
from matplotlib import transforms
from .. import utils
from .. import _py3k_compat as py3k
class Renderer(object):
@staticmethod
def ax_zoomable(ax):
return bool(ax and ax.get_navigate())
@staticmethod
def ax_has_xgrid(ax):
        return bool(ax and ax.xaxis._gridOnMajor and ax.xaxis.get_gridlines())
@staticmethod
def ax_has_ygrid(ax):
return bool(ax and ax.yaxis._gridOnMajor and ax.yaxis.get_gridlines())
@property
def current_ax_zoomable(self):
return self.ax_zoomable(self._current_ax)
@property
def current_ax_has_xgrid(self):
return self.ax_has_xgrid(self._current_ax)
@property
def current_ax_has_ygrid(self):
return self.ax_has_ygrid(self._current_ax)
@contextmanager
def draw_figure(self, fig, props):
if hasattr(self, "_current_fig") and self._current_fig is not None:
warnings.warn("figure embedded in figure: something is wrong")
self._current_fig = fig
self._fig_props = props
self.open_figure(fig=fig, props=props)
yield
self.close_figure(fig=fig)
self._current_fig = None
self._fig_props = {}
@contextmanager
def draw_axes(self, ax, props):
if hasattr(self, "_current_ax") and self._current_ax is not None:
warnings.warn("axes embedded in axes: something is wrong")
self._current_ax = ax
self._ax_props = props
self.open_axes(ax=ax, props=props)
yield
self.close_axes(ax=ax)
self._current_ax = None
self._ax_props = {}
@contextmanager
def draw_legend(self, legend, props):
self._current_legend = legend
self._legend_props = props
self.open_legend(legend=legend, props=props)
yield
self.close_legend(legend=legend)
self._current_legend = None
self._legend_props = {}
# Following are the functions which should be overloaded in subclasses
def open_figure(self, fig, props):
"""
Begin commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The Figure which will contain the ensuing axes and elements
props : dictionary
The dictionary of figure properties
"""
pass
def close_figure(self, fig):
"""
Finish commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The figure which is finished being drawn.
"""
pass
def open_axes(self, ax, props):
"""
Begin commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which will contain the ensuing axes and elements
props : dictionary
The dictionary of axes properties
"""
pass
def close_axes(self, ax):
"""
Finish commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which is finished being drawn.
"""
pass
def open_legend(self, legend, props):
"""
        Begin commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend that will contain the ensuing elements
props : dictionary
The dictionary of legend properties
"""
pass
def close_legend(self, legend):
"""
Finish commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend which is finished being drawn
"""
pass
def draw_marked_line(self, data, coordinates, linestyle, markerstyle,
label, mplobj=None):
"""Draw a line that also has markers.
If this isn't reimplemented by a renderer object, by default, it will
make a call to BOTH draw_line and draw_markers when both markerstyle
and linestyle are not None in the same Line2D object.
"""
if linestyle is not None:
self.draw_line(data, coordinates, linestyle, label, mplobj)
if markerstyle is not None:
self.draw_markers(data, coordinates, markerstyle, label, mplobj)
def draw_line(self, data, coordinates, style, label, mplobj=None):
"""
Draw a line. By default, draw the line via the draw_path() command.
Some renderers might wish to override this and provide more
fine-grained behavior.
In matplotlib, lines are generally created via the plt.plot() command,
though this command also can create marker collections.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the line.
mplobj : matplotlib object
the matplotlib plot element which generated this line
"""
pathcodes = ['M'] + (data.shape[0] - 1) * ['L']
pathstyle = dict(facecolor='none', **style)
pathstyle['edgecolor'] = pathstyle.pop('color')
pathstyle['edgewidth'] = pathstyle.pop('linewidth')
self.draw_path(data=data, coordinates=coordinates,
pathcodes=pathcodes, style=pathstyle, mplobj=mplobj)
@staticmethod
def _iter_path_collection(paths, path_transforms, offsets, styles):
"""Build an iterator over the elements of the path collection"""
N = max(len(paths), len(offsets))
if not path_transforms:
path_transforms = [np.eye(3)]
edgecolor = styles['edgecolor']
if np.size(edgecolor) == 0:
edgecolor = ['none']
facecolor = styles['facecolor']
if np.size(facecolor) == 0:
facecolor = ['none']
elements = [paths, path_transforms, offsets,
edgecolor, styles['linewidth'], facecolor]
it = itertools
return it.islice(py3k.zip(*py3k.map(it.cycle, elements)), N)
def draw_path_collection(self, paths, path_coordinates, path_transforms,
offsets, offset_coordinates, offset_order,
styles, mplobj=None):
"""
Draw a collection of paths. The paths, offsets, and styles are all
iterables, and the number of paths is max(len(paths), len(offsets)).
By default, this is implemented via multiple calls to the draw_path()
function. For efficiency, Renderers may choose to customize this
implementation.
Examples of path collections created by matplotlib are scatter plots,
histograms, contour plots, and many others.
Parameters
----------
paths : list
list of tuples, where each tuple has two elements:
(data, pathcodes). See draw_path() for a description of these.
path_coordinates: string
the coordinates code for the paths, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
path_transforms: array_like
an array of shape (*, 3, 3), giving a series of 2D Affine
transforms for the paths. These encode translations, rotations,
and scalings in the standard way.
offsets: array_like
An array of offsets of shape (N, 2)
offset_coordinates : string
the coordinates code for the offsets, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
offset_order : string
either "before" or "after". This specifies whether the offset
is applied before the path transform, or after. The matplotlib
backend equivalent is "before"->"data", "after"->"screen".
styles: dictionary
A dictionary in which each value is a list of length N, containing
the style(s) for the paths.
mplobj : matplotlib object
the matplotlib plot element which generated this collection
"""
if offset_order == "before":
raise NotImplementedError("offset before transform")
for tup in self._iter_path_collection(paths, path_transforms,
offsets, styles):
(path, path_transform, offset, ec, lw, fc) = tup
vertices, pathcodes = path
path_transform = transforms.Affine2D(path_transform)
vertices = path_transform.transform(vertices)
# This is a hack:
if path_coordinates == "figure":
path_coordinates = "points"
style = {"edgecolor": utils.color_to_hex(ec),
"facecolor": utils.color_to_hex(fc),
"edgewidth": lw,
"dasharray": "10,0",
"alpha": styles['alpha'],
"zorder": styles['zorder']}
self.draw_path(data=vertices, coordinates=path_coordinates,
pathcodes=pathcodes, style=style, offset=offset,
offset_coordinates=offset_coordinates,
mplobj=mplobj)
def draw_markers(self, data, coordinates, style, label, mplobj=None):
"""
Draw a set of markers. By default, this is done by repeatedly
calling draw_path(), but renderers should generally overload
this method to provide a more efficient implementation.
In matplotlib, markers are created using the plt.plot() command.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the markers.
mplobj : matplotlib object
the matplotlib plot element which generated this marker collection
"""
vertices, pathcodes = style['markerpath']
pathstyle = dict((key, style[key]) for key in ['alpha', 'edgecolor',
'facecolor', 'zorder',
'edgewidth'])
pathstyle['dasharray'] = "10,0"
for vertex in data:
self.draw_path(data=vertices, coordinates="points",
pathcodes=pathcodes, style=pathstyle,
offset=vertex, offset_coordinates=coordinates,
mplobj=mplobj)
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
"""
Draw text on the image.
Parameters
----------
text : string
The text to draw
position : tuple
The (x, y) position of the text
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the text.
text_type : string or None
if specified, a type of text such as "xlabel", "ylabel", "title"
mplobj : matplotlib object
the matplotlib plot element which generated this text
"""
raise NotImplementedError()
def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
"""
Draw a path.
In matplotlib, paths are created by filled regions, histograms,
contour plots, patches, etc.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
'figure' for figure (pixel) coordinates, or "points" for raw
point coordinates (useful in conjunction with offsets, below).
pathcodes : list
A list of single-character SVG pathcodes associated with the data.
Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
'S', 's', 'C', 'c', 'Z', 'z']
See the SVG specification for details. Note that some path codes
consume more than one datapoint (while 'Z' consumes none), so
in general, the length of the pathcodes list will not be the same
as that of the data array.
style : dictionary
a dictionary specifying the appearance of the line.
offset : list (optional)
the (x, y) offset of the path. If not given, no offset will
be used.
offset_coordinates : string (optional)
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
mplobj : matplotlib object
the matplotlib plot element which generated this path
"""
raise NotImplementedError()
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
"""
Draw an image.
Parameters
----------
imdata : string
base64 encoded png representation of the image
extent : list
the axes extent of the image: [xmin, xmax, ymin, ymax]
coordinates: string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the image
mplobj : matplotlib object
the matplotlib plot object which generated this image
"""
raise NotImplementedError()
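# Illustrative sketch (not part of the original mplexporter package): a minimal
# concrete renderer that only logs the calls it receives, which is enough to see
# the open/draw/close protocol documented above in action. The `Exporter` import
# in the usage note below is an assumption about the surrounding package layout.
class _LoggingRenderer(Renderer):
    """Toy renderer that prints each drawing command instead of rendering it."""
    def open_figure(self, fig, props):
        print("open figure: %s x %s inches" % (props.get('figwidth'), props.get('figheight')))
    def open_axes(self, ax, props):
        print("open axes: xlim=%s, ylim=%s" % (props.get('xlim'), props.get('ylim')))
    def draw_text(self, text, position, coordinates, style, text_type=None, mplobj=None):
        print("text %r at %s (%s coordinates)" % (text, position, coordinates))
    def draw_path(self, data, coordinates, pathcodes, style,
                  offset=None, offset_coordinates="data", mplobj=None):
        print("path: %d vertices, %d codes (%s coordinates)"
              % (len(data), len(pathcodes), coordinates))
    def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
        print("image: extent=%s, %d characters of base64 png" % (extent, len(imdata)))
# Typical (assumed) usage with the package's Exporter, which walks a matplotlib
# Figure and calls back into the renderer:
#     from mplexporter import Exporter
#     Exporter(_LoggingRenderer()).run(fig)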
| bsd-3-clause |
georgid/sms-tools | lectures/7-Sinusoidal-plus-residual-model/plots-code/stochasticSynthesisFrame.py | 2 | 2997 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, hanning, triang, blackmanharris, resample
import math
import sys, os, time
from scipy.fftpack import fft, ifft
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import utilFunctions as UF
def stochasticModelFrame(x, w, N, stocf) :
# x: input array sound, w: analysis window, N: FFT size,
# stocf: decimation factor of mag spectrum for stochastic analysis
hN = N/2 # size of positive spectrum
hM = (w.size)/2 # half analysis window size
pin = hM # initialize sound pointer in middle of analysis window
fftbuffer = np.zeros(N) # initialize buffer for FFT
yw = np.zeros(w.size) # initialize output sound frame
w = w / sum(w) # normalize analysis window
#-----analysis-----
xw = x[pin-hM:pin+hM] * w # window the input sound
X = fft(xw) # compute FFT
mX = 20 * np.log10( abs(X[:hN]) ) # magnitude spectrum of positive frequencies
mXenv = resample(np.maximum(-200, mX), mX.size*stocf) # decimate the mag spectrum
pX = np.angle(X[:hN])
#-----synthesis-----
mY = resample(mXenv, hN) # interpolate to original size
pY = 2*np.pi*np.random.rand(hN) # generate phase random values
Y = np.zeros(N, dtype = complex)
Y[:hN] = 10**(mY/20) * np.exp(1j*pY) # generate positive freq.
Y[hN+1:] = 10**(mY[:0:-1]/20) * np.exp(-1j*pY[:0:-1]) # generate negative freq.
fftbuffer = np.real( ifft(Y) ) # inverse FFT
y = fftbuffer*N/2
return mX, pX, mY, pY, y
# example call of stochasticModel function
if __name__ == '__main__':
(fs, x) = UF.wavread('../../../sounds/ocean.wav')
w = np.hanning(1024)
N = 1024
stocf = 0.1
maxFreq = 10000.0
lastbin = N*maxFreq/fs
first = 1000
last = first+w.size
mX, pX, mY, pY, y = stochasticModelFrame(x[first:last], w, N, stocf)
plt.figure(1, figsize=(9, 5))
plt.subplot(3,1,1)
plt.plot(np.arange(0, fs/2.0, fs/float(N)), mY, 'r', lw=1.5, label="mY")
plt.axis([0, maxFreq, -78, max(mX)+0.5])
plt.title('mY (stochastic approximation of mX)')
plt.subplot(3,1,2)
plt.plot(np.arange(0, fs/2.0, fs/float(N)), pY-np.pi, 'c', lw=1.5, label="pY")
plt.axis([0, maxFreq, -np.pi, np.pi])
plt.title('pY random phases)')
plt.subplot(3,1,3)
plt.plot(np.arange(first, last)/float(fs), y, 'b', lw=1.5)
plt.axis([first/float(fs), last/float(fs), min(y), max(y)])
plt.title('yst')
plt.tight_layout()
plt.savefig('stochasticSynthesisFrame.png')
plt.show()
| agpl-3.0 |