repo_name | path | copies | size | content | license
---|---|---|---|---|---|
rvraghav93/scikit-learn | examples/calibration/plot_calibration_multiclass.py | 95 | 6971 | """
==================================================
Probability Calibration for 3-class classification
==================================================
This example illustrates how sigmoid calibration changes predicted
probabilities for a 3-class classification problem. Illustrated is the
standard 2-simplex, where the three corners correspond to the three classes.
Arrows point from the probability vectors predicted by an uncalibrated
classifier to the probability vectors predicted by the same classifier after
sigmoid calibration on a hold-out validation set. Colors indicate the true
class of an instance (red: class 1, green: class 2, blue: class 3).
The base classifier is a random forest classifier with 25 base estimators
(trees). If this classifier is trained on all 800 training datapoints, it is
overly confident in its predictions and thus incurs a large log-loss.
Calibrating an identical classifier, which was trained on 600 datapoints, with
method='sigmoid' on the remaining 200 datapoints reduces the confidence of the
predictions, i.e., moves the probability vectors from the edges of the simplex
towards the center. This calibration results in a lower log-loss. Note that an
alternative would have been to increase the number of base estimators which
would have resulted in a similar decrease in log-loss.
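As a reminder of what method='sigmoid' does here (Platt scaling): for each
class, a calibrator of the form p = 1 / (1 + exp(A * f + B)) is fitted on the
hold-out set, where f is the classifier's output for that class and A, B are
estimated from the hold-out labels; the per-class calibrated probabilities are
then renormalized to sum to one, as done explicitly for the second figure
below.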
"""
print(__doc__)
# Author: Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.ensemble import RandomForestClassifier
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import log_loss
np.random.seed(0)
# Generate data
X, y = make_blobs(n_samples=1000, n_features=2, random_state=42,
cluster_std=5.0)
X_train, y_train = X[:600], y[:600]
X_valid, y_valid = X[600:800], y[600:800]
X_train_valid, y_train_valid = X[:800], y[:800]
X_test, y_test = X[800:], y[800:]
# Train uncalibrated random forest classifier on whole train and validation
# data and evaluate on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train_valid, y_train_valid)
clf_probs = clf.predict_proba(X_test)
score = log_loss(y_test, clf_probs)
# Train random forest classifier, calibrate on validation data and evaluate
# on test data
clf = RandomForestClassifier(n_estimators=25)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
sig_clf = CalibratedClassifierCV(clf, method="sigmoid", cv="prefit")
sig_clf.fit(X_valid, y_valid)
sig_clf_probs = sig_clf.predict_proba(X_test)
sig_score = log_loss(y_test, sig_clf_probs)
# Plot changes in predicted probabilities via arrows
plt.figure(0)
colors = ["r", "g", "b"]
for i in range(clf_probs.shape[0]):
plt.arrow(clf_probs[i, 0], clf_probs[i, 1],
sig_clf_probs[i, 0] - clf_probs[i, 0],
sig_clf_probs[i, 1] - clf_probs[i, 1],
color=colors[y_test[i]], head_width=1e-2)
# Plot perfect predictions
plt.plot([1.0], [0.0], 'ro', ms=20, label="Class 1")
plt.plot([0.0], [1.0], 'go', ms=20, label="Class 2")
plt.plot([0.0], [0.0], 'bo', ms=20, label="Class 3")
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
# Annotate points on the simplex
plt.annotate(r'($\frac{1}{3}$, $\frac{1}{3}$, $\frac{1}{3}$)',
xy=(1.0/3, 1.0/3), xytext=(1.0/3, .23), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.plot([1.0/3], [1.0/3], 'ko', ms=5)
plt.annotate(r'($\frac{1}{2}$, $0$, $\frac{1}{2}$)',
xy=(.5, .0), xytext=(.5, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $\frac{1}{2}$, $\frac{1}{2}$)',
xy=(.0, .5), xytext=(.1, .5), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($\frac{1}{2}$, $\frac{1}{2}$, $0$)',
xy=(.5, .5), xytext=(.6, .6), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $0$, $1$)',
xy=(0, 0), xytext=(.1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($1$, $0$, $0$)',
xy=(1, 0), xytext=(1, .1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
plt.annotate(r'($0$, $1$, $0$)',
xy=(0, 1), xytext=(.1, 1), xycoords='data',
arrowprops=dict(facecolor='black', shrink=0.05),
horizontalalignment='center', verticalalignment='center')
# Add grid
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Change of predicted probabilities after sigmoid calibration")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.legend(loc="best")
print("Log-loss of")
print(" * uncalibrated classifier trained on 800 datapoints: %.3f "
% score)
print(" * classifier trained on 600 datapoints and calibrated on "
"200 datapoint: %.3f" % sig_score)
# Illustrate calibrator
plt.figure(1)
# generate grid over 2-simplex
p1d = np.linspace(0, 1, 20)
p0, p1 = np.meshgrid(p1d, p1d)
p2 = 1 - p0 - p1
p = np.c_[p0.ravel(), p1.ravel(), p2.ravel()]
p = p[p[:, 2] >= 0]
calibrated_classifier = sig_clf.calibrated_classifiers_[0]
prediction = np.vstack([calibrator.predict(this_p)
for calibrator, this_p in
zip(calibrated_classifier.calibrators_, p.T)]).T
prediction /= prediction.sum(axis=1)[:, None]
# Plot modifications of calibrator
for i in range(prediction.shape[0]):
plt.arrow(p[i, 0], p[i, 1],
prediction[i, 0] - p[i, 0], prediction[i, 1] - p[i, 1],
head_width=1e-2, color=colors[np.argmax(p[i])])
# Plot boundaries of unit simplex
plt.plot([0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], 'k', label="Simplex")
plt.grid("off")
for x in [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
plt.plot([0, x], [x, 0], 'k', alpha=0.2)
plt.plot([0, 0 + (1-x)/2], [x, x + (1-x)/2], 'k', alpha=0.2)
plt.plot([x, x + (1-x)/2], [0, 0 + (1-x)/2], 'k', alpha=0.2)
plt.title("Illustration of sigmoid calibrator")
plt.xlabel("Probability class 1")
plt.ylabel("Probability class 2")
plt.xlim(-0.05, 1.05)
plt.ylim(-0.05, 1.05)
plt.show()
| bsd-3-clause |
xiaoxiamii/scikit-learn | examples/preprocessing/plot_function_transformer.py | 161 | 1949 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principal component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
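For reference, FunctionTransformer simply wraps an arbitrary function as a
stateless transformer (e.g. FunctionTransformer(np.log1p)), which is what lets
the plain column-dropping function below be placed inside a Pipeline after the
PCA step.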
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
plt.scatter(X[:, 0], X[:, 1], c=y, s=50)
plt.show()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
s=50,
)
plt.show()
| bsd-3-clause |
PepSalehi/scipy_2015_sklearn_tutorial | notebooks/figures/plot_digits_datasets.py | 19 | 2750 | # Taken from example in scikit-learn examples
# Authors: Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Gael Varoquaux
# License: BSD 3 clause (C) INRIA 2011
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import offsetbox
from sklearn import (manifold, datasets, decomposition, ensemble, lda,
random_projection)
def digits_plot():
digits = datasets.load_digits(n_class=6)
n_digits = 500
X = digits.data[:n_digits]
y = digits.target[:n_digits]
n_samples, n_features = X.shape
n_neighbors = 30
def plot_embedding(X, title=None):
x_min, x_max = np.min(X, 0), np.max(X, 0)
X = (X - x_min) / (x_max - x_min)
plt.figure()
ax = plt.subplot(111)
for i in range(X.shape[0]):
plt.text(X[i, 0], X[i, 1], str(digits.target[i]),
color=plt.cm.Set1(y[i] / 10.),
fontdict={'weight': 'bold', 'size': 9})
if hasattr(offsetbox, 'AnnotationBbox'):
# only print thumbnails with matplotlib > 1.0
shown_images = np.array([[1., 1.]]) # just something big
for i in range(X.shape[0]):
dist = np.sum((X[i] - shown_images) ** 2, 1)
if np.min(dist) < 1e5:
# don't show points that are too close
# set a high threshold to basically turn this off
continue
shown_images = np.r_[shown_images, [X[i]]]
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(digits.images[i], cmap=plt.cm.gray_r),
X[i])
ax.add_artist(imagebox)
plt.xticks([]), plt.yticks([])
if title is not None:
plt.title(title)
n_img_per_row = 10
img = np.zeros((10 * n_img_per_row, 10 * n_img_per_row))
for i in range(n_img_per_row):
ix = 10 * i + 1
for j in range(n_img_per_row):
iy = 10 * j + 1
img[ix:ix + 8, iy:iy + 8] = X[i * n_img_per_row + j].reshape((8, 8))
plt.imshow(img, cmap=plt.cm.binary)
plt.xticks([])
plt.yticks([])
plt.title('A selection from the 64-dimensional digits dataset')
print("Computing PCA projection")
pca = decomposition.PCA(n_components=2).fit(X)
X_pca = pca.transform(X)
plot_embedding(X_pca, "Principal Components projection of the digits")
plt.figure()
plt.matshow(pca.components_[0, :].reshape(8, 8), cmap="gray")
plt.axis('off')
plt.figure()
plt.matshow(pca.components_[1, :].reshape(8, 8), cmap="gray")
plt.axis('off')
plt.show()
| cc0-1.0 |
pkruskal/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vectors. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional to one another. The cosine
distance is invariant to a scaling of the data; as a result, it cannot
distinguish these two waveforms. Thus, even with no noise, clustering
using this distance will not separate out waveforms 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (i.e. the "cityblock" distance) is much smaller than its
l2 norm (the "euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, which characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate waveforms 1 and 2 at all,
so the clustering puts them in the same cluster.
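(For reference, the distances referred to above are, for vectors x and y:
cityblock/l1: sum_i |x_i - y_i|; euclidean/l2: sqrt(sum_i (x_i - y_i)^2);
cosine: 1 - <x, y> / (||x||_2 * ||y||_2), which is unchanged when either
vector is rescaled.)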
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
np.random.seed(0)
# Generate waveform data
n_features = 2000
t = np.pi * np.linspace(0, 1, n_features)
def sqr(x):
return np.sign(np.cos(x))
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
for _ in range(30):
phase_noise = .01 * np.random.normal()
amplitude_noise = .04 * np.random.normal()
additional_noise = 1 - 2 * np.random.rand(n_features)
# Make the noise sparse
additional_noise[np.abs(additional_noise) < .997] = 0
X.append(12 * ((a + amplitude_noise)
* (sqr(6 * (t + phi + phase_noise)))
+ additional_noise))
y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
labels):
lines = plt.plot(X[y == l].T, c=c, alpha=.5)
lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
avg_dist = np.zeros((n_clusters, n_clusters))
plt.figure(figsize=(5, 4.5))
for i in range(n_clusters):
for j in range(n_clusters):
avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
metric=metric).mean()
avg_dist /= avg_dist.max()
for i in range(n_clusters):
for j in range(n_clusters):
plt.text(i, j, '%5.3f' % avg_dist[i, j],
verticalalignment='center',
horizontalalignment='center')
plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
vmin=0)
plt.xticks(range(n_clusters), labels, rotation=45)
plt.yticks(range(n_clusters), labels)
plt.colorbar()
plt.suptitle("Interclass %s distances" % metric, size=18)
plt.tight_layout()
# Plot clustering results
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
model = AgglomerativeClustering(n_clusters=n_clusters,
linkage="average", affinity=metric)
model.fit(X)
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
plt.axis('tight')
plt.axis('off')
plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
stemblab/intuitive-cs | py/recon.py | 2 | 4493 | #!puzlet
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import proj3d
# Plot a vector as a line segment with a ball at one end.
def plot_vec(start,end,label,color):
ax.plot([start[0],end[0]],[start[1],end[1]],[start[2],end[2]],
label=label,color=color,linewidth=2)
ax.scatter(end[0],end[1],end[2],marker='o',color=color)
def set_axes():
ax.set_autoscale_on(False)
ax.set_xlabel(r'$x_0$',fontsize=20)
ax.set_ylabel(r'$x_1$',fontsize=20)
ax.set_zlabel(r'$x_2$',fontsize=20)
ax.set_xlim(-0.8, 1.2)
ax.set_ylim(-0.2, 1.2)
ax.set_zlim(0, 1.4)
ax.set_xticks([-0.5,0,0.5,1])
ax.set_yticks([0,0.5,1])
ax.set_zticks([0,0.5,1])
fig = plt.figure()
ax = fig.gca(projection='3d')
set_axes()
origin=[0,0,0]
plot_vec(origin,[1,0,0],label='constant',color='blue')
plot_vec(origin,[0,1,0],label='line',color='green')
plot_vec(origin,[0,0,1],label='parabola',color='red')
ax.legend()
fig.savefig("recon1_1.svg", transparent=True, bbox_inches='tight', pad_inches=0.15)
ax.text(1.3,0,0.9, r'$Ax=b$', backgroundcolor='#fcffc9',
ha='center', va='bottom', size=18, zorder=10)
# Ax=b
A = np.array([[1, -1, 1], [1, 2, 4]])
b = np.array([1, 4])
# planes
x0 = np.arange(-1, 1, 0.1)
x1 = np.arange(-1, 1, 0.1)
xx0, yy0 = np.meshgrid(x0, x1)
ax.plot_surface(xx0, yy0, 1-xx0+yy0,
rstride=2, cstride=2, alpha=0.1, color='y')
ax.plot_surface(xx0, yy0, 1-xx0/4.-yy0/2.,
rstride=2, cstride=2, alpha=0.1,color='m')
# solution to Ax=b
pinvA = np.linalg.pinv(A)
U = np.array([0,0,1]) # np.dot(pinvA,b) # One solution
w = np.array([1.5, 0, 0]) # Arbitrary vector to get another solution
N = np.dot((np.eye(3) - np.dot(pinvA,A)),w)
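# Any x = U + t*N solves Ax = b: A.dot(U) equals b (U is one particular
# solution), and A.dot(N) = 0 because (I - pinv(A)A) projects onto the null
# space of A. The line segment plotted below lies in this solution set.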
ax.plot(*zip(U+N*2/3.,U-N*4/3.),color='k',linewidth=3,
label=r'$x$ (not sparse)')
ax.legend()
fig.savefig("recon1_2.svg", transparent=True, bbox_inches='tight', pad_inches=0.15)
ax.text(0,0,1.2, r'1-sparse $x$', backgroundcolor='#fcffc9',
ha='center', va='bottom', size=16, zorder=10)
ax.scatter(0,0,1,marker='*',s=400,color='r',label=r'$x$ (1-sparse)')
ax.legend()
fig.savefig("recon1_3.svg", transparent=True, bbox_inches='tight', pad_inches=0.15)
def plot_norms(l, h, U):
Nl = len(l)
f, axarr = plt.subplots(Nl, 1)
def plot(l, n):
ax.set_title('$l_%s$-norm' % str(n))
ax.scatter(np.array(h), np.array(l))
# Show U value for minimum of norm
#m = np.argmin(l)
#l_min = l[m]
#h_min = h[m]
#U_min = U[m]
#ax_lim = ax.axis()
#offset = -0.05*(ax_lim[3] - ax_lim[2])
#v = lambda d: '{0:.2f}'.format(U_min[d])
#label = "U=[%s, %s, %s]" % (v(0), v(1), v(2))
#ax.text(h_min, l_min+offset, label, va='top')
for n in range(Nl):
ax = axarr[n]
plot(l[n], n)
plt.tight_layout()
Np = 201 # number of points to plot (Must be odd to include 0!)
h = np.array(np.linspace(-1, 1, Np)) # null vector multiplier
Y=U.reshape(3,1)+np.dot(N.reshape(3,1),h.reshape(1,Np))
# 3xNp array of x candidates
l=np.zeros((3,Np))
for n in range(3):
l[n,:] = np.apply_along_axis(np.linalg.norm, 0, Y, n)
# See: http://matplotlib.org/examples/axes_grid/demo_parasite_axes2.html
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
plt.clf()
host = host_subplot(111, axes_class=AA.Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
par2 = host.twinx()
offset = 60
new_fixed_axis = par2.get_grid_helper().new_fixed_axis
par2.axis["right"] = new_fixed_axis(loc="right",
axes=par2,
offset=(offset, 0))
par2.axis["right"].toggle(all=True)
host.set_xlim(-1, 1)
host.set_ylim(1, 3.5)
host.set_xlabel("Distance from Sparse Sorution ($d$)")
host.set_ylabel(r"$\||x\||_0$")
par1.set_ylabel(r"$\||x\||_1$")
par2.set_ylabel(r"$\||x\||_2$")
p1, = host.plot(h, l[0], label=r"$\||x\||_0$")
p2, = par1.plot(h, l[1], label=r"$\||x\||_1$")
p3, = par2.plot(h, l[2], label=r"$\||x\||_2$")
par1.set_ylim(1, 3)
par2.set_ylim(0, 2)
host.legend()
host.axis["left"].label.set_color(p1.get_color())
host.axis["left"].label.set_fontsize(20)
par1.axis["right"].label.set_color(p2.get_color())
par1.axis["right"].label.set_fontsize(20)
par2.axis["right"].label.set_color(p3.get_color())
par2.axis["right"].label.set_fontsize(20)
fig.savefig("norms.svg", transparent=True, bbox_inches='tight', pad_inches=0.15)
| mit |
Crompulence/cpl-library | examples/interactive_plot_example/python/CFD_recv_and_plot_grid_interactive.py | 1 | 3724 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
from mpi4py import MPI
from cplpy import CPL
from draw_grid import draw_grid
#initialise MPI and CPL
comm = MPI.COMM_WORLD
CPL = CPL()
CFD_COMM = CPL.init(CPL.CFD_REALM)
nprocs_realm = CFD_COMM.Get_size()
# Parameters of the cpu topology (cartesian grid)
npxyz = np.array([1, 1, 1], order='F', dtype=np.int32)
NProcs = np.product(npxyz)
xyzL = np.array([10.0, 10.0, 10.0], order='F', dtype=np.float64)
xyz_orig = np.array([0.0, 0.0, 0.0], order='F', dtype=np.float64)
ncxyz = np.array([16, 6, 16], order='F', dtype=np.int32)
if (nprocs_realm != NProcs):
print("Non-coherent number of processes in CFD ", nprocs_realm,
" no equal to ", npxyz[0], " X ", npxyz[1], " X ", npxyz[2])
MPI.Abort(errorcode=1)
#Setup coupled simulation
cart_comm = CFD_COMM.Create_cart([npxyz[0], npxyz[1], npxyz[2]])
CPL.setup_cfd(cart_comm, xyzL, xyz_orig, ncxyz)
#Plot output
fig, ax = plt.subplots(1,1)
plt.subplots_adjust(bottom=0.25)
axslider = plt.axes([0.25, 0.1, 0.65, 0.03])
freq = 1.
sfreq = Slider(axslider, 'Freq', 0.1, 2.0, valinit=freq)
def update(val):
    global freq
    freq = sfreq.val
    print("CHANGED", freq)
sfreq.on_changed(update)
plt.ion()
plt.show()
# === Plot both grids ===
dx = CPL.get("xl_cfd")/float(CPL.get("ncx"))
dy = CPL.get("yl_cfd")/float(CPL.get("ncy"))
dz = CPL.get("zl_cfd")/float(CPL.get("ncz"))
ioverlap = (CPL.get("icmax_olap")-CPL.get("icmin_olap")+1)
joverlap = (CPL.get("jcmax_olap")-CPL.get("jcmin_olap")+1)
koverlap = (CPL.get("kcmax_olap")-CPL.get("kcmin_olap")+1)
xoverlap = ioverlap*dx
yoverlap = joverlap*dy
zoverlap = koverlap*dz
for time in range(100000):
# recv data to plot
olap_limits = CPL.get_olap_limits()
portion = CPL.my_proc_portion(olap_limits)
[ncxl, ncyl, nczl] = CPL.get_no_cells(portion)
recv_array = np.zeros((1, ncxl, ncyl, nczl), order='F', dtype=np.float64)
recv_array, ierr = CPL.recv(recv_array, olap_limits)
#Plot CFD and coupler Grid
draw_grid(ax,
nx=CPL.get("ncx"),
ny=CPL.get("ncy"),
nz=CPL.get("ncz"),
px=CPL.get("npx_cfd"),
py=CPL.get("npy_cfd"),
pz=CPL.get("npz_cfd"),
xmin=CPL.get("x_orig_cfd"),
ymin=CPL.get("y_orig_cfd"),
zmin=CPL.get("z_orig_cfd"),
xmax=(CPL.get("icmax_olap")+1)*dx,
ymax=CPL.get("yl_cfd"),
zmax=(CPL.get("kcmax_olap")+1)*dz,
lc = 'r',
label='CFD')
#Plot MD domain
draw_grid(ax, nx=1, ny=1, nz=1,
px=CPL.get("npx_md"),
py=CPL.get("npy_md"),
pz=CPL.get("npz_md"),
xmin=CPL.get("x_orig_md"),
ymin=-CPL.get("yl_md")+yoverlap,
zmin=CPL.get("z_orig_md"),
xmax=(CPL.get("icmax_olap")+1)*dx,
ymax=yoverlap,
zmax=(CPL.get("kcmax_olap")+1)*dz,
label='MD')
#Plot x component on grid
x = np.linspace(CPL.get("x_orig_cfd")+.5*dx,xoverlap-.5*dx,ioverlap)
z = np.linspace(CPL.get("z_orig_cfd")+.5*dz,zoverlap-.5*dz,koverlap)
for j in range(joverlap):
ax.plot(x, 0.5*dy*(recv_array[0,:,j,0]+1.+2*j), 's-')
ax.set_xlabel('$x$')
ax.set_ylabel('$y$')
print(time, freq)
plt.pause(0.1)
ax.cla()
# send data to update
olap_limits = CPL.get_olap_limits()
portion = CPL.my_proc_portion(olap_limits)
[ncxl, ncyl, nczl] = CPL.get_no_cells(portion)
send_array = freq*np.ones((1, ncxl, ncyl, nczl), order='F', dtype=np.float64)
CPL.send(send_array, olap_limits)
CPL.finalize()
MPI.Finalize()
| gpl-3.0 |
berkeley-stat159/project-alpha | code/utils/scripts/glm_script.py | 1 | 3957 | """ Script for GLM functions.
Run with:
python glm_script.py
"""
# Loading modules.
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
import nibabel as nib
import os
import sys
# Relative paths to subject 1 data.
project_path = "../../../"
pathtodata = project_path + "data/ds009/sub001/"
condition_location = pathtodata+"model/model001/onsets/task001_run001/"
location_of_images = project_path+"images/"
location_of_functions = project_path+"code/utils/functions/"
sys.path.append(location_of_functions)
# Load events2neural from the stimuli module.
from stimuli import events2neural
from event_related_fMRI_functions import hrf_single, convolution_specialized
# Load our GLM functions.
from glm import glm, glm_diagnostics, glm_multiple
# Load the image data for subject 1.
img = nib.load(pathtodata+"BOLD/task001_run001/bold.nii.gz")
data = img.get_data()
data = data[...,6:] # Knock off the first 6 observations.
cond1=np.loadtxt(condition_location+"cond001.txt")
cond2=np.loadtxt(condition_location+"cond002.txt")
cond3=np.loadtxt(condition_location+"cond003.txt")
#######################
# a. (my) convolution #
#######################
all_stimuli=np.array(sorted(list(cond2[:,0])+list(cond3[:,0])+list(cond1[:,0]))) # could also just x_s_array
my_hrf = convolution_specialized(all_stimuli,np.ones(len(all_stimuli)),hrf_single,np.linspace(0,239*2-2,239))
##################
# b. np.convolve #
##################
# initial needed values
TR = 2
tr_times = np.arange(0, 30, TR)
hrf_at_trs = np.array([hrf_single(x) for x in tr_times])
n_vols=data.shape[-1]
# creating the .txt file for the events2neural function
cond_all=np.row_stack((cond1,cond2,cond3))
cond_all=sorted(cond_all,key= lambda x:x[0])
np.savetxt(condition_location+"cond_all.txt",cond_all)
neural_prediction = events2neural(condition_location+"cond_all.txt",TR,n_vols)
convolved = np.convolve(neural_prediction, hrf_at_trs) # hrf_at_trs sample data
N = len(neural_prediction) # N == n_vols == 173
M = len(hrf_at_trs) # M == 12
np_hrf=convolved[:N]
#############################
#############################
# Analysis and diagonistics #
#############################
#############################
#######################
# a. (my) convolution #
#######################
# Now get the estimated coefficients and design matrix for doing
# regression on the convolved time course.
B_my, X_my = glm(data, my_hrf)
# Some diagnostics.
MRSS_my, fitted_my, residuals_my = glm_diagnostics(B_my, X_my, data)
# Print out the mean MRSS.
print("MRSS using 'my' convolution function: "+str(np.mean(MRSS_my)))
# Plot the time course for a single voxel with the fitted values.
# Looks pretty bad.
plt.plot(data[41, 47, 2]) #change from cherry-picking
plt.plot(fitted_my[41, 47, 2])
plt.savefig(location_of_images+"glm_plot_my.png")
plt.close()
##################
# b. np.convolve #
##################
# Now get the estimated coefficients and design matrix for doing
# regression on the convolved time course.
B_np, X_np = glm(data, np_hrf)
# Some diagnostics.
MRSS_np, fitted_np, residuals_np = glm_diagnostics(B_np, X_np, data)
# Print out the mean MRSS.
print("MRSS using np convolution function: "+str(np.mean(MRSS_np)))
# Plot the time course for a single voxel with the fitted values.
# Looks pretty bad.
plt.plot(data[41, 47, 2])
plt.plot(fitted_np[41, 47, 2])
plt.savefig(location_of_images+"glm_plot_np.png")
plt.close()
X_my3=np.ones((data.shape[-1],4))
for i in range(2):
X_my3[:,i+1]=my_hrf**(i+1)
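# Note: only columns 1 and 2 of X_my3 are filled (my_hrf and my_hrf**2);
# columns 0 and 3 both keep their initial value of one.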
B_my3, X_my3 = glm_multiple(data, X_my3)
MRSS_my3, fitted_my3, residuals_my3 = glm_diagnostics(B_my3, X_my3, data)
print("MRSS using 'my' convolution function, 3rd degree polynomial: "+str(np.mean(MRSS_my3))+ ", but the chart looks better")
plt.plot(data[41, 47, 2])
plt.plot(fitted_my3[41, 47, 2])
plt.savefig(location_of_images+"glm_plot_my3.png")
plt.close()
| bsd-3-clause |
SISC2014/JobAnalysis | MongoRetrieval/src/EfficiencyHistogram.py | 1 | 6076 | '''
Created on Jun 19, 2014
@author: Erik Halperin
List of Keys
_id
JobStartDate
Requirements
TransferInput
TotalSuspensions
LastJobStatus
BufferBlockSize
OrigMaxHosts
RequestMemory
WantRemoteSyscalls
LastHoldReasonCode
ExitStatus
Args
JobFinishedHookDone
JobCurrentStartDate
CompletionDate
JobLeaseDuration
Err
RemoteWallClockTime
JobUniverse
RequestCpus
RemoveReason
StreamErr
Rank
WantRemoteIO
LocalSysCpu
UsedOCWrapper
CumulativeSlotTime
TransferIn
MachineAttrCpus0
CondorPlatform
CurrentTime
ExitReason
StreamOut
WantCheckpoint
GlobalJobId
TransferInputSizeMB
JobStatus
LastPublicClaimId
MemoryUsage
NumSystemHolds
TransferOutput
PeriodicRemove
NumShadowStarts
LastHoldReasonSubCode
LastSuspensionTime
ShouldTransferFiles
QDate
RemoteSysCpu
ImageSize_RAW
LastRemoteHost
CondorVersion
DiskUsage_RAW
PeriodicRelease
NumCkpts_RAW
JobCurrentStartExecutingDate
ProjectName
CoreSize
RemoteUserCpu
BytesSent
Owner
BytesRecvd
ExitCode
NumJobStarts
ExecutableSize_RAW
Notification
ExecutableSize
Environment
StartdPrincipal
RootDir
MinHosts
CumulativeSuspensionTime
JOBGLIDEIN_ResourceName
ProcId
MATCH_EXP_JOBGLIDEIN_ResourceName
OnExitRemove
User
UserLog
CommittedSuspensionTime
NumRestarts
JobCoreDumped
Cmd
NumJobMatches
DiskUsage
LastRemotePool
CommittedSlotTime
ResidentSetSize
WhenToTransferOutput
ExitBySignal
Out
RequestDisk
ImageSize
NumCkpts
LastJobLeaseRenewal
MachineAttrSlotWeight0
ResidentSetSize_RAW
JobPrio
JobRunCount
PeriodicHold
ClusterId
NiceUser
MyType
LocalUserCpu
BufferSize
LastHoldReason
CurrentHosts
LeaveJobInQueue
OnExitHold
EnteredCurrentStatus
MaxHosts
CommittedTime
LastMatchTime
In
JobNotification
'''
import re
import matplotlib.pyplot as plt
from pymongo import MongoClient
#takes a list of dictionaries and returns a list of floats
def parseList(l):
l = map(str, l)
newlist = []
for k in l:
newlist.append(re.sub('[RemoteWallClockTimeUsrpu_id\"\'{}: ]', '', k))
newlist = map(float, newlist)
return list(newlist)
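# Illustrative example (hypothetical records):
#   parseList([{'RemoteUserCpu': 1234.0}, {'RemoteUserCpu': 56.0}])
#   returns [1234.0, 56.0]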
#returns a list of dictionaries
#item is from list of keys, username: "example@login01.osgconnect.net", cluster: "123456", site: "phys.ucconn.edu",
#coll: MongoDB collection
#username/cluster/site may be None, in which case they will not be used
#item should be _id
def dbFindItemFromUser(item, username, cluster, site, coll):
mylist = []
rgx = "$regex"
if(username != None):
username = '\"' + username + '\"'
dicU = {'User': username }
else:
dicU = {}
if(cluster != None):
dicC = { 'ClusterId': cluster }
else:
dicC = {}
if(site != None):
dicS = { 'LastRemoteHost': { rgx: site } }
else:
dicS = {}
dicU.update(dicC)
dicU.update(dicS)
pr = { item: 1, '_id': 0 }
for condor_history in coll.find(dicU, pr):
mylist.append(condor_history)
return mylist
#returns a list of dictionaries
#username and coll are same as above
def dbFindIdFromUser(username, coll):
mylist = []
username = '\"' + username + '\"'
cr = { 'User': username }
pr = { '_id': 1 }
for condor_history in coll.find(cr, pr):
mylist.append(condor_history)
return mylist
#creates a scatterplot of two items
def plotScatter(item1, item2, username, cluster, coll, xlab, ylab, title):
    lst1 = parseList(dbFindItemFromUser(item1, username, cluster, None, coll))  # site=None: no site filter
    lst2 = parseList(dbFindItemFromUser(item2, username, cluster, None, coll))
plt.plot(lst1, lst2, 'bo')
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.title(title)
plt.show()
#creates a histogram of a list
#l: list to plot, bs: number of bins
def plotHist(l, bs, xlab, ylab, title):
plt.hist(l, bins=bs)
plt.title(title)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.show()
def getEfficiency(username, cluster, site, coll):
ruc = parseList(dbFindItemFromUser("RemoteUserCpu", username, cluster, site, coll))
rwct = parseList(dbFindItemFromUser("RemoteWallClockTime", username, cluster, site, coll))
efflist = []
totcount = 0
goodcount = 0 #certain efficiency values are >1 due to a condor error. these values are discarded
zerocount = 0 #testing possible condor bug where RemoteUserCpu is 0 but RemoteWallClockTime is quite large
for x,y in zip(ruc, rwct):
if(y == 0):
totcount += 1
elif(x/y > 1):
totcount += 1
else:
if(x == 0):
zerocount +=1
efflist.append(x/y)
totcount += 1
goodcount +=1
return [efflist, goodcount, totcount]
#Given at least one input for username/cluster/site, creates a histogram of the RemoteUserCpu/RemoteWallClockTime for the results
def efficiencyHistogram(username, cluster, site, coll, bins, xlab, ylab, title):
retlist = getEfficiency(username, cluster, site, coll) #0: efflist, 1: goodcount, 2: totcount
print("Jobs Plotted:", retlist[1], "/", retlist[2])
plotHist(retlist[0], bins, xlab, ylab, title)
def fourEffHists(lst1, lst2, lst3, lst4, lab1, lab2, lab3, lab4, bs, xlab, ylab, title):
plt.hist(lst1, bins=bs, histtype='stepfilled', label=lab1)
plt.hist(lst2, bins=bs, histtype='stepfilled', label=lab2)
plt.hist(lst3, bins=bs, histtype='stepfilled', label=lab3)
plt.hist(lst4, bins=bs, histtype='stepfilled', label=lab4)
plt.title(title)
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.legend()
plt.show()
def mainEH(host, port):
client = MongoClient(host, port)
db = client.condor_history
coll = db.history_records
#sites: uc.mwt2.org, phys.uconn.edu, hpc.smu.edu, usatlas.bnl.gov
#names (@login01.osgconnect.net): lfzhao, sthapa, echism, wcatino, bamitchell
str_name = "bamitchell@login01.osgconnect.net"
efficiencyHistogram(str_name, None, None, coll, 75, "UserCPU/WallClockTime", "Frequency", "Efficiencies for " + str_name)
mainEH('mc.mwt2.org', 27017) | mit |
pmediano/ComputationalNeurodynamics | Fall2016/Exercise_1/Solutions/IzNeuronRK4.py | 1 | 1897 | """
Computational Neurodynamics
Exercise 1
Simulates Izhikevich's neuron model using the Runge-Kutta 4 method.
Parameters for regular spiking, fast spiking and bursting
neurons extracted from:
http://www.izhikevich.org/publications/spikes.htm
(C) Murray Shanahan et al, 2016
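For reference, the model integrated below (see s_dt and the reset rule) is:
    dv/dt = 0.04*v^2 + 5*v + 140 - u + I
    du/dt = a*(b*v - u)
with the after-spike reset: if v >= 30 mV, then v <- c and u <- u + d.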
"""
import numpy as np
import matplotlib.pyplot as plt
# Create time points
Tmin = 0
Tmax = 200 # Simulation time
dt = 0.01 # Step size
T = np.arange(Tmin, Tmax+dt, dt)
# Base current
I = 10
## Parameters of Izhikevich's model (regular spiking)
a = 0.02
b = 0.2
c = -65
d = 8
## Parameters of Izhikevich's model (fast spiking)
# a = 0.02
# b = 0.25
# c = -65
# d = 2
## Parameters of Izhikevich's model (bursting)
# a = 0.02
# b = 0.2
# c = -50
# d = 2
## Make a state vector that has a (v, u) pair for each timestep
s = np.zeros((len(T), 2))
## Initial values
s[0, 0] = -65
s[0, 1] = -1
# Note that s1[0] is v and s1[1] is u. This is the Izhikevich equation in vector form.
def s_dt(s1, I):
v_dt = 0.04*(s1[0]**2) + 5*s1[0] + 140 - s1[1] + I
u_dt = a*(b*s1[0] - s1[1])
return np.array([v_dt, u_dt])
## SIMULATE
for t in range(len(T)-1):
# Calculate the four constants of Runge-Kutta method
k_1 = s_dt(s[t], I)
k_2 = s_dt(s[t] + 0.5*dt*k_1, I)
k_3 = s_dt(s[t] + 0.5*dt*k_2, I)
k_4 = s_dt(s[t] + dt*k_3, I)
s[t+1] = s[t] + (1.0/6)*dt*(k_1 + 2*k_2 + 2*k_3 + k_4)
# Reset the neuron if it has spiked
if s[t+1, 0] >= 30:
s[t, 0] = 30 # Add a Dirac pulse for visualisation
s[t+1, 0] = c # Reset to resting potential
s[t+1, 1] += d # Update recovery variable
v = s[:, 0]
u = s[:, 1]
## Plot the membrane potential
plt.subplot(211)
plt.plot(T, v)
plt.xlabel('Time (ms)')
plt.ylabel('Membrane potential v (mV)')
plt.title('Izhikevich Neuron')
# Plot the reset variable
plt.subplot(212)
plt.plot(T, u)
plt.xlabel('Time (ms)')
plt.ylabel('Reset variable u')
plt.show()
| gpl-3.0 |
gems-uff/noworkflow | capture/noworkflow/resources/demo/annual_precipitation/step4/precipitation.py | 4 | 2619 | #!/usr/bin/python2
import csv
import numpy as np
import matplotlib.pyplot as plt
import time
from itertools import chain
from collections import defaultdict
def read(filename):
result = defaultdict(list)
with open(filename, "r") as c:
reader = csv.reader(c, delimiter=";")
for row in reader:
month = int(row[1].split("/")[1])
precipitation = float(row[3])
result[month].append(precipitation)
return result
def write(filename, data, year):
with open(filename, "w") as c:
writer = csv.writer(c, delimiter=";")
for month in sorted(data.keys()):
for day, value in enumerate(data[month]):
writer.writerow([
83743, "{:02}/{:02}/{}".format(day + 1, month, year),
1200, value])
def remove_outliers(data, thresh=2.5):
full_data = np.asarray(tuple(chain.from_iterable(data[i]
for i in sorted(data.keys()))))
non_zeros = full_data != 0
median = np.median(full_data[non_zeros])
result = {}
for month in data:
values = np.asarray(data[month])[:, None]
diff = np.sum((values - median)**2, axis=-1)
diff = np.sqrt(diff)
med_abs_deviation = np.median(diff)
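        # 0.6745 * |x - median| / MAD is the "modified z-score"; points whose
        # score exceeds `thresh` are treated as outliers and replaced below by
        # the median of the non-zero data.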
modified_z_score = 0.6745 * diff / med_abs_deviation
outliers = modified_z_score > thresh
non_outliers = modified_z_score <= thresh
new_data = np.zeros(len(values))
new_data[non_outliers] = np.transpose(values[non_outliers])[0]
new_data[outliers] = median
result[month] = new_data.tolist()
return result
def prepare(series, months, names, div=.1, colors=["b", "g", "r"]):
fig, ax = plt.subplots()
ax.set_ylabel("Precipitation (mm)")
ax.set_xlabel("Month")
ax.set_title("Precipitation by Month")
ax.set_xticks(months + .5)
ax.set_xticklabels(list(map(str, months)))
ax.set_ylim([0, 400])
half_div = div / 2.0
width = (1.0 - div) / len(series)
bars = []
for i, data in enumerate(series):
bars.append(ax.bar(months + half_div + i * width, data, width,
color=colors[i]))
ax.legend(bars, names)
def create_bargraph(output, months, years, *prec):
prepare(prec, months, years)
plt.savefig(output)
def sum_by_month(data, months):
time.sleep(1)
return [sum(data[i]) for i in months]
__VERSION__ = "1.1.0"
if __name__ == "__main__":
import sys
filename = sys.argv[1]
month = sys.argv[2]
data = read(filename)
print(";".join(map(str, data[int(month)])))
| mit |
sk413025/tilitools | latentsvdd.py | 1 | 3222 | from cvxopt import matrix,spmatrix,sparse,uniform,normal,setseed
from cvxopt.blas import dot,dotu
from cvxopt.solvers import qp
from cvxopt.lapack import syev
import numpy as np
import math as math
from kernel import Kernel
from svdd import SVDD
from ocsvm import OCSVM
import pylab as pl
import matplotlib.pyplot as plt
class LatentSVDD:
""" Latent variable support vector data description.
Written by Nico Goernitz, TU Berlin, 2014
For more information see:
'Learning and Evaluation with non-i.i.d Label Noise'
Goernitz et al., AISTATS & JMLR W&CP, 2014
"""
PRECISION = 10**-3 # important: affects the threshold, support vectors and speed!
C = 1.0 # (scalar) the regularization constant > 0
sobj = [] # structured object contains various functions
# i.e. get_num_dims(), get_num_samples(), get_sample(i), argmin(sol,i)
sol = [] # (vector) solution vector (after training, of course)
def __init__(self, sobj, C=1.0):
self.C = C
self.sobj = sobj
def train_dc(self, max_iter=50):
""" Solve the LatentSVDD optimization problem with a
sequential convex programming/DC-programming
approach:
Iteratively, find the most likely configuration of
the latent variables and then, optimize for the
model parameter using fixed latent states.
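        Concretely, each iteration of the loop below:
        (1) with the current center `sol` fixed, picks for every example the
            latent state whose joint feature map Psi(x, z) is closest to it;
        (2) re-trains an SVDD on the resulting feature vectors and rebuilds
            the center from its support vectors.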
"""
N = self.sobj.get_num_samples()
DIMS = self.sobj.get_num_dims()
# intermediate solutions
# latent variables
latent = [0]*N
sol = 10.0*normal(DIMS,1)
psi = matrix(0.0, (DIMS,N)) # (dim x exm)
old_psi = matrix(0.0, (DIMS,N)) # (dim x exm)
threshold = 0
obj = -1
iter = 0
# terminate if objective function value doesn't change much
while iter<max_iter and (iter<2 or sum(sum(abs(np.array(psi-old_psi))))>=0.001):
print('Starting iteration {0}.'.format(iter))
print(sum(sum(abs(np.array(psi-old_psi)))))
iter += 1
old_psi = matrix(psi)
# 1. linearize
# for the current solution compute the
# most likely latent variable configuration
for i in range(N):
# min_z ||sol - Psi(x,z)||^2 = ||sol||^2 + min_z -2<sol,Psi(x,z)> + ||Psi(x,z)||^2
# Hence => ||sol||^2 - max_z 2<sol,Psi(x,z)> - ||Psi(x,z)||^2
(foo, latent[i], psi[:,i]) = self.sobj.argmax(sol, i, opt_type='quadratic')
# 2. solve the intermediate convex optimization problem
kernel = Kernel.get_kernel(psi,psi)
svdd = SVDD(kernel, self.C)
svdd.train_dual()
threshold = svdd.get_threshold()
inds = svdd.get_support_dual()
alphas = svdd.get_support_dual_values()
sol = psi[:,inds]*alphas
self.sol = sol
self.latent = latent
return (sol, latent, threshold)
def apply(self, pred_sobj):
""" Application of the LatentSVDD:
anomaly_score = min_z ||c*-\Psi(x,z)||^2
latent_state = argmin_z ||c*-\Psi(x,z)||^2
"""
N = pred_sobj.get_num_samples()
norm2 = self.sol.trans()*self.sol
vals = matrix(0.0, (1,N))
lats = matrix(0.0, (1,N))
for i in range(N):
# min_z ||sol - Psi(x,z)||^2 = ||sol||^2 + min_z -2<sol,Psi(x,z)> + ||Psi(x,z)||^2
# Hence => ||sol||^2 - max_z 2<sol,Psi(x,z)> - ||Psi(x,z)||^2
(max_obj, lats[i], foo) = pred_sobj.argmax(self.sol, i, opt_type='quadratic')
vals[i] = norm2 - max_obj
return (vals, lats)
| mit |
hlin117/scikit-learn | examples/ensemble/plot_forest_iris.py | 18 | 6190 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10-fold cross-validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
print( model_details + " with features", pair, "has a score of", scores )
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different to what we see in the decision
# surfaces. These points are regularly space and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
plt.scatter(X[:, 0], X[:, 1], c=y,
cmap=ListedColormap(['r', 'y', 'b']))
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
| bsd-3-clause |
bioinformatics-IBCH/logloss-beraf | logloss_beraf/model_ops/trainer.py | 1 | 12714 | # coding=utf-8
import copy
import logging
import os
# https://github.com/matplotlib/matplotlib/issues/3466/#issuecomment-195899517
import itertools
import matplotlib
matplotlib.use('agg')
import numpy as np
import pandas
from sklearn import (
preprocessing,
model_selection,
)
from sklearn.cross_validation import (
LeaveOneOut,
StratifiedKFold,
)
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import RandomizedLogisticRegression
import cPickle as pickle
from utils.constants import (
PREFILTER_PCA_PLOT_NAME,
POSTFILTER_PCA_PLOT_NAME,
FEATURE_IMPORTANCE_PLOT_NAME,
FEATURE_COLUMN,
FEATURE_IMPORTANCE_COLUMN,
TRAINED_MODEL_NAME,
)
from visualization.plotting import plot_pca_by_annotation
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
from settings import logger
class LLBModelTrainer(object):
"""
Class implementing main steps of the algorithm:
1. Initial regions filtering with a user-specified delta beta-values threshold
2. Applying randomized logistic regression in order to additionally pre-filter input regions
3. Extracting highly correlated sites
4. Reconstructing logloss function on the interval of user specified limit of number of sites
5. Detecting optimal panel of regions and training final model
Also does some visualizations
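    A minimal usage sketch (illustrative only; the column names below are
    hypothetical, and `beta_df` is a samples x regions dataframe indexed by
    sample name):
        trainer = LLBModelTrainer(threads=4, output_folder="results")
        features, clf, ll_mean, ll_std = trainer.train(
            beta_df, annotation_df,
            sample_class_column="diagnosis",
            sample_name_column="sample_id")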
"""
def __init__(self, threads=0, max_num_of_features=20,
cv_method="SKFold", class_weights="balanced", final_clf_estimators_num=3000,
intermediate_clf_estimators_num=1000, logloss_estimates=50, min_beta_threshold=0.2,
rr_iterations=5000, correlation_threshold=0.85, output_folder=None):
"""
:param threads:
:type threads: int
:param max_num_of_features: maximum number of features a model can contain
:type max_num_of_features: int
:param cv_method: Supported cross-validation methods: "LOO", "SKFold"
:type cv_method: str
:param class_weights: Class balancing strategy
:type class_weights: dict, str
:param final_clf_estimators_num: number of estimators used in a final classifier
:type final_clf_estimators_num: int
:param intermediate_clf_estimators_num: number of estimators used in intermediate classifiers
:type intermediate_clf_estimators_num: int
:param logloss_estimates: Number of LogLoss estimates on number of sites limited interval
:type logloss_estimates: int
:param min_beta_threshold: Minimum beta-values difference threshold
:type min_beta_threshold: float
:param rr_iterations: Number of randomized regression iterations
"""
self.threads = threads
self.max_num_of_features = max_num_of_features
self.min_beta_threshold = min_beta_threshold
# train process configuration
self.cv_method = cv_method
self.class_weights = class_weights
self.final_clf_estimators_num = final_clf_estimators_num
self.intermediate_clf_estimators_num = intermediate_clf_estimators_num
self.rr_iterations = rr_iterations
self.logloss_estimates = logloss_estimates
# common
self.correlation_threshold = correlation_threshold
self.output_folder = output_folder if output_folder is not None else "results"
if not os.path.exists(self.output_folder):
os.makedirs(self.output_folder)
def _run_randomized_regression(self, feature_df, annotation, clinical_column, sample_fraction=0.7):
annotation = copy.deepcopy(annotation)
# Encode labels of the classes
le = preprocessing.LabelEncoder()
annotation[clinical_column] = le.fit_transform(annotation[clinical_column])
clf = RandomizedLogisticRegression(
n_resampling=self.rr_iterations,
sample_fraction=sample_fraction,
n_jobs=1,
verbose=1,
).fit(feature_df, annotation[clinical_column])
selected_features = feature_df.T[clf.scores_ != 0].index
logger.info("Number of selected features: %d", len(selected_features))
return selected_features, clf
def _train_clf(self, X, y, n_estimators=10):
clf = RandomForestClassifier(n_estimators, n_jobs=self.threads, class_weight=self.class_weights)
scores = scores_accuracy = np.array([0])
cv_algo = None
if self.cv_method is not None:
if self.cv_method == "LOO":
cv_algo = LeaveOneOut(len(y))
elif self.cv_method == "SKFold":
cv_algo = StratifiedKFold(y)
logger.info("Running cross-validation...")
scores = model_selection.cross_val_score(
clf,
X,
y,
cv=cv_algo,
scoring='neg_log_loss',
n_jobs=self.threads,
verbose=1,
)
clf.fit(X, y)
return clf, scores.mean(), scores.std()
def _describe_and_filter_regions(self, basic_region_df, annotation, clinical_column, sample_name_column):
logger.info("Initial number of regions: {0}".format(basic_region_df.shape))
# Initial filtering based on min_beta_threshold
class_combinations = itertools.combinations(annotation[clinical_column].unique(), 2)
for combination in class_combinations:
first_class_samples = annotation[annotation[clinical_column] == combination[0]][sample_name_column]
second_class_samples = annotation[annotation[clinical_column] == combination[1]][sample_name_column]
mean_difference = (basic_region_df.loc[first_class_samples].mean()
- basic_region_df.loc[second_class_samples].mean())
basic_region_df = basic_region_df[mean_difference[abs(mean_difference) > self.min_beta_threshold].index.tolist()]
basic_region_df = basic_region_df.dropna(how="any", axis=1)
logger.info("Number of features after initial filtration: {0}".format(basic_region_df.shape))
plot_pca_by_annotation(
basic_region_df,
annotation,
clinical_column,
sample_name_column,
outfile=os.path.join(self.output_folder, PREFILTER_PCA_PLOT_NAME),
)
logger.info("Starting feature selection with RLR...")
selected_features, model = self._run_randomized_regression(
basic_region_df,
annotation,
clinical_column,
)
plot_pca_by_annotation(
basic_region_df[selected_features],
annotation,
clinical_column,
sample_name_column,
outfile=os.path.join(self.output_folder, POSTFILTER_PCA_PLOT_NAME),
)
return selected_features, model
def plot_fi_distribution(self, feature_importances):
ax = feature_importances[FEATURE_IMPORTANCE_COLUMN].hist()
ax.set_xlabel("Feature Importance")
ax.set_ylabel("Number of features")
fig = ax.get_figure()
fig.savefig(os.path.join(self.output_folder, FEATURE_IMPORTANCE_PLOT_NAME))
def _apply_feature_imp_thresh(self, features, feature_imp, thresh):
return [
feature[0] for feature in
zip(features.values, feature_imp)
if feature[1] > thresh
]
def get_threshold(self, logloss_df):
# Standard error
ll_se = logloss_df["mean"].std() / np.sqrt(len(logloss_df["mean"]))
# Restricting search to desired number of features.
logloss_df = logloss_df[logloss_df["len"] <= int(self.max_num_of_features)]
ll_max = logloss_df[logloss_df["mean"] == logloss_df["mean"].max()].iloc[0]
ll_interval = logloss_df[logloss_df["mean"] > (ll_max["mean"] - 0.5 * ll_se)]
res = ll_interval[ll_interval["len"] == ll_interval["len"].min()].iloc[0]
return res
def train(self, train_regions, anndf, sample_class_column, sample_name_column):
"""
Main functionality
:param train_regions: input dataframe with all regions methylation
:type train_regions: pandas.DataFrame
:param anndf: annotation dataframe, containing at least sample name and sample class
:type anndf: pandas.DataFrame
:param sample_class_column: name of the sample class column
:type sample_class_column: str
:param sample_name_column: name of the sample name column
:type sample_name_column: str
:return:
"""
# train_regions = train_regions.T
# First sort both train_regions and annotation according to sample names
train_regions = train_regions.sort_index(ascending=True)
# Ensure annotation contains only samples from the train_regions
anndf = anndf[anndf[sample_name_column].isin(train_regions.index.tolist())].sort_values(
by=[sample_name_column],
ascending=True
).dropna(subset=[sample_name_column])
train_regions = train_regions.ix[anndf[sample_name_column].tolist()]
assert anndf[sample_name_column].tolist() == train_regions.index.tolist(), \
"Samples in the annotations table are diferrent from those in feature table"
# Prefilter regions
selected_regions, clf = self._describe_and_filter_regions(
train_regions,
anndf,
sample_class_column,
sample_name_column,
)
# Estimate feature importances (FI)
first_clf, mean, std = self._train_clf(
train_regions[selected_regions.values],
anndf[sample_class_column],
n_estimators=self.final_clf_estimators_num,
)
feature_importances = pandas.DataFrame.from_records(
zip(selected_regions.values, first_clf.feature_importances_),
columns=[FEATURE_COLUMN, FEATURE_IMPORTANCE_COLUMN],
)
# Visualizing feature importance distribution
self.plot_fi_distribution(feature_importances)
# Extracting correlated site
feature_importances = feature_importances[
abs(feature_importances[FEATURE_IMPORTANCE_COLUMN]) > 0
]
corr_matrix = train_regions[feature_importances[FEATURE_COLUMN]].corr().applymap(
lambda x: 1 if abs(x) >= self.correlation_threshold else 0
)
logloss_df_cols = ["thresh", "mean", "std", "len"]
logloss_di = pandas.DataFrame(columns=logloss_df_cols)
for thresh in np.arange(
feature_importances[FEATURE_IMPORTANCE_COLUMN].quantile(0.99),
feature_importances[FEATURE_IMPORTANCE_COLUMN].max(),
(
feature_importances[FEATURE_IMPORTANCE_COLUMN].max() -
feature_importances[FEATURE_IMPORTANCE_COLUMN].min()
) / self.logloss_estimates
):
selected_features = self._apply_feature_imp_thresh(selected_regions, first_clf.feature_importances_, thresh)
if len(selected_features) < 2:
continue
logger.info(
"Estimating %d features on feature importance threshold %f",
len(selected_features),
thresh
)
clf, mean, std = self._train_clf(
train_regions[selected_features],
anndf[sample_class_column],
n_estimators=self.intermediate_clf_estimators_num,
)
logloss_di = logloss_di.append(
pandas.Series([thresh, mean, std, len(selected_features)], index=logloss_df_cols),
ignore_index=True,
)
logger.info("LogLoss mean=%f, std=%f on threshold %f", mean, std, thresh)
logger.info("Detecting optimal feature subset...")
thresh = self.get_threshold(logloss_di)
logger.info("Selected threshold")
logger.info(thresh)
selected_features = self._apply_feature_imp_thresh(
selected_regions,
first_clf.feature_importances_,
thresh["thresh"],
)
logger.info("Trainig final model...")
clf, mean, std = self._train_clf(
train_regions[selected_features],
anndf[sample_class_column],
n_estimators=self.final_clf_estimators_num,
)
logger.info("Selected features: {0}".format(selected_features))
pickle.dump((clf, selected_features), open(os.path.join(self.output_folder, TRAINED_MODEL_NAME), 'w'))
return selected_features, clf, mean, std
| gpl-3.0 |
timqian/sms-tools | lectures/3-Fourier-properties/plots-code/zero-padding.py | 26 | 1083 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming
from scipy.fftpack import fft, fftshift
plt.figure(1, figsize=(9.5, 6))
M = 8
N1 = 8
N2 = 16
N3 = 32
x = np.cos(2*np.pi*2/M*np.arange(M)) * np.hanning(M)
plt.subplot(4,1,1)
plt.title('x, M=8')
plt.plot(np.arange(-M/2.0,M/2), x, 'b', marker='x', lw=1.5)
plt.axis([-M/2,M/2-1,-1,1])
mX = 20 * np.log10(np.abs(fftshift(fft(x, N1))))
plt.subplot(4,1,2)
plt.plot(np.arange(-N1/2.0,N1/2), mX, marker='x', color='r', lw=1.5)
plt.axis([-N1/2,N1/2-1,-20,max(mX)+1])
plt.title('magnitude spectrum: mX1, N=8')
mX = 20 * np.log10(np.abs(fftshift(fft(x, N2))))
plt.subplot(4,1,3)
plt.plot(np.arange(-N2/2.0,N2/2),mX,marker='x',color='r', lw=1.5)
plt.axis([-N2/2,N2/2-1,-20,max(mX)+1])
plt.title('magnitude spectrum: mX2, N=16')
mX = 20 * np.log10(np.abs(fftshift(fft(x, N3))))
plt.subplot(4,1,4)
plt.plot(np.arange(-N3/2.0,N3/2),mX,marker='x',color='r', lw=1.5)
plt.axis([-N3/2,N3/2-1,-20,max(mX)+1])
plt.title('magnitude spectrum: mX3, N=32')
plt.tight_layout()
plt.savefig('zero-padding.png')
plt.show()
| agpl-3.0 |
proyan/sot-torque-control | python/dynamic_graph/sot/torque_control/identification/identify_motor_acc.py | 1 | 2771 | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 12 18:47:50 2017
@author: adelpret
"""
from scipy import signal
import numpy as np
from scipy import ndimage
import matplotlib.pyplot as plt
from identification_utils import solve1stOrderLeastSquare
def identify_motor_acc(dt, dq, ddq, current, tau, Kt_p, Kv_p, ZERO_VELOCITY_THRESHOLD_SMALL,
ZERO_JERK_THRESHOLD, SHOW_THRESHOLD_EFFECT):
#Filter current*****************************************************
win = signal.hann(10)
filtered_current = signal.convolve(current, win, mode='same') / sum(win)
current = filtered_current
# Mask valid data***************************************************
#~ # remove high jerk
dddq = np.gradient(ddq,1)/dt
maskConstAcc = (abs (dddq)<ZERO_JERK_THRESHOLD)
#~ # erode to get only steady phases where acceleration is constant
maskConstAcc=ndimage.morphology.binary_erosion(maskConstAcc,None,100)
maskPosVel=(dq> ZERO_VELOCITY_THRESHOLD_SMALL)
maskNegVel=(dq<-ZERO_VELOCITY_THRESHOLD_SMALL)
maskConstPosAcc=np.logical_and( maskConstAcc ,maskPosVel )
maskConstNegAcc=np.logical_and( maskConstAcc ,maskNegVel )
if SHOW_THRESHOLD_EFFECT :
plt.figure()
plt.plot(ddq); plt.ylabel('ddq')
ddq_const=ddq.copy()
ddq_const[np.logical_not(maskConstAcc)]=np.nan
plt.plot(ddq_const); plt.ylabel('ddq_const')
plt.show()
#~ y = a. x + b
#~ i-Kt.tau-Kv.dq = Ka.ddq + Kf
#~
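#~ i.e. once the torque (Kt_p) and viscous (Kv_p) contributions are subtracted from the
#~ current, the residual is fitted as a line in ddq: the slope Ka is the acceleration
#~ gain and the intercept Kf a constant offset, identified separately on the constant
#~ positive- and negative-acceleration phases.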
# Identification ***************************************************
y = current-Kt_p*tau - Kv_p*dq
y[maskConstPosAcc] = current[maskConstPosAcc]-Kt_p*tau[maskConstPosAcc] - Kv_p*dq[maskConstPosAcc]
y[maskConstNegAcc] = current[maskConstNegAcc]-Kt_p*tau[maskConstNegAcc] - Kv_p*dq[maskConstNegAcc]
y_label = r'$i(t)-{K_t}{\tau(t)}-{K_v}{\dot{q}(t)}$'
x = ddq
x_label = r'$\ddot{q}(t)$'
(Kap,Kfp)=solve1stOrderLeastSquare(x[maskConstPosAcc],y[maskConstPosAcc])
(Kan,b)=solve1stOrderLeastSquare(x[maskConstNegAcc],y[maskConstNegAcc])
Kfn=-b
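# the intercept of the negative-acceleration fit is negated so both branches share the
# same sign convention (the model line plotted below is Kan*x - Kfn)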
# Plot *************************************************************
plt.figure()
plt.axhline(0, color='black',lw=1)
plt.axvline(0, color='black',lw=1)
plt.plot(x ,y ,'.' ,lw=3,markersize=1,c='0.5');
plt.plot(x[maskConstPosAcc],y[maskConstPosAcc],'rx',lw=3,markersize=1);
plt.plot(x[maskConstNegAcc],y[maskConstNegAcc],'bx',lw=3,markersize=1);
#plot identified lin model
plt.plot([min(x),max(x)],[Kap*min(x)+Kfp ,Kap*max(x)+Kfp],'g:',lw=3)
plt.plot([min(x),max(x)],[Kan*min(x)-Kfn ,Kan*max(x)-Kfn],'g:',lw=3)
plt.ylabel(y_label)
plt.xlabel(x_label)
plt.show()
return (Kap, Kan, Kfp, Kfn) | gpl-3.0 |
stylianos-kampakis/scikit-learn | examples/exercises/plot_cv_diabetes.py | 231 | 2527 | """
===============================================
Cross-validation on diabetes Dataset Exercise
===============================================
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the :ref:`cv_estimators_tut` part of the
:ref:`model_selection_tut` section of the :ref:`stat_learn_tut_index`.
"""
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import cross_validation, datasets, linear_model
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = linear_model.Lasso()
alphas = np.logspace(-4, -.5, 30)
scores = list()
scores_std = list()
for alpha in alphas:
lasso.alpha = alpha
this_scores = cross_validation.cross_val_score(lasso, X, y, n_jobs=1)
scores.append(np.mean(this_scores))
scores_std.append(np.std(this_scores))
plt.figure(figsize=(4, 3))
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
plt.semilogx(alphas, np.array(scores) + np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.semilogx(alphas, np.array(scores) - np.array(scores_std) / np.sqrt(len(X)),
'b--')
plt.ylabel('CV score')
plt.xlabel('alpha')
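# highlight the best mean CV score reached over the alpha grid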
plt.axhline(np.max(scores), linestyle='--', color='.5')
##############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = linear_model.LassoCV(alphas=alphas)
k_fold = cross_validation.KFold(len(X), 3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
| bsd-3-clause |
jviada/QuantEcon.py | quantecon/models/solow/impulse_response.py | 7 | 10840 | """
Classes for generating and plotting impulse response functions.
@author : David R. Pugh
@date : 2014-10-06
"""
from __future__ import division
from textwrap import dedent
import matplotlib.pyplot as plt
import numpy as np
class ImpulseResponse(object):
"""Base class representing an impulse response function for a Model."""
# number of points to use for "padding"
N = 10
# length of impulse response
T = 100
def __init__(self, model):
"""
Create an instance of the ImpulseResponse class.
Parameters
----------
model : model.Model
Instance of the model.Model class representing a Solow model.
"""
self.model = model
def __repr__(self):
"""Machine readable summary of a ImpulseResponse instance."""
return self.__str__()
def __str__(self):
"""Human readable summary of a ImpulseResponse instance."""
m = """
Impulse response function (IRF):
- N (number of points used for padding) : {N:d}
- T (length of the impulse response) : {T:d}
"""
formatted_str = dedent(m.format(N=self.N, T=self.T))
return formatted_str
@property
def _padding(self):
"""
Impulse response functions are "padded" for pretty plotting.
:getter: Return the current "padding" values.
:type: numpy.ndarray
"""
return np.hstack((self._padding_time, self._padding_variables))
@property
def _padding_scaling_factor(self):
"""
Scaling factor used in constructing the impulse response function
"padding".
:getter: Return the current scaling factor.
:type: numpy.ndarray
"""
# extract the relevant parameters
A0 = self.model.params['A0']
L0 = self.model.params['L0']
g = self.model.params['g']
n = self.model.params['n']
if self.kind == 'per_capita':
factor = A0 * np.exp(g * self._padding_time)
elif self.kind == 'levels':
factor = A0 * L0 * np.exp((g + n) * self._padding_time)
else:
factor = np.ones(self.N)
return factor.reshape((self.N, 1))
@property
def _padding_time(self):
"""
The independent variable, time, is "padded" using values from -N to -1.
:getter: Return the current "padding" values.
:type: numpy.ndarray
"""
return np.linspace(-self.N, -1, self.N).reshape((self.N, 1))
@property
def _padding_variables(self):
"""
Impulse response functions for endogenous variables are "padded" with
N periods of steady state values.
:getter: Return current "padding" values.
:type: numpy.ndarray
"""
# economy is initially in the steady state
k0 = self.model.steady_state
y0 = self.model.evaluate_intensive_output(k0)
c0 = self.model.evaluate_consumption(k0)
i0 = self.model.evaluate_actual_investment(k0)
initial_condition = np.array([[k0, y0, c0, i0]])
return self._padding_scaling_factor * initial_condition
@property
def _response(self):
"""
Response functions combined independent and endogenous variables.
:getter: Return the current response values.
:type: numpy.ndarray
"""
return np.hstack((self._response_time, self._response_variables))
@property
def _response_time(self):
"""
The independent variable, time, for the response ranges from 0 to T.
:getter: Return the current response time values.
:type: numpy.ndarray
"""
return np.linspace(0, self.T, self.T + 1).reshape((self.T + 1, 1))
@property
def _response_variables(self):
"""
Response of endogenous variables to exogenous impulse.
:getter: Return the current response.
:type: numpy.ndarray
"""
# economy is initially in the steady state
k0 = self.model.steady_state
# apply the impulse...force validate params!
tmp_params = self.model.params.copy()
tmp_params.update(self.impulse)
self.model.params = tmp_params
# ...and generate the response
soln = self.model.ivp.solve(t0=0.0, y0=k0, h=1.0, T=self.T,
integrator='dop853')
# gather the results
k = soln[:, 1][:, np.newaxis]
y = self.model.evaluate_intensive_output(k)
c = self.model.evaluate_consumption(k)
i = self.model.evaluate_actual_investment(k)
return self._response_scaling_factor * np.hstack((k, y, c, i))
@property
def _response_scaling_factor(self):
"""
Scaling factor used in constructing the impulse response.
:getter: Return the current scaling factor.
:type: numpy.ndarray
"""
# extract the relevant parameters
A0 = self.model.params['A0']
L0 = self.model.params['L0']
g = self.model.params['g']
n = self.model.params['n']
if self.kind == 'per_capita':
factor = A0 * np.exp(g * self._response_time)
elif self.kind == 'levels':
factor = A0 * L0 * np.exp((g + n) * self._response_time)
else:
factor = np.ones(self.T + 1)
return factor.reshape((self.T + 1, 1))
@property
def impulse(self):
"""
Dictionary of new parameter values representing an impulse.
:getter: Return the current impulse dictionary.
:setter: Set a new impulse dictionary.
:type: dictionary
"""
return self._impulse
@property
def kind(self):
"""
The kind of impulse response function to generate. Must be one of:
'levels', 'per_capita', 'efficiency_units'.
:getter: Return the current kind of impulse responses.
:setter: Set a new value for the kind of impulse responses.
:type: str
"""
return self._kind
@property
def impulse_response(self):
"""
Impulse response functions generated by a shock to model parameter(s).
:getter: Return the current impulse response functions.
:type: numpy.ndarray
"""
orig_params = self.model.params.copy()
# create the irf
tmp_irf = np.vstack((self._padding, self._response))
# reset the model parameters
self.model.params = orig_params
return tmp_irf
@impulse.setter
def impulse(self, params):
"""Set a new impulse dictionary."""
self._impulse = self._validate_impulse(params)
@kind.setter
def kind(self, value):
"""Set a new value for the kind attribute."""
self._kind = self._validate_kind(value)
def _validate_impulse(self, params):
"""Validates the impulse attribute."""
if not isinstance(params, dict):
mesg = "ImpulseResponse.impulse must have type dict, not {}."
raise AttributeError(mesg.format(params.__class__))
elif not set(params.keys()) <= set(self.model.params.keys()):
mesg = "Invalid parameter included in the impulse dictionary."""
raise AttributeError(mesg)
else:
return params
@staticmethod
def _validate_kind(value):
"""Validates the kind attribute."""
valid_kinds = ['levels', 'per_capita', 'efficiency_units']
if value not in valid_kinds:
mesg = "The 'kind' attribute must be in {}."
raise AttributeError(mesg.format(valid_kinds))
else:
return value
def plot_impulse_response(self, ax, variable, log=False):
"""
Plot an impulse response function.
Parameters
----------
ax : `matplotlib.axes.AxesSubplot`
An instance of `matplotlib.axes.AxesSubplot`.
variable : str
Variable whose impulse response functions you wish to plot.
log : boolean (default=False)
Whether or not to have logarithmic scales on the vertical axes.
Useful when plotting impulse response functions with
kind='per_capita' or kind='levels'.
Returns
-------
A list containing:
irf_line : matplotlib.lines.Line2D
A Line2D object representing the impulse response for the requested
variable.
bgp_line : matplotlib.lines.Line2D
A Line2D object representing the pre-impulse balanced growth path
for the model.
"""
# create a mapping from variables to column indices
irf = self.impulse_response
irf_dict = {'capital': irf[:, [0, 1]],
'output': irf[:, [0, 2]],
'consumption': irf[:, [0, 3]],
'investment': irf[:, [0, 4]],
}
# create the plot
traj = irf_dict[variable]
irf_line = ax.plot(traj[:, 0], traj[:, 1])
# add the old balanced growth path
g = self.model.params['g']
n = self.model.params['n']
t = self.N + traj[:, 0]
if self.kind == 'per_capita':
bgp_line = ax.plot(traj[:, 0], traj[0, 1] * np.exp(g * t), 'k--',
label='Original BGP')
ax.set_ylabel(variable.title() + ' (per capita)', fontsize=15,
family='serif')
elif self.kind == 'levels':
bgp_line = ax.plot(traj[:, 0], traj[0, 1] * np.exp((g + n) * t),
'k--', label='Original BGP')
ax.set_ylabel(variable.title(), fontsize=15, family='serif')
else:
bgp_line = ax.axhline(traj[0, 1], linestyle='dashed', color='k',
label='Original BGP')
ax.set_ylabel(variable.title() + ' (per unit effective labor)',
fontsize=15, family='serif')
# format axes, labels, title, legend, etc
ax.set_xlabel('Time', fontsize=15, family='serif')
ax.set_ylim(0.95 * traj[:, 1].min(), 1.05 * traj[:, 1].max())
if log is True:
ax.set_yscale('log')
ax.set_title('Impulse response function', fontsize=20, family='serif')
ax.grid('on')
ax.legend(loc=0, frameon=False, bbox_to_anchor=(1.0, 1.0),
prop={'family': 'serif'})
return [irf_line, bgp_line]
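# A minimal usage sketch (illustrative only; `model` is assumed to be an already
# configured Solow model instance, and the shocked parameter name must exist in
# model.params):
#
# irf = ImpulseResponse(model)
# irf.kind = 'efficiency_units'
# irf.impulse = {'g': 0.03} # new value(s) for the shocked parameter(s)
# fig, ax = plt.subplots(1, 1)
# irf.plot_impulse_response(ax, 'output')
# plt.show()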
| bsd-3-clause |
rbooth200/DiscEvolution | DiscEvolution/driver.py | 1 | 12630 | # driver.py
#
# Author: R. Booth
# Date: 17 - Nov - 2016
#
# Combined model for dust, gas and chemical evolution
################################################################################
from __future__ import print_function
import numpy as np
import os
from .photoevaporation import FixedExternalEvaporation
from .constants import yr
from . import io
class DiscEvolutionDriver(object):
"""Driver class for full evolution model.
Required Arguments:
disc : Disc model to update
Optional Physics update:
dust : Update the dust, i.e. radial drift
gas : Update due to gas effects, i.e. Viscous evolution
diffusion : Separate diffusion update
internal_photo : Remove gas by internal photoevaporation
photoevaporation : Remove gas by external photoevaporation
chemistry : Solver for the chemical evolution
History:
history : Tracks values of key parameters over time
Note: Diffusion is usually handled in the dust dynamics module
Other options:
t0 : Starting time, default = 0, code units
t_out : Previous output times, default = None, years
"""
def __init__(self, disc, gas=None, dust=None, diffusion=None, chemistry=None, ext_photoevaporation=None, int_photoevaporation=None, history=None, t0=0.):
self._disc = disc
self._gas = gas
self._dust = dust
self._diffusion = diffusion
self._chemistry = chemistry
self._external_photo = ext_photoevaporation
self._internal_photo = int_photoevaporation
self._history = history
self._t = t0
self._nstep = 0
def __call__(self, tmax):
"""Evolve the disc for a single timestep
args:
tmax : Time to evolve up to; acts as an upper limit on the time-step
returns:
dt : Time step taken
"""
disc = self._disc
# Compute the maximum time-step
dt = tmax - self.t
if self._gas:
dt = min(dt, self._gas.max_timestep(self._disc))
if self._dust:
v_visc = self._gas.viscous_velocity(disc)
dt = min(dt, self._dust.max_timestep(self._disc, v_visc))
if self._dust._diffuse:
dt = min(dt, self._dust._diffuse.max_timestep(self._disc))
if self._diffusion:
dt = min(dt, self._diffusion.max_timestep(self._disc))
if self._external_photo and hasattr(self._external_photo,"_density"): # If we are using density to calculate mass loss rates, we need to limit the time step based on photoevaporation
(dM_dot, dM_gas) = self._external_photo.optically_thin_weighting(disc)
Dt = dM_gas[(dM_dot>0)] / dM_dot[(dM_dot>0)]
Dt_min = np.min(Dt)
dt = min(dt,Dt_min)
# Determine tracers for dust step
gas_chem, ice_chem = None, None
dust = None
try:
gas_chem = disc.chem.gas.data
ice_chem = disc.chem.ice.data
except AttributeError:
pass
# Do dust evolution
if self._dust:
self._dust(dt, disc,
gas_tracers=gas_chem,
dust_tracers=ice_chem, v_visc=v_visc)
# Determine tracers for gas steps
try:
gas_chem = disc.chem.gas.data
ice_chem = disc.chem.ice.data
except AttributeError:
pass
try:
dust = disc.dust_frac
except AttributeError:
pass
# Do Advection-diffusion update
if self._gas:
self._gas(dt, disc, [dust, gas_chem, ice_chem])
if self._diffusion:
if gas_chem is not None:
gas_chem[:] += dt * self._diffusion(disc, gas_chem)
if ice_chem is not None:
ice_chem[:] += dt * self._diffusion(disc, ice_chem)
if dust is not None:
dust[:] += dt * self._diffusion(disc, dust)
# Do external photoevaporation
if self._external_photo:
self._external_photo(disc, dt)
# Do internal photoevaporation
if self._internal_photo:
self._internal_photo(disc, dt/yr, self._external_photo)
# Pin the values to >= 0 and <=1:
disc.Sigma[:] = np.maximum(disc.Sigma, 0)
try:
disc.dust_frac[:] = np.maximum(disc.dust_frac, 0)
disc.dust_frac[:] /= np.maximum(disc.dust_frac.sum(0), 1.0)
except AttributeError:
pass
try:
disc.chem.gas.data[:] = np.maximum(disc.chem.gas.data, 0)
disc.chem.ice.data[:] = np.maximum(disc.chem.ice.data, 0)
except AttributeError:
pass
# Chemistry
if self._chemistry:
rho = disc.midplane_gas_density
eps = disc.dust_frac.sum(0)
grain_size = disc.grain_size[-1]
T = disc.T
self._chemistry.update(dt, T, rho, eps, disc.chem,
grain_size=grain_size)
# If we have dust, we should update it now the ice fraction has
# changed
disc.update_ices(disc.chem.ice)
# Now we should update the auxiliary properties, do grain growth etc
disc.update(dt)
self._t += dt
self._nstep += 1
return dt
@property
def disc(self):
return self._disc
@property
def t(self):
return self._t
@property
def num_steps(self):
return self._nstep
@property
def gas(self):
return self._gas
@property
def dust(self):
return self._dust
@property
def diffusion(self):
return self._diffusion
@property
def chemistry(self):
return self._chemistry
@property
def photoevaporation_external(self):
return self._external_photo
@property
def photoevaporation_internal(self):
return self._internal_photo
@property
def history(self):
return self._history
def dump_ASCII(self, filename):
"""Write the current state to a file, including header information"""
# Put together a header containing information about the physics
# included
head = ''
if self._gas:
head += self._gas.ASCII_header() + '\n'
if self._dust:
head += self._dust.ASCII_header() + '\n'
if self._diffusion:
head += self._diffusion.ASCII_header() + '\n'
if self._chemistry:
head += self._chemistry.ASCII_header() + '\n'
if self._external_photo:
head += self._external_photo.ASCII_header() + '\n'
if self._internal_photo:
head += self._internal_photo.ASCII_header() + '\n'
# Write it all to disc
io.dump_ASCII(filename, self._disc, self.t, head)
def dump_hdf5(self, filename):
"""Write the current state in HDF5 format, with header information"""
headers = []
if self._gas: headers.append(self._gas.HDF5_attributes())
if self._dust: headers.append(self._dust.HDF5_attributes())
if self._diffusion: headers.append(self._diffusion.HDF5_attributes())
if self._chemistry: headers.append(self._chemistry.HDF5_attributes())
if self._external_photo: headers.append(self._external_photo.HDF5_attributes())
if self._internal_photo: headers.append(self._internal_photo.HDF5_attributes())
io.dump_hdf5(filename, self._disc, self.t, headers)
if __name__ == "__main__":
from .star import SimpleStar
from .grid import Grid
from .eos import IrradiatedEOS
from .viscous_evolution import ViscousEvolution
from .dust import DustGrowthTwoPop, SingleFluidDrift
from .opacity import Zhu2012, Tazzari2016
from .diffusion import TracerDiffusion
from .chemistry import TimeDepCOChemOberg, SimpleCOAtomAbund
from .constants import Msun, AU
from .disc_utils import mkdir_p
import matplotlib.pyplot as plt
alpha = 1e-3
Mdot = 1e-8
Rd = 100.
#kappa = Zhu2012
kappa = Tazzari2016()
N_cell = 250
R_in = 0.1
R_out = 500.
yr = 2*np.pi
output_dir = 'test_DiscEvo'
output_times = np.arange(0, 4) * 1e6 * yr
plot_times = np.array([0, 1e4, 1e5, 5e5, 1e6, 3e6])*yr
# Setup the initial conditions
Mdot *= (Msun / yr) / AU**2
grid = Grid(R_in, R_out, N_cell, spacing='natural')
star = SimpleStar(M=1, R=2.5, T_eff=4000.)
# Initial guess for Sigma:
R = grid.Rc
Sigma = (Mdot / (0.1 * alpha * R**2 * star.Omega_k(R))) * np.exp(-R/Rd)
# Iterate until constant Mdot
eos = IrradiatedEOS(star, alpha, kappa=kappa)
eos.set_grid(grid)
eos.update(0, Sigma)
for i in range(100):
Sigma = 0.5 * (Sigma + (Mdot / (3 * np.pi * eos.nu)) * np.exp(-R/Rd))
eos.update(0, Sigma)
# Create the disc object
disc = DustGrowthTwoPop(grid, star, eos, 0.01, Sigma=Sigma)
# Setup the chemistry
chemistry = TimeDepCOChemOberg(a=1e-5)
# Setup the dust-to-gas ratio from the chemistry
solar_abund = SimpleCOAtomAbund(N_cell)
solar_abund.set_solar_abundances()
# Iterate ice fractions to get the dust-to-gas ratio:
for i in range(10):
chem = chemistry.equilibrium_chem(disc.T,
disc.midplane_gas_density,
disc.dust_frac.sum(0),
solar_abund)
disc.initialize_dust_density(chem.ice.total_abund)
disc.chem = chem
# Setup the dynamics modules:
gas = ViscousEvolution()
dust = SingleFluidDrift(TracerDiffusion())
evo = DiscEvolutionDriver(disc, gas=gas, dust=dust, chemistry=chemistry)
# Setup the IO controller
IO = io.Event_Controller(save=output_times, plot=plot_times)
# Run the model!
while not IO.finished():
ti = IO.next_event_time()
while evo.t < ti:
dt = evo(ti)
if (evo.num_steps % 1000) == 0:
print('Nstep: {}'.format(evo.num_steps))
print('Time: {} yr'.format(evo.t / yr))
print('dt: {} yr'.format(dt / yr))
if IO.check_event(evo.t, 'save'):
from .disc_utils import mkdir_p
mkdir_p(output_dir)
snap_name = 'disc_{:04d}.dat'.format(IO.event_number('save'))
evo.dump_ASCII(os.path.join(output_dir, snap_name))
snap_name = 'disc_{:04d}.h5'.format(IO.event_number('save'))
evo.dump_hdf5(os.path.join(output_dir, snap_name))
if IO.check_event(evo.t, 'plot'):
err_state = np.seterr(all='warn')
print('Nstep: {}'.format(evo.num_steps))
print('Time: {} yr'.format(evo.t / (2 * np.pi)))
plt.subplot(321)
l, = plt.loglog(grid.Rc, evo.disc.Sigma_G)
plt.loglog(grid.Rc, evo.disc.Sigma_D.sum(0), '--', c=l.get_color())
plt.xlabel('$R$')
plt.ylabel('$\Sigma_\mathrm{G, D}$')
plt.subplot(322)
plt.loglog(grid.Rc, evo.disc.dust_frac.sum(0))
plt.xlabel('$R$')
plt.ylabel('$\epsilon$')
plt.subplot(323)
plt.loglog(grid.Rc, evo.disc.Stokes()[1])
plt.xlabel('$R$')
plt.ylabel('$St$')
plt.subplot(324)
plt.loglog(grid.Rc, evo.disc.grain_size[1])
plt.xlabel('$R$')
plt.ylabel('$a\,[\mathrm{cm}]$')
plt.subplot(325)
gCO = evo.disc.chem.gas.atomic_abundance()
sCO = evo.disc.chem.ice.atomic_abundance()
gCO.data[:] /= solar_abund.data
sCO.data[:] /= solar_abund.data
c = l.get_color()
plt.semilogx(grid.Rc, gCO['C'], '-', c=c, linewidth=1)
plt.semilogx(grid.Rc, gCO['O'], '-', c=c, linewidth=2)
plt.semilogx(grid.Rc, sCO['C'], ':', c=c, linewidth=1)
plt.semilogx(grid.Rc, sCO['O'], ':', c=c, linewidth=2)
plt.xlabel('$R\,[\mathrm{au}]$')
plt.ylabel('$[X]_\mathrm{solar}$')
plt.subplot(326)
plt.semilogx(grid.Rc, gCO['C'] / gCO['O'], '-', c=c)
plt.semilogx(grid.Rc, sCO['C'] / sCO['O'], ':', c=c)
plt.xlabel('$R\,[\mathrm{au}]$')
plt.ylabel('$[C/O]_\mathrm{solar}$')
np.seterr(**err_state)
IO.pop_events(evo.t)
if len(plot_times) > 0:
plt.show()
| gpl-3.0 |
mosbys/Clone | Cloning_v1/drive.py | 1 | 3838 | import argparse
import base64
import json
import numpy as np
import socketio
import eventlet
import eventlet.wsgi
import time
from PIL import Image
from PIL import ImageOps
from flask import Flask, render_template
from io import BytesIO
from random import randint
from keras.models import model_from_json
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array
import cv2
# Fix error with Keras and TensorFlow
import tensorflow as tf
import matplotlib.pyplot as plt
tf.python.control_flow_ops = tf
sio = socketio.Server()
app = Flask(__name__)
model = None
prev_image_array = None
iDebug = 0
def preprocess(image, top_offset=.375, bottom_offset=.125):
"""
Applies the preprocessing pipeline to an image: crops `top_offset` and `bottom_offset`
portions of the image and resizes it to half of its original resolution.
"""
top = int(top_offset * image.shape[0])
bottom = int(bottom_offset * image.shape[0])
image = image[top:-bottom, :]
newShape = image.shape
image= cv2.resize(image,(int(newShape[1]/2), int(newShape[0]/2)), interpolation = cv2.INTER_CUBIC)
return image
@sio.on('telemetry')
def telemetry(sid, data):
# The current steering angle of the car
steering_angle = data["steering_angle"]
# The current throttle of the car
throttle = data["throttle"]
# The current speed of the car
speed = data["speed"]
# The current image from the center camera of the car
imgString = data["image"]
image = Image.open(BytesIO(base64.b64decode(imgString)))
image_array = np.asarray(image)
image_array=preprocess(image_array)
newShape = image_array.shape
#image_array=cv2.resize(image_array,(newShape[1], newShape[0]),interpolation=cv2.INTER_CUBIC)
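# add a leading batch axis so model.predict receives a single (1, H, W, 3) image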
transformed_image_array = image_array[None, :, :, :]
if (iDebug==1):
plt.imshow(image_array)
plt.show()
#transformed_image_array2 = np.zeros([1,2*64,64,3])
#transformed_image_array2[0]=cv2.resize(transformed_image_array[0],(2*64, 64),interpolation=cv2.INTER_CUBIC)
# This model currently assumes that the features of the model are just the images. Feel free to change this.
steering_angle = float(model.predict(transformed_image_array, batch_size=1))
# The driving model currently just outputs a constant throttle. Feel free to edit this.
#steering_angle = randint(0,100)/100*randint(-1,1);
throttle = 0.2
print(steering_angle, throttle)
send_control(steering_angle, throttle)
@sio.on('connect')
def connect(sid, environ):
print("connect ", sid)
send_control(0, 0)
def send_control(steering_angle, throttle):
sio.emit("steer", data={
'steering_angle': steering_angle.__str__(),
'throttle': throttle.__str__()
}, skip_sid=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Remote Driving')
parser.add_argument('model', type=str,
help='Path to model definition json. Model weights should be on the same path.')
args = parser.parse_args()
with open(args.model, 'r') as jfile:
# NOTE: if you saved the file by calling json.dump(model.to_json(), ...)
# then you will have to call:
#
# model = model_from_json(json.loads(jfile.read()))\
#
# instead.
#model = model_from_json(jfile.read())
model = model_from_json(json.loads(jfile.read()))
model.compile("adam", "mse")
weights_file = args.model.replace('json', 'h5')
model.load_weights(weights_file)
# wrap Flask application with engineio's middleware
app = socketio.Middleware(sio, app)
# deploy as an eventlet WSGI server
eventlet.wsgi.server(eventlet.listen(('', 4567)), app) | gpl-2.0 |
phoebe-project/phoebe2-docs | development/tutorials/general_concepts.py | 2 | 14093 | #!/usr/bin/env python
# coding: utf-8
# General Concepts: The PHOEBE Bundle
# ======================
#
# **HOW TO RUN THIS FILE**: if you're running this in a Jupyter notebook or Google Colab session, you can click on a cell and then shift+Enter to run the cell and automatically select the next cell. Alt+Enter will run a cell and create a new cell below it. Ctrl+Enter will run a cell but keep it selected. To restart from scratch, restart the kernel/runtime.
#
#
# All of these tutorials assume basic comfort with Python in general - particularly with the concepts of lists, dictionaries, and objects as well as basic comfort with using the numpy and matplotlib packages. This tutorial introduces all the general concepts of accessing parameters within the Bundle.
#
# Setup
# ----------------------------------------------
#
# Let's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab).
# In[1]:
#!pip install -I "phoebe>=2.3,<2.4"
# Let's get started with some basic imports:
# In[2]:
import phoebe
from phoebe import u # units
# If running in IPython notebooks, you may see a "ShimWarning" depending on the version of Jupyter you are using - this is safe to ignore.
#
# PHOEBE 2 uses constants defined in the IAU 2015 Resolution which conflict with the constants defined in astropy. As a result, you'll see the warnings as phoebe.u and phoebe.c "hijacks" the values in astropy.units and astropy.constants.
#
# Whenever providing units, please make sure to use `phoebe.u` instead of `astropy.units`, otherwise the conversions may be inconsistent.
# ### Logger
#
# Before starting any script, it is a good habit to initialize a logger and define which levels of information you want printed to the command line (clevel) and dumped to a file (flevel). A convenience function is provided at the top-level via [phoebe.logger](../api/phoebe.logger.md) to initialize the logger with any desired level.
#
# The levels from most to least information are:
#
# * DEBUG
# * INFO
# * WARNING
# * ERROR
# * CRITICAL
#
# In[3]:
logger = phoebe.logger(clevel='WARNING')
# All of these arguments are optional and will default to clevel='WARNING' if not provided. There is therefore no need to provide a filename if you don't provide a value for flevel.
#
# So with this logger, anything with WARNING, ERROR, or CRITICAL levels will be printed to the screen. If we had also provided flevel and a filename, messages at or above that level would additionally be written to that file.
#
# Note: the logger messages are not included in the outputs shown below.
#
# ## Overview
#
# As a quick overview of what's to come, here is a quick preview of some of the steps used when modeling a binary system with PHOEBE. Each of these steps will be explained in more detail throughout these tutorials.
#
# First we need to create our binary system. For the sake of most of these tutorials, we'll use the default detached binary available through the [phoebe.default_binary](../api/phoebe.default_binary.md) constructor.
# In[4]:
b = phoebe.default_binary()
# This object holds all the parameters and their respective values. We'll see in this tutorial and the next tutorial on [constraints](constraints.ipynb) how to search through these parameters and set their values.
# In[5]:
b.set_value(qualifier='teff', component='primary', value=6500)
# Next, we need to define our datasets via [b.add_dataset](../api/phoebe.frontend.bundle.Bundle.add_dataset.md). This will be the topic of the following tutorial on [datasets](datasets.ipynb).
# In[6]:
b.add_dataset('lc', compute_times=phoebe.linspace(0,1,101))
# We'll then want to run our forward model to create a synthetic model of the observables defined by these datasets using [b.run_compute](../api/phoebe.frontend.bundle.Bundle.run_compute.md), which will be the topic of the [computing observables](compute.ipynb) tutorial.
# In[7]:
b.run_compute()
# We can access the value of any parameter, including the arrays in the synthetic model just generated. To export arrays to a file, we could call [b.export_arrays](../api/phoebe.parameters.ParameterSet.export_arrays.md)
# In[8]:
print(b.get_value(qualifier='fluxes', context='model'))
# We can then plot the resulting model with [b.plot](../api/phoebe.parameters.ParameterSet.plot.md), which will be covered in the [plotting](plotting.ipynb) tutorial.
# In[9]:
afig, mplfig = b.plot(show=True)
# And then lastly, if we wanted to solve the inverse problem and "fit" parameters to observational data, we may want to add [distributions](distributions.ipynb) to our system so that we can run [estimators, optimizers, or samplers](solver.ipynb).
# ## Default Binary Bundle
# For this tutorial, let's start over and discuss this `b` object in more detail and how to access and change the values of the input parameters.
#
# Everything for our system will be stored in this single Python object that we call the [Bundle](../api/phoebe.frontend.bundle.Bundle.md) which we'll call `b` (short for bundle).
# In[10]:
b = phoebe.default_binary()
# The Bundle is just a collection of [Parameter](../api/phoebe.parameters.Parameter.md) objects along with some callable methods. Here we can see that the default binary Bundle consists of over 100 individual parameters.
# In[11]:
b
# If we want to view or edit a Parameter in the Bundle, we first need to know how to access it. Each Parameter object has a number of tags which can be used to [filter](../api/phoebe.parameters.ParameterSet.filter.md) (similar to a database query). When filtering the Bundle, a [ParameterSet](../api/phoebe.parameters.ParameterSet.md) is returned - this is essentially just a subset of the Parameters in the Bundle and can be further filtered until eventually accessing a single Parameter.
# In[12]:
b.filter(context='compute')
# Here we filtered on the context tag for all Parameters with `context='compute'` (i.e. the options for computing a model). If we want to see all the available options for this tag in the Bundle, we can use the plural form of the tag as a property on the Bundle or any ParameterSet.
# In[13]:
b.contexts
# Although there is no strict hierarchy or order to the tags, it can be helpful to think of the context tag as the top-level tag, and it is often useful to filter by the appropriate context first.
#
# Other tags currently include:
# * kind
# * figure
# * component
# * feature
# * dataset
# * distribution
# * compute
# * model
# * solver
# * solution
# * time
# * qualifier
# Accessing the plural form of the tag as an attribute also works on a filtered ParameterSet
# In[14]:
b.filter(context='compute').components
# This then tells us what can be used to filter further.
# In[15]:
b.filter(context='compute').filter(component='primary')
# The qualifier tag is the shorthand name of the Parameter itself. If you don't know what you're looking for, it is often useful to list all the qualifiers of the Bundle or a given ParameterSet.
# In[16]:
b.filter(context='compute', component='primary').qualifiers
# Now that we know the options for the qualifier within this filter, we can choose to filter on one of those. Let's filter by the 'ntriangles' qualifier.
# In[17]:
b.filter(context='compute', component='primary', qualifier='ntriangles')
# Once we filter far enough to get to a single Parameter, we can use [get_parameter](../api/phoebe.parameters.ParameterSet.get_parameter.md) to return the Parameter object itself (instead of a ParameterSet).
# In[18]:
b.filter(context='compute', component='primary', qualifier='ntriangles').get_parameter()
# As a shortcut, get_parameter also takes filtering keywords. So the above line is also equivalent to the following:
# In[19]:
b.get_parameter(context='compute', component='primary', qualifier='ntriangles')
# Each Parameter object contains several keys that provide information about that Parameter. The keys "description" and "value" are always included, with additional keys available depending on the type of Parameter.
# In[20]:
b.get_parameter(context='compute', component='primary', qualifier='ntriangles').get_value()
# In[21]:
b.get_parameter(context='compute', component='primary', qualifier='ntriangles').get_description()
# We can also see a top-level view of the filtered parameters and descriptions (note: the syntax with @ symbols will be explained further in the section on twigs below).
# In[22]:
print(b.filter(context='compute', component='primary').info)
# Since the Parameter for `ntriangles` is a FloatParameter, it also includes a key for the allowable limits.
# In[23]:
b.get_parameter(context='compute', component='primary', qualifier='ntriangles').get_limits()
# In this case, we're looking at the Parameter called `ntriangles` with the component tag set to 'primary'. This Parameter therefore defines how many triangles should be created when creating the mesh for the star named 'primary'. By default, this is set to 1500 triangles, with allowable values above 100.
#
# If we wanted a finer mesh, we could change the value.
# In[24]:
b.get_parameter(context='compute', component='primary', qualifier='ntriangles').set_value(2000)
# In[25]:
b.get_parameter(context='compute', component='primary', qualifier='ntriangles')
# If we choose the `distortion_method` qualifier from that same ParameterSet, we'll see that it has a few different keys in addition to description and value.
# In[26]:
b.get_parameter(context='compute', component='primary', qualifier='distortion_method')
# In[27]:
b.get_parameter(context='compute', component='primary', qualifier='distortion_method').get_value()
# In[28]:
b.get_parameter(context='compute', component='primary', qualifier='distortion_method').get_description()
# Since the distortion_method Parameter is a [ChoiceParameter](../api/phoebe.parameters.ChoiceParameter.md), it contains a key for the allowable choices.
# In[29]:
b.get_parameter(context='compute', component='primary', qualifier='distortion_method').get_choices()
# We can only set a value if it is contained within this list - if you attempt to set a non-valid value, an error will be raised.
# In[30]:
try:
b.get_parameter(context='compute', component='primary', qualifier='distortion_method').set_value('blah')
except Exception as e:
print(e)
# In[31]:
b.get_parameter(context='compute', component='primary', qualifier='distortion_method').set_value('rotstar')
# In[32]:
b.get_parameter(context='compute', component='primary', qualifier='distortion_method').get_value()
# [Parameter](../api/phoebe.parameters.Parameter.md) types include:
# * [IntParameter](../api/phoebe.parameters.IntParameter.md)
# * [FloatParameter](../api/phoebe.parameters.FloatParameter.md)
# * [FloatArrayParameter](../api/phoebe.parameters.FloatArrayParameter.md)
# * [BoolParameter](../api/phoebe.parameters.BoolParameter.md)
# * [StringParameter](../api/phoebe.parameters.StringParameter.md)
# * [ChoiceParameter](../api/phoebe.parameters.ChoiceParameter.md)
# * [SelectParameter](../api/phoebe.parameters.SelectParameter.md)
# * [DictParameter](../api/phoebe.parameters.DictParameter.md)
# * [ConstraintParameter](../api/phoebe.parameters.ConstraintParameter.md)
# * [DistributionParameter](../api/phoebe.parameters.DistributionParameter.md)
# * [HierarchyParameter](../api/phoebe.parameters.HierarchyParameter.md)
# * [UnitParameter](../api/phoebe.parameters.UnitParameter.md)
# * [JobParameter](../api/phoebe.parameters.JobParameter.md)
#
# these Parameter types and their available options are all described in great detail in [Advanced: Parameter Types](parameters.ipynb)
# ### Twigs
# As a shortcut to needing to filter by all these tags, the Bundle and ParameterSets can be filtered through what we call "twigs" (as in a Bundle of twigs). These are essentially a single string-representation of the tags, separated by `@` symbols.
#
# This is very useful as a shorthand when working in an interactive Python console, but somewhat obfuscates the names of the tags and can make it difficult if you use them in a script and make changes earlier in the script.
#
# For example, the following lines give identical results:
# In[33]:
b.filter(context='compute', component='primary')
# In[34]:
b['primary@compute']
# In[35]:
b['compute@primary']
# However, this dictionary-style twig access will never return a ParameterSet with a single Parameter, instead it will return the Parameter itself. This can be seen in the different output between the following two lines:
# In[36]:
b.filter(context='compute', component='primary', qualifier='distortion_method')
# In[37]:
b['distortion_method@primary@compute']
# Because of this, this dictionary-style twig access can also set the value directly:
# In[38]:
b['distortion_method@primary@compute'] = 'roche'
# In[39]:
print(b['distortion_method@primary@compute'])
# And can even provide direct access to the keys/attributes of the Parameter (value, description, limits, etc)
# In[40]:
print(b['value@distortion_method@primary@compute'])
# In[41]:
print(b['description@distortion_method@primary@compute'])
# As with the tags, you can call .twigs on any ParameterSet to see the "smallest unique twigs" of the contained Parameters
# In[42]:
b['compute'].twigs
# Since the more verbose method without twigs is a bit clearer to read, most of the tutorials will show that syntax, but feel free to use twigs if they make more sense to you.
# Next
# ----------
#
# Next up: let's learn about [constraints](constraints.ipynb).
#
# Or look at any of the following advanced topics:
# * [Advanced: Parameter Types](parameters.ipynb)
# * [Advanced: Parameter Units](units.ipynb)
# * [Advanced: Building a System](building_a_system.ipynb)
# * [Advanced: Contact Binary Hierarchy](contact_binary_hierarchy.ipynb)
# * [Advanced: Saving, Loading, and Exporting](saving_and_loading.ipynb)
| gpl-3.0 |
snurkabill/pydeeplearn | code/lib/ann.py | 2 | 21874 | """ Implementation of a simple ANN. """
__author__ = "Mihaela Rosca"
__contact__ = "mihaela.c.rosca@gmail.com"
import numpy as np
import theano
from theano import tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
import matplotlib.pyplot as plt
theanoFloat = theano.config.floatX
"""In all the above topLayer does not mean the top most layer, but rather the
layer above the current one."""
# TODO: different activation function and try relu
# and fix this
from common import *
from debug import *
DEBUG = False
class MiniBatchTrainer(object):
# TODO: maybe creating the ring here might be better?
def __init__(self, input, nrLayers, initialWeights, initialBiases,
visibleDropout, hiddenDropout):
self.input = input
# Let's initialize the fields
# The weights and biases, make them shared variables
self.weights = []
self.biases = []
nrWeights = nrLayers - 1
for i in xrange(nrWeights):
w = theano.shared(value=np.asarray(initialWeights[i],
dtype=theanoFloat),
name='W')
self.weights.append(w)
b = theano.shared(value=np.asarray(initialBiases[i],
dtype=theanoFloat),
name='b')
self.biases.append(b)
# Set the parameters of the object
# Do not set more than this, these will be used for differentiation in the
# gradient
self.params = self.weights + self.biases
# Required for setting the norm constraint
# Note that only the hidden units have norm constraint
# The last layer (softmax) does not have it
self.hasNormConstraint = [True] * (nrWeights - 1) + [False] * (nrWeights + 1)
# Required for momentum
# The updates that were performed in the last batch
# It is important that the order in which
# we add the oldUpdates is the same as the order in which we add the params
# TODO: add an assertion for this
self.oldUpdates = []
for i in xrange(nrWeights):
oldDw = theano.shared(value=np.zeros(shape=initialWeights[i].shape,
dtype=theanoFloat),
name='oldDw')
self.oldUpdates.append(oldDw)
for i in xrange(nrWeights):
oldDb = theano.shared(value=np.zeros(shape=initialBiases[i].shape,
dtype=theanoFloat),
name='oldDb')
self.oldUpdates.append(oldDb)
# Rmsprop
# The old mean that were performed in the last batch
self.oldMeanSquare = []
for i in xrange(nrWeights):
oldDw = theano.shared(value=np.zeros(shape=initialWeights[i].shape,
dtype=theanoFloat),
name='oldDw')
self.oldMeanSquare.append(oldDw)
for i in xrange(nrWeights):
oldDb = theano.shared(value=np.zeros(shape=initialBiases[i].shape,
dtype=theanoFloat),
name='oldDb')
self.oldMeanSquare.append(oldDb)
# Create a theano random number generator
# Required to sample units for dropout
# If it is not shared, does it update when we go to another function call?
self.theano_rng = RandomStreams(seed=np.random.randint(1, 1000))
# Sample from the visible layer
# Get the mask that is used for the visible units
dropout_mask = self.theano_rng.binomial(n=1, p=visibleDropout,
size=self.input.shape,
dtype=theanoFloat)
currentLayerValues = self.input * dropout_mask
for stage in xrange(nrWeights -1):
w = self.weights[stage]
b = self.biases[stage]
linearSum = T.dot(currentLayerValues, w) + b
# TODO: make this a function that you pass around
# it is important to make the classification activation functions outside
# Also check the Stanford paper again to see what they did to average out
# the results with softmax and regression layers?
# Use hiddenDropout: give the next layer only some of the units
# from this layer
dropout_mask = self.theano_rng.binomial(n=1, p=hiddenDropout,
size=linearSum.shape,
dtype=theanoFloat)
currentLayerValues = dropout_mask * T.nnet.sigmoid(linearSum)
# Last layer operations
w = self.weights[nrWeights - 1]
b = self.biases[nrWeights - 1]
linearSum = T.dot(currentLayerValues, w) + b
# Do not use theano's softmax, it is numerically unstable
# and it causes Nans to appear
# Note that semantically this is the same
e_x = T.exp(linearSum - linearSum.max(axis=1, keepdims=True))
currentLayerValues = e_x / e_x.sum(axis=1, keepdims=True)
self.output = currentLayerValues
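# The training cost is the categorical cross-entropy between the network output and
# the one-hot target labels.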
def cost(self, y):
return T.nnet.categorical_crossentropy(self.output, y)
""" Class that implements an artificial neural network."""
class ANN(object):
"""
Arguments:
nrLayers: the number of layers of the network. In case of discriminative
training, also contains the classification layer
(the last softmax layer)
type: integer
layerSizes: the sizes of the individual layers.
type: list of integers of size nrLayers
"""
def __init__(self, nrLayers, layerSizes,
supervisedLearningRate=0.05,
nesterovMomentum=True,
rmsprop=True,
miniBatchSize=10,
hiddenDropout=0.5,
visibleDropout=0.8,
normConstraint=None):
self.nrLayers = nrLayers
self.layerSizes = layerSizes
assert len(layerSizes) == nrLayers
self.hiddenDropout = hiddenDropout
self.visibleDropout = visibleDropout
self.miniBatchSize = miniBatchSize
self.supervisedLearningRate = supervisedLearningRate
self.nesterovMomentum = nesterovMomentum
self.rmsprop = rmsprop
self.normConstraint = normConstraint
def initialize(self, data):
self.weights = [None] * (self.nrLayers - 1)
self.biases = [None] * (self.nrLayers - 1)
for i in xrange(self.nrLayers - 2):
self.weights[i] = np.asarray(np.random.normal(0, 0.01,
(self.layerSizes[i], self.layerSizes[i+1])),
dtype=theanoFloat)
self.biases[i] = np.zeros(shape=(self.layerSizes[i+1]),
dtype=theanoFloat)
lastLayerWeights = np.zeros(shape=(self.layerSizes[-2], self.layerSizes[-1]),
dtype=theanoFloat)
lastLayerBiases = np.zeros(shape=(self.layerSizes[-1]),
dtype=theanoFloat)
self.weights[-1] = lastLayerWeights
self.biases[-1] = lastLayerBiases
assert len(self.weights) == self.nrLayers - 1
assert len(self.biases) == self.nrLayers - 1
"""
Choose a percentage (percentValidation) of the data given to be
validation data, used for early stopping of the model.
"""
def train(self, data, labels, maxEpochs, validation=True, percentValidation=0.1):
if validation:
nrInstances = len(data)
validationIndices = np.random.choice(nrInstances,
int(percentValidation * nrInstances))
trainingIndices = list(set(xrange(nrInstances)) - set(validationIndices))
trainingData = data[trainingIndices, :]
trainingLabels = labels[trainingIndices, :]
validationData = data[validationIndices, :]
validationLabels = labels[validationIndices, :]
self.trainWithGivenValidationSet(trainingData, trainingLabels,
validationData, validationLabels, maxEpochs)
else:
trainingData = data
trainingLabels = labels
self.trainNoValidation(trainingData, trainingLabels, maxEpochs)
def trainWithGivenValidationSet(self, data, labels,
validationData,
validationLabels,
maxEpochs):
sharedData = theano.shared(np.asarray(data, dtype=theanoFloat))
sharedLabels = theano.shared(np.asarray(labels, dtype=theanoFloat))
self.initialize(data)
self.nrMiniBatches = len(data) / self.miniBatchSize
sharedValidationData = theano.shared(np.asarray(validationData, dtype=theanoFloat))
sharedValidationLabels = theano.shared(np.asarray(validationLabels, dtype=theanoFloat))
# Does backprop for the data and at the end sets the weights
self.fineTune(sharedData, sharedLabels, True,
sharedValidationData, sharedValidationLabels, maxEpochs)
# Get the classification weights
self.classifcationWeights = map(lambda x: x * self.hiddenDropout, self.weights)
self.classifcationBiases = self.biases
def trainNoValidation(self, data, labels, maxEpochs):
sharedData = theano.shared(np.asarray(data, dtype=theanoFloat))
sharedLabels = theano.shared(np.asarray(labels, dtype=theanoFloat))
self.initialize(data)
self.nrMiniBatches = len(data) / self.miniBatchSize
# Does backprop for the data and at the end sets the weights
self.fineTune(sharedData, sharedLabels, False, None, None, maxEpochs)
# Get the classification weights
self.classifcationWeights = map(lambda x: x * self.hiddenDropout, self.weights)
self.classifcationBiases = self.biases
"""Fine tunes the weigths and biases using backpropagation.
data and labels are shared
Arguments:
data: The data used for training and fine tuning
data has to be a theano variable for it to work in the current version
labels: A numpy nd array. Each label should be transformed into a binary
base vector before being passed into this function.
miniBatch: The number of instances to be used in a miniBatch
epochs: The number of epochs to use for fine tuning
"""
def fineTune(self, data, labels, validation, validationData, validationLabels,
maxEpochs):
print "supervisedLearningRate"
print self.supervisedLearningRate
batchLearningRate = self.supervisedLearningRate / self.miniBatchSize
batchLearningRate = np.float32(batchLearningRate)
# Let's build the symbolic graph which takes the data trough the network
# allocate symbolic variables for the data
# index of a mini-batch
miniBatchIndex = T.lscalar()
momentum = T.fscalar()
# The mini-batch data is a matrix
x = T.matrix('x', dtype=theanoFloat)
# labels[start:end] this needs to be a matrix because we output probabilities
y = T.matrix('y', dtype=theanoFloat)
batchTrainer = MiniBatchTrainer(input=x, nrLayers=self.nrLayers,
initialWeights=self.weights,
initialBiases=self.biases,
visibleDropout=self.visibleDropout,
hiddenDropout=self.hiddenDropout)
# the error is the sum of the errors in the individual cases
error = T.sum(batchTrainer.cost(y))
if DEBUG:
mode = theano.compile.MonitorMode(post_func=detect_nan).excluding(
'local_elemwise_fusion', 'inplace')
else:
mode = None
if self.nesterovMomentum:
preDeltaUpdates, updates = self.buildUpdatesNesterov(batchTrainer, momentum,
batchLearningRate, error)
momentum_step = theano.function(
inputs=[momentum],
outputs=[],
updates=preDeltaUpdates,
mode = mode)
update_params = theano.function(
inputs =[miniBatchIndex, momentum],
outputs=error,
updates=updates,
givens={
x: data[miniBatchIndex * self.miniBatchSize:(miniBatchIndex + 1) * self.miniBatchSize],
y: labels[miniBatchIndex * self.miniBatchSize:(miniBatchIndex + 1) * self.miniBatchSize]},
mode=mode)
def trainModel(miniBatchIndex, momentum):
momentum_step(momentum)
return update_params(miniBatchIndex, momentum)
else:
updates = self.buildUpdatesSimpleMomentum(batchTrainer, momentum,
batchLearningRate, error)
trainModel = theano.function(
inputs=[miniBatchIndex, momentum],
outputs=error,
updates=updates,
givens={
x: data[miniBatchIndex * self.miniBatchSize:(miniBatchIndex + 1) * self.miniBatchSize],
y: labels[miniBatchIndex * self.miniBatchSize:(miniBatchIndex + 1) * self.miniBatchSize]})
theano.printing.pydotprint(trainModel)
if validation:
# Let's create the function that validates the model!
validateModel = theano.function(inputs=[],
outputs=batchTrainer.cost(y),
givens={x: validationData,
y: validationLabels})
self.trainLoopWithValidation(trainModel, validateModel, maxEpochs)
else:
if validationData is not None or validationLabels is not None:
raise Exception(("You provided validation data but requested a train method "
"that does not need validation"))
self.trainLoopModelFixedEpochs(batchTrainer, trainModel, maxEpochs)
# Set up the weights in the dbn object
for i in xrange(len(self.weights)):
self.weights[i] = batchTrainer.weights[i].get_value()
print self.weights
for i in xrange(len(self.biases)):
self.biases[i] = batchTrainer.biases[i].get_value()
print self.biases
def trainLoopModelFixedEpochs(self, batchTrainer, trainModel, maxEpochs):
for epoch in xrange(maxEpochs):
print "epoch " + str(epoch)
momentum = np.float32(min(np.float32(0.5) + epoch * np.float32(0.01),
np.float32(0.99)))
for batchNr in xrange(self.nrMiniBatches):
trainModel(batchNr, momentum)
for i in xrange(self.nrLayers - 2):
assert self.normConstraint is None or np.all(np.linalg.norm(batchTrainer.weights[i].get_value(), axis=0) <= self.normConstraint + 1e-8)
print "number of epochs"
print epoch
def trainLoopWithValidation(self, trainModel, validateModel, maxEpochs):
lastValidationError = np.inf
count = 0
epoch = 0
validationErrors = []
while epoch < maxEpochs and count < 8:
print "epoch " + str(epoch)
momentum = np.float32(min(np.float32(0.5) + epoch * np.float32(0.01),
np.float32(0.99)))
for batchNr in xrange(self.nrMiniBatches):
trainModel(batchNr, momentum)
meanValidation = np.mean(validateModel(), axis=0)
validationErrors += [meanValidation]
if meanValidation > lastValidationError:
count +=1
else:
count = 0
lastValidationError = meanValidation
epoch +=1
try:
plt.plot(validationErrors)
plt.show()
except Exception as e:
print "validation error plot not made"
print "number of epochs"
print epoch
# A very greedy approach to training
# Probably not the best idea but worth trying
# A milder version would be to actually take 3 consecutive ones
# that give the best average (to ensure you are not in a lucky place)
# and take the best of them
def trainModelGetBestWeights(self, batchTrainer, trainModel, validateModel, maxEpochs):
bestValidationError = np.inf
validationErrors = []
bestWeights = None
bestBiases = None
for epoch in xrange(maxEpochs):
print "epoch " + str(epoch)
momentum = np.float32(min(np.float32(0.5) + epoch * np.float32(0.01),
np.float32(0.99)))
for batchNr in xrange(self.nrMiniBatches):
trainModel(batchNr, momentum)
meanValidation = np.mean(validateModel(), axis=0)
validationErrors += [meanValidation]
if meanValidation < bestValidationError:
bestValidationError = meanValidation
# Save the weights which are the best ones
bestWeights = batchTrainer.weights
bestBiases = batchTrainer.biases
# If we have improved at all during training
if bestWeights is not None and bestBiases is not None:
batchTrainer.weights = bestWeights
batchTrainer.biases = bestBiases
try:
plt.plot(validationErrors)
plt.show()
except Exception as e:
print "validation error plot not made"
print "number of epochs"
print epoch
def trainModelPatience(self, trainModel, validateModel, maxEpochs):
bestValidationError = np.inf
epoch = 0
doneTraining = False
improvmentTreshold = 0.995
patience = 10 # do at least 10 passes through the data no matter what
while (epoch < maxEpochs) and not doneTraining:
# Train the net with all data
print "epoch " + str(epoch)
momentum = np.float32(min(np.float32(0.5) + epoch * np.float32(0.01),
np.float32(0.99)))
for batchNr in xrange(self.nrMiniBatches):
trainModel(batchNr, momentum)
meanValidation = np.mean(validateModel())
print 'meanValidation'
print meanValidation
if meanValidation < bestValidationError:
# If we have improved well enough, then increase the patience
if meanValidation < bestValidationError * improvmentTreshold:
print "increasing patience"
patience = max(patience, epoch * 2)
bestValidationError = meanValidation
if patience <= epoch:
doneTraining = True
epoch += 1
print "number of epochs"
print epoch
def buildUpdatesNesterov(self, batchTrainer, momentum,
batchLearningRate, error):
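# Nesterov-style momentum: the caller applies the momentum step (preDeltaUpdates)
# first, so the gradient used in `updates` is evaluated at the momentum-shifted
# parameters; RMSprop optionally rescales the gradient step.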
preDeltaUpdates = []
for param, oldUpdate in zip(batchTrainer.params, batchTrainer.oldUpdates):
preDeltaUpdates.append((param, param + momentum * oldUpdate))
# specify how to update the parameters of the model as a list of
# (variable, update expression) pairs
deltaParams = T.grad(error, batchTrainer.params)
updates = []
parametersTuples = zip(batchTrainer.params,
deltaParams,
batchTrainer.oldUpdates,
batchTrainer.oldMeanSquare,
batchTrainer.hasNormConstraint)
for param, delta, oldUpdate, oldMeanSquare, hasNormConstraint in parametersTuples:
if self.rmsprop:
meanSquare = 0.9 * oldMeanSquare + 0.1 * delta ** 2
paramUpdate = - batchLearningRate * delta / T.sqrt(meanSquare + 1e-8)
updates.append((oldMeanSquare, meanSquare))
else:
paramUpdate = - batchLearningRate * delta
newParam = param + paramUpdate
if self.normConstraint is not None and hasNormConstraint:
norms = SquaredElementWiseNorm(newParam)
rescaled = norms > self.normConstraint
factors = T.ones(norms.shape, dtype=theanoFloat) / T.sqrt(norms) * np.sqrt(self.normConstraint, dtype='float32') - 1.0
replaceNewParam = (factors * rescaled) * newParam
replaceNewParam += newParam
newParam = replaceNewParam
# paramUpdate = newParam - param
updates.append((param, newParam))
updates.append((oldUpdate, momentum * oldUpdate + paramUpdate))
return preDeltaUpdates, updates
def buildUpdatesSimpleMomentum(self, batchTrainer, momentum,
batchLearningRate, error):
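# Classical momentum: the new update is momentum * previous update plus the
# (optionally RMSprop-rescaled) gradient step, applied in a single parameter update.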
deltaParams = T.grad(error, batchTrainer.params)
updates = []
parametersTuples = zip(batchTrainer.params,
deltaParams,
batchTrainer.oldUpdates,
batchTrainer.oldMeanSquare,
batchTrainer.hasNormConstraint)
for param, delta, oldUpdate, oldMeanSquare, hasNormConstraint in parametersTuples:
paramUpdate = momentum * oldUpdate
if self.rmsprop:
meanSquare = 0.9 * oldMeanSquare + 0.1 * delta ** 2
paramUpdate += - batchLearningRate * delta / T.sqrt(meanSquare + 1e-8)
updates.append((oldMeanSquare, meanSquare))
else:
paramUpdate += - batchLearningRate * delta
newParam = param + paramUpdate
if self.normConstraint is not None and hasNormConstraint:
norms = SquaredElementWiseNorm(newParam)
rescaled = norms > self.normConstraint
factors = T.ones(norms.shape, dtype=theanoFloat) / T.sqrt(norms) * np.sqrt(self.normConstraint, dtype='float32') - 1.0
replaceNewParam = (factors * rescaled) * newParam
replaceNewParam += newParam
newParam = replaceNewParam
# paramUpdate = newParam - param
updates.append((param, newParam))
updates.append((oldUpdate, paramUpdate))
return updates
def classify(self, dataInstaces):
dataInstacesConverted = np.asarray(dataInstaces, dtype=theanoFloat)
x = T.matrix('x', dtype=theanoFloat)
# Use the classification weights because now we have hiddenDropout
# Ensure that you have no hiddenDropout in classification
# TODO: are the variables still shared? or can we make a new one?
batchTrainer = MiniBatchTrainer(input=x, nrLayers=self.nrLayers,
initialWeights=self.classifcationWeights,
initialBiases=self.classifcationBiases,
visibleDropout=1,
hiddenDropout=1)
classify = theano.function(
inputs=[],
outputs=batchTrainer.output,
updates={},
givens={x: dataInstacesConverted})
lastLayers = classify()
return lastLayers, np.argmax(lastLayers, axis=1)
# Element wise norm of the columns of a matrix
def SquaredElementWiseNorm(x):
return T.sum(T.sqr(x), axis=0)
| bsd-3-clause |
soulmachine/scikit-learn | examples/cluster/plot_dbscan.py | 346 | 2479 | # -*- coding: utf-8 -*-
"""
===================================
Demo of DBSCAN clustering algorithm
===================================
Finds core samples of high density and expands clusters from them.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
from sklearn.preprocessing import StandardScaler
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=750, centers=centers, cluster_std=0.4,
random_state=0)
X = StandardScaler().fit_transform(X)
##############################################################################
# Compute DBSCAN
db = DBSCAN(eps=0.3, min_samples=10).fit(X)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
print('Estimated number of clusters: %d' % n_clusters_)
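# Optional addition (not in the original example): also report how many points
# DBSCAN labelled as noise (cluster label -1).
n_noise_ = list(labels).count(-1)
print('Estimated number of noise points: %d' % n_noise_)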
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels))
##############################################################################
# Plot result
import matplotlib.pyplot as plt
# Black removed and is used for noise instead.
unique_labels = set(labels)
colors = plt.cm.Spectral(np.linspace(0, 1, len(unique_labels)))
for k, col in zip(unique_labels, colors):
if k == -1:
# Black used for noise.
col = 'k'
class_member_mask = (labels == k)
xy = X[class_member_mask & core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
xy = X[class_member_mask & ~core_samples_mask]
plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
mmottahedi/neuralnilm_prototype | scripts/e351.py | 2 | 6885 | from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter
from lasagne.nonlinearities import sigmoid, rectify, tanh
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 5000
GRADIENT_STEPS = 100
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[5] * 5,
max_input_power=5900,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=512,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
one_target_per_seq=False,
n_seq_per_batch=16,
# subsample_target=2,
include_diff=False,
clip_appliance_power=True,
target_is_prediction=False,
# independently_center_inputs = True,
standardise_input=True,
unit_variance_targets=True,
# input_padding=8,
lag=0,
classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
N = 50
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: mse(x, t).mean(),
loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
updates_func=momentum,
learning_rate=1e-4,
learning_rate_changes_by_iteration={
# 200: 1e-2,
# 400: 1e-3,
# 800: 1e-4
# 500: 1e-3
# 4000: 1e-03,
# 6000: 5e-06,
# 7000: 1e-06
# 2000: 5e-06
# 3000: 1e-05
# 7000: 5e-06,
# 10000: 1e-06,
# 15000: 5e-07,
# 50000: 1e-07
},
do_save_activations=True,
auto_reshape=False,
plotter=CentralOutputPlotter
# plotter=MDNPlotter
)
"""
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
||||||||||
12345678901234567890
"""
def exp_a(name):
    global source
    # Build the data source here so that `source` is defined before use below.
    source_dict_copy = deepcopy(source_dict)
    source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
N = 512
output_shape = source.output_shape_after_processing()
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'same'
},
{
'type': FeaturePoolLayer,
'ds': 4, # number of feature maps to be pooled together
'axis': 2, # pool over the time axis
'pool_function': T.max
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': rectify,
'border_mode': 'same'
},
{
'type': FeaturePoolLayer,
'ds': 4, # number of feature maps to be pooled together
'axis': 2, # pool over the time axis
'pool_function': T.max
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': N,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': N // 2,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': N // 4,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': output_shape[1] * output_shape[2],
'nonlinearity': sigmoid
}
]
net = Net(**net_dict_copy)
return net
def main():
# EXPERIMENTS = list('abcdefghijklmnopqrstuvwxyz')
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
raise
finally:
logging.shutdown()
if __name__ == "__main__":
main()
| mit |
molpopgen/fwdpy11 | examples/discrete_demography/localadaptation.py | 1 | 7832 | #
# Copyright (C) 2019 Kevin Thornton <krthornt@uci.edu>
#
# This file is part of fwdpy11.
#
# fwdpy11 is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# fwdpy11 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with fwdpy11. If not, see <http://www.gnu.org/licenses/>.
#
"""
Local adaptation of a quantitative trait to differing optima.
"""
import argparse
import math
import sys
from collections import namedtuple
import numpy as np
import pandas as pd
import fwdpy11
# Simulations with tree sequence recording need
# to know the max position in a genome. Here,
# we use a length of 1.0. Thus, all mutation
# and recombination events will be uniform
# random variables on the continuous interval
# [0, GENOME_LENGTH).
GENOME_LENGTH = 1.0
# When recording quant-genetic statistics during a simulation,
# we will use this type. Named tuples are extremely efficient,
# and they are easily converted into Pandas DataFrame objects,
# which is very convenient for analysis and output.
Datum = namedtuple("Data", ["generation", "deme", "gbar", "vg", "wbar"])
def make_parser():
"""
Create a command-line interface to the script.
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
required = parser.add_argument_group("Required arguments")
required.add_argument("--popsize", "-N", type=int, help="Diploid population size")
required.add_argument(
"--mu", "-m", type=float, help="Mutation rate (per gamete, per generation)"
)
required.add_argument(
"--sigma",
"-s",
type=float,
help="Standard deviation of Gaussian" "distribution of mutational effects",
)
optional = parser.add_argument_group("Optional arguments")
optional.add_argument(
"--rho", type=float, default=1000.0, help="Scaled recombination rate, rho=4Nr"
)
optional.add_argument(
"--VS",
type=float,
default=10.0,
help="Inverse strength of stabilizing selection",
)
optional.add_argument(
"--opt", type=float, default=1.0, help="Value of new phenotypic optimum"
)
optional.add_argument(
"--migrates",
type=float,
nargs=2,
default=None,
help="Migration rates from 0 to 1 and 1 to 0, respectively.",
)
optional.add_argument(
"--time",
type=float,
default=0.1,
help="Amount of time to simulate past" "optimum shift, in units of N",
)
optional.add_argument(
"--plotfile", type=str, default=None, help="File name for plot"
)
optional.add_argument("--seed", type=int, default=42, help="Random number seed.")
return parser
def validate_arguments(args):
"""
Validate input arguments.
Note: this is likely incomplete.
"""
if args.popsize is None:
raise ValueError("popsize cannot be None")
    if args.mu is None:
        raise ValueError("mu cannot be None")
    if args.mu < 0 or math.isfinite(args.mu) is False:
        raise ValueError("Mutation rate must be non-negative and finite")
if args.sigma is None:
raise ValueError("sigma cannot be none")
if args.sigma < 0 or math.isfinite(args.sigma) is False:
raise ValueError(
"Std. dev. of distribution of effect sizes"
"must be non-negative and finite"
)
if args.migrates is not None:
for m in args.migrates:
if m < 0 or m > 1:
raise ValueError("migration rates must be 0 <= m <= 1")
def make_migmatrix(migrates):
if migrates is None:
return None
mm = np.zeros(4).reshape(2, 2)
mm[0, 1] = migrates[1]
mm[1, 0] = migrates[0]
rs = np.sum(mm, axis=1)
np.fill_diagonal(mm, 1.0 - rs)
return fwdpy11.MigrationMatrix(mm)
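# Illustrative only: with migrates=[0.01, 0.02] (rates 0->1 and 1->0), the
# array built above is the row-stochastic matrix
#   [[0.98, 0.02],
#    [0.01, 0.99]]
# which is then wrapped in fwdpy11.MigrationMatrix.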
class Recorder(object):
"""
fwdpy11 allows you to define objects that record data
from populations during simulation. Such objects must
be callable, and the easiest way to do things is to
create a class with a __call__ function.
"""
def __init__(self, start):
self.data = []
self.start = start
def __call__(self, pop, recorder):
if pop.generation >= self.start:
# Record mean trait value each generation.
md = np.array(pop.diploid_metadata, copy=False)
demes = np.unique(md["deme"])
for d in demes:
w = np.where(md["deme"] == d)[0]
gbar = md["g"][w].mean()
vg = md["g"][w].var()
wbar = md["w"][w].mean()
self.data.append(Datum(pop.generation, d, gbar, vg, wbar))
def plot_output(data, filename):
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
fig = plt.figure(figsize=(9, 3))
gs = gridspec.GridSpec(ncols=3, nrows=1, figure=fig)
ax_gbar = fig.add_subplot(gs[0, 0])
ax_vg = fig.add_subplot(gs[0, 1])
ax_wbar = fig.add_subplot(gs[0, 2])
df = pd.DataFrame(data, columns=Datum._fields)
g = df.groupby(["deme"])
for n, gi in g:
ax_gbar.plot(gi["generation"], gi["gbar"], label="Deme {}".format(n))
ax_vg.plot(gi["generation"], gi["vg"], label="Deme {}".format(n))
ax_wbar.plot(gi["generation"], gi["wbar"], label="Deme {}".format(n))
for ax in [ax_gbar, ax_vg, ax_wbar]:
ax.set_xlabel("Generation")
ax_gbar.set_ylabel(r"$\bar{g}$")
ax_vg.set_ylabel(r"$V(G)$")
ax_wbar.set_ylabel(r"$\bar{w}$")
ax_gbar.legend()
plt.tight_layout()
plt.savefig(filename)
def runsim(args):
"""
Run the simulation.
"""
pop = fwdpy11.DiploidPopulation(2 * args.popsize, GENOME_LENGTH)
np.random.seed(args.seed)
rng = fwdpy11.GSLrng(args.seed)
GSSmo0 = fwdpy11.GSSmo(
[
fwdpy11.Optimum(when=0, optimum=0.0, VS=args.VS),
fwdpy11.Optimum(when=10 * args.popsize, optimum=args.opt, VS=args.VS),
]
)
GSSmo1 = fwdpy11.GSSmo(
[
fwdpy11.Optimum(when=0, optimum=0.0, VS=args.VS),
fwdpy11.Optimum(
when=10 * args.popsize, optimum=-1.0 * args.opt, VS=args.VS
),
]
)
mm = make_migmatrix(args.migrates)
dd = fwdpy11.DiscreteDemography(
mass_migrations=[fwdpy11.move_individuals(0, 0, 1, 0.5)], migmatrix=mm
)
p = {
"nregions": [], # No neutral mutations -- add them later!
"gvalue": [fwdpy11.Additive(2.0, GSSmo0), fwdpy11.Additive(2.0, GSSmo1)],
"sregions": [fwdpy11.GaussianS(0, GENOME_LENGTH, 1, args.sigma)],
"recregions": [fwdpy11.Region(0, GENOME_LENGTH, 1)],
"rates": (0.0, args.mu, args.rho / float(4 * args.popsize)),
# Keep mutations at frequency 1 in the pop if they affect fitness.
"prune_selected": False,
"demography": dd,
"simlen": 10 * args.popsize + int(args.popsize * args.time),
}
params = fwdpy11.ModelParams(**p)
r = Recorder(10 * args.popsize)
fwdpy11.evolvets(rng, pop, params, 100, r, suppress_table_indexing=True)
if args.plotfile is not None:
plot_output(r.data, args.plotfile)
if __name__ == "__main__":
parser = make_parser()
args = parser.parse_args(sys.argv[1:])
validate_arguments(args)
runsim(args)
| gpl-3.0 |
OPU-Surveillance-System/monitoring | master/scripts/planner/solvers/test_penalization_plot.py | 1 | 1040 | import matplotlib.pyplot as plt
with open("test_pen", "r") as f:
data = f.read()
data = data.split("\n")[:-1]
data = [data[i].split(" ") for i in range(0, len(data))]
pen = [float(data[i][0]) for i in range(len(data))]
u = [float(data[i][1]) for i in range(len(data))]
d = [float(data[i][2]) for i in range(len(data))]
gain = [((d[i-1] - d[i])) / (u[i] - u[i - 1]) for i in range(1, len(data))]
gain = [gain[0]] + gain
print(u, d, gain)
fig, ax1 = plt.subplots()
pu, = ax1.plot(pen, u, color="r", label="Uncertainty rate")
ax1.scatter(pen, u, color="k")
#ax1.axhline(9000, color="r", linestyle="--")
#ax1.set_title("Cost evolution according to the number of iterations")
ax1.set_xlabel("Penalization coefficient")
ax1.set_ylabel("Uncertainty rate")
ax2 = ax1.twinx()
pd, = ax2.plot(pen, d, color="b", linestyle="--", label="Distance")
ax2.scatter(pen, d, color="k")
ax2.set_ylabel("Distance")
#ax2.axhline(0.99, color="b", linestyle="--")
#plt.axvline(4000000, color="k",linestyle = ":")
plt.legend(handles=[pu, pd], loc=7)
plt.show()
| mit |
NDManh/numbbo | code-postprocessing/bbob_pproc/pptex.py | 4 | 14442 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Routines for writing TeX for tables."""
from __future__ import absolute_import
import os
import sys
import string
import numpy
from . import toolsstats
from pdb import set_trace
#GLOBAL VARIABLES DEFINITION
alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
#conversion of matplotlib elements to LaTeX
latex_marker_map = {'o': r'$\circ$',
'd': r'$\diamondsuit$',
's': r'$\Box$',
'v': r'$\triangledown$',
'*': r'$\star$',
'h': r'$\varhexagon$', # need \usepackage{wasysymb}
'^': r'$\triangle$',
'p': r'$\pentagon$', # need \usepackage{wasysymb}
'H': r'$\hexagon$', # need \usepackage{wasysymb}
'<': r'$\triangleleft$',
'D': r'$\Diamond$',
'>': r'$\triangleright$',
'1': r'$\downY$', # need \usepackage{MnSymbol}
'2': r'$\upY$', # need \usepackage{MnSymbol}
'3': r'$\rightY$', # need \usepackage{MnSymbol}
'4': r'$\leftY$'} # need \usepackage{MnSymbol}
html_marker_map = {
'o': r'○',
'd': r'♢',
's': r'◻',
'v': r'▽',
'*': r'☆',
'h': r'varhexagon',
'^': r'△',
'p': r'pentagon',
'H': r'hexagon',
'<': r'◁',
'D': r'◇',
'>': r'▷',
'1': r'downY',
'2': r'upY',
'3': r'rightY',
'4': r'leftY'}
latex_color_map_old = {
'g': 'green!45!black',
'r': 'red',
'c': 'cyan',
'm': 'magenta',
'y': 'yellow',
'k': 'black',
'b': 'blue'}
latex_color_map = {
'c': 'cyan',
'm': 'magenta',
'y': 'yellow',
'b': 'blue',
'g': 'green',
'#000080': 'NavyBlue',
'r': 'red',
'#ffd700': 'Goldenrod',
'#d02090': 'VioletRed',
'k': 'Black',
'#6495ed': 'CornflowerBlue',
'#ff4500': 'OrangeRed',
'#ffff00': 'Yellow',
'#ff00ff': 'Magenta',
'#bebebe': 'Gray',
'#87ceeb': 'SkyBlue',
'#ffa500': 'Orange',
'#ffc0cb': 'Lavender',
'#4169e1': 'RoyalBlue',
'#228b22': 'ForestGreen',
'#32cd32': 'LimeGreen',
'#9acd32': 'YellowGreen',
'#adff2f': 'GreenYellow'}
#CLASS DEFINITION
class Error(Exception):
""" Base class for errors. """
pass
class WrongInputSizeError(Error):
"""Error if an array has the wrong size for the following operation.
:returns: message containing the size of the array and the required
size.
"""
def __init__(self,arrName, arrSize, reqSize):
self.arrName = arrName
self.arrSize = arrSize
self.reqSize = reqSize
def __str__(self):
message = 'The size of %s is %s. One dimension must be of length %s!' %\
(self.arrName,str(self.arrSize), str(self.reqSize))
return repr(message)
#TOP LEVEL METHODS
def color_to_latex(color):
try:
res = '\color{%s}' % latex_color_map[color]
except KeyError, err:
try:
float(color)
res = '\color[gray]{%s}' % color
except ValueError:
raise err
return res
def marker_to_latex(marker):
return latex_marker_map[marker]
def marker_to_html(marker):
return html_marker_map[marker]
def numtotext(n):
"""Returns a text from a positive integer.
Is to be used for generating command names: they cannot include number
characters.
WARNING: n should not be larger than (53*52)-1 = 2755 for the moment
"""
if n < 52:
str = alphabet[n]
elif n < 53*52:
str = alphabet[(n-52)//52] + alphabet[n-n//52*52]
else:
raise Exception('Cannot handle a number of algorithms that large.')
return str
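# A few sample values of the mapping above (derived directly from `alphabet`):
# numtotext(0) == 'A', numtotext(25) == 'Z', numtotext(51) == 'z',
# numtotext(52) == 'AA'.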
def writeLabels(label):
"""Format text to be output by LaTeX."""
return label.replace('_', r'\_')
def writeFEvals(fevals, precision='.2'):
"""Returns string representation of a number of function evaluations."""
if numpy.isinf(fevals):
return r'$\infty$'
tmp = (('%' + precision + 'g') % fevals)
res = tmp.split('e')
if len(res) > 1:
res[1] = '%d' % int(res[1])
res = '%s' % 'e'.join(res)
pr2 = str(float(precision) + .2)
#res2 = (('%' + pr2 + 'g') % fevals)
res2 = (('%' + pr2 + 'g') % float(tmp))
# To have the same number of significant digits.
if len(res) >= len(res2):
res = res2
else:
res = res[0]
return res
def writeFEvals2(fevals, precision=2, maxdigits=None, isscientific=False):
"""Returns string representation of a number of function evaluations.
This method is supposed to be used for filling up a LaTeX tabular.
To address the eventual need to keep their string representation
short, the method here proposes the shortest representation between
the full representation and a modified scientific representation.
:param float fevals:
:param int precision: number of significant digits
:param int maxdigits:
:param bool isscientific:
Examples:
====== ========= =====================
Number Precision Output Representation
====== ========= =====================
102345 2 digits 1.0e5
====== ========= =====================
"""
#Printf:
# %[flags][width][.precision][length]specifier
assert not numpy.isnan(fevals)
if numpy.isinf(fevals):
return r'$\infty$'
if maxdigits is None:
precision = int(precision)
#repr1 is the alternative scientific notation
#repr2 is the full notation but with a number of significant digits given
#by the variable precision.
res = (('%.' + str(precision-1) + 'e') % fevals)
repr1 = res
tmp = repr1.split('e')
tmp[1] = '%d' % int(tmp[1]) # Drop the eventual plus sign and trailing zero
repr1 = 'e'.join(tmp)
repr2 = (('%.' + str(precision+1) + 'f') % float(res)).rstrip('0').rstrip('.')
#set_trace()
if len(repr1) > len(repr2) and not isscientific:
return repr2
return repr1
else:
# takes precedence, in this case we expect a positive integer
if not isinstance(fevals, int):
return '%d' % fevals
repr2 = '%.0f' % fevals
if len(repr2) > maxdigits:
precision = maxdigits - 4
# 1) one symbol for the most significant digit
# 2) one for the dot, 3) one for the e, 4) one for the exponent
if numpy.log10(fevals) > 10:
precision -= 1
if precision < 0:
precision = 0
repr1 = (('%.' + str(precision) + 'e') % fevals).split('e')
repr1[1] = '%d' % int(repr1[1]) # drop the sign and trailing zero
repr1 = 'e'.join(repr1)
return repr1
return repr2
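# Illustrative calls (values follow from the branches above; not original code):
#   writeFEvals2(102345, precision=2) -> '1.0e5'  (scientific form is shorter)
#   writeFEvals2(5, precision=2)      -> '5'      (plain form wins here)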
def writeFEvalsMaxSymbols(fevals, maxsymbols, isscientific=False):
"""Return the smallest string representation of a number.
This method is only concerned with the maximum number of significant
digits.
Two alternatives:
1) modified scientific notation (without the trailing + and zero in
the exponent)
2) float notation
:returns: string representation of a number of function evaluations
or ERT.
"""
#Compared to writeFEvals2?
#Printf:
# %[flags][width][.precision][length]specifier
assert not numpy.isnan(fevals)
if numpy.isinf(fevals):
return r'$\infty$'
#repr1 is the alternative scientific notation
#repr2 is the full notation but with a number of significant digits given
#by the variable precision.
# modified scientific notation:
#smallest representation of the decimal part
#drop + and starting zeros of the exponent part
repr1 = (('%.' + str(maxsymbols) + 'e') % fevals)
size1 = len(repr1)
tmp = repr1.split('e', 1)
tmp2 = tmp[-1].lstrip('+-0')
if float(tmp[-1]) < 0:
tmp2 = '-' + tmp2
tmp[-1] = tmp2
remainingsymbols = max(maxsymbols - len(tmp2) - 2, 0)
tmp[0] = (('%.' + str(remainingsymbols) + 'f') % float(tmp[0]))
repr1 = 'e'.join(tmp)
#len(repr1) <= maxsymbols is not always the case but should be most usual
tmp = '%.0f' % fevals
remainingsymbols = max(maxsymbols - len(tmp), 0)
repr2 = (('%.' + str(remainingsymbols) + 'f') % fevals)
tmp = repr2.split('.', 1)
if len(tmp) > 1:
tmp[-1] = tmp[-1].rstrip('0')
repr2 = '.'.join(tmp)
repr2 = repr2.rstrip('.')
#set_trace()
if len(repr1)-repr1.count('.') < len(repr2)-repr2.count('.') or isscientific:
return repr1
#tmp1 = '%4.0f' % bestalgdata[-1]
#tmp2 = ('%2.2g' % bestalgdata[-1]).split('e', 1)
#if len(tmp2) > 1:
# tmp2[-1] = tmp2[-1].lstrip('+0')
# tmp2 = 'e'.join(tmp2)
# tmp = tmp1
# if len(tmp1) >= len(tmp2):
# tmp = tmp2
# curline.append(r'\multicolumn{2}{c|}{%s}' % tmp)
return repr2
def writeFEvalsMaxPrec(entry, SIG, maxfloatrepr=1e5):
"""Return a string representation of a number.
Two alternatives:
1) float notation with a precision smaller or equal to SIG (if the
entry is one, then the result is 1).
2) if the number is larger or equal to maxfloatrepr, a modified
scientific notation (without the trailing + and zero in the
exponent)
:returns: string representation of a number of function evaluations
or ERT.
"""
#CAVE: what if entry is smaller than 10**(-SIG)?
#Printf:
# %[flags][width][.precision][length]specifier
assert not numpy.isnan(entry)
if numpy.isinf(entry):
return r'$\infty$'
if entry == 1.:
res = '1'
elif entry < maxfloatrepr:
# the full notation but with given maximum precision
corr = 1 if abs(entry) < 1 else 0
tmp = '%.0f' % entry
remainingsymbols = max(SIG - len(tmp) + corr, 0)
res = (('%.' + str(remainingsymbols) + 'f') % entry)
else:
# modified scientific notation:
#smallest representation of the decimal part
#drop + and starting zeros of the exponent part
res = (('%.' + str(max([0, SIG - 1])) + 'e') % entry)
size1 = len(res)
tmp = res.split('e', 1)
tmp2 = tmp[-1].lstrip('+-0')
if float(tmp[-1]) < 0:
tmp2 = '-' + tmp2
tmp[-1] = tmp2
if len(tmp) > 1 and tmp[-1]:
res = 'e'.join(tmp)
else:
res = tmp[0]
return res
def tableLaTeX(table, spec, extraeol=()):
"""Generates a tabular from a sequence of sequence of strings.
:param seq table: sequence of sequence of strings
:param string spec: string for table specification, see
http://en.wikibooks.org/wiki/LaTeX/Tables#The_tabular_environment
:param seq extraeol: sequence of string the same length as the table
(same number of lines) which are added at the
end of each line.
:returns: sequence of strings of a LaTeX tabular.
"""
if not extraeol:
extraeol = len(table) * ['']
# TODO: check that spec and extraeol have the right format?
res = [r'\begin{tabular}{%s}' % spec]
for i, line in enumerate(table[:-1]):
curline = ' & '.join(line) + r'\\' + extraeol[i]
# curline = ' & '.join(line) + r'\\\hline' + extraeol[i]
res.append(curline)
res.append(' & '.join(table[-1]) + extraeol[-1])
res.append(r'\end{tabular}')
res = '\n'.join(res)
return res
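# Hypothetical usage of tableLaTeX (not from the original module):
#   tableLaTeX([['a', 'b'], ['1', '2']], spec='cc')
# returns the string
#   \begin{tabular}{cc}
#   a & b\\
#   1 & 2
#   \end{tabular}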
def tableXLaTeX(table, spec, extraeol=()):
"""Generates a tabular from a sequence of sequence of strings.
:param seq table: sequence of sequence of strings
:param string spec: string for table specification, see
http://en.wikibooks.org/wiki/LaTeX/Tables#The_tabular_environment
:param seq extraeol: sequence of string the same length as the table
(same number of lines) which are added at the
end of each line.
:returns: sequence of strings of a LaTeX tabular.
"""
if not extraeol:
extraeol = len(table) * ['']
# TODO: check that spec and extraeol have the right format?
if 1 < 3:
res = [r'\begin{tabularx}{1.0\textwidth}{%s}' % spec]
for i, line in enumerate(table[:-1]):
curline = ' & '.join(line) + r'\\' + extraeol[i]
res.append(curline)
else: # format with hline, when is it needed, for non-paper tables?
res = [r'\begin{tabularx}{1.3\textwidth}{%s}' % spec]
for i, line in enumerate(table[:-1]):
curline = ' & '.join(line) + r'\\\hline' + extraeol[i]
res.append(curline)
res.append(' & '.join(table[-1]) + extraeol[-1])
res.append(r'\end{tabularx}')
res = '\n'.join(res)
return res
def tableLaTeXStar(table, width, spec, extraeol=()):
"""Generates a tabular\* from a sequence of sequence of strings
:param seq table: sequence of sequence of strings
:param string width: string for the width of the table
:param strin spec: string for table specification, see
http://en.wikibooks.org/wiki/LaTeX/Tables#The_tabular_environment
:param seq extraeol: sequence of string the same length as the table
(same number of lines) which are added at the
end of each line.
"""
if not extraeol:
extraeol = len(table) * ['']
# TODO: check that spec and extraeol have the right format?
res = [r'\begin{tabular*}{%s}{%s}' % (width, spec)]
for i, line in enumerate(table[:-1]):
curline = ' & '.join(line) + r'\\' + extraeol[i]
res.append(curline)
res.append(' & '.join(table[-1]) + extraeol[-1])
res.append(r'\end{tabular*}')
res = '\n'.join(res)
return res
class DataTable(list):
pass
| bsd-3-clause |
pbrod/scipy | scipy/special/basic.py | 3 | 70421 | #
# Author: Travis Oliphant, 2002
#
from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
import math
from scipy._lib.six import xrange
from numpy import (pi, asarray, floor, isscalar, iscomplex, real,
imag, sqrt, where, mgrid, sin, place, issubdtype,
extract, less, inexact, nan, zeros, sinc)
from . import _ufuncs as ufuncs
from ._ufuncs import (ellipkm1, mathieu_a, mathieu_b, iv, jv, gamma,
psi, _zeta, hankel1, hankel2, yv, kv, ndtri,
poch, binom, hyp0f1)
from . import specfun
from . import orthogonal
from ._comb import _comb_int
__all__ = ['agm', 'ai_zeros', 'assoc_laguerre', 'bei_zeros', 'beip_zeros',
'ber_zeros', 'bernoulli', 'berp_zeros', 'bessel_diff_formula',
'bi_zeros', 'clpmn', 'comb', 'digamma', 'diric', 'ellipk',
'erf_zeros', 'erfcinv', 'erfinv', 'euler', 'factorial',
'factorialk', 'factorial2', 'fresnel_zeros',
'fresnelc_zeros', 'fresnels_zeros', 'gamma', 'h1vp',
'h2vp', 'hankel1', 'hankel2', 'hyp0f1', 'iv', 'ivp', 'jn_zeros',
'jnjnp_zeros', 'jnp_zeros', 'jnyn_zeros', 'jv', 'jvp', 'kei_zeros',
'keip_zeros', 'kelvin_zeros', 'ker_zeros', 'kerp_zeros', 'kv',
'kvp', 'lmbda', 'lpmn', 'lpn', 'lqmn', 'lqn', 'mathieu_a',
'mathieu_b', 'mathieu_even_coef', 'mathieu_odd_coef', 'ndtri',
'obl_cv_seq', 'pbdn_seq', 'pbdv_seq', 'pbvv_seq', 'perm',
'polygamma', 'pro_cv_seq', 'psi', 'riccati_jn', 'riccati_yn',
'sinc', 'sph_in', 'sph_inkn',
'sph_jn', 'sph_jnyn', 'sph_kn', 'sph_yn', 'y0_zeros', 'y1_zeros',
'y1p_zeros', 'yn_zeros', 'ynp_zeros', 'yv', 'yvp', 'zeta']
def diric(x, n):
"""Periodic sinc function, also called the Dirichlet function.
The Dirichlet function is defined as::
diric(x) = sin(x * n/2) / (n * sin(x / 2)),
where `n` is a positive integer.
Parameters
----------
x : array_like
Input data
n : int
Integer defining the periodicity.
Returns
-------
diric : ndarray
Examples
--------
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-8*np.pi, 8*np.pi, num=201)
>>> plt.figure(figsize=(8, 8));
>>> for idx, n in enumerate([2, 3, 4, 9]):
... plt.subplot(2, 2, idx+1)
... plt.plot(x, special.diric(x, n))
... plt.title('diric, n={}'.format(n))
>>> plt.show()
The following example demonstrates that `diric` gives the magnitudes
(modulo the sign and scaling) of the Fourier coefficients of a
rectangular pulse.
Suppress output of values that are effectively 0:
>>> np.set_printoptions(suppress=True)
Create a signal `x` of length `m` with `k` ones:
>>> m = 8
>>> k = 3
>>> x = np.zeros(m)
>>> x[:k] = 1
Use the FFT to compute the Fourier transform of `x`, and
inspect the magnitudes of the coefficients:
>>> np.abs(np.fft.fft(x))
array([ 3. , 2.41421356, 1. , 0.41421356, 1. ,
0.41421356, 1. , 2.41421356])
Now find the same values (up to sign) using `diric`. We multiply
by `k` to account for the different scaling conventions of
`numpy.fft.fft` and `diric`:
>>> theta = np.linspace(0, 2*np.pi, m, endpoint=False)
>>> k * special.diric(theta, k)
array([ 3. , 2.41421356, 1. , -0.41421356, -1. ,
-0.41421356, 1. , 2.41421356])
"""
x, n = asarray(x), asarray(n)
n = asarray(n + (x-x))
x = asarray(x + (n-n))
if issubdtype(x.dtype, inexact):
ytype = x.dtype
else:
ytype = float
y = zeros(x.shape, ytype)
# empirical minval for 32, 64 or 128 bit float computations
# where sin(x/2) < minval, result is fixed at +1 or -1
if np.finfo(ytype).eps < 1e-18:
minval = 1e-11
elif np.finfo(ytype).eps < 1e-15:
minval = 1e-7
else:
minval = 1e-3
mask1 = (n <= 0) | (n != floor(n))
place(y, mask1, nan)
x = x / 2
denom = sin(x)
mask2 = (1-mask1) & (abs(denom) < minval)
xsub = extract(mask2, x)
nsub = extract(mask2, n)
zsub = xsub / pi
place(y, mask2, pow(-1, np.round(zsub)*(nsub-1)))
mask = (1-mask1) & (1-mask2)
xsub = extract(mask, x)
nsub = extract(mask, n)
dsub = extract(mask, denom)
place(y, mask, sin(nsub*xsub)/(nsub*dsub))
return y
def jnjnp_zeros(nt):
"""Compute zeros of integer-order Bessel functions Jn and Jn'.
Results are arranged in order of the magnitudes of the zeros.
Parameters
----------
nt : int
Number (<=1200) of zeros to compute
Returns
-------
zo[l-1] : ndarray
Value of the lth zero of Jn(x) and Jn'(x). Of length `nt`.
n[l-1] : ndarray
Order of the Jn(x) or Jn'(x) associated with lth zero. Of length `nt`.
m[l-1] : ndarray
Serial number of the zeros of Jn(x) or Jn'(x) associated
with lth zero. Of length `nt`.
t[l-1] : ndarray
0 if lth zero in zo is zero of Jn(x), 1 if it is a zero of Jn'(x). Of
length `nt`.
See Also
--------
jn_zeros, jnp_zeros : to get separated arrays of zeros.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt > 1200):
raise ValueError("Number must be integer <= 1200.")
nt = int(nt)
n, m, t, zo = specfun.jdzo(nt)
return zo[1:nt+1], n[:nt], m[:nt], t[:nt]
def jnyn_zeros(n, nt):
"""Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).
Returns 4 arrays of length `nt`, corresponding to the first `nt` zeros of
Jn(x), Jn'(x), Yn(x), and Yn'(x), respectively.
Parameters
----------
n : int
Order of the Bessel functions
nt : int
Number (<=1200) of zeros to compute
See jn_zeros, jnp_zeros, yn_zeros, ynp_zeros to get separate arrays.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(nt) and isscalar(n)):
raise ValueError("Arguments must be scalars.")
if (floor(n) != n) or (floor(nt) != nt):
raise ValueError("Arguments must be integers.")
if (nt <= 0):
raise ValueError("nt > 0")
return specfun.jyzo(abs(n), nt)
def jn_zeros(n, nt):
"""Compute zeros of integer-order Bessel function Jn(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[0]
def jnp_zeros(n, nt):
"""Compute zeros of integer-order Bessel function derivative Jn'(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[1]
def yn_zeros(n, nt):
"""Compute zeros of integer-order Bessel function Yn(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[2]
def ynp_zeros(n, nt):
"""Compute zeros of integer-order Bessel function derivative Yn'(x).
Parameters
----------
n : int
Order of Bessel function
nt : int
Number of zeros to return
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
return jnyn_zeros(n, nt)[3]
def y0_zeros(nt, complex=False):
"""Compute nt zeros of Bessel function Y0(z), and derivative at each zero.
The derivatives are given by Y0'(z0) = -Y1(z0) at each zero z0.
Parameters
----------
nt : int
Number of zeros to return
complex : bool, default False
Set to False to return only the real zeros; set to True to return only
the complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z0n : ndarray
Location of nth zero of Y0(z)
y0pz0n : ndarray
Value of derivative Y0'(z0) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 0
kc = not complex
return specfun.cyzo(nt, kf, kc)
def y1_zeros(nt, complex=False):
"""Compute nt zeros of Bessel function Y1(z), and derivative at each zero.
The derivatives are given by Y1'(z1) = Y0(z1) at each zero z1.
Parameters
----------
nt : int
Number of zeros to return
complex : bool, default False
Set to False to return only the real zeros; set to True to return only
the complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z1n : ndarray
Location of nth zero of Y1(z)
y1pz1n : ndarray
Value of derivative Y1'(z1) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 1
kc = not complex
return specfun.cyzo(nt, kf, kc)
def y1p_zeros(nt, complex=False):
"""Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.
The values are given by Y1(z1) at each z1 where Y1'(z1)=0.
Parameters
----------
nt : int
Number of zeros to return
complex : bool, default False
Set to False to return only the real zeros; set to True to return only
the complex zeros with negative real part and positive imaginary part.
Note that the complex conjugates of the latter are also zeros of the
function, but are not returned by this routine.
Returns
-------
z1pn : ndarray
Location of nth zero of Y1'(z)
y1z1pn : ndarray
Value of derivative Y1(z1) for nth zero
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("Arguments must be scalar positive integer.")
kf = 2
kc = not complex
return specfun.cyzo(nt, kf, kc)
def _bessel_diff_formula(v, z, n, L, phase):
# from AMS55.
# L(v, z) = J(v, z), Y(v, z), H1(v, z), H2(v, z), phase = -1
# L(v, z) = I(v, z) or exp(v*pi*i)K(v, z), phase = 1
# For K, you can pull out the exp((v-k)*pi*i) into the caller
v = asarray(v)
p = 1.0
s = L(v-n, z)
for i in xrange(1, n+1):
p = phase * (p * (n-i+1)) / i # = choose(k, i)
s += p*L(v-n + i*2, z)
return s / (2.**n)
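# For n=1 and phase=-1 the formula above reduces to the familiar relation
# C_v'(z) = (C_{v-1}(z) - C_{v+1}(z)) / 2, which is what jvp, yvp, h1vp and
# h2vp below rely on (cf. DLMF 10.6).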
bessel_diff_formula = np.deprecate(_bessel_diff_formula,
message="bessel_diff_formula is a private function, do not use it!")
def jvp(v, z, n=1):
"""Compute nth derivative of Bessel function Jv(z) with respect to `z`.
Parameters
----------
v : float
Order of Bessel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
    The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.6.E7
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return jv(v, z)
else:
return _bessel_diff_formula(v, z, n, jv, -1)
def yvp(v, z, n=1):
"""Compute nth derivative of Bessel function Yv(z) with respect to `z`.
Parameters
----------
v : float
Order of Bessel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
    The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.6.E7
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return yv(v, z)
else:
return _bessel_diff_formula(v, z, n, yv, -1)
def kvp(v, z, n=1):
"""Compute nth derivative of real-order modified Bessel function Kv(z)
Kv(z) is the modified Bessel function of the second kind.
Derivative is calculated with respect to `z`.
Parameters
----------
v : array_like of float
Order of Bessel function
z : array_like of complex
Argument at which to evaluate the derivative
n : int
Order of derivative. Default is first derivative.
Returns
-------
out : ndarray
The results
Examples
--------
Calculate multiple values at order 5:
>>> from scipy.special import kvp
>>> kvp(5, (1, 2, 3+5j))
array([-1849.0354+0.j , -25.7735+0.j , -0.0307+0.0875j])
Calculate for a single value at multiple orders:
>>> kvp((4, 4.5, 5), 1)
array([ -184.0309, -568.9585, -1849.0354])
Notes
-----
    The derivative is computed using the relation DLMF 10.29.5 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 6.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.29.E5
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return kv(v, z)
else:
return (-1)**n * _bessel_diff_formula(v, z, n, kv, 1)
def ivp(v, z, n=1):
"""Compute nth derivative of modified Bessel function Iv(z) with respect
to `z`.
Parameters
----------
v : array_like of float
Order of Bessel function
z : array_like of complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
    The derivative is computed using the relation DLMF 10.29.5 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 6.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.29.E5
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return iv(v, z)
else:
return _bessel_diff_formula(v, z, n, iv, 1)
def h1vp(v, z, n=1):
"""Compute nth derivative of Hankel function H1v(z) with respect to `z`.
Parameters
----------
v : float
Order of Hankel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
    The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.6.E7
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return hankel1(v, z)
else:
return _bessel_diff_formula(v, z, n, hankel1, -1)
def h2vp(v, z, n=1):
"""Compute nth derivative of Hankel function H2v(z) with respect to `z`.
Parameters
----------
v : float
Order of Hankel function
z : complex
Argument at which to evaluate the derivative
n : int, default 1
Order of derivative
Notes
-----
    The derivative is computed using the relation DLMF 10.6.7 [2]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 5.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.6.E7
"""
if not isinstance(n, int) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if n == 0:
return hankel2(v, z)
else:
return _bessel_diff_formula(v, z, n, hankel2, -1)
@np.deprecate(message="scipy.special.sph_jn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_jn instead. "
"Note that the new function has a different signature.")
def sph_jn(n, z):
"""Compute spherical Bessel function jn(z) and derivative.
This function computes the value and first derivative of jn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of jn to compute
z : complex
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(z), ..., jn(z)
jnp : ndarray
First derivative j0'(z), ..., jn'(z)
See also
--------
spherical_jn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
else:
nm, jn, jnp = specfun.sphj(n1, z)
return jn[:(n+1)], jnp[:(n+1)]
@np.deprecate(message="scipy.special.sph_yn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_yn instead. "
"Note that the new function has a different signature.")
def sph_yn(n, z):
"""Compute spherical Bessel function yn(z) and derivative.
This function computes the value and first derivative of yn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of yn to compute
z : complex
Argument at which to evaluate
Returns
-------
yn : ndarray
Value of y0(z), ..., yn(z)
ynp : ndarray
First derivative y0'(z), ..., yn'(z)
See also
--------
spherical_yn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
else:
nm, yn, ynp = specfun.sphy(n1, z)
return yn[:(n+1)], ynp[:(n+1)]
@np.deprecate(message="scipy.special.sph_jnyn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_jn and "
"scipy.special.spherical_yn instead. "
"Note that the new function has a different signature.")
def sph_jnyn(n, z):
"""Compute spherical Bessel functions jn(z) and yn(z) and derivatives.
This function computes the value and first derivative of jn(z) and yn(z)
for all orders up to and including n.
Parameters
----------
n : int
Maximum order of jn and yn to compute
z : complex
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(z), ..., jn(z)
jnp : ndarray
First derivative j0'(z), ..., jn'(z)
yn : ndarray
Value of y0(z), ..., yn(z)
ynp : ndarray
First derivative y0'(z), ..., yn'(z)
See also
--------
spherical_jn
spherical_yn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, jn, jnp, yn, ynp = specfun.csphjy(n1, z)
else:
nm, yn, ynp = specfun.sphy(n1, z)
nm, jn, jnp = specfun.sphj(n1, z)
return jn[:(n+1)], jnp[:(n+1)], yn[:(n+1)], ynp[:(n+1)]
@np.deprecate(message="scipy.special.sph_in is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_in instead. "
"Note that the new function has a different signature.")
def sph_in(n, z):
"""Compute spherical Bessel function in(z) and derivative.
This function computes the value and first derivative of in(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of in to compute
z : complex
Argument at which to evaluate
Returns
-------
in : ndarray
Value of i0(z), ..., in(z)
inp : ndarray
First derivative i0'(z), ..., in'(z)
See also
--------
spherical_in
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
nm, In, Inp, kn, knp = specfun.csphik(n1, z)
else:
nm, In, Inp = specfun.sphi(n1, z)
return In[:(n+1)], Inp[:(n+1)]
@np.deprecate(message="scipy.special.sph_kn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_kn instead. "
"Note that the new function has a different signature.")
def sph_kn(n, z):
"""Compute spherical Bessel function kn(z) and derivative.
This function computes the value and first derivative of kn(z) for all
orders up to and including n.
Parameters
----------
n : int
Maximum order of kn to compute
z : complex
Argument at which to evaluate
Returns
-------
kn : ndarray
Value of k0(z), ..., kn(z)
knp : ndarray
First derivative k0'(z), ..., kn'(z)
See also
--------
spherical_kn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, In, Inp, kn, knp = specfun.csphik(n1, z)
else:
nm, kn, knp = specfun.sphk(n1, z)
return kn[:(n+1)], knp[:(n+1)]
@np.deprecate(message="scipy.special.sph_inkn is deprecated in scipy 0.18.0. "
"Use scipy.special.spherical_in and "
"scipy.special.spherical_kn instead. "
"Note that the new function has a different signature.")
def sph_inkn(n, z):
"""Compute spherical Bessel functions in(z), kn(z), and derivatives.
This function computes the value and first derivative of in(z) and kn(z)
for all orders up to and including n.
Parameters
----------
n : int
Maximum order of in and kn to compute
z : complex
Argument at which to evaluate
Returns
-------
in : ndarray
Value of i0(z), ..., in(z)
inp : ndarray
First derivative i0'(z), ..., in'(z)
kn : ndarray
Value of k0(z), ..., kn(z)
knp : ndarray
First derivative k0'(z), ..., kn'(z)
See also
--------
spherical_in
spherical_kn
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 8.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z) or less(z, 0):
nm, In, Inp, kn, knp = specfun.csphik(n1, z)
else:
nm, In, Inp = specfun.sphi(n1, z)
nm, kn, knp = specfun.sphk(n1, z)
return In[:(n+1)], Inp[:(n+1)], kn[:(n+1)], knp[:(n+1)]
def riccati_jn(n, x):
r"""Compute Ricatti-Bessel function of the first kind and its derivative.
The Ricatti-Bessel function of the first kind is defined as :math:`x
j_n(x)`, where :math:`j_n` is the spherical Bessel function of the first
kind of order :math:`n`.
This function computes the value and first derivative of the
Ricatti-Bessel function for all orders up to and including `n`.
Parameters
----------
n : int
Maximum order of function to compute
x : float
Argument at which to evaluate
Returns
-------
jn : ndarray
Value of j0(x), ..., jn(x)
jnp : ndarray
First derivative j0'(x), ..., jn'(x)
Notes
-----
The computation is carried out via backward recurrence, using the
relation DLMF 10.51.1 [2]_.
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.51.E1
"""
if not (isscalar(n) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n == 0):
n1 = 1
else:
n1 = n
nm, jn, jnp = specfun.rctj(n1, x)
return jn[:(n+1)], jnp[:(n+1)]
def riccati_yn(n, x):
"""Compute Ricatti-Bessel function of the second kind and its derivative.
The Ricatti-Bessel function of the second kind is defined as :math:`x
y_n(x)`, where :math:`y_n` is the spherical Bessel function of the second
kind of order :math:`n`.
This function computes the value and first derivative of the function for
all orders up to and including `n`.
Parameters
----------
n : int
Maximum order of function to compute
x : float
Argument at which to evaluate
Returns
-------
yn : ndarray
Value of y0(x), ..., yn(x)
ynp : ndarray
First derivative y0'(x), ..., yn'(x)
Notes
-----
The computation is carried out via ascending recurrence, using the
relation DLMF 10.51.1 [2]_.
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions.
http://dlmf.nist.gov/10.51.E1
"""
if not (isscalar(n) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n == 0):
n1 = 1
else:
n1 = n
nm, jn, jnp = specfun.rcty(n1, x)
return jn[:(n+1)], jnp[:(n+1)]
def erfinv(y):
"""Inverse function for erf.
"""
return ndtri((y+1)/2.0)/sqrt(2)
def erfcinv(y):
"""Inverse function for erfc.
"""
return -ndtri(0.5*y)/sqrt(2)
def erf_zeros(nt):
"""Compute nt complex zeros of error function erf(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.cerzo(nt)
def fresnelc_zeros(nt):
"""Compute nt complex zeros of cosine Fresnel integral C(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(1, nt)
def fresnels_zeros(nt):
"""Compute nt complex zeros of sine Fresnel integral S(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(2, nt)
def fresnel_zeros(nt):
"""Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if (floor(nt) != nt) or (nt <= 0) or not isscalar(nt):
raise ValueError("Argument must be positive scalar integer.")
return specfun.fcszo(2, nt), specfun.fcszo(1, nt)
def assoc_laguerre(x, n, k=0.0):
"""Compute the generalized (associated) Laguerre polynomial of degree n and order k.
The polynomial :math:`L^{(k)}_n(x)` is orthogonal over ``[0, inf)``,
with weighting function ``exp(-x) * x**k`` with ``k > -1``.
Notes
-----
`assoc_laguerre` is a simple wrapper around `eval_genlaguerre`, with
reversed argument order ``(x, n, k=0.0) --> (n, k, x)``.
"""
return orthogonal.eval_genlaguerre(n, k, x)
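# Illustrative usage (hedged sketch, not part of the original source): since
# assoc_laguerre only reorders the arguments of eval_genlaguerre, the two
# calls below should agree elementwise.
#
#     import numpy as np
#     from scipy.special import assoc_laguerre, eval_genlaguerre
#     x = np.linspace(0.0, 5.0, 11)
#     assert np.allclose(assoc_laguerre(x, 3, k=1.5),
#                        eval_genlaguerre(3, 1.5, x))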
digamma = psi
def polygamma(n, x):
"""Polygamma function n.
This is the nth derivative of the digamma (psi) function.
Parameters
----------
n : array_like of int
The order of the derivative of `psi`.
x : array_like
Where to evaluate the polygamma function.
Returns
-------
polygamma : ndarray
The result.
Examples
--------
>>> from scipy import special
>>> x = [2, 3, 25.5]
>>> special.polygamma(1, x)
array([ 0.64493407, 0.39493407, 0.03999467])
>>> special.polygamma(0, x) == special.psi(x)
array([ True, True, True], dtype=bool)
"""
n, x = asarray(n), asarray(x)
fac2 = (-1.0)**(n+1) * gamma(n+1.0) * zeta(n+1, x)
return where(n == 0, psi(x), fac2)
def mathieu_even_coef(m, q):
r"""Fourier coefficients for even Mathieu and modified Mathieu functions.
The Fourier series of the even solutions of the Mathieu differential
equation are of the form
.. math:: \mathrm{ce}_{2n}(z, q) = \sum_{k=0}^{\infty} A_{(2n)}^{(2k)} \cos 2kz
.. math:: \mathrm{ce}_{2n+1}(z, q) = \sum_{k=0}^{\infty} A_{(2n+1)}^{(2k+1)} \cos (2k+1)z
This function returns the coefficients :math:`A_{(2n)}^{(2k)}` for even
input m=2n, and the coefficients :math:`A_{(2n+1)}^{(2k+1)}` for odd input
m=2n+1.
Parameters
----------
m : int
Order of Mathieu functions. Must be non-negative.
q : float (>=0)
Parameter of Mathieu functions. Must be non-negative.
Returns
-------
Ak : ndarray
Even or odd Fourier coefficients, corresponding to even or odd m.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/28.4#i
"""
if not (isscalar(m) and isscalar(q)):
raise ValueError("m and q must be scalars.")
if (q < 0):
raise ValueError("q >=0")
if (m != floor(m)) or (m < 0):
raise ValueError("m must be an integer >=0.")
if (q <= 1):
qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
else:
qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
km = int(qm + 0.5*m)
if km > 251:
print("Warning, too many predicted coefficients.")
kd = 1
m = int(floor(m))
if m % 2:
kd = 2
a = mathieu_a(m, q)
fc = specfun.fcoef(kd, m, q, a)
return fc[:km]
def mathieu_odd_coef(m, q):
r"""Fourier coefficients for even Mathieu and modified Mathieu functions.
The Fourier series of the odd solutions of the Mathieu differential
equation are of the form
.. math:: \mathrm{se}_{2n+1}(z, q) = \sum_{k=0}^{\infty} B_{(2n+1)}^{(2k+1)} \sin (2k+1)z
.. math:: \mathrm{se}_{2n+2}(z, q) = \sum_{k=0}^{\infty} B_{(2n+2)}^{(2k+2)} \sin (2k+2)z
This function returns the coefficients :math:`B_{(2n+2)}^{(2k+2)}` for even
input m=2n+2, and the coefficients :math:`B_{(2n+1)}^{(2k+1)}` for odd
input m=2n+1.
Parameters
----------
m : int
Order of Mathieu functions. Must be non-negative.
q : float (>=0)
Parameter of Mathieu functions. Must be non-negative.
Returns
-------
Bk : ndarray
Even or odd Fourier coefficients, corresponding to even or odd m.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(q)):
raise ValueError("m and q must be scalars.")
if (q < 0):
raise ValueError("q >=0")
if (m != floor(m)) or (m <= 0):
raise ValueError("m must be an integer > 0")
if (q <= 1):
qm = 7.5 + 56.1*sqrt(q) - 134.7*q + 90.7*sqrt(q)*q
else:
qm = 17.0 + 3.1*sqrt(q) - .126*q + .0037*sqrt(q)*q
km = int(qm + 0.5*m)
if km > 251:
print("Warning, too many predicted coefficients.")
kd = 4
m = int(floor(m))
if m % 2:
kd = 3
b = mathieu_b(m, q)
fc = specfun.fcoef(kd, m, q, b)
return fc[:km]
def lpmn(m, n, z):
"""Sequence of associated Legendre functions of the first kind.
Computes the associated Legendre function of the first kind of order m and
degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
This function takes a real argument ``z``. For complex arguments ``z``
use clpmn instead.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : float
Input value.
Returns
-------
Pmn_z : (m+1, n+1) array
Values for all orders 0..m and degrees 0..n
Pmn_d_z : (m+1, n+1) array
Derivatives for all orders 0..m and degrees 0..n
See Also
--------
clpmn: associated Legendre functions of the first kind for complex z
Notes
-----
In the interval (-1, 1), Ferrer's function of the first kind is
returned. The phase convention used for the intervals (1, inf)
and (-inf, -1) is such that the result is always real.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/14.3
"""
if not isscalar(m) or (abs(m) > n):
raise ValueError("m must be <= n.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
if iscomplex(z):
raise ValueError("Argument must be real. Use clpmn instead.")
if (m < 0):
mp = -m
mf, nf = mgrid[0:mp+1, 0:n+1]
with ufuncs.errstate(all='ignore'):
if abs(z) < 1:
# Ferrer function; DLMF 14.9.3
fixarr = where(mf > nf, 0.0,
(-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
else:
# Match to clpmn; DLMF 14.9.13
fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
else:
mp = m
p, pd = specfun.lpmn(mp, n, z)
if (m < 0):
p = p * fixarr
pd = pd * fixarr
return p, pd
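# Illustrative usage (hedged sketch, not part of the original source): for
# order m = 0 the associated Legendre values reduce to the ordinary Legendre
# polynomials, e.g. P_2(z) = (3*z**2 - 1)/2.
#
#     import numpy as np
#     from scipy.special import lpmn
#     z = 0.4
#     p, pd = lpmn(2, 2, z)                 # arrays of shape (m+1, n+1) = (3, 3)
#     assert np.allclose(p[0, 2], 0.5 * (3 * z**2 - 1))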
def clpmn(m, n, z, type=3):
"""Associated Legendre function of the first kind for complex arguments.
Computes the associated Legendre function of the first kind of order m and
degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : float or complex
Input value.
type : int, optional
takes values 2 or 3
2: cut on the real axis ``|x| > 1``
3: cut on the real axis ``-1 < x < 1`` (default)
Returns
-------
Pmn_z : (m+1, n+1) array
Values for all orders ``0..m`` and degrees ``0..n``
Pmn_d_z : (m+1, n+1) array
Derivatives for all orders ``0..m`` and degrees ``0..n``
See Also
--------
lpmn: associated Legendre functions of the first kind for real z
Notes
-----
By default, i.e. for ``type=3``, phase conventions are chosen according
to [1]_ such that the function is analytic. The cut lies on the interval
(-1, 1). Approaching the cut from above or below in general yields a phase
factor with respect to Ferrer's function of the first kind
(cf. `lpmn`).
For ``type=2`` a cut at ``|x| > 1`` is chosen. Approaching the real values
on the interval (-1, 1) in the complex plane yields Ferrer's function
of the first kind.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] NIST Digital Library of Mathematical Functions
http://dlmf.nist.gov/14.21
"""
if not isscalar(m) or (abs(m) > n):
raise ValueError("m must be <= n.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
if not(type == 2 or type == 3):
raise ValueError("type must be either 2 or 3.")
if (m < 0):
mp = -m
mf, nf = mgrid[0:mp+1, 0:n+1]
with ufuncs.errstate(all='ignore'):
if type == 2:
fixarr = where(mf > nf, 0.0,
(-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
else:
fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
else:
mp = m
p, pd = specfun.clpmn(mp, n, real(z), imag(z), type)
if (m < 0):
p = p * fixarr
pd = pd * fixarr
return p, pd
def lqmn(m, n, z):
"""Sequence of associated Legendre functions of the second kind.
Computes the associated Legendre function of the second kind of order m and
degree n, ``Qmn(z)`` = :math:`Q_n^m(z)`, and its derivative, ``Qmn'(z)``.
Returns two arrays of size ``(m+1, n+1)`` containing ``Qmn(z)`` and
``Qmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.
Parameters
----------
m : int
``|m| <= n``; the order of the Legendre function.
n : int
where ``n >= 0``; the degree of the Legendre function. Often
called ``l`` (lower case L) in descriptions of the associated
Legendre function
z : complex
Input value.
Returns
-------
Qmn_z : (m+1, n+1) array
Values for all orders 0..m and degrees 0..n
Qmn_d_z : (m+1, n+1) array
Derivatives for all orders 0..m and degrees 0..n
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(m) or (m < 0):
raise ValueError("m must be a non-negative integer.")
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if not isscalar(z):
raise ValueError("z must be scalar.")
m = int(m)
n = int(n)
# Ensure neither m nor n == 0
mm = max(1, m)
nn = max(1, n)
if iscomplex(z):
q, qd = specfun.clqmn(mm, nn, z)
else:
q, qd = specfun.lqmn(mm, nn, z)
return q[:(m+1), :(n+1)], qd[:(m+1), :(n+1)]
def bernoulli(n):
"""Bernoulli numbers B0..Bn (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
n = int(n)
if (n < 2):
n1 = 2
else:
n1 = n
return specfun.bernob(int(n1))[:(n+1)]
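# Illustrative usage (hedged sketch, not part of the original source): the
# returned array contains B_0..B_n; B_2 = 1/6 gives a quick sanity check.
#
#     import numpy as np
#     from scipy.special import bernoulli
#     B = bernoulli(4)
#     assert np.allclose(B[2], 1.0 / 6.0)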
def euler(n):
"""Euler numbers E0..En (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(n) or (n < 0):
raise ValueError("n must be a non-negative integer.")
n = int(n)
if (n < 2):
n1 = 2
else:
n1 = n
return specfun.eulerb(n1)[:(n+1)]
def lpn(n, z):
"""Legendre function of the first kind.
Compute sequence of Legendre functions of the first kind (polynomials),
Pn(z) and derivatives for all degrees from 0 to n (inclusive).
See also special.legendre for polynomial class.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
pn, pd = specfun.clpn(n1, z)
else:
pn, pd = specfun.lpn(n1, z)
return pn[:(n+1)], pd[:(n+1)]
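# Illustrative usage (hedged sketch, not part of the original source): lpn
# returns P_0..P_n and their derivatives, so P_1(z) = z and P_1'(z) = 1.
#
#     import numpy as np
#     from scipy.special import lpn
#     pn, pd = lpn(3, 0.25)
#     assert np.allclose(pn[1], 0.25) and np.allclose(pd[1], 1.0)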
def lqn(n, z):
"""Legendre function of the second kind.
Compute sequence of Legendre functions of the second kind, Qn(z) and
derivatives for all degrees from 0 to n (inclusive).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (n != floor(n)) or (n < 0):
raise ValueError("n must be a non-negative integer.")
if (n < 1):
n1 = 1
else:
n1 = n
if iscomplex(z):
qn, qd = specfun.clqn(n1, z)
else:
qn, qd = specfun.lqnb(n1, z)
return qn[:(n+1)], qd[:(n+1)]
def ai_zeros(nt):
"""
Compute `nt` zeros and values of the Airy function Ai and its derivative.
Computes the first `nt` zeros, `a`, of the Airy function Ai(x);
first `nt` zeros, `ap`, of the derivative of the Airy function Ai'(x);
the corresponding values Ai(a');
and the corresponding values Ai'(a).
Parameters
----------
nt : int
Number of zeros to compute
Returns
-------
a : ndarray
First `nt` zeros of Ai(x)
ap : ndarray
First `nt` zeros of Ai'(x)
ai : ndarray
Values of Ai(x) evaluated at first `nt` zeros of Ai'(x)
aip : ndarray
Values of Ai'(x) evaluated at first `nt` zeros of Ai(x)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
kf = 1
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be a positive integer scalar.")
return specfun.airyzo(nt, kf)
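# Illustrative usage (hedged sketch, not part of the original source): the
# Airy function Ai should vanish (to rounding error) at the zeros returned in
# the first array.
#
#     import numpy as np
#     from scipy.special import ai_zeros, airy
#     a, ap, ai, aip = ai_zeros(3)
#     assert np.allclose(airy(a)[0], 0.0, atol=1e-10)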
def bi_zeros(nt):
"""
Compute `nt` zeros and values of the Airy function Bi and its derivative.
Computes the first `nt` zeros, b, of the Airy function Bi(x);
first `nt` zeros, b', of the derivative of the Airy function Bi'(x);
the corresponding values Bi(b');
and the corresponding values Bi'(b).
Parameters
----------
nt : int
Number of zeros to compute
Returns
-------
b : ndarray
First `nt` zeros of Bi(x)
bp : ndarray
First `nt` zeros of Bi'(x)
bi : ndarray
Values of Bi(x) evaluated at first `nt` zeros of Bi'(x)
bip : ndarray
Values of Bi'(x) evaluated at first `nt` zeros of Bi(x)
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
kf = 2
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be a positive integer scalar.")
return specfun.airyzo(nt, kf)
def lmbda(v, x):
r"""Jahnke-Emden Lambda function, Lambdav(x).
This function is defined as [2]_,
.. math:: \Lambda_v(x) = \Gamma(v+1) \frac{J_v(x)}{(x/2)^v},
where :math:`\Gamma` is the gamma function and :math:`J_v` is the
Bessel function of the first kind.
Parameters
----------
v : float
Order of the Lambda function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
vl : ndarray
Values of Lambda_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dl : ndarray
Derivatives Lambda_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
.. [2] Jahnke, E. and Emde, F. "Tables of Functions with Formulae and
Curves" (4th ed.), Dover, 1945
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
if (v < 0):
raise ValueError("argument must be > 0.")
n = int(v)
v0 = v - n
if (n < 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
if (v != floor(v)):
vm, vl, dl = specfun.lamv(v1, x)
else:
vm, vl, dl = specfun.lamn(v1, x)
return vl[:(n+1)], dl[:(n+1)]
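# Illustrative usage (hedged sketch, not part of the original source): from
# the definition, Lambda_0(x) = J_0(x), which checks the first returned value
# for integer v.
#
#     import numpy as np
#     from scipy.special import lmbda, jv
#     vl, dl = lmbda(2, 1.0)                # Lambda_0, Lambda_1, Lambda_2 at x = 1
#     assert np.allclose(vl[0], jv(0, 1.0))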
def pbdv_seq(v, x):
"""Parabolic cylinder functions Dv(x) and derivatives.
Parameters
----------
v : float
Order of the parabolic cylinder function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of D_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dp : ndarray
Derivatives D_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = int(v)
v0 = v-n
if (n < 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
dv, dp, pdf, pdd = specfun.pbdv(v1, x)
return dv[:n1+1], dp[:n1+1]
def pbvv_seq(v, x):
"""Parabolic cylinder functions Vv(x) and derivatives.
Parameters
----------
v : float
Order of the parabolic cylinder function
x : float
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of V_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
dp : ndarray
Derivatives V_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(v) and isscalar(x)):
raise ValueError("arguments must be scalars.")
n = int(v)
v0 = v-n
if (n <= 1):
n1 = 1
else:
n1 = n
v1 = n1 + v0
dv, dp, pdf, pdd = specfun.pbvv(v1, x)
return dv[:n1+1], dp[:n1+1]
def pbdn_seq(n, z):
"""Parabolic cylinder functions Dn(z) and derivatives.
Parameters
----------
n : int
Order of the parabolic cylinder function
z : complex
Value at which to evaluate the function and derivatives
Returns
-------
dv : ndarray
Values of D_i(z), for i=0, ..., i=n.
dp : ndarray
Derivatives D_i'(z), for i=0, ..., i=n.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996, chapter 13.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(n) and isscalar(z)):
raise ValueError("arguments must be scalars.")
if (floor(n) != n):
raise ValueError("n must be an integer.")
if (abs(n) <= 1):
n1 = 1
else:
n1 = n
cpb, cpd = specfun.cpbdn(n1, z)
return cpb[:n1+1], cpd[:n1+1]
def ber_zeros(nt):
"""Compute nt zeros of the Kelvin function ber(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 1)
def bei_zeros(nt):
"""Compute nt zeros of the Kelvin function bei(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 2)
def ker_zeros(nt):
"""Compute nt zeros of the Kelvin function ker(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 3)
def kei_zeros(nt):
"""Compute nt zeros of the Kelvin function kei(x).
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 4)
def berp_zeros(nt):
"""Compute nt zeros of the Kelvin function ber'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 5)
def beip_zeros(nt):
"""Compute nt zeros of the Kelvin function bei'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 6)
def kerp_zeros(nt):
"""Compute nt zeros of the Kelvin function ker'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 7)
def keip_zeros(nt):
"""Compute nt zeros of the Kelvin function kei'(x).
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return specfun.klvnzo(nt, 8)
def kelvin_zeros(nt):
"""Compute nt zeros of all Kelvin functions.
Returned in a length-8 tuple of arrays of length nt. The tuple contains
the arrays of zeros of (ber, bei, ker, kei, ber', bei', ker', kei').
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
raise ValueError("nt must be positive integer scalar.")
return (specfun.klvnzo(nt, 1),
specfun.klvnzo(nt, 2),
specfun.klvnzo(nt, 3),
specfun.klvnzo(nt, 4),
specfun.klvnzo(nt, 5),
specfun.klvnzo(nt, 6),
specfun.klvnzo(nt, 7),
specfun.klvnzo(nt, 8))
def pro_cv_seq(m, n, c):
"""Characteristic values for prolate spheroidal wave functions.
Compute a sequence of characteristic values for the prolate
spheroidal wave functions for mode m and n'=m..n and spheroidal
parameter c.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(n) and isscalar(c)):
raise ValueError("Arguments must be scalars.")
if (n != floor(n)) or (m != floor(m)):
raise ValueError("Modes must be integers.")
if (n-m > 199):
raise ValueError("Difference between n and m is too large.")
maxL = n-m+1
return specfun.segv(m, n, c, 1)[1][:maxL]
def obl_cv_seq(m, n, c):
"""Characteristic values for oblate spheroidal wave functions.
Compute a sequence of characteristic values for the oblate
spheroidal wave functions for mode m and n'=m..n and spheroidal
parameter c.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
http://jin.ece.illinois.edu/specfunc.html
"""
if not (isscalar(m) and isscalar(n) and isscalar(c)):
raise ValueError("Arguments must be scalars.")
if (n != floor(n)) or (m != floor(m)):
raise ValueError("Modes must be integers.")
if (n-m > 199):
raise ValueError("Difference between n and m is too large.")
maxL = n-m+1
return specfun.segv(m, n, c, -1)[1][:maxL]
def ellipk(m):
r"""Complete elliptic integral of the first kind.
This function is defined as
.. math:: K(m) = \int_0^{\pi/2} [1 - m \sin(t)^2]^{-1/2} dt
Parameters
----------
m : array_like
The parameter of the elliptic integral.
Returns
-------
K : array_like
Value of the elliptic integral.
Notes
-----
For more precision around point m = 1, use `ellipkm1`, which this
function calls.
The parameterization in terms of :math:`m` follows that of section
17.2 in [1]_. Other parameterizations in terms of the
complementary parameter :math:`1 - m`, modular angle
:math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
used, so be careful that you choose the correct parameter.
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind around m = 1
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
"""
return ellipkm1(1 - asarray(m))
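# Illustrative usage (hedged sketch, not part of the original source):
# K(0) = pi/2, and by construction ellipk(m) agrees with ellipkm1(1 - m).
#
#     import numpy as np
#     from scipy.special import ellipk, ellipkm1
#     assert np.allclose(ellipk(0.0), np.pi / 2)
#     assert np.allclose(ellipk(0.3), ellipkm1(0.7))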
def agm(a, b):
"""Arithmetic, Geometric Mean.
Start with a_0=a and b_0=b and iteratively compute
a_{n+1} = (a_n+b_n)/2
b_{n+1} = sqrt(a_n*b_n)
until a_n=b_n. The result is agm(a, b)
agm(a, b)=agm(b, a)
agm(a, a) = a
min(a, b) < agm(a, b) < max(a, b)
"""
s = a + b + 0.0
return (pi / 4) * s / ellipkm1(4 * a * b / s ** 2)
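# Illustrative usage (hedged sketch, not part of the original source): agm can
# be cross-checked against a direct iteration of the defining recurrence,
# which converges quadratically.
#
#     import numpy as np
#     from scipy.special import agm
#     a, b = 24.0, 6.0
#     for _ in range(8):                    # 8 iterations is ample at double precision
#         a, b = 0.5 * (a + b), np.sqrt(a * b)
#     assert np.allclose(agm(24.0, 6.0), a)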
def comb(N, k, exact=False, repetition=False):
"""The number of combinations of N things taken k at a time.
This is often expressed as "N choose k".
Parameters
----------
N : int, ndarray
Number of things.
k : int, ndarray
Number of elements taken.
exact : bool, optional
If `exact` is False, then floating point precision is used, otherwise
exact long integer is computed.
repetition : bool, optional
If `repetition` is True, then the number of combinations with
repetition is computed.
Returns
-------
val : int, float, ndarray
The total number of combinations.
See Also
--------
binom : Binomial coefficient ufunc
Notes
-----
- Array arguments accepted only for exact=False case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> from scipy.special import comb
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> comb(n, k, exact=False)
array([ 120., 210.])
>>> comb(10, 3, exact=True)
120L
>>> comb(10, 3, exact=True, repetition=True)
220L
"""
if repetition:
return comb(N + k - 1, k, exact)
if exact:
return _comb_int(N, k)
else:
k, N = asarray(k), asarray(N)
cond = (k <= N) & (N >= 0) & (k >= 0)
vals = binom(N, k)
if isinstance(vals, np.ndarray):
vals[~cond] = 0
elif not cond:
vals = np.float64(0)
return vals
def perm(N, k, exact=False):
"""Permutations of N things taken k at a time, i.e., k-permutations of N.
It's also known as "partial permutations".
Parameters
----------
N : int, ndarray
Number of things.
k : int, ndarray
Number of elements taken.
exact : bool, optional
If `exact` is False, then floating point precision is used, otherwise
exact long integer is computed.
Returns
-------
val : int, ndarray
The number of k-permutations of N.
Notes
-----
- Array arguments accepted only for exact=False case.
- If k > N, N < 0, or k < 0, then a 0 is returned.
Examples
--------
>>> from scipy.special import perm
>>> k = np.array([3, 4])
>>> n = np.array([10, 10])
>>> perm(n, k)
array([ 720., 5040.])
>>> perm(10, 3, exact=True)
720
"""
if exact:
if (k > N) or (N < 0) or (k < 0):
return 0
val = 1
for i in xrange(N - k + 1, N + 1):
val *= i
return val
else:
k, N = asarray(k), asarray(N)
cond = (k <= N) & (N >= 0) & (k >= 0)
vals = poch(N - k + 1, k)
if isinstance(vals, np.ndarray):
vals[~cond] = 0
elif not cond:
vals = np.float64(0)
return vals
# http://stackoverflow.com/a/16327037/125507
def _range_prod(lo, hi):
"""
Product of a range of numbers.
Returns the product of
lo * (lo+1) * (lo+2) * ... * (hi-2) * (hi-1) * hi
= hi! / (lo-1)!
Breaks into smaller products first for speed:
_range_prod(2, 9) = ((2*3)*(4*5))*((6*7)*(8*9))
"""
if lo + 1 < hi:
mid = (hi + lo) // 2
return _range_prod(lo, mid) * _range_prod(mid + 1, hi)
if lo == hi:
return lo
return lo * hi
def factorial(n, exact=False):
"""
The factorial of a number or array of numbers.
The factorial of non-negative integer `n` is the product of all
positive integers less than or equal to `n`::
n! = n * (n - 1) * (n - 2) * ... * 1
Parameters
----------
n : int or array_like of ints
Input values. If ``n < 0``, the return value is 0.
exact : bool, optional
If True, calculate the answer exactly using long integer arithmetic.
If False, result is approximated in floating point rapidly using the
`gamma` function.
Default is False.
Returns
-------
nf : float or int or ndarray
Factorial of `n`, as integer or float depending on `exact`.
Notes
-----
For arrays with ``exact=True``, the factorial is computed only once, for
the largest input, with each other result computed in the process.
The output dtype is increased to ``int64`` or ``object`` if necessary.
With ``exact=False`` the factorial is approximated using the gamma
function:
.. math:: n! = \\Gamma(n+1)
Examples
--------
>>> from scipy.special import factorial
>>> arr = np.array([3, 4, 5])
>>> factorial(arr, exact=False)
array([ 6., 24., 120.])
>>> factorial(arr, exact=True)
array([ 6, 24, 120])
>>> factorial(5, exact=True)
120L
"""
if exact:
if np.ndim(n) == 0:
return 0 if n < 0 else math.factorial(n)
else:
n = asarray(n)
un = np.unique(n).astype(object)
# Convert to object array of long ints if np.int can't handle size
if un[-1] > 20:
dt = object
elif un[-1] > 12:
dt = np.int64
else:
dt = np.int
out = np.empty_like(n, dtype=dt)
# Handle invalid/trivial values
un = un[un > 1]
out[n < 2] = 1
out[n < 0] = 0
# Calculate products of each range of numbers
if un.size:
val = math.factorial(un[0])
out[n == un[0]] = val
for i in xrange(len(un) - 1):
prev = un[i] + 1
current = un[i + 1]
val *= _range_prod(prev, current)
out[n == current] = val
return out
else:
n = asarray(n)
vals = gamma(n + 1)
return where(n >= 0, vals, 0)
def factorial2(n, exact=False):
"""Double factorial.
This is the factorial with every second value skipped. E.g., ``7!! = 7 * 5
* 3 * 1``. It can be approximated numerically as::
        n!! = special.gamma(n/2+1)*2**((n+1)/2)/sqrt(pi)  n odd
= 2**(n/2) * (n/2)! n even
Parameters
----------
n : int or array_like
Calculate ``n!!``. Arrays are only supported with `exact` set
to False. If ``n < 0``, the return value is 0.
exact : bool, optional
The result can be approximated rapidly using the gamma-formula
above (default). If `exact` is set to True, calculate the
answer exactly using integer arithmetic.
Returns
-------
nff : float or int
Double factorial of `n`, as an int or a float depending on
`exact`.
Examples
--------
>>> from scipy.special import factorial2
>>> factorial2(7, exact=False)
array(105.00000000000001)
>>> factorial2(7, exact=True)
105L
"""
if exact:
if n < -1:
return 0
if n <= 0:
return 1
val = 1
for k in xrange(n, 0, -2):
val *= k
return val
else:
n = asarray(n)
vals = zeros(n.shape, 'd')
cond1 = (n % 2) & (n >= -1)
cond2 = (1-(n % 2)) & (n >= -1)
oddn = extract(cond1, n)
evenn = extract(cond2, n)
nd2o = oddn / 2.0
nd2e = evenn / 2.0
place(vals, cond1, gamma(nd2o + 1) / sqrt(pi) * pow(2.0, nd2o + 0.5))
place(vals, cond2, gamma(nd2e + 1) * pow(2.0, nd2e))
return vals
def factorialk(n, k, exact=True):
"""Multifactorial of n of order k, n(!!...!).
This is the multifactorial of n skipping k values. For example,
factorialk(17, 4) = 17!!!! = 17 * 13 * 9 * 5 * 1
In particular, for any integer ``n``, we have
factorialk(n, 1) = factorial(n)
factorialk(n, 2) = factorial2(n)
Parameters
----------
n : int
Calculate multifactorial. If `n` < 0, the return value is 0.
k : int
Order of multifactorial.
exact : bool, optional
If exact is set to True, calculate the answer exactly using
integer arithmetic.
Returns
-------
val : int
Multifactorial of `n`.
Raises
------
NotImplementedError
Raises when exact is False
Examples
--------
>>> from scipy.special import factorialk
>>> factorialk(5, 1, exact=True)
120L
>>> factorialk(5, 3, exact=True)
10L
"""
if exact:
if n < 1-k:
return 0
if n <= 0:
return 1
val = 1
for j in xrange(n, 0, -k):
val = val*j
return val
else:
raise NotImplementedError
def zeta(x, q=None, out=None):
r"""
Riemann or Hurwitz zeta function.
Parameters
----------
x : array_like of float
Input data, must be real
q : array_like of float, optional
Input data, must be real. Defaults to Riemann zeta.
out : ndarray, optional
Output array for the computed values.
Notes
-----
The two-argument version is the Hurwitz zeta function:
.. math:: \zeta(x, q) = \sum_{k=0}^{\infty} \frac{1}{(k + q)^x},
    The Riemann zeta function corresponds to the case ``q = 1``.
See also
--------
zetac
"""
if q is None:
q = 1
return _zeta(x, q, out)
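# Illustrative usage (hedged sketch, not part of the original source):
# zeta(2) = pi**2/6 for the Riemann case, and q = 1 in the two-argument
# (Hurwitz) form recovers it.
#
#     import numpy as np
#     from scipy.special import zeta
#     assert np.allclose(zeta(2), np.pi**2 / 6)
#     assert np.allclose(zeta(2, 1), zeta(2))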
"""module for performing calculations on Spectral Energy Distributions (SEDs)
(c) 2007-2013 Matt Hilton
U{http://astlib.sourceforge.net}
This module provides classes for manipulating SEDs, in particular the Bruzual &
Charlot 2003, Maraston 2005, and Percival et al 2009 stellar population
synthesis models are currently supported. Functions are provided for
calculating the evolution of colours and magnitudes in these models with
redshift etc., and for fitting broadband photometry using these models.
@var VEGA: The SED of Vega, used for calculation of magnitudes on the Vega system.
@type VEGA: L{SED} object
@var AB: Flat spectrum SED, used for calculation of magnitudes on the AB system.
@type AB: L{SED} object
@var SOL: The SED of the Sun.
@type SOL: L{SED} object
"""
#-----------------------------------------------------------------------------
import numpy
import math
import operator
try:
from scipy import interpolate
from scipy import ndimage
except ImportError:
print("WARNING: astSED: failed to import scipy modules - some functions "
"will not work.")
import astLib
from astLib import astCalc
import os
try:
import matplotlib
from matplotlib import pylab
matplotlib.interactive(False)
except ImportError:
print("WARNING: astSED: failed to import matplotlib - some functions will "
"not work.")
import glob
#-----------------------------------------------------------------------------
class Passband:
"""This class describes a filter transmission curve. Passband objects are
    created by loading data from text files containing wavelength in
angstroms in the first column, relative transmission efficiency in the
second column (whitespace delimited). For example, to create a Passband
object for the 2MASS J filter:
passband=astSED.Passband("J_2MASS.res")
where "J_2MASS.res" is a file in the current working directory that
describes the filter.
Wavelength units can be specified as 'angstroms', 'nanometres' or
'microns'; if either of the latter, they will be converted to angstroms.
"""
def __init__(self, fileName, normalise=True, inputUnits='angstroms'):
inFile = open(fileName, "r")
lines = inFile.readlines()
wavelength = []
transmission = []
for line in lines:
if line[0] != "#" and len(line) > 3:
bits = line.split()
transmission.append(float(bits[1]))
wavelength.append(float(bits[0]))
self.wavelength = numpy.array(wavelength)
self.transmission = numpy.array(transmission)
if inputUnits == 'angstroms':
pass
elif inputUnits == 'nanometres':
self.wavelength = self.wavelength * 10.0
elif inputUnits == 'microns':
self.wavelength = self.wavelength * 10000.0
elif inputUnits == 'mm':
self.wavelength = self.wavelength * 1e7
elif inputUnits == 'GHz':
self.wavelength = 3e8 / (self.wavelength * 1e9)
self.wavelength = self.wavelength * 1e10
else:
raise Exception("didn't understand passband input units")
# Sort into ascending order of wavelength otherwise normalisation will be wrong
merged = numpy.array([self.wavelength, self.transmission]).transpose()
sortedMerged = numpy.array(sorted(merged, key=operator.itemgetter(0)))
self.wavelength = sortedMerged[:, 0]
self.transmission = sortedMerged[:, 1]
if normalise:
self.transmission = self.transmission / numpy.trapz(
self.transmission, self.wavelength)
# Store a ready-to-go interpolation object to speed calculation of fluxes up
self.interpolator = interpolate.interp1d(self.wavelength,
self.transmission,
kind='linear')
def asList(self):
"""Returns a two dimensional list of [wavelength, transmission],
suitable for plotting by gnuplot.
@rtype: list
@return: list in format [wavelength, transmission]
"""
listData = []
for l, f in zip(self.wavelength, self.transmission):
listData.append([l, f])
return listData
def rescale(self, maxTransmission):
"""Rescales the passband so that maximum value of the transmission is
equal to maxTransmission. Useful for plotting.
@type maxTransmission: float
@param maxTransmission: maximum value of rescaled transmission curve
"""
self.transmission = self.transmission * (maxTransmission /
self.transmission.max())
def plot(self, xmin='min', xmax='max', maxTransmission=None):
"""Plots the passband, rescaling the maximum of the tranmission curve
to maxTransmission if required.
@type xmin: float or 'min'
@param xmin: minimum of the wavelength range of the plot
@type xmax: float or 'max'
@param xmax: maximum of the wavelength range of the plot
@type maxTransmission: float
@param maxTransmission: maximum value of rescaled transmission curve
"""
if maxTransmission is not None:
self.rescale(maxTransmission)
pylab.matplotlib.interactive(True)
pylab.plot(self.wavelength, self.transmission)
if xmin == 'min':
xmin = self.wavelength.min()
if xmax == 'max':
xmax = self.wavelength.max()
pylab.xlim(xmin, xmax)
pylab.xlabel("Wavelength")
pylab.ylabel("Relative Flux")
def effectiveWavelength(self):
"""Calculates effective wavelength for the passband. This is the same
as equation (3) of Carter et al. 2009.
@rtype: float
@return: effective wavelength of the passband, in Angstroms
"""
a = numpy.trapz(self.transmission * self.wavelength, self.wavelength)
b = numpy.trapz(self.transmission / self.wavelength, self.wavelength)
effWavelength = numpy.sqrt(a / b)
return effWavelength
#-----------------------------------------------------------------------------
class TopHatPassband(Passband):
"""This class generates a passband with a top hat response between the
given wavelengths.
"""
def __init__(self, wavelengthMin, wavelengthMax, normalise=True):
"""Generates a passband object with top hat response between
wavelengthMin, wavelengthMax. Units are assumed to be Angstroms.
@type wavelengthMin: float
@param wavelengthMin: minimum of the wavelength range of the passband
@type wavelengthMax: float
@param wavelengthMax: maximum of the wavelength range of the passband
@type normalise: bool
@param normalise: if True, scale such that total area under the
passband over the wavelength
range is 1.
"""
self.wavelength = numpy.arange(
wavelengthMin, wavelengthMax + 10,
10, dtype=float)
self.transmission = numpy.ones(self.wavelength.shape, dtype=float)
if normalise:
self.transmission = self.transmission / numpy.trapz(
self.transmission, self.wavelength)
# Store a ready-to-go interpolation object to speed calculation of fluxes up
self.interpolator = interpolate.interp1d(self.wavelength,
self.transmission,
kind='linear')
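# Illustrative usage (hedged sketch, not part of the original astLib source):
# a top hat passband needs no filter file, which makes it handy for quick
# synthetic photometry tests.
#
#     from astLib import astSED
#     band = astSED.TopHatPassband(4000.0, 5000.0)    # Angstroms
#     print(band.effectiveWavelength())               # roughly 4490 Angstroms here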
#-----------------------------------------------------------------------------
class SED:
"""This class describes a Spectral Energy Distribution (SED).
To create a SED object, lists (or numpy arrays) of wavelength and relative
flux must be provided. The SED can optionally be redshifted. The wavelength
units of SEDs are assumed to be Angstroms - flux calculations using
Passband and SED objects specified with different wavelength units will be
incorrect.
The L{StellarPopulation} class (and derivatives) can be used to extract
SEDs for specified ages from e.g. the Bruzual & Charlot 2003 or Maraston
2005 models.
"""
def __init__(self,
wavelength=[],
flux=[],
z=0.0,
ageGyr=None,
normalise=False,
label=None):
# We keep a copy of the wavelength, flux at z = 0, as it's more robust
# to copy that to self.wavelength, flux and redshift it, rather than
# repeatedly redshifting the same arrays back and forth
self.z0wavelength = numpy.array(wavelength)
self.z0flux = numpy.array(flux)
self.wavelength = numpy.array(wavelength)
self.flux = numpy.array(flux)
self.z = z
self.label = label # plain text label, handy for using in photo-z codes
# Store the intrinsic (i.e. unextincted) flux in case we change
# extinction
self.EBMinusV = 0.0
self.intrinsic_z0flux = numpy.array(flux)
if normalise:
self.normalise()
if z != 0.0:
self.redshift(z)
self.ageGyr = ageGyr
def copy(self):
"""Copies the SED, returning a new SED object
@rtype: L{SED} object
@return: SED
"""
newSED = SED(wavelength=self.z0wavelength,
flux=self.z0flux,
z=self.z,
ageGyr=self.ageGyr,
normalise=False,
label=self.label)
return newSED
def loadFromFile(self, fileName):
"""Loads SED from a white space delimited file in the format
wavelength, flux. Lines beginning with # are ignored.
@type fileName: string
@param fileName: path to file containing wavelength, flux data
"""
inFile = open(fileName, "r")
lines = inFile.readlines()
inFile.close()
wavelength = []
flux = []
for line in lines:
if line[0] != "#" and len(line) > 3:
bits = line.split()
wavelength.append(float(bits[0]))
flux.append(float(bits[1]))
# Sort SED so wavelength is in ascending order
if wavelength[0] > wavelength[-1]:
wavelength.reverse()
flux.reverse()
self.z0wavelength = numpy.array(wavelength)
self.z0flux = numpy.array(flux)
self.wavelength = numpy.array(wavelength)
self.flux = numpy.array(flux)
def writeToFile(self, fileName):
"""Writes SED to a white space delimited file in the format wavelength,
flux.
@type fileName: string
@param fileName: path to file
"""
outFile = open(fileName, "w")
for l, f in zip(self.wavelength, self.flux):
outFile.write(str(l) + " " + str(f) + "\n")
outFile.close()
def asList(self):
"""Returns a two dimensional list of [wavelength, flux], suitable for
plotting by gnuplot.
@rtype: list
@return: list in format [wavelength, flux]
"""
listData = []
for l, f in zip(self.wavelength, self.flux):
listData.append([l, f])
return listData
def plot(self, xmin='min', xmax='max'):
"""Produces a simple (wavelength, flux) plot of the SED.
@type xmin: float or 'min'
@param xmin: minimum of the wavelength range of the plot
@type xmax: float or 'max'
@param xmax: maximum of the wavelength range of the plot
"""
pylab.matplotlib.interactive(True)
pylab.plot(self.wavelength, self.flux)
if xmin == 'min':
xmin = self.wavelength.min()
if xmax == 'max':
xmax = self.wavelength.max()
# Sensible y scale
plotMask = numpy.logical_and(
numpy.greater(self.wavelength, xmin), numpy.less(self.wavelength,
xmax))
plotMax = self.flux[plotMask].max()
pylab.ylim(0, plotMax * 1.1)
pylab.xlim(xmin, xmax)
pylab.xlabel("Wavelength")
pylab.ylabel("Relative Flux")
def integrate(self, wavelengthMin='min', wavelengthMax='max'):
"""Calculates flux in SED within given wavelength range.
@type wavelengthMin: float or 'min'
@param wavelengthMin: minimum of the wavelength range
@type wavelengthMax: float or 'max'
@param wavelengthMax: maximum of the wavelength range
@rtype: float
@return: relative flux
"""
if wavelengthMin == 'min':
wavelengthMin = self.wavelength.min()
if wavelengthMax == 'max':
wavelengthMax = self.wavelength.max()
mask = numpy.logical_and(numpy.greater(self.wavelength, wavelengthMin),
numpy.less(self.wavelength, wavelengthMax))
flux = numpy.trapz(self.flux[mask], self.wavelength[mask])
return flux
def smooth(self, smoothPix):
"""Smooths SED.flux with a uniform (boxcar) filter of width smoothPix.
Cannot be undone.
@type smoothPix: int
@param smoothPix: size of uniform filter applied to SED, in pixels
"""
smoothed = ndimage.uniform_filter1d(self.flux, smoothPix)
self.flux = smoothed
def redshift(self, z):
"""Redshifts the SED to redshift z.
@type z: float
@param z: redshift
"""
# We have to conserve energy so the area under the redshifted SED has
# to be equal to the area under the unredshifted SED, otherwise
# magnitude calculations will be wrong when comparing SEDs at different
# zs
self.wavelength = numpy.zeros(self.z0wavelength.shape[0])
self.flux = numpy.zeros(self.z0flux.shape[0])
self.wavelength = self.wavelength + self.z0wavelength
self.flux = self.flux + self.z0flux
z0TotalFlux = numpy.trapz(self.z0wavelength, self.z0flux)
self.wavelength = self.wavelength * (1.0 + z)
zTotalFlux = numpy.trapz(self.wavelength, self.flux)
self.flux = self.flux * (z0TotalFlux / zTotalFlux)
self.z = z
def normalise(self, minWavelength='min', maxWavelength='max'):
"""Normalises the SED such that the area under the specified wavelength
range is equal to 1.
@type minWavelength: float or 'min'
@param minWavelength: minimum wavelength of range over which to
normalise SED
@type maxWavelength: float or 'max'
@param maxWavelength: maximum wavelength of range over which to
normalise SED
"""
if minWavelength == 'min':
minWavelength = self.wavelength.min()
if maxWavelength == 'max':
maxWavelength = self.wavelength.max()
lowCut = numpy.greater(self.wavelength, minWavelength)
highCut = numpy.less(self.wavelength, maxWavelength)
totalCut = numpy.logical_and(lowCut, highCut)
sedFluxSlice = self.flux[totalCut]
sedWavelengthSlice = self.wavelength[totalCut]
self.flux = self.flux / numpy.trapz(
abs(sedFluxSlice), sedWavelengthSlice) # self.wavelength)
def normaliseToMag(self, ABMag, passband):
"""Normalises the SED to match the flux equivalent to the given AB
magnitude in the given passband.
@type ABMag: float
@param ABMag: AB magnitude to which the SED is to be normalised at the
given passband
@type passband: an L{Passband} object
@param passband: passband at which normalisation to AB magnitude is
calculated
"""
magFlux = mag2Flux(ABMag, 0.0, passband)
sedFlux = self.calcFlux(passband)
norm = magFlux[0] / sedFlux
self.flux = self.flux * norm
self.z0flux = self.z0flux * norm
def matchFlux(self, matchSED, minWavelength, maxWavelength):
"""Matches the flux in the wavelength range given by minWavelength,
maxWavelength to the flux in the same region in matchSED. Useful for
plotting purposes.
@type matchSED: L{SED} object
@param matchSED: SED to match flux to
@type minWavelength: float
@param minWavelength: minimum of range in which to match flux of
current SED to matchSED
@type maxWavelength: float
@param maxWavelength: maximum of range in which to match flux of
current SED to matchSED
"""
interpMatch = interpolate.interp1d(matchSED.wavelength,
matchSED.flux,
kind='linear')
interpSelf = interpolate.interp1d(self.wavelength,
self.flux,
kind='linear')
wavelengthRange = numpy.arange(minWavelength, maxWavelength, 5.0)
matchFlux = numpy.trapz(interpMatch(wavelengthRange), wavelengthRange)
selfFlux = numpy.trapz(interpSelf(wavelengthRange), wavelengthRange)
self.flux = self.flux * (matchFlux / selfFlux)
def calcFlux(self, passband):
"""Calculates flux in the given passband.
@type passband: L{Passband} object
@param passband: filter passband through which to calculate the flux
from the SED
@rtype: float
@return: flux
"""
lowCut = numpy.greater(self.wavelength, passband.wavelength.min())
highCut = numpy.less(self.wavelength, passband.wavelength.max())
totalCut = numpy.logical_and(lowCut, highCut)
sedFluxSlice = self.flux[totalCut]
sedWavelengthSlice = self.wavelength[totalCut]
# Use linear interpolation to rebin the passband to the same dimensions as the
# part of the SED we're interested in
sedInBand = passband.interpolator(sedWavelengthSlice) * sedFluxSlice
totalFlux = numpy.trapz(sedInBand * sedWavelengthSlice,
sedWavelengthSlice)
totalFlux = totalFlux /\
numpy.trapz(passband.interpolator(sedWavelengthSlice) *
sedWavelengthSlice, sedWavelengthSlice)
return totalFlux
def calcMag(self, passband, addDistanceModulus=True, magType="Vega"):
"""Calculates magnitude in the given passband. If addDistanceModulus ==
True, then the distance modulus (5.0*log10*(dl*1e5), where dl is the
luminosity distance in Mpc at the redshift of the L{SED}) is added.
@type passband: L{Passband} object
@param passband: filter passband through which to calculate the
magnitude from the SED
@type addDistanceModulus: bool
@param addDistanceModulus: if True, adds 5.0*log10*(dl*1e5) to the mag
returned, where dl is the luminosity distance (Mpc) corresponding to
the SED z
@type magType: string
@param magType: either "Vega" or "AB"
@rtype: float
@return: magnitude through the given passband on the specified
magnitude system
"""
f1 = self.calcFlux(passband)
if magType == "Vega":
f2 = VEGA.calcFlux(passband)
elif magType == "AB":
f2 = AB.calcFlux(passband)
mag = -2.5 * math.log10(f1 / f2)
if magType == "Vega":
# Add 0.026 because Vega has V=0.026 (e.g. Bohlin & Gilliland 2004)
mag += 0.026
if self.z > 0.0 and addDistanceModulus:
appMag = 5.0 * math.log10(astCalc.dl(self.z) * 1e5) + mag
else:
appMag = mag
return appMag
def calcColour(self, passband1, passband2, magType="Vega"):
"""Calculates the colour passband1-passband2.
@type passband1: L{Passband} object
@param passband1: filter passband through which to calculate the first
magnitude
@type passband2: L{Passband} object
        @param passband2: filter passband through which to calculate the second
magnitude
@type magType: string
@param magType: either "Vega" or "AB"
@rtype: float
@return: colour defined by passband1 - passband2 on the specified
magnitude system
"""
mag1 = self.calcMag(passband1,
magType=magType,
addDistanceModulus=True)
mag2 = self.calcMag(passband2,
magType=magType,
addDistanceModulus=True)
colour = mag1 - mag2
return colour
def getSEDDict(self, passbands):
"""This is a convenience function for pulling out fluxes from a SED for
a given set of passbands
in the same format as made by L{mags2SEDDict} - designed to make
fitting code simpler.
@type passbands: list of L{Passband} objects
@param passbands: list of passbands through which fluxes will be
calculated
"""
flux = []
wavelength = []
for p in passbands:
flux.append(self.calcFlux(p))
wavelength.append(p.effectiveWavelength())
SEDDict = {}
SEDDict['flux'] = numpy.array(flux)
SEDDict['wavelength'] = numpy.array(wavelength)
return SEDDict
def extinctionCalzetti(self, EBMinusV):
"""Applies the Calzetti et al. 2000 (ApJ, 533, 682) extinction law to
the SED with the given E(B-V) amount of extinction. R_v' = 4.05 is
assumed (see equation (5) of Calzetti et al.).
@type EBMinusV: float
@param EBMinusV: extinction E(B-V), in magnitudes
"""
self.EBMinusV = EBMinusV
# All done in rest frame
self.z0flux = self.intrinsic_z0flux
# Allow us to set EBMinusV == 0 to turn extinction off
if EBMinusV > 0:
# Note that EBMinusV is assumed to be Es as in equations (2) - (5)
# Note here wavelength units have to be microns for constants to
# make sense
RvPrime = 4.05 # equation (5) of Calzetti et al. 2000
shortWavelengthMask =\
numpy.logical_and(numpy.greater_equal(self.z0wavelength, 1200),
numpy.less(self.z0wavelength, 6300))
longWavelengthMask =\
numpy.logical_and(numpy.greater_equal(self.z0wavelength, 6300),
numpy.less_equal(self.z0wavelength, 22000))
wavelengthMicrons = numpy.array(self.z0wavelength / 10000.0,
dtype=numpy.float64)
kPrime = numpy.zeros(self.z0wavelength.shape[0],
dtype=numpy.float64)
kPrimeLong = (2.659 * (-1.857 + 1.040 / wavelengthMicrons)) +\
RvPrime
kPrimeShort = (2.659 * (-2.156 + 1.509 / wavelengthMicrons -
0.198 / wavelengthMicrons**2 + 0.011 /
wavelengthMicrons**3)) + RvPrime
kPrime[longWavelengthMask] = kPrimeLong[longWavelengthMask]
kPrime[shortWavelengthMask] = kPrimeShort[shortWavelengthMask]
# Here we extrapolate kPrime in similar way to what HYPERZ does
# Short wavelengths
try:
interpolator = interpolate.interp1d(self.z0wavelength,
kPrimeShort,
kind='linear')
slope = (interpolator(1100.0) - interpolator(1200.0)) / (
1100.0 - 1200.0)
intercept = interpolator(1200.0) - (slope * 1200.0)
mask = numpy.less(self.z0wavelength, 1200.0)
kPrime[mask] = slope * self.z0wavelength[mask] + intercept
# Long wavelengths
interpolator = interpolate.interp1d(self.z0wavelength,
kPrimeLong,
kind='linear')
slope = (interpolator(21900.0) - interpolator(22000.0)) / (
21900.0 - 22000.0)
intercept = interpolator(21900.0) - (slope * 21900.0)
mask = numpy.greater(self.z0wavelength, 22000.0)
kPrime[mask] = slope * self.z0wavelength[mask] + intercept
except:
raise Exception("This SED has a wavelength range that doesn't "
"cover ~1200-22000 Angstroms")
# Never let go negative
kPrime[numpy.less_equal(kPrime, 0.0)] = 1e-6
reddening = numpy.power(10, 0.4 * EBMinusV * kPrime)
self.z0flux = self.z0flux / reddening
self.redshift(self.z)
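# Illustrative usage (hedged sketch, not part of the original astLib source):
# build a flat SED by hand and measure its flux through a top hat passband.
# Wavelength units are Angstroms throughout astSED.
#
#     import numpy
#     from astLib import astSED
#     wavelength = numpy.arange(3000.0, 10000.0, 10.0)
#     flux = numpy.ones(wavelength.shape)
#     sed = astSED.SED(wavelength=wavelength, flux=flux, z=0.0)
#     band = astSED.TopHatPassband(4000.0, 5000.0)
#     print(sed.calcFlux(band))
#     sed.redshift(0.5)                     # conserves total energy, see redshift()
#     print(sed.calcFlux(band))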
#-----------------------------------------------------------------------------
class VegaSED(SED):
"""This class stores the SED of Vega, used for calculation of magnitudes on the Vega system.
The Vega SED used is taken from Bohlin 2007
(http://adsabs.harvard.edu/abs/2007ASPC..364..315B), and is available from
the STScI CALSPEC library
(http://www.stsci.edu/hst/observatory/cdbs/calspec.html).
"""
def __init__(self, normalise=False):
VEGA_SED_PATH = astLib.__path__[
0] + os.path.sep + "data" + os.path.sep + "bohlin2006_Vega.sed" # from HST CALSPEC
inFile = open(VEGA_SED_PATH, "r")
lines = inFile.readlines()
wavelength = []
flux = []
for line in lines:
if line[0] != "#" and len(line) > 3:
bits = line.split()
flux.append(float(bits[1]))
wavelength.append(float(bits[0]))
self.wavelength = numpy.array(wavelength)
self.flux = numpy.array(flux, dtype=numpy.float64)
# We may want to redshift reference SEDs to calculate rest-frame colors
# from SEDs at different zs
self.z0wavelength = numpy.array(wavelength)
self.z0flux = numpy.array(flux, dtype=numpy.float64)
self.z = 0.0
#if normalise == True:
#self.flux=self.flux/numpy.trapz(self.flux, self.wavelength)
#self.z0flux=self.z0flux/numpy.trapz(self.z0flux, self.z0wavelength)
#-----------------------------------------------------------------------------
class StellarPopulation:
"""This class describes a stellar population model, either a Simple Stellar
Population (SSP) or a Composite Stellar Population (CSP), such as the
models of Bruzual & Charlot 2003 or Maraston 2005.
The constructor for this class can be used for generic SSPs or CSPs stored
in white space delimited text files, containing columns for age,
wavelength, and flux. Columns are counted from 0 ... n. Lines starting
with # are ignored.
The classes L{M05Model} (for Maraston 2005 models), L{BC03Model} (for
Bruzual & Charlot 2003 models), and L{P09Model} (for Percival et al. 2009
models) are derived from this class. The only difference between them is
the code used to load in the model data.
"""
def __init__(self,
fileName,
ageColumn=0,
wavelengthColumn=1,
fluxColumn=2):
inFile = open(fileName, "r")
lines = inFile.readlines()
inFile.close()
self.fileName = fileName
# Extract a list of model ages and valid wavelengths from the file
self.ages = []
self.wavelengths = []
for line in lines:
if line[0] != "#" and len(line) > 3:
bits = line.split()
age = float(bits[ageColumn])
wavelength = float(bits[wavelengthColumn])
if age not in self.ages:
self.ages.append(age)
if wavelength not in self.wavelengths:
self.wavelengths.append(wavelength)
        # Construct a grid of flux - rows correspond to age, columns to wavelength
self.fluxGrid = numpy.zeros([len(self.ages), len(self.wavelengths)])
for line in lines:
if line[0] != "#" and len(line) > 3:
bits = line.split()
sedAge = float(bits[ageColumn])
sedWavelength = float(bits[wavelengthColumn])
sedFlux = float(bits[fluxColumn])
row = self.ages.index(sedAge)
column = self.wavelengths.index(sedWavelength)
self.fluxGrid[row][column] = sedFlux
def getSED(self, ageGyr, z=0.0, normalise=False, label=None):
"""Extract a SED for given age. Do linear interpolation between models
if necessary.
@type ageGyr: float
@param ageGyr: age of the SED in Gyr
@type z: float
@param z: redshift the SED from z = 0 to z = z
@type normalise: bool
@param normalise: normalise the SED to have area 1
@rtype: L{SED} object
@return: SED
"""
if ageGyr in self.ages:
flux = self.fluxGrid[self.ages.index(ageGyr)]
sed = SED(self.wavelengths,
flux,
z=z,
normalise=normalise,
label=label)
return sed
else:
# Use interpolation, iterating over each wavelength column
flux = []
for i in range(len(self.wavelengths)):
interpolator = interpolate.interp1d(self.ages,
self.fluxGrid[:, i],
kind='linear')
sedFlux = interpolator(ageGyr)
flux.append(sedFlux)
sed = SED(self.wavelengths,
flux,
z=z,
normalise=normalise,
label=label)
return sed
def getColourEvolution(self,
passband1,
passband2,
zFormation,
zStepSize=0.05,
magType="Vega"):
"""Calculates the evolution of the colour observed through passband1 -
passband2 for the StellarPopulation with redshift, from z = 0 to z =
zFormation.
@type passband1: L{Passband} object
@param passband1: filter passband through which to calculate the first
magnitude
@type passband2: L{Passband} object
@param passband2: filter passband through which to calculate the second
magnitude
@type zFormation: float
@param zFormation: formation redshift of the StellarPopulation
@type zStepSize: float
@param zStepSize: size of interval in z at which to calculate model
colours
@type magType: string
@param magType: either "Vega" or "AB"
@rtype: dictionary
@return: dictionary of numpy.arrays in format {'z', 'colour'}
"""
zSteps = int(math.ceil(zFormation / zStepSize))
zData = []
colourData = []
for i in range(1, zSteps):
zc = i * zStepSize
age = astCalc.tl(zFormation) - astCalc.tl(zc)
sed = self.getSED(age, z=zc)
colour = sed.calcColour(passband1, passband2, magType=magType)
zData.append(zc)
colourData.append(colour)
zData = numpy.array(zData)
colourData = numpy.array(colourData)
return {'z': zData, 'colour': colourData}
def getMagEvolution(self,
passband,
magNormalisation,
zNormalisation,
zFormation,
zStepSize=0.05,
onePlusZSteps=False,
magType="Vega"):
"""Calculates the evolution with redshift (from z = 0 to z =
zFormation) of apparent magnitude in the observed frame through the
passband for the StellarPopulation, normalised to magNormalisation
(apparent) at z = zNormalisation.
@type passband: L{Passband} object
@param passband: filter passband through which to calculate the
magnitude
@type magNormalisation: float
@param magNormalisation: sets the apparent magnitude of the SED at
zNormalisation
@type zNormalisation: float
@param zNormalisation: the redshift at which the magnitude
normalisation is carried out
@type zFormation: float
@param zFormation: formation redshift of the StellarPopulation
@type zStepSize: float
@param zStepSize: size of interval in z at which to calculate model
magnitudes
@type onePlusZSteps: bool
@param onePlusZSteps: if True, zSteps are (1+z)*zStepSize, otherwise
zSteps are linear
@type magType: string
@param magType: either "Vega" or "AB"
@rtype: dictionary
@return: dictionary of numpy.arrays in format {'z', 'mag'}
"""
# Count upwards in z steps as interpolation doesn't work if array ordered z decreasing
zSteps = int(math.ceil(zFormation / zStepSize))
zData = []
magData = []
absMagData = []
zc0 = 0.0
for i in range(1, zSteps):
if not onePlusZSteps:
zc = i * zStepSize
else:
zc = zc0 + (1 + zc0) * zStepSize
zc0 = zc
if zc >= zFormation:
break
age = astCalc.tl(zFormation) - astCalc.tl(zc)
sed = self.getSED(age, z=zc)
mag = sed.calcMag(passband,
magType=magType,
addDistanceModulus=True)
zData.append(zc)
magData.append(mag)
absMagData.append(sed.calcMag(passband, addDistanceModulus=False))
zData = numpy.array(zData)
magData = numpy.array(magData)
# Do the normalisation
interpolator = interpolate.interp1d(zData, magData, kind='linear')
modelNormMag = interpolator(zNormalisation)
normConstant = magNormalisation - modelNormMag
magData = magData + normConstant
return {'z': zData, 'mag': magData}
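    # Illustrative usage sketch (comment only; the kBand passband and the
    # normalisation values are assumptions for the example):
    #
    #   evo = m05csp.getMagEvolution(kBand, magNormalisation=17.0,
    #                                zNormalisation=0.1, zFormation=3.0)
    #   # evo['z'] and evo['mag'] give the apparent magnitude track with redshift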
def calcEvolutionCorrection(self,
zFrom,
zTo,
zFormation,
passband,
magType="Vega"):
"""Calculates the evolution correction in magnitudes in the rest frame
through the passband from redshift zFrom to redshift zTo, where the
stellarPopulation is assumed to be formed at redshift zFormation.
@type zFrom: float
@param zFrom: redshift to evolution correct from
@type zTo: float
@param zTo: redshift to evolution correct to
@type zFormation: float
@param zFormation: formation redshift of the StellarPopulation
@type passband: L{Passband} object
@param passband: filter passband through which to calculate magnitude
@type magType: string
@param magType: either "Vega" or "AB"
@rtype: float
@return: evolution correction in magnitudes in the rest frame
"""
ageFrom = astCalc.tl(zFormation) - astCalc.tl(zFrom)
ageTo = astCalc.tl(zFormation) - astCalc.tl(zTo)
fromSED = self.getSED(ageFrom)
toSED = self.getSED(ageTo)
fromMag = fromSED.calcMag(passband,
magType=magType,
addDistanceModulus=False)
toMag = toSED.calcMag(passband,
magType=magType,
addDistanceModulus=False)
return fromMag - toMag
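    # Illustrative usage sketch (comment only): the rest frame evolution
    # correction from z = 0.5 to z = 0.0, for a population formed at z = 3,
    # observed through an assumed rBand passband, would be
    #
    #   eCorr = m05csp.calcEvolutionCorrection(0.5, 0.0, 3.0, rBand)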
#-----------------------------------------------------------------------------
class M05Model(StellarPopulation):
"""This class describes a Maraston 2005 stellar population model. To load a
composite stellar population model (CSP) for a tau = 0.1 Gyr burst of star
formation, solar metallicity, Salpeter IMF:
m05csp = astSED.M05Model(M05_DIR+"/csp_e_0.10_z02_salp.sed_agb")
where M05_DIR is set to point to the directory where the Maraston 2005
models are stored on your system.
The file format of the Maraston 2005 simple stellar population (SSP) models
is different to the file format used for the CSPs, and this needs to be
specified using the fileType parameter. To load a SSP with solar
metallicity, red horizontal branch morphology:
m05ssp = astSED.M05Model(M05_DIR+"/sed.ssz002.rhb", fileType = "ssp")
The wavelength units of SEDs from M05 models are Angstroms, with flux in
units of erg/s/Angstrom.
"""
def __init__(self, fileName, fileType="csp"):
self.modelFamily = "M05"
inFile = open(fileName, "r")
lines = inFile.readlines()
inFile.close()
self.fileName = fileName
if fileType == "csp":
ageColumn = 0
wavelengthColumn = 1
fluxColumn = 2
elif fileType == "ssp":
ageColumn = 0
wavelengthColumn = 2
fluxColumn = 3
else:
raise Exception("fileType must be 'ssp' or 'csp'")
# Extract a list of model ages and valid wavelengths from the file
self.ages = []
self.wavelengths = []
for line in lines:
if line[0] != "#" and len(line) > 3:
bits = line.split()
age = float(bits[ageColumn])
wavelength = float(bits[wavelengthColumn])
if age not in self.ages:
self.ages.append(age)
if wavelength not in self.wavelengths:
self.wavelengths.append(wavelength)
# Construct a grid of flux - rows correspond to age, columns to wavelength
self.fluxGrid = numpy.zeros([len(self.ages), len(self.wavelengths)])
for line in lines:
if line[0] != "#" and len(line) > 3:
bits = line.split()
sedAge = float(bits[ageColumn])
sedWavelength = float(bits[wavelengthColumn])
sedFlux = float(bits[fluxColumn])
row = self.ages.index(sedAge)
column = self.wavelengths.index(sedWavelength)
self.fluxGrid[row][column] = sedFlux
#-----------------------------------------------------------------------------
class BC03Model(StellarPopulation):
"""This class describes a Bruzual & Charlot 2003 stellar population model,
extracted from a GALAXEV .ised file using the galaxevpl program that is
included in GALAXEV. The file format is white space delimited, with
wavelength in the first column. Subsequent columns contain the model fluxes
for SEDs of different ages, as specified when running galaxevpl. The age
corresponding to each flux column is taken from the comment line beginning
"# Age (yr)", and is converted to Gyr.
For example, to load a tau = 0.1 Gyr burst of star formation, solar
metallicity, Salpeter IMF model stored in a file (created by galaxevpl)
called "csp_lr_solar_0p1Gyr.136":
bc03model = BC03Model("csp_lr_solar_0p1Gyr.136")
The wavelength units of SEDs from BC03 models are Angstroms. Flux is
converted into units of erg/s/Angstrom (the units in the files output by
galaxevpl are LSun/Angstrom).
"""
def __init__(self, fileName):
self.modelFamily = "BC03"
self.fileName = fileName
inFile = open(fileName, "r")
lines = inFile.readlines()
inFile.close()
# Extract a list of model ages - BC03 ages are in years, so convert to Gyr
self.ages = []
for line in lines:
if line.find("# Age (yr)") != -1:
rawAges = line[line.find("# Age (yr)") + 10:].split()
for age in rawAges:
self.ages.append(float(age) / 1e9)
# Extract a list of valid wavelengths from the file
# If we have many ages in the file, this is more complicated...
lambdaLinesCount = 0
startFluxDataLine = None
for i in range(len(lines)):
line = lines[i]
if "# Lambda(A)" in line:
lambdaLinesCount = lambdaLinesCount + 1
if line[0] != "#" and len(line) > 3 and startFluxDataLine is None:
startFluxDataLine = i
self.wavelengths = []
for i in range(startFluxDataLine, len(lines), lambdaLinesCount):
line = lines[i]
bits = line.split()
self.wavelengths.append(float(bits[0]))
# Construct a grid of flux - rows correspond to age, columns to wavelength
self.fluxGrid = numpy.zeros([len(self.ages), len(self.wavelengths)])
for i in range(startFluxDataLine, len(lines), lambdaLinesCount):
line = lines[i]
bits = []
for k in range(i, i + lambdaLinesCount):
bits = bits + lines[k].split()
ageFluxes = bits[1:]
sedWavelength = float(bits[0])
column = self.wavelengths.index(sedWavelength)
for row in range(len(ageFluxes)):
sedFlux = float(ageFluxes[row])
self.fluxGrid[row][column] = sedFlux
# Convert flux into erg/s/Angstrom - native units of galaxevpl files are LSun/Angstrom
self.fluxGrid = self.fluxGrid * 3.826e33
#-----------------------------------------------------------------------------
class P09Model(StellarPopulation):
"""This class describes a Percival et al 2009 (BaSTI;
http://albione.oa-teramo.inaf.it) stellar population model. We assume that
the synthetic spectra for each model are unpacked under the directory
pointed to by fileName.
The wavelength units of SEDs from P09 models are converted to Angstroms.
Flux is converted into units of erg/s/Angstrom (the units in the BaSTI
low-res spectra are 4.3607e-33 erg/s/m).
"""
def __init__(self, fileName):
self.modelFamily = "P09"
files = glob.glob(fileName + os.path.sep + "*.t??????")
self.fileName = fileName
# Map end of filenames to ages in Gyr
extensionAgeMap = {}
self.ages = []
for f in files:
ext = f.split(".")[-1]
ageGyr = float(f[-5:]) / 1e3
self.ages.append(ageGyr)
extensionAgeMap[ext] = ageGyr
self.ages.sort()
# Construct a grid of flux - rows correspond to age, columns to wavelength
self.wavelengths = None
self.fluxGrid = None
for i in range(len(self.ages)):
for e in extensionAgeMap.keys():
if extensionAgeMap[e] == self.ages[i]:
inFileName = glob.glob(fileName + os.path.sep + "*." + e)[
0]
inFile = open(inFileName, "r")
lines = inFile.readlines()
inFile.close()
wavelength = []
flux = []
for line in lines:
bits = line.split()
wavelength.append(
float(bits[0]) *
10.0) # units in file are nm, not angstroms
flux.append(float(bits[1]))
if self.wavelengths is None:
self.wavelengths = wavelength
if self.fluxGrid is None:
self.fluxGrid = numpy.zeros(
[len(self.ages), len(self.wavelengths)])
self.fluxGrid[i] = flux
# Convert flux into erg/s/Angstrom - native units in BaSTI files are
# 4.3607e-33 erg/s/m
self.fluxGrid = self.fluxGrid / 4.3607e-33 / 1e10
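    # Illustrative usage sketch (comment only; P09_DIR is an assumed path to a
    # directory of unpacked BaSTI synthetic spectra):
    #
    #   p09model = P09Model(P09_DIR)
    #   sed = p09model.getSED(5.0, z=0.3)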
#-----------------------------------------------------------------------------
def makeModelSEDDictList(modelList,
z,
passbandsList,
labelsList=[],
EBMinusVList=[0.0],
forceYoungerThanUniverse=True):
"""This routine makes a list of SEDDict dictionaries (see L{mags2SEDDict})
for fitting using L{fitSEDDict}. This speeds up the fitting as this allows
us to calculate model SED magnitudes only once, if all objects to be fitted
are at the same redshift. We add some meta data to the modelSEDDicts (e.g.
the model file names).
The effect of extinction by dust (assuming the Calzetti et al. 2000 law)
can be included by giving a list of E(B-V) values.
If forceYoungerThanUniverse == True, ages which are older than the universe
at the given z will not be included.
@type modelList: list of L{StellarPopulation} model objects
@param modelList: list of StellarPopulation models to include
@type z: float
@param z: redshift to apply to all stellar population models in modelList
@type EBMinusVList: list
@param EBMinusVList: list of E(B-V) extinction values to apply to all
models, in magnitudes
@type labelsList: list
@param labelsList: optional list used for labelling passbands in output
SEDDicts
@type forceYoungerThanUniverse: bool
@param forceYoungerThanUniverse: if True, do not allow models that exceed
the age of the universe at z
@rtype: list
@return: list of dictionaries containing model fluxes, to be used as input
to L{fitSEDDict}.
"""
# If EBMinusVList is empty we would not make any model SEDDicts, so default to zero extinction
if EBMinusVList == []:
EBMinusVList = [0.0]
modelSEDDictList = []
for m in range(len(modelList)):
testAges = numpy.array(modelList[m].ages)
if forceYoungerThanUniverse:
testAges = testAges[numpy.logical_and(
numpy.less(testAges, astCalc.tz(z)), numpy.greater(testAges,
0))]
for t in testAges:
s = modelList[m].getSED(
t,
z=z,
label=modelList[m].fileName + " - age=" + str(t) + " Gyr")
for EBMinusV in EBMinusVList:
try:
s.extinctionCalzetti(EBMinusV)
except:
raise Exception(
"Model %s has a wavelength range that doesn't cover ~1200-22000 Angstroms"
% (modelList[m].fileName))
modelSEDDict = s.getSEDDict(passbandsList)
modelSEDDict['labels'] = labelsList
modelSEDDict['E(B-V)'] = EBMinusV
modelSEDDict['ageGyr'] = t
modelSEDDict['z'] = z
modelSEDDict['fileName'] = modelList[m].fileName
modelSEDDict['modelListIndex'] = m
modelSEDDictList.append(modelSEDDict)
return modelSEDDictList
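# Illustrative usage sketch (comment only; the model and Passband objects are
# assumed to have been constructed already):
#
#   modelSEDDictList = makeModelSEDDictList([m05csp, bc03model], z=0.5,
#                                           passbandsList=[gBand, rBand, iBand],
#                                           EBMinusVList=[0.0, 0.1])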
#-----------------------------------------------------------------------------
def fitSEDDict(SEDDict, modelSEDDictList):
"""Fits the given SED dictionary (made using L{mags2SEDDict}) with the
given list of model SED dictionaries. The latter should be made using
L{makeModelSEDDictList}, and entries for fluxes should correspond directly
between the model and SEDDict.
Returns a dictionary with best fit values.
@type SEDDict: dictionary, in format of L{mags2SEDDict}
@param SEDDict: dictionary of observed fluxes and uncertainties, in format
of L{mags2SEDDict}
@type modelSEDDictList: list of dictionaries, in format of
L{makeModelSEDDictList}
@param modelSEDDictList: list of dictionaries containing fluxes of models
to be fitted to the observed fluxes listed in the SEDDict. This should be
made using L{makeModelSEDDictList}.
@rtype: dictionary
@return: results of the fitting - keys:
- 'minChiSq': minimum chi squared value of best fit
- 'chiSqContrib': corresponding contribution at each passband to
the minimum chi squared value
- 'ageGyr': the age in Gyr of the best fitting model
- 'modelFileName': the file name of the stellar population model
corresponding to the best fit
- 'modelListIndex': the index of the best fitting model in the
input modelSEDDictList
- 'norm': the normalisation that the best fit model should be
multiplied by to match the SEDDict
- 'z': the redshift of the best fit model
- 'E(B-V)': the extinction, E(B-V), in magnitudes, of the best fit
model
"""
modelFlux = []
for modelSEDDict in modelSEDDictList:
modelFlux.append(modelSEDDict['flux'])
modelFlux = numpy.array(modelFlux)
sedFlux = numpy.array([SEDDict['flux']] * len(modelSEDDictList))
sedFluxErr = numpy.array([SEDDict['fluxErr']] * len(modelSEDDictList))
# Analytic expression below is for normalisation at minimum chi squared (see note book)
norm = numpy.sum(
(modelFlux * sedFlux) /
(sedFluxErr**2), axis=1) / numpy.sum(modelFlux**2 / sedFluxErr**2,
axis=1)
norms = numpy.array([norm] * modelFlux.shape[1]).transpose()
chiSq = numpy.sum(((sedFlux - norms * modelFlux)**2) / sedFluxErr**2,
axis=1)
chiSq[numpy.isnan(
chiSq)] = 1e6 # throw these out, should check this out and handle more gracefully
minChiSq = chiSq.min()
bestMatchIndex = numpy.equal(chiSq, minChiSq).nonzero()[0][0]
bestNorm = norm[bestMatchIndex]
bestChiSq = minChiSq
bestChiSqContrib = ((sedFlux[bestMatchIndex] - norms[bestMatchIndex] *
modelFlux[bestMatchIndex])**2) /\
sedFluxErr[bestMatchIndex]**2
resultsDict = {'minChiSq': bestChiSq,
'chiSqContrib': bestChiSqContrib,
'allChiSqValues': chiSq,
'ageGyr': modelSEDDictList[bestMatchIndex]['ageGyr'],
'modelFileName':
modelSEDDictList[bestMatchIndex]['fileName'],
'modelListIndex':
modelSEDDictList[bestMatchIndex]['modelListIndex'],
'norm': bestNorm,
'z': modelSEDDictList[bestMatchIndex]['z'],
'E(B-V)': modelSEDDictList[bestMatchIndex]['E(B-V)']}
return resultsDict
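# Illustrative end-to-end sketch (comment only; the magnitudes, errors and
# passbands below are placeholder assumptions):
#
#   obsSEDDict = mags2SEDDict([22.3, 21.8, 21.5], [0.1, 0.1, 0.1],
#                             [gBand, rBand, iBand])
#   results = fitSEDDict(obsSEDDict, modelSEDDictList)
#   # results['ageGyr'], results['E(B-V)'] etc. describe the best fitting model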
#-----------------------------------------------------------------------------
def mags2SEDDict(ABMags, ABMagErrs, passbands):
"""Takes a set of corresponding AB magnitudes, uncertainties, and
passbands, and returns a dictionary with keys 'flux', 'fluxErr'
'wavelength'. Fluxes are in units of erg/s/cm^2/Angstrom, wavelength in
Angstroms. These dictionaries are the staple diet of the L{fitSEDDict}
routine.
@type ABMags: list or numpy array
@param ABMags: AB magnitudes, specified in corresponding order to passbands
and ABMagErrs
@type ABMagErrs: list or numpy array
@param ABMagErrs: AB magnitude errors, specified in corresponding order to
passbands and ABMags
@type passbands: list of L{Passband} objects
@param passbands: passband objects, specified in corresponding order to
ABMags and ABMagErrs
@rtype: dictionary
@return: dictionary with keys {'flux', 'fluxErr', 'wavelength'}, suitable
for input to L{fitSEDDict}
"""
flux = []
fluxErr = []
wavelength = []
for m, e, p in zip(ABMags, ABMagErrs, passbands):
f, err = mag2Flux(m, e, p)
flux.append(f)
fluxErr.append(err)
wavelength.append(p.effectiveWavelength())
SEDDict = {}
SEDDict['flux'] = numpy.array(flux)
SEDDict['fluxErr'] = numpy.array(fluxErr)
SEDDict['wavelength'] = numpy.array(wavelength)
return SEDDict
#-----------------------------------------------------------------------------
def mag2Flux(ABMag, ABMagErr, passband):
"""Converts given AB magnitude and uncertainty into flux, in
erg/s/cm^2/Angstrom.
@type ABMag: float
@param ABMag: magnitude on AB system in passband
@type ABMagErr: float
@param ABMagErr: uncertainty in AB magnitude in passband
@type passband: L{Passband} object
@param passband: L{Passband} object at which ABMag was measured
@rtype: list
@return: [flux, fluxError], in units of erg/s/cm^2/Angstrom
"""
fluxJy = (10**23.0) * 10**(-(ABMag + 48.6) / 2.5) # AB mag
aLambda = 3e-13 # for conversion to erg s-1 cm-2 angstrom-1 with lambda in microns
effLMicron = passband.effectiveWavelength() * (1e-10 / 1e-6)
fluxWLUnits = aLambda * fluxJy / effLMicron**2
fluxJyErr = (10**23.0) * 10**(-(ABMag - ABMagErr + 48.6) / 2.5) # AB mag
fluxWLUnitsErr = aLambda * fluxJyErr / effLMicron**2
fluxWLUnitsErr = fluxWLUnitsErr - fluxWLUnits
return [fluxWLUnits, fluxWLUnitsErr]
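# Worked check (comment only): an AB magnitude of 0.0 corresponds to ~3631 Jy,
# so through a passband with an effective wavelength of 5500 Angstroms
# (0.55 microns) the conversion above gives
# 3e-13 * 3631 / 0.55**2 ~ 3.6e-9 erg/s/cm^2/Angstrom.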
#-----------------------------------------------------------------------------
def flux2Mag(flux, fluxErr, passband):
"""Converts given flux and uncertainty in erg/s/cm^2/Angstrom into AB
magnitudes.
@type flux: float
@param flux: flux in erg/s/cm^2/Angstrom in passband
@type fluxErr: float
@param fluxErr: uncertainty in flux in passband, in erg/s/cm^2/Angstrom
@type passband: L{Passband} object
@param passband: L{Passband} object at which ABMag was measured
@rtype: list
@return: [ABMag, ABMagError], in AB magnitudes
"""
# aLambda = 3x10-5 for effective wavelength in angstroms
aLambda = 3e-13 # for conversion to erg s-1 cm-2 angstrom-1 with lambda in microns
effLMicron = passband.effectiveWavelength() * (1e-10 / 1e-6)
fluxJy = (flux * effLMicron**2) / aLambda
mag = -2.5 * numpy.log10(fluxJy / 10**23) - 48.6
fluxErrJy = (fluxErr * effLMicron**2) / aLambda
magErr = mag - (-2.5 * numpy.log10((fluxJy + fluxErrJy) / 10**23) - 48.6)
return [mag, magErr]
#-----------------------------------------------------------------------------
def mag2Jy(ABMag):
"""Converts an AB magnitude into flux density in Jy
@type ABMag: float
@param ABMag: AB magnitude
@rtype: float
@return: flux density in Jy
"""
fluxJy = ((10**23) * 10**(-(float(ABMag) + 48.6) / 2.5))
return fluxJy
#-----------------------------------------------------------------------------
def Jy2Mag(fluxJy):
"""Converts flux density in Jy into AB magnitude
@type fluxJy: float
@param fluxJy: flux density in Jy
@rtype: float
@return: AB magnitude
"""
ABMag = -2.5 * (numpy.log10(fluxJy) - 23.0) - 48.6
return ABMag
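# Quick consistency check (comment only): mag2Jy and Jy2Mag are inverses, e.g.
#
#   Jy2Mag(mag2Jy(20.0))  # -> 20.0
#   mag2Jy(0.0)           # -> ~3631 Jy (the AB zero point)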
#-----------------------------------------------------------------------------
# Data
VEGA = VegaSED()
# AB SED has constant flux density 3631 Jy
AB = SED(wavelength=numpy.logspace(1, 8, int(1e5)), flux=numpy.ones(int(1e5)))
AB.flux = (3e-5 * 3631) / (AB.wavelength**2)
AB.z0flux = AB.flux[:]
# Solar SED from HST CALSPEC (http://www.stsci.edu/hst/observatory/cdbs/calspec.html)
SOL = SED()
SOL.loadFromFile(astLib.__path__[0] + os.path.sep + "data" + os.path.sep +
"sun_reference_stis_001.ascii")
| lgpl-2.1 |
wkew/FTMSVisualization | 3-HeteroClassPlotter.py | 1 | 10441 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 22 11:42:36 2016
@author: Will Kew
will.kew@gmail.com
Copyright Will Kew, 2016
This file is part of FTMS Visualisation (also known as i-van Krevelen).
FTMS Visualisation is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
FTMS Visualisation is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with FTMS Visualisation. If not, see <http://www.gnu.org/licenses/>.
This script will read in an assigned peaklist (example input file included) and calculate the heteroatomic class distribution.
The output is a vertical bar plot of heteroatomic class versus count. You can also have the calculated numbers output in a format for replotting.
This tool uses Seaborn - http://seaborn.pydata.org/
A number of (partially tested) other functions to plot output are included, though commented out.
This tool was used in our recent paper on Scotch Whisky - https://link.springer.com/article/10.1007/s13361-016-1513-y
The prompt for the user about whisky samples is thus borne from this - it also serves as an example of how to customise which classes to include.
"""
from __future__ import print_function # Python 2 compatibility
from __future__ import absolute_import # Python 2 compatibility
import os, sys
import pandas as pd
from collections import Counter
import matplotlib.pyplot as plt
import seaborn as sns
"""
# We import also the FTMSVizProcessingModule which contains a few useful functions.
# here we define where the scripts are stored.
# Make sure to change this to where you have saved these scripts.
"""
try: #test if running in ipython
__IPYTHON__
except NameError: #if not running in ipython....
import FTMSVizProcessingModule as FTPM
path = os.path.join(os.getcwd(), "data") + os.sep #example data location
else: #if running in ipython
scriptlocation = "/LOCAL/FTMSVis/FTMSVisualization-master/"
sys.path.append(scriptlocation)
import FTMSVizProcessingModule as FTPM
path = "/LOCAL/FTMSVis/data/"
whisky = input("Are these Whisky samples - Y or N?" )
if whisky.upper() == "Y":
whisky = True
else:
whisky = False
inputpath = path +"OutputCSV/"
outputpath = path + "Images/Classes/"
FTPM.make_sure_path_exists(outputpath) #this function checks the output directory exists; if it doesn't, it creates it.
print("Looking for CSVs in " + inputpath)
filesA = os.listdir(inputpath)
filesB = []
for y in filesA:
if y[-8:] =="hits.csv" and y[-10:] != "nohits.csv" and y[-11:] !="isohits.csv":
filesB.append(y)
nfiles = len(filesB)
samplenames=[]
for x in filesB:
samplenames.append(x[:-9])
heteroclasses=[]
for z in filesB:
df1 = pd.read_csv(inputpath+z,index_col=0)
hetclas = df1["HeteroClass"]
hetclaslist = hetclas.tolist()
heteroclasses.append(hetclaslist)
heteroclasses = [item for sublist in heteroclasses for item in sublist]
hetclasset = list(set(heteroclasses))
indexlist = []
for i in samplenames:
for n in range(len(hetclasset)):
indexlist.append(i)
###This section is relevant to my whisky samples
if whisky == True:
columnnames = ["Sample","Class","WoodType","Region","Age","Peated","HeteroClass","HeteroClassCount"]
df4 = pd.read_csv(path+"SampleInfo-Dict.csv",index_col=0)
df4 = df4.T
dict4 = df4.to_dict()
outputdata = pd.DataFrame(index = range(len(indexlist)), columns=columnnames)
a = 0
for y in filesB:
df2 = pd.read_csv(inputpath+y,index_col=0)
counter = Counter(df2["HeteroClass"])
for x in counter:
outputdata.iloc[a][0] = y[:-9]
outputdata.iloc[a][1] = dict4[y[:-9]]["Class"]
outputdata.iloc[a][2] = dict4[y[:-9]]["Total Wood"]
outputdata.iloc[a][3] = dict4[y[:-9]]["Region"]
outputdata.iloc[a][4] = dict4[y[:-9]]["Age"]
outputdata.iloc[a][5] = dict4[y[:-9]]["Peated"]
outputdata.iloc[a][6] = x
outputdata.iloc[a][7] = counter[x]
a = a+1
outputdata = outputdata.dropna(how="all",axis=0)
else:
columnnames = ["Sample","Class","HeteroClass","HeteroClassCount"]
outputdata = pd.DataFrame(index = range(len(indexlist)), columns=columnnames)
a = 0
for y in filesB:
df2 = pd.read_csv(inputpath+y,index_col=0)
counter = Counter(df2["HeteroClass"])
for x in counter:
outputdata.iloc[a][0] = y[:-9]
outputdata.iloc[a][1] = y[:-9] #this is the Class variable, and should be defined as appropriate for what you're plotting. In the case of single samples, it can be the sample name.
outputdata.iloc[a][2] = x
outputdata.iloc[a][3] = counter[x]
a = a+1
outputdata = outputdata.dropna(how="all",axis=0)
outputdata["HeteroClassCount"] = pd.to_numeric(outputdata["HeteroClassCount"], errors="raise")
saveoutputdata = input("Do you want to save the output data in a text file for later re-processing - Y or N? ")
if saveoutputdata.upper() == "Y":
outputdata.to_excel(inputpath+"HetClassByClass-longform.xlsx") #this saves the info out in a longform for plotting.
#outputdata = pd.read_excel(inputpath+"HetClassByClass-longform.xlsx") #this reads that data back in. Only necessary for manually re-running bits of script.
# This section creates a unique, naturally sorted list of heteroatom classes for plotting. Only really works for CHO formula.
# If you have exotic heteroatoms, will need to refigure this yourself, or just hardcode the order you want. easy to do in Excel.
order = outputdata["HeteroClass"].tolist()
order= list(set(order))
order.sort(key=FTPM.natural_sort_key) # this natural sort function ensures a logical order to your barplot.
if whisky == True:
CHOorder = ["O2","O3","O4","O5","O6","O7","O8","O9","O10","O11","O12","O13","O14","O15","O16","O17","O18","O19"]
Fullorder = ["O2","O3","O4","O5","O6","O7","O8","O9","O10","O11","O12","O13","O14","O15","O16","O17","O18",
"O19","O1S1","O2S1","O3S1","O4S1","O5S1","O6S1","O7S1","O8S1","O9S1","O10S1","O11S1","O12S1"]
CHOSorder =["O1S1","O2S1","O3S1","O4S1","O5S1","O6S1","O7S1","O8S1","O9S1","O10S1","O11S1","O12S1"]
CHOSorderNew = ["O2","O3","O4","O5","O6","O7","O8","O9","O10","O11","O12","O13","O14","O15","O16","O17","O18","O19","OnS"]
labels = ["O2","O3","O4","O5","O6","O7","O8","O9","O10","O11","O12","O13","O14","O15","O16","O17","O18","O19",r'O$\mathregular {_n}$S']
else:
df = outputdata
#colours = ["#a6cee3","#1f78b4","#b2df8a"] #colorblind and print friendly colours picked from http://colorbrewer2.org/
colours = ["#1b9e77","#d95f02","#7570b3"] #as above, but brighter
def barplot():
sns.set_style("white")
sns.set_context("paper",font_scale=2)
ax = sns.barplot(x="HeteroClass",y="HeteroClassCount",hue="Class",
data=outputdata,order=order,palette=sns.color_palette(colours))
ax.set(xlabel='Heteroatomic Class', ylabel='Count')
handles, labels = ax.get_legend_handles_labels()
if len(labels) == 1:
ax.legend_.remove()
sns.despine()
fig = ax.get_figure()
plt.xticks(rotation=90)
fig.set_size_inches(8, 6, forward=True)
fig.savefig(outputpath+"Barplot.png",dpi=600,bbox_inches="tight")
fig.savefig(outputpath+"Barplot.eps",dpi=600,bbox_inches="tight")
barplot() #plots a barplot.
"""
# Here are some further examples of the Seaborn Plotting library applied to this problem.
# Most of these rely on having many samples across a small number of classes you wish to compare
def violinplot():
sns.set_style("white")
sns.set_context("paper",font_scale=2)
ax = sns.violinplot(x="HeteroClass",y="HeteroClassCount",hue="Class",data=outputdata,
order=order,
palette=sns.color_palette("bright"),
split=False,bw="silverman",scale_hue=True,scale="width",
cut=2,linewidth=1.5,inner="quartiles",saturation=1)
ax.set(xlabel='Heteroatomic Class', ylabel='Count')
sns.despine()
fig = ax.get_figure()
locs, labels = plt.xticks()
plt.xticks(locs, labels, rotation=90)
cur_ylim = ax.get_ylim()
ax.set_ylim(0,cur_ylim[1])
fig.set_size_inches((POPM.mm2inch(171,80)), forward=True)
fig.savefig(outputpath+"violinplot-scalewidth.png",dpi=600,bbox_inches="tight")
fig.savefig(outputpath+"violinplot-scalewidth.eps",dpi=600,bbox_inches="tight")
def boxplot():
sns.set_style("white")
sns.set_context("paper",font_scale=2)
ax = sns.boxplot(x="HeteroClass",y="HeteroClassCount",hue="Class",data=outputdata,order=order,palette=sns.color_palette("bright"))
ax.set(xlabel='Heteroatomic Class', ylabel='Count')
sns.despine()
fig = ax.get_figure()
plt.xticks(rotation=90)
fig.set_size_inches(8, 6, forward=True)
fig.savefig(outputpath+"Boxplot-comparison-CHO-only.png",dpi=300,bbox_inches="tight")
def swarmplot():
sns.set_style("white")
sns.set_context("paper",font_scale=2)
ax = sns.swarmplot(x="HeteroClass",y="HeteroClassCount",hue="Class",data=outputdata,order=order,palette=sns.color_palette("bright"))
ax.set(xlabel='Heteroatomic Class', ylabel='Average Count')
sns.despine()
fig = ax.get_figure()
plt.xticks(rotation=90)
fig.set_size_inches(8, 6, forward=True)
fig.savefig(outputpath+"swarmplot-comparison-CHO-only.png",dpi=300,bbox_inches="tight")
def stripplot():
sns.set_style("white")
sns.set_context("paper",font_scale=2)
ax = sns.stripplot(x="HeteroClass",y="HeteroClassCount",hue="Class",data=outputdata,order=order,palette=sns.color_palette("bright"),jitter=False,split=True)
ax.set(xlabel='Heteroatomic Class', ylabel='Average Count')
sns.despine()
fig = ax.get_figure()
plt.xticks(rotation=90)
fig.set_size_inches(8, 6, forward=True)
fig.savefig(outputpath+"striplot-comparison-CHO-only.png",dpi=300,bbox_inches="tight")
"""
#EOF | gpl-3.0 |
henkhaus/wow | testing/plotter.py | 1 | 1278 | from pymongo import MongoClient
from matplotlib import pyplot as plt
import os
from datetime import datetime, date, time, timedelta
client = MongoClient()
# using wowtest.auctiondata
db = client.wowtest
posts = db.auctiondata
auctions = posts.find().limit(10)
#time.time() into datetime --->
#datetime.datetime.fromtimestamp('xxxx').strftime('%c')
def dt_to_timestamp(dt):
#timestamp = (dt - datetime(1970, 1, 1)).total_seconds()
return (int(dt.strftime('%s')))
def getdata(num, quantum):
valid = []
today = datetime.combine(date.today(), time())
for i in range(num+1):
day = today - i*quantum
gte = dt_to_timestamp(day)
lt = dt_to_timestamp(day+quantum)
time_query = {'$gte':gte, '$lt':lt}
valid.insert(0, posts.find({'viewtime':time_query}).count())
return valid
def format_date(x, n):
today = datetime.combine(date.today(), time())
day = today - timedelta(hours=n-x-1)
return day.strftime('%m%d%H')
def plotbar(data, color):
plt.bar(range(len(data)), data, align='center', color=color)
# run
n = 48
val = getdata(n, timedelta(hours=1))
plotbar(val, '#4788d2')
plt.xticks(range(n), [format_date(i, n) for i in range(n)], size='small', rotation=90)
plt.grid(axis='y')
plt.show()
| apache-2.0 |
badlogicmanpreet/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/rcsetup.py | 69 | 23344 | """
The rcsetup module contains the default values and the validation code for
customization using matplotlib's rc settings.
Each rc setting is assigned a default value and a function used to validate any
attempted changes to that setting. The default values and validation functions
are defined in the rcsetup module, and are used to construct the rcParams global
object which stores the settings and is referenced throughout matplotlib.
These default values should be consistent with the default matplotlibrc file
that actually reflects the values given here. Any additions or deletions to the
parameter set listed here should also be visited to the
:file:`matplotlibrc.template` in matplotlib's root source directory.
"""
import os
import warnings
from matplotlib.fontconfig_pattern import parse_fontconfig_pattern
from matplotlib.colors import is_color_like
#interactive_bk = ['gtk', 'gtkagg', 'gtkcairo', 'fltkagg', 'qtagg', 'qt4agg',
# 'tkagg', 'wx', 'wxagg', 'cocoaagg']
# The capitalized forms are needed for ipython at present; this may
# change for later versions.
interactive_bk = ['GTK', 'GTKAgg', 'GTKCairo', 'FltkAgg', 'MacOSX',
'QtAgg', 'Qt4Agg', 'TkAgg', 'WX', 'WXAgg', 'CocoaAgg']
non_interactive_bk = ['agg', 'cairo', 'emf', 'gdk',
'pdf', 'ps', 'svg', 'template']
all_backends = interactive_bk + non_interactive_bk
class ValidateInStrings:
def __init__(self, key, valid, ignorecase=False):
'valid is a list of legal strings'
self.key = key
self.ignorecase = ignorecase
def func(s):
if ignorecase: return s.lower()
else: return s
self.valid = dict([(func(k),k) for k in valid])
def __call__(self, s):
if self.ignorecase: s = s.lower()
if s in self.valid: return self.valid[s]
raise ValueError('Unrecognized %s string "%s": valid strings are %s'
% (self.key, s, self.valid.values()))
def validate_path_exists(s):
'If s is a path, return s, else False'
if os.path.exists(s): return s
else:
raise RuntimeError('"%s" should be a path but it does not exist'%s)
def validate_bool(b):
'Convert b to a boolean or raise'
if type(b) is str:
b = b.lower()
if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True): return True
elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False): return False
else:
raise ValueError('Could not convert "%s" to boolean' % b)
def validate_bool_maybe_none(b):
'Convert b to a boolean or raise'
if type(b) is str:
b = b.lower()
if b=='none': return None
if b in ('t', 'y', 'yes', 'on', 'true', '1', 1, True): return True
elif b in ('f', 'n', 'no', 'off', 'false', '0', 0, False): return False
else:
raise ValueError('Could not convert "%s" to boolean' % b)
def validate_float(s):
'convert s to float or raise'
try: return float(s)
except ValueError:
raise ValueError('Could not convert "%s" to float' % s)
def validate_int(s):
'convert s to int or raise'
try: return int(s)
except ValueError:
raise ValueError('Could not convert "%s" to int' % s)
def validate_fonttype(s):
'confirm that this is a Postscript or PDF font type that we know how to convert to'
fonttypes = { 'type3': 3,
'truetype': 42 }
try:
fonttype = validate_int(s)
except ValueError:
if s.lower() in fonttypes.keys():
return fonttypes[s.lower()]
raise ValueError('Supported Postscript/PDF font types are %s' % fonttypes.keys())
else:
if fonttype not in fonttypes.values():
raise ValueError('Supported Postscript/PDF font types are %s' % fonttypes.values())
return fonttype
#validate_backend = ValidateInStrings('backend', all_backends, ignorecase=True)
_validate_standard_backends = ValidateInStrings('backend', all_backends, ignorecase=True)
def validate_backend(s):
if s.startswith('module://'): return s
else: return _validate_standard_backends(s)
validate_numerix = ValidateInStrings('numerix',[
'Numeric','numarray','numpy',
], ignorecase=True)
validate_toolbar = ValidateInStrings('toolbar',[
'None','classic','toolbar2',
], ignorecase=True)
def validate_autolayout(v):
if v:
warnings.warn("figure.autolayout is not currently supported")
class validate_nseq_float:
def __init__(self, n):
self.n = n
def __call__(self, s):
'return a seq of n floats or raise'
if type(s) is str:
ss = s.split(',')
if len(ss) != self.n:
raise ValueError('You must supply exactly %d comma separated values'%self.n)
try:
return [float(val) for val in ss]
except ValueError:
raise ValueError('Could not convert all entries to floats')
else:
assert type(s) in (list,tuple)
if len(s) != self.n:
raise ValueError('You must supply exactly %d values'%self.n)
return [float(val) for val in s]
class validate_nseq_int:
def __init__(self, n):
self.n = n
def __call__(self, s):
'return a seq of n ints or raise'
if type(s) is str:
ss = s.split(',')
if len(ss) != self.n:
raise ValueError('You must supply exactly %d comma separated values'%self.n)
try:
return [int(val) for val in ss]
except ValueError:
raise ValueError('Could not convert all entries to ints')
else:
assert type(s) in (list,tuple)
if len(s) != self.n:
raise ValueError('You must supply exactly %d values'%self.n)
return [int(val) for val in s]
def validate_color(s):
'return a valid color arg'
if s.lower() == 'none':
return 'None'
if is_color_like(s):
return s
stmp = '#' + s
if is_color_like(stmp):
return stmp
# If it is still valid, it must be a tuple.
colorarg = s
msg = ''
if s.find(',')>=0:
# get rid of grouping symbols
stmp = ''.join([ c for c in s if c.isdigit() or c=='.' or c==','])
vals = stmp.split(',')
if len(vals)!=3:
msg = '\nColor tuples must be length 3'
else:
try:
colorarg = [float(val) for val in vals]
except ValueError:
msg = '\nCould not convert all entries to floats'
if not msg and is_color_like(colorarg):
return colorarg
raise ValueError('%s does not look like a color arg%s'%(s, msg))
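# For illustration (not part of the original module), validate_color accepts
# several equivalent forms, e.g.:
#
#   validate_color('b')              # -> 'b'
#   validate_color('0.75')           # -> '0.75' (grayscale string)
#   validate_color('eeefff')         # -> '#eeefff' (hex without the leading '#')
#   validate_color('0.1, 0.2, 0.3')  # -> [0.1, 0.2, 0.3] (RGB tuple as string)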
def validate_stringlist(s):
'return a list'
if type(s) is str:
return [ v.strip() for v in s.split(',') ]
else:
assert type(s) in [list,tuple]
return [ str(v) for v in s ]
validate_orientation = ValidateInStrings('orientation',[
'landscape', 'portrait',
])
def validate_aspect(s):
if s in ('auto', 'equal'):
return s
try:
return float(s)
except ValueError:
raise ValueError('not a valid aspect specification')
def validate_fontsize(s):
if type(s) is str:
s = s.lower()
if s in ['xx-small', 'x-small', 'small', 'medium', 'large', 'x-large',
'xx-large', 'smaller', 'larger']:
return s
try:
return float(s)
except ValueError:
raise ValueError('not a valid font size')
def validate_font_properties(s):
parse_fontconfig_pattern(s)
return s
validate_fontset = ValidateInStrings('fontset', ['cm', 'stix', 'stixsans', 'custom'])
validate_verbose = ValidateInStrings('verbose',[
'silent', 'helpful', 'debug', 'debug-annoying',
])
validate_cairo_format = ValidateInStrings('cairo_format',
['png', 'ps', 'pdf', 'svg'],
ignorecase=True)
validate_ps_papersize = ValidateInStrings('ps_papersize',[
'auto', 'letter', 'legal', 'ledger',
'a0', 'a1', 'a2','a3', 'a4', 'a5', 'a6', 'a7', 'a8', 'a9', 'a10',
'b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8', 'b9', 'b10',
], ignorecase=True)
def validate_ps_distiller(s):
if type(s) is str:
s = s.lower()
if s in ('none',None):
return None
elif s in ('false', False):
return False
elif s in ('ghostscript', 'xpdf'):
return s
else:
raise ValueError('matplotlibrc ps.usedistiller must either be none, ghostscript or xpdf')
validate_joinstyle = ValidateInStrings('joinstyle',['miter', 'round', 'bevel'], ignorecase=True)
validate_capstyle = ValidateInStrings('capstyle',['butt', 'round', 'projecting'], ignorecase=True)
validate_negative_linestyle = ValidateInStrings('negative_linestyle',['solid', 'dashed'], ignorecase=True)
def validate_negative_linestyle_legacy(s):
try:
res = validate_negative_linestyle(s)
return res
except ValueError:
dashes = validate_nseq_float(2)(s)
warnings.warn("Deprecated negative_linestyle specification; use 'solid' or 'dashed'")
return (0, dashes) # (offset, (solid, blank))
validate_legend_loc = ValidateInStrings('legend_loc',[
'best',
'upper right',
'upper left',
'lower left',
'lower right',
'right',
'center left',
'center right',
'lower center',
'upper center',
'center',
], ignorecase=True)
class ValidateInterval:
"""
Value must be in interval
"""
def __init__(self, vmin, vmax, closedmin=True, closedmax=True):
self.vmin = vmin
self.vmax = vmax
self.cmin = closedmin
self.cmax = closedmax
def __call__(self, s):
try: s = float(s)
except: raise RuntimeError('Value must be a float; found "%s"'%s)
if self.cmin and s<self.vmin:
raise RuntimeError('Value must be >= %f; found "%f"'%(self.vmin, s))
elif not self.cmin and s<=self.vmin:
raise RuntimeError('Value must be > %f; found "%f"'%(self.vmin, s))
if self.cmax and s>self.vmax:
raise RuntimeError('Value must be <= %f; found "%f"'%(self.vmax, s))
elif not self.cmax and s>=self.vmax:
raise RuntimeError('Value must be < %f; found "%f"'%(self.vmax, s))
return s
# a map from key -> value, converter
defaultParams = {
'backend' : ['Agg', validate_backend], # agg is certainly present
'backend_fallback' : [True, validate_bool], # agg is certainly present
'numerix' : ['numpy', validate_numerix],
'maskedarray' : [False, validate_bool],
'toolbar' : ['toolbar2', validate_toolbar],
'datapath' : [None, validate_path_exists], # handled by _get_data_path_cached
'units' : [False, validate_bool],
'interactive' : [False, validate_bool],
'timezone' : ['UTC', str],
# the verbosity setting
'verbose.level' : ['silent', validate_verbose],
'verbose.fileo' : ['sys.stdout', str],
# line props
'lines.linewidth' : [1.0, validate_float], # line width in points
'lines.linestyle' : ['-', str], # solid line
'lines.color' : ['b', validate_color], # blue
'lines.marker' : ['None', str], # no marker by default
'lines.markeredgewidth' : [0.5, validate_float],
'lines.markersize' : [6, validate_float], # markersize, in points
'lines.antialiased' : [True, validate_bool], # antialiased (no jaggies)
'lines.dash_joinstyle' : ['miter', validate_joinstyle],
'lines.solid_joinstyle' : ['miter', validate_joinstyle],
'lines.dash_capstyle' : ['butt', validate_capstyle],
'lines.solid_capstyle' : ['projecting', validate_capstyle],
# patch props
'patch.linewidth' : [1.0, validate_float], # line width in points
'patch.edgecolor' : ['k', validate_color], # black
'patch.facecolor' : ['b', validate_color], # blue
'patch.antialiased' : [True, validate_bool], # antialiased (no jaggies)
# font props
'font.family' : ['sans-serif', str], # used by text object
'font.style' : ['normal', str], #
'font.variant' : ['normal', str], #
'font.stretch' : ['normal', str], #
'font.weight' : ['normal', str], #
'font.size' : [12.0, validate_float], #
'font.serif' : [['Bitstream Vera Serif', 'DejaVu Serif',
'New Century Schoolbook', 'Century Schoolbook L',
'Utopia', 'ITC Bookman', 'Bookman',
'Nimbus Roman No9 L','Times New Roman',
'Times','Palatino','Charter','serif'],
validate_stringlist],
'font.sans-serif' : [['Bitstream Vera Sans', 'DejaVu Sans',
'Lucida Grande', 'Verdana', 'Geneva', 'Lucid',
'Arial', 'Helvetica', 'Avant Garde', 'sans-serif'],
validate_stringlist],
'font.cursive' : [['Apple Chancery','Textile','Zapf Chancery',
'Sand','cursive'], validate_stringlist],
'font.fantasy' : [['Comic Sans MS','Chicago','Charcoal','Impact',
'Western','fantasy'], validate_stringlist],
'font.monospace' : [['Bitstream Vera Sans Mono', 'DejaVu Sans Mono',
'Andale Mono', 'Nimbus Mono L', 'Courier New',
'Courier','Fixed', 'Terminal','monospace'],
validate_stringlist],
# text props
'text.color' : ['k', validate_color], # black
'text.usetex' : [False, validate_bool],
'text.latex.unicode' : [False, validate_bool],
'text.latex.preamble' : [[''], validate_stringlist],
'text.dvipnghack' : [None, validate_bool_maybe_none],
'text.fontstyle' : ['normal', str],
'text.fontangle' : ['normal', str],
'text.fontvariant' : ['normal', str],
'text.fontweight' : ['normal', str],
'text.fontsize' : ['medium', validate_fontsize],
'mathtext.cal' : ['cursive', validate_font_properties],
'mathtext.rm' : ['serif', validate_font_properties],
'mathtext.tt' : ['monospace', validate_font_properties],
'mathtext.it' : ['serif:italic', validate_font_properties],
'mathtext.bf' : ['serif:bold', validate_font_properties],
'mathtext.sf' : ['sans\-serif', validate_font_properties],
'mathtext.fontset' : ['cm', validate_fontset],
'mathtext.fallback_to_cm' : [True, validate_bool],
'image.aspect' : ['equal', validate_aspect], # equal, auto, a number
'image.interpolation' : ['bilinear', str],
'image.cmap' : ['jet', str], # one of gray, jet, etc
'image.lut' : [256, validate_int], # lookup table
'image.origin' : ['upper', str], # origin of the image array: 'upper' or 'lower'
'image.resample' : [False, validate_bool],
'contour.negative_linestyle' : ['dashed', validate_negative_linestyle_legacy],
# axes props
'axes.axisbelow' : [False, validate_bool],
'axes.hold' : [True, validate_bool],
'axes.facecolor' : ['w', validate_color], # background color; white
'axes.edgecolor' : ['k', validate_color], # edge color; black
'axes.linewidth' : [1.0, validate_float], # edge linewidth
'axes.titlesize' : ['large', validate_fontsize], # fontsize of the axes title
'axes.grid' : [False, validate_bool], # display grid or not
'axes.labelsize' : ['medium', validate_fontsize], # fontsize of the x any y labels
'axes.labelcolor' : ['k', validate_color], # color of axis label
'axes.formatter.limits' : [[-7, 7], validate_nseq_int(2)],
# use scientific notation if log10
# of the axis range is smaller than the
# first or larger than the second
'axes.unicode_minus' : [True, validate_bool],
'polaraxes.grid' : [True, validate_bool], # display polar grid or not
#legend properties
'legend.fancybox' : [False,validate_bool],
'legend.loc' : ['upper right',validate_legend_loc], # at some point, this should be changed to 'best'
'legend.isaxes' : [True,validate_bool], # this option is internally ignored - it never served any useful purpose
'legend.numpoints' : [2, validate_int], # the number of points in the legend line
'legend.fontsize' : ['large', validate_fontsize],
'legend.pad' : [0, validate_float], # was 0.2, deprecated; the fractional whitespace inside the legend border
'legend.borderpad' : [0.4, validate_float], # units are fontsize
'legend.markerscale' : [1.0, validate_float], # the relative size of legend markers vs. original
# the following dimensions are in axes coords
'legend.labelsep' : [0.010, validate_float], # the vertical space between the legend entries
'legend.handlelen' : [0.05, validate_float], # the length of the legend lines
'legend.handletextsep' : [0.02, validate_float], # the space between the legend line and legend text
'legend.axespad' : [0.02, validate_float], # the border between the axes and legend edge
'legend.shadow' : [False, validate_bool],
'legend.labelspacing' : [0.5, validate_float], # the vertical space between the legend entries
'legend.handlelength' : [2., validate_float], # the length of the legend lines
'legend.handletextpad' : [.8, validate_float], # the space between the legend line and legend text
'legend.borderaxespad' : [0.5, validate_float], # the border between the axes and legend edge
'legend.columnspacing' : [2., validate_float], # the border between the axes and legend edge
'legend.markerscale' : [1.0, validate_float], # the relative size of legend markers vs. original
# the following dimensions are in axes coords
'legend.labelsep' : [0.010, validate_float], # the vertical space between the legend entries
'legend.handlelen' : [0.05, validate_float], # the length of the legend lines
'legend.handletextsep' : [0.02, validate_float], # the space between the legend line and legend text
'legend.axespad' : [0.5, validate_float], # the border between the axes and legend edge
'legend.shadow' : [False, validate_bool],
# tick properties
'xtick.major.size' : [4, validate_float], # major xtick size in points
'xtick.minor.size' : [2, validate_float], # minor xtick size in points
'xtick.major.pad' : [4, validate_float], # distance to label in points
'xtick.minor.pad' : [4, validate_float], # distance to label in points
'xtick.color' : ['k', validate_color], # color of the xtick labels
'xtick.labelsize' : ['medium', validate_fontsize], # fontsize of the xtick labels
'xtick.direction' : ['in', str], # direction of xticks
'ytick.major.size' : [4, validate_float], # major ytick size in points
'ytick.minor.size' : [2, validate_float], # minor ytick size in points
'ytick.major.pad' : [4, validate_float], # distance to label in points
'ytick.minor.pad' : [4, validate_float], # distance to label in points
'ytick.color' : ['k', validate_color], # color of the ytick labels
'ytick.labelsize' : ['medium', validate_fontsize], # fontsize of the ytick labels
'ytick.direction' : ['in', str], # direction of yticks
'grid.color' : ['k', validate_color], # grid color
'grid.linestyle' : [':', str], # dotted
'grid.linewidth' : [0.5, validate_float], # in points
# figure props
# figure size in inches: width by height
'figure.figsize' : [ [8.0,6.0], validate_nseq_float(2)],
'figure.dpi' : [ 80, validate_float], # DPI
'figure.facecolor' : [ '0.75', validate_color], # facecolor; scalar gray
'figure.edgecolor' : [ 'w', validate_color], # edgecolor; white
'figure.autolayout' : [ False, validate_autolayout],
'figure.subplot.left' : [0.125, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.right' : [0.9, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.bottom' : [0.1, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.top' : [0.9, ValidateInterval(0, 1, closedmin=True, closedmax=True)],
'figure.subplot.wspace' : [0.2, ValidateInterval(0, 1, closedmin=True, closedmax=False)],
'figure.subplot.hspace' : [0.2, ValidateInterval(0, 1, closedmin=True, closedmax=False)],
'savefig.dpi' : [100, validate_float], # DPI
'savefig.facecolor' : ['w', validate_color], # facecolor; white
'savefig.edgecolor' : ['w', validate_color], # edgecolor; white
'savefig.orientation' : ['portrait', validate_orientation], # orientation of saved figure
'cairo.format' : ['png', validate_cairo_format],
'tk.window_focus' : [False, validate_bool], # Maintain shell focus for TkAgg
'tk.pythoninspect' : [False, validate_bool], # Set PYTHONINSPECT
'ps.papersize' : ['letter', validate_ps_papersize], # Set the papersize/type
'ps.useafm' : [False, validate_bool], # Set PYTHONINSPECT
'ps.usedistiller' : [False, validate_ps_distiller], # use ghostscript or xpdf to distill ps output
'ps.distiller.res' : [6000, validate_int], # dpi
'ps.fonttype' : [3, validate_fonttype], # 3 (Type3) or 42 (Truetype)
'pdf.compression' : [6, validate_int], # compression level from 0 to 9; 0 to disable
'pdf.inheritcolor' : [False, validate_bool], # ignore any color-setting commands from the frontend
'pdf.use14corefonts' : [False, validate_bool], # use only the 14 PDF core fonts
# embedded in every PDF viewing application
'pdf.fonttype' : [3, validate_fonttype], # 3 (Type3) or 42 (Truetype)
'svg.image_inline' : [True, validate_bool], # write raster image data directly into the svg file
'svg.image_noscale' : [False, validate_bool], # suppress scaling of raster data embedded in SVG
'svg.embed_char_paths' : [True, validate_bool], # True to save all characters as paths in the SVG
'docstring.hardcopy' : [False, validate_bool], # set this when you want to generate hardcopy docstring
'plugins.directory' : ['.matplotlib_plugins', str], # where the plugin directory is located
'path.simplify' : [False, validate_bool],
'agg.path.chunksize' : [0, validate_int] # 0 to disable chunking;
# recommend about 20000 to
# enable. Experimental.
}
if __name__ == '__main__':
rc = defaultParams
rc['datapath'][0] = '/'
for key in rc:
if not rc[key][1](rc[key][0]) == rc[key][0]:
print "%s: %s != %s"%(key, rc[key][1](rc[key][0]), rc[key][0])
| agpl-3.0 |
williamdlees/TRIgS | PlotIdentity.py | 2 | 6306 | # The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Using BLAST, create a CSV file that lists the % identity of the specified sequence to all sequences from the specified germline
__author__ = 'William Lees'
__docformat__ = "restructuredtext en"
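# Example invocation (illustrative only; the file names are assumptions):
#
#   python PlotIdentity.py repertoire_identity.csv -p mab_identity.csv,ro/m8/l -s id_div.png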
import os.path
import sys
import argparse
import csv
import re
import subprocess
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio import pairwise2
from Bio.Alphabet import generic_nucleotide
from Bio import SeqIO
from Bio import Phylo
from itertools import izip
def main(argv):
parser = argparse.ArgumentParser(description='Create an Identity/Divergence plot.')
parser.add_argument('repertoire', help='file containing repertoire sequence identities (CSV)')
parser.add_argument('-a', '--adjust', help='Adjust labels to prevent overlap (requires package adjustText)', action='store_true')
parser.add_argument('-b', '--bar', help='Plot a colour bar', action='store_true')
parser.add_argument('-c', '--colourmap', help='colourmap')
parser.add_argument('-g', '--background', help='Set the contour colour where the density is zero')
parser.add_argument('-mx', '--maxx', help='max divergence value to show')
parser.add_argument('-my', '--miny', help='min identity value to show')
parser.add_argument('-p', '--points', help='comma-separated list of identity files and formats')
parser.add_argument('-s', '--save', help='Save output to file (as opposed to interactive display)')
args = parser.parse_args()
if args.adjust:
from adjustText import adjust_text
colourmap = args.colourmap if args.colourmap else 'hot_r'
plist = args.points.split(',')
points = []
repertoire = read_file(args.repertoire)
def pairwise(iterable):
a = iter(iterable)
return izip(a, a)
if len(plist) > 0:
try:
for file, format in pairwise(plist):
points.append((read_file(file), format))
except IOError:
print 'file "%s" cannot be opened.' % file
except:
print '"points" must consist of pairs of files and formats.'
quit()
max_divergence = int(args.maxx) if args.maxx else None
min_identity = int(args.miny) if args.miny else None
savefile = args.save if args.save else None
if not max_divergence:
max_divergence = max(repertoire['GermlineDist'])
for point in points:
max_divergence = max(max_divergence, max(point[0]['GermlineDist']))
max_divergence = int(max_divergence) + 1
if not min_identity:
min_identity = min(repertoire['TargetDist'])
for point in points:
min_identity = min(min_identity, min(point[0]['TargetDist']))
min_identity = int(min_identity)
H, yedges, xedges = np.histogram2d(repertoire['TargetDist'], repertoire['GermlineDist'], bins=[101-min_identity, max_divergence+1], range=[[min_identity, 101], [-1, max_divergence]], normed=False)
# For alternative interpolations and plots, see http://docs.scipy.org/doc/numpy/reference/generated/numpy.histogram2d.html
# For colour maps, see http://matplotlib.org/examples/color/colormaps_reference.html
fig = plt.figure()
cm = plt.cm.get_cmap(colourmap)
if args.background:
cm.set_under(color=args.background)
ax = fig.add_subplot(1,1,1)
im = plt.imshow(H, interpolation='bilinear', origin='lower', extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]], vmin=0.0000001, cmap=cm)
ax.set_xlim(xedges[0], xedges[-1])
ax.set_ylim(yedges[0], yedges[-1])
if args.bar:
cb = plt.colorbar(im, shrink=0.8, extend='neither')
cb.ax.set_ylabel('sequences', rotation=90)
texts = []
for point in points:
df, format = point
markersize = 5
label = False
labelcolour = 'black'
fmt = format.split('/')
format = fmt[0]
for f in fmt[1:]:
if f[0] == 'm':
markersize = int(f[1:])
elif f[0] == 'l':
label = True
if len(f) > 1:
labelcolour = f[1:]
else:
print 'Unrecognised format string: %s' % format
for index, row in df.iterrows():
if label:
if args.adjust:
texts.append(plt.text(row['GermlineDist'], row['TargetDist'], row['SequenceId'], bbox={'pad':0, 'alpha':0}, fontdict={ 'color': labelcolour}))
else:
texts.append(plt.text(row['GermlineDist'] + 0.2, row['TargetDist'] - 0.2, row['SequenceId'], bbox={'pad':0, 'alpha':0}, fontdict={ 'color': labelcolour}))
ax.plot(row['GermlineDist'], row['TargetDist'], format, markersize=markersize)
if args.adjust:
adjust_text(texts)
plt.xlabel('Germline Divergence (%)')
plt.ylabel('Target Ab Identity (%)')
if savefile:
plt.savefig(savefile)
else:
plt.show()
def read_file(file):
df = pd.read_csv(file, converters={'SequenceId': lambda x: x})
for key in ("SequenceId", "TargetDist", "GermlineDist"):
if key not in df.keys():
print 'File %s does not contain a column "%s"' % (file, key)
quit()
for index, row in df.iterrows():
try:
x = row[1] * row[2] # check they behave like numbers
except:
print 'Error in file %s: malformed row at %s.' % (file, row[0])
if len(df) < 1:
print '%s: empty file.' % file
quit()
return df
if __name__=="__main__":
main(sys.argv)
| mit |
cuemacro/chartpy | chartpy_examples/subplot_example.py | 1 | 2359 | __author__ = 'saeedamen' # Saeed Amen
#
# Copyright 2016 Cuemacro
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and limitations under the License.
#
import pandas
# support Quandl 3.x.x
try:
import quandl as Quandl
except:
# if import fails use Quandl 2.x.x
import Quandl
from chartpy import Chart, Style
# get your own free Quandl API key from https://www.quandl.com/
try:
from chartpy.chartcred import ChartCred
cred = ChartCred()
quandl_api_key = cred.quandl_api_key
except:
quandl_api_key = "x"
# choose run_example = 0 for everything
# run_example = 1 - plot US GDP QoQ (real) and nominal with Plotly/Bokeh/Matplotlib with subplots for each line
# run_example = 2 - plot US GDP QoQ (real + nominal) in two double plots (passing an array of dataframes)
run_example = 0
if run_example == 1 or run_example == 0:
df = Quandl.get(["FRED/A191RL1Q225SBEA", "FRED/A191RP1Q027SBEA"], authtoken=quandl_api_key)
df.columns = ["Real QoQ", "Nominal QoQ"]
# set the style of the plot
style = Style(title="US GDP", source="Quandl/Fred", subplots=True)
# Chart object is initialised with the dataframe and our chart style
chart = Chart(df=df, chart_type='line', style=style)
chart.plot(engine='matplotlib')
chart.plot(engine='bokeh')
chart.plot(engine='plotly')
if run_example == 2 or run_example == 0:
df = Quandl.get(["FRED/A191RL1Q225SBEA", "FRED/A191RP1Q027SBEA"], authtoken=quandl_api_key)
df.columns = ["Real QoQ", "Nominal QoQ"]
df = [df, df]
# set the style of the plot
style = Style(title="US GDP double plot", source="Quandl/Fred", subplots=True)
# Chart object is initialised with the dataframe and our chart style
chart = Chart(df=df, chart_type='line', style=style)
chart.plot(engine='bokeh')
chart.plot(engine='matplotlib')
chart.plot(engine='plotly') # TODO fix legends though
| apache-2.0 |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/skimage/viewer/utils/core.py | 19 | 6555 | import numpy as np
from ..qt import QtWidgets, has_qt, FigureManagerQT, FigureCanvasQTAgg
from ..._shared.utils import warn
import matplotlib as mpl
from matplotlib.figure import Figure
from matplotlib import _pylab_helpers
from matplotlib.colors import LinearSegmentedColormap
if has_qt and 'agg' not in mpl.get_backend().lower():
warn("Recommended matplotlib backend is `Agg` for full "
"skimage.viewer functionality.")
__all__ = ['init_qtapp', 'start_qtapp', 'RequiredAttr', 'figimage',
'LinearColormap', 'ClearColormap', 'FigureCanvas', 'new_plot',
'update_axes_image']
QApp = None
def init_qtapp():
"""Initialize QAppliction.
The QApplication needs to be initialized before creating any QWidgets
"""
global QApp
QApp = QtWidgets.QApplication.instance()
if QApp is None:
QApp = QtWidgets.QApplication([])
return QApp
def is_event_loop_running(app=None):
"""Return True if event loop is running."""
if app is None:
app = init_qtapp()
if hasattr(app, '_in_event_loop'):
return app._in_event_loop
else:
return False
def start_qtapp(app=None):
"""Start Qt mainloop"""
if app is None:
app = init_qtapp()
if not is_event_loop_running(app):
app._in_event_loop = True
app.exec_()
app._in_event_loop = False
else:
app._in_event_loop = True
class RequiredAttr(object):
"""A class attribute that must be set before use."""
instances = dict()
def __init__(self, init_val=None):
self.instances[self, None] = init_val
def __get__(self, obj, objtype):
value = self.instances[self, obj]
if value is None:
raise AttributeError('Required attribute not set')
return value
def __set__(self, obj, value):
self.instances[self, obj] = value
class LinearColormap(LinearSegmentedColormap):
"""LinearSegmentedColormap in which color varies smoothly.
This class is a simplification of LinearSegmentedColormap, which doesn't
support jumps in color intensities.
Parameters
----------
name : str
Name of colormap.
segmented_data : dict
Dictionary of 'red', 'green', 'blue', and (optionally) 'alpha' values.
Each color key contains a list of `x`, `y` tuples. `x` must increase
monotonically from 0 to 1 and corresponds to input values for a
mappable object (e.g. an image). `y` corresponds to the color
intensity.
"""
def __init__(self, name, segmented_data, **kwargs):
segmented_data = dict((key, [(x, y, y) for x, y in value])
for key, value in segmented_data.items())
LinearSegmentedColormap.__init__(self, name, segmented_data, **kwargs)
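# A minimal usage sketch for the segmented_data format documented above
# (the colormap name and values are illustrative, not part of the module):
# a map fading linearly from black to red could be built as
#
#   black_to_red = LinearColormap('black_red',
#                                 {'red': [(0.0, 0.0), (1.0, 1.0)],
#                                  'green': [(0.0, 0.0), (1.0, 0.0)],
#                                  'blue': [(0.0, 0.0), (1.0, 0.0)]})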
class ClearColormap(LinearColormap):
"""Color map that varies linearly from alpha = 0 to 1
"""
def __init__(self, rgb, max_alpha=1, name='clear_color'):
r, g, b = rgb
cg_speq = {'blue': [(0.0, b), (1.0, b)],
'green': [(0.0, g), (1.0, g)],
'red': [(0.0, r), (1.0, r)],
'alpha': [(0.0, 0.0), (1.0, max_alpha)]}
LinearColormap.__init__(self, name, cg_speq)
class FigureCanvas(FigureCanvasQTAgg):
"""Canvas for displaying images."""
def __init__(self, figure, **kwargs):
self.fig = figure
FigureCanvasQTAgg.__init__(self, self.fig)
FigureCanvasQTAgg.setSizePolicy(self,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvasQTAgg.updateGeometry(self)
def resizeEvent(self, event):
FigureCanvasQTAgg.resizeEvent(self, event)
# Call to `resize_event` missing in FigureManagerQT.
# See https://github.com/matplotlib/matplotlib/pull/1585
self.resize_event()
def new_canvas(*args, **kwargs):
"""Return a new figure canvas."""
allnums = _pylab_helpers.Gcf.figs.keys()
num = max(allnums) + 1 if allnums else 1
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
canvas = FigureCanvas(figure)
fig_manager = FigureManagerQT(canvas, num)
return fig_manager.canvas
def new_plot(parent=None, subplot_kw=None, **fig_kw):
"""Return new figure and axes.
Parameters
----------
parent : QtWidget
Qt widget that displays the plot objects. If None, you must manually
call ``canvas.setParent`` and pass the parent widget.
subplot_kw : dict
Keyword arguments passed ``matplotlib.figure.Figure.add_subplot``.
fig_kw : dict
Keyword arguments passed ``matplotlib.figure.Figure``.
"""
if subplot_kw is None:
subplot_kw = {}
canvas = new_canvas(**fig_kw)
canvas.setParent(parent)
fig = canvas.figure
ax = fig.add_subplot(1, 1, 1, **subplot_kw)
return fig, ax
def figimage(image, scale=1, dpi=None, **kwargs):
"""Return figure and axes with figure tightly surrounding image.
Unlike pyplot.figimage, this actually plots onto an axes object, which
fills the figure. Plotting the image onto an axes allows for subsequent
overlays of axes artists.
Parameters
----------
image : array
image to plot
scale : float
If scale is 1, the figure and axes have the same dimension as the
image. Smaller values of `scale` will shrink the figure.
dpi : int
Dots per inch for figure. If None, use the default rcParam.
"""
dpi = dpi if dpi is not None else mpl.rcParams['figure.dpi']
kwargs.setdefault('interpolation', 'nearest')
kwargs.setdefault('cmap', 'gray')
h, w, d = np.atleast_3d(image).shape
figsize = np.array((w, h), dtype=float) / dpi * scale
fig, ax = new_plot(figsize=figsize, dpi=dpi)
fig.subplots_adjust(left=0, bottom=0, right=1, top=1)
ax.set_axis_off()
ax.imshow(image, **kwargs)
ax.figure.canvas.draw()
return fig, ax
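# Usage sketch for figimage (illustrative only; it assumes a Qt application
# has already been created, e.g. via init_qtapp()):
#
#   import numpy as np
#   fig, ax = figimage(np.random.rand(64, 64), scale=0.5)  # half-size figure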
def update_axes_image(image_axes, image):
"""Update the image displayed by an image plot.
This sets the image plot's array and updates its shape appropriately
Parameters
----------
image_axes : `matplotlib.image.AxesImage`
Image axes to update.
image : array
Image array.
"""
image_axes.set_array(image)
# Adjust size if new image shape doesn't match the original
h, w = image.shape[:2]
image_axes.set_extent((0, w, h, 0))
| mit |
ilyes14/scikit-learn | examples/preprocessing/plot_function_transformer.py | 161 | 1949 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principal component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
plt.scatter(X[:, 0], X[:, 1], c=y, s=50)
plt.show()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
s=50,
)
plt.show()
| bsd-3-clause |
timmie/cartopy | lib/cartopy/mpl/ticker.py | 3 | 10493 | # (C) British Crown Copyright 2014 - 2016, Met Office
#
# This file is part of cartopy.
#
# cartopy is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# cartopy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with cartopy. If not, see <https://www.gnu.org/licenses/>.
"""This module contains tools for handling tick marks in cartopy."""
from __future__ import (absolute_import, division, print_function)
from matplotlib.ticker import Formatter
import cartopy.crs as ccrs
from cartopy.mpl.geoaxes import GeoAxes
class _PlateCarreeFormatter(Formatter):
"""
Base class for formatting ticks on geographical axes using a
rectangular projection (e.g. Plate Carree, Mercator).
"""
_target_projection = ccrs.PlateCarree()
def __init__(self, degree_symbol=u'\u00B0', number_format='g',
transform_precision=1e-8):
"""
Base class for simpler implementation of specialised formatters
for latitude and longitude axes.
"""
self._degree_symbol = degree_symbol
self._number_format = number_format
self._transform_precision = transform_precision
def __call__(self, value, pos=None):
if not isinstance(self.axis.axes, GeoAxes):
raise TypeError("This formatter can only be "
"used with cartopy axes.")
# We want to produce labels for values in the familiar Plate Carree
# projection, so convert the tick values from their own projection
# before formatting them.
source = self.axis.axes.projection
if not isinstance(source, (ccrs._RectangularProjection,
ccrs.Mercator)):
raise TypeError("This formatter cannot be used with "
"non-rectangular projections.")
projected_value = self._apply_transform(value, self._target_projection,
source)
# Round the transformed value using a given precision for display
# purposes. Transforms can introduce minor rounding errors that make
# the tick values look bad, these need to be accounted for.
f = 1. / self._transform_precision
projected_value = round(f * projected_value) / f
# Return the formatted values, the formatter has both the re-projected
# tick value and the original axis value available to it.
return self._format_value(projected_value, value)
def _format_value(self, value, original_value):
hemisphere = self._hemisphere(value, original_value)
fmt_string = u'{value:{number_format}}{degree}{hemisphere}'
return fmt_string.format(value=abs(value),
number_format=self._number_format,
degree=self._degree_symbol,
hemisphere=hemisphere)
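# Worked example of the formatting above (value illustrative): on a latitude
# axis a projected value of -30 yields u'30{degree_symbol}S', since the
# absolute value is formatted and the sign is mapped to a hemisphere suffix.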
def _apply_transform(self, value, target_proj, source_crs):
"""
Given a single value, a target projection and a source CRS,
transforms the value from the source CRS to the target
projection, returning a single value.
"""
raise NotImplementedError("A subclass must implement this method.")
def _hemisphere(self, value, value_source_crs):
"""
Given both a tick value in the Plate Carree projection and the
same value in the source CRS returns a string indicating the
hemisphere that the value is in.
Must be over-ridden by the derived class.
"""
raise NotImplementedError("A subclass must implement this method.")
class LatitudeFormatter(_PlateCarreeFormatter):
"""Tick formatter for latitude axes."""
def __init__(self, degree_symbol=u'\u00B0', number_format='g',
transform_precision=1e-8):
"""
Tick formatter for a latitude axis.
The axis must be part of an axes defined on a rectangular
projection (e.g. Plate Carree, Mercator).
.. note::
A formatter can only be used for one axis. A new formatter
must be created for every axis that needs formatted labels.
Kwargs:
* degree_symbol (string):
The character(s) used to represent the degree symbol in the
tick labels. Defaults to u'\u00B0' which is the unicode
degree symbol. Can be an empty string if no degree symbol is
desired.
* number_format (string):
Format string to represent the tick values. Defaults to 'g'.
* transform_precision (float):
Sets the precision (in degrees) to which transformed tick
values are rounded. The default is 1e-8, and should be
suitable for most use cases. To control the appearance of
tick labels use the *number_format* keyword.
Examples:
Label latitudes from -90 to 90 on a Plate Carree projection::
ax = plt.axes(projection=PlateCarree())
ax.set_global()
ax.set_yticks([-90, -60, -30, 0, 30, 60, 90],
crs=ccrs.PlateCarree())
lat_formatter = LatitudeFormatter()
ax.yaxis.set_major_formatter(lat_formatter)
Label latitudes from -80 to 80 on a Mercator projection, this
time omitting the degree symbol::
ax = plt.axes(projection=Mercator())
ax.set_global()
ax.set_yticks([-90, -60, -30, 0, 30, 60, 90],
crs=ccrs.PlateCarree())
lat_formatter = LatitudeFormatter(degree_symbol='')
ax.yaxis.set_major_formatter(lat_formatter)
"""
super(LatitudeFormatter, self).__init__(
degree_symbol=degree_symbol,
number_format=number_format,
transform_precision=transform_precision)
def _apply_transform(self, value, target_proj, source_crs):
return target_proj.transform_point(0, value, source_crs)[1]
def _hemisphere(self, value, value_source_crs):
if value > 0:
hemisphere = 'N'
elif value < 0:
hemisphere = 'S'
else:
hemisphere = ''
return hemisphere
class LongitudeFormatter(_PlateCarreeFormatter):
"""Tick formatter for a longitude axis."""
def __init__(self,
zero_direction_label=False,
dateline_direction_label=False,
degree_symbol=u'\u00B0',
number_format='g',
transform_precision=1e-8):
"""
Create a formatter for longitude values.
The axis must be part of an axes defined on a rectangular
projection (e.g. Plate Carree, Mercator).
.. note::
A formatter can only be used for one axis. A new formatter
must be created for every axis that needs formatted labels.
Kwargs:
* zero_direction_label (False | True):
If *True* a direction label (E or W) will be drawn next to
longitude labels with the value 0. If *False* then these
labels will not be drawn. Defaults to *False* (no direction
labels).
* dateline_direction_label (False | True):
If *True* a direction label (E or W) will be drawn next to
longitude labels with the value 180. If *False* then these
labels will not be drawn. Defaults to *False* (no direction
labels).
* degree_symbol (string):
The symbol used to represent degrees. Defaults to u'\u00B0'
which is the unicode degree symbol.
* number_format (string):
Format string to represent the longitude values. Defaults to
'g'.
* transform_precision (float):
Sets the precision (in degrees) to which transformed tick
values are rounded. The default is 1e-8, and should be
suitable for most use cases. To control the appearance of
tick labels use the *number_format* keyword.
Examples:
Label longitudes from -180 to 180 on a Plate Carree projection
with a central longitude of 0::
ax = plt.axes(projection=PlateCarree())
ax.set_global()
ax.set_xticks([-180, -120, -60, 0, 60, 120, 180],
crs=ccrs.PlateCarree())
lon_formatter = LongitudeFormatter()
ax.xaxis.set_major_formatter(lon_formatter)
Label longitudes from 0 to 360 on a Plate Carree projection
with a central longitude of 180::
ax = plt.axes(projection=PlateCarree(central_longitude=180))
ax.set_global()
ax.set_xticks([0, 60, 120, 180, 240, 300, 360],
crs=ccrs.PlateCarree())
lon_formatter = LongitudeFormatter()
ax.xaxis.set_major_formatter(lon_formatter)
"""
super(LongitudeFormatter, self).__init__(
degree_symbol=degree_symbol,
number_format=number_format,
transform_precision=transform_precision)
self._zero_direction_labels = zero_direction_label
self._dateline_direction_labels = dateline_direction_label
def _apply_transform(self, value, target_proj, source_crs):
return target_proj.transform_point(value, 0, source_crs)[0]
def _hemisphere(self, value, value_source_crs):
# Perform basic hemisphere detection.
if value < 0:
hemisphere = 'W'
elif value > 0:
hemisphere = 'E'
else:
hemisphere = ''
# Correct for user preferences:
if value == 0 and self._zero_direction_labels:
# Use the original tick value to determine the hemisphere.
if value_source_crs < 0:
hemisphere = 'E'
else:
hemisphere = 'W'
if value in (-180, 180) and not self._dateline_direction_labels:
hemisphere = ''
return hemisphere
| gpl-3.0 |
ifcharming/voltdb2.1 | tools/vis.py | 1 | 5697 | #!/usr/bin/env python
# This is a visualizer which pulls TPC-C benchmark results from the MySQL
# databases and visualizes them. Latency and throughput graphs will be
# generated for single-node and multi-node (3 and 6 node) runs.
#
# Run it without any arguments to see what arguments are needed.
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))) +
os.sep + 'tests/scripts/')
import time
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from voltdbclient import *
STATS_SERVER = 'volt2'
def COLORS(k):
return (((k ** 3) % 255) / 255.0,
((k * 100) % 255) / 255.0,
((k * k) % 255) / 255.0)
MARKERS = ['+', '*', '<', '>', '^', '_',
'D', 'H', 'd', 'h', 'o', 'p']
def get_stats(hostname, port, days):
"""Get statistics of all runs
Example return value:
{ u'VoltKV': [ { 'lat95': 21,
'lat99': 35,
'nodes': 1,
'throughput': 104805,
'date': datetime object}],
u'Voter': [ { 'lat95': 20,
'lat99': 47,
'nodes': 1,
'throughput': 66287,
'date': datetime object}]}
"""
conn = FastSerializer(hostname, port)
proc = VoltProcedure(conn, 'BestOfPeriod',
[FastSerializer.VOLTTYPE_SMALLINT])
resp = proc.call([days])
conn.close()
# keyed on app name, value is a list of runs sorted chronologically
stats = dict()
run_stat_keys = ['nodes', 'date', 'tps', 'lat95', 'lat99']
for row in resp.tables[0].tuples:
app_stats = []
if row[0] not in stats:
stats[row[0]] = app_stats
else:
app_stats = stats[row[0]]
run_stats = dict(zip(run_stat_keys, row[1:]))
app_stats.append(run_stats)
# sort each one
for app_stats in stats.itervalues():
app_stats.sort(key=lambda x: x['date'])
return stats
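# Usage sketch (application names and values are illustrative): the dict
# returned above is keyed on app name, and each run carries the keys listed
# in run_stat_keys, already sorted chronologically, e.g.
#
#   stats = get_stats(STATS_SERVER, 21212, 30)
#   voter_runs = stats.get(u'Voter', [])
#   latest = voter_runs[-1] if voter_runs else None
#   # latest['nodes'], latest['date'], latest['tps'], latest['lat95'], latest['lat99']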
class Plot:
DPI = 100.0
def __init__(self, title, xlabel, ylabel, filename, w, h):
self.filename = filename
self.legends = {}
w = w == None and 800 or w
h = h == None and 300 or h
fig = plt.figure(figsize=(w / self.DPI, h / self.DPI),
dpi=self.DPI)
self.ax = fig.add_subplot(111)
self.ax.set_title(title)
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.ylabel(ylabel, fontsize=8)
plt.xlabel(xlabel, fontsize=8)
fig.autofmt_xdate()
def plot(self, x, y, color, marker_shape, legend):
self.ax.plot(x, y, linestyle="-", label=str(legend),
marker=marker_shape, markerfacecolor=color, markersize=4)
def close(self):
formatter = matplotlib.dates.DateFormatter("%b %d")
self.ax.xaxis.set_major_formatter(formatter)
plt.legend(prop={'size': 10}, loc=0)
plt.savefig(self.filename, format="png", transparent=False,
bbox_inches="tight", pad_inches=0.2)
def plot(title, xlabel, ylabel, filename, nodes, width, height, data,
data_type):
plot_data = dict()
for app, runs in data.iteritems():
for v in runs:
if v['nodes'] != nodes:
continue
if app not in plot_data:
plot_data[app] = {'time': [], data_type: []}
datenum = matplotlib.dates.date2num(v['date'])
plot_data[app]['time'].append(datenum)
if data_type == 'tps':
value = v['tps']/v['nodes']
else:
value = v[data_type]
plot_data[app][data_type].append(value)
if len(plot_data) == 0:
return
i = 0
pl = Plot(title, xlabel, ylabel, filename, width, height)
sorted_data = sorted(plot_data.items(), key=lambda x: x[0])
for k, v in sorted_data:
pl.plot(v['time'], v[data_type], COLORS(i), MARKERS[i], k)
i += 3
pl.close()
def usage():
print "Usage:"
print "\t", sys.argv[0], "output_dir filename_base" \
" [width] [height]"
print
print "\t", "width in pixels"
print "\t", "height in pixels"
def main():
if len(sys.argv) < 3:
usage()
exit(-1)
if not os.path.exists(sys.argv[1]):
print sys.argv[2], "does not exist"
exit(-1)
path = os.path.join(sys.argv[1], sys.argv[2])
width = None
height = None
if len(sys.argv) >= 4:
width = int(sys.argv[3])
if len(sys.argv) >= 5:
height = int(sys.argv[4])
stats = get_stats(STATS_SERVER, 21212, 30)
# Plot single node stats for all apps
plot("Average Latency on Single Node", "Time", "Latency (ms)",
path + "-latency-single.png", 1, width, height, stats, 'lat99')
plot("Single Node Performance", "Time", "Throughput (txns/sec)",
path + "-throughput-single.png", 1, width, height, stats, 'tps')
# Plot 3 node stats for all apps
plot("Average Latency on 3 Nodes", "Time", "Latency (ms)",
path + "-latency-3.png", 3, width, height, stats, 'lat99')
plot("3 Node Performance", "Time", "Throughput (txns/sec)",
path + "-throughput-3.png", 3, width, height, stats, 'tps')
# Plot 6 node stats for all apps
plot("Average Latency on 6 Node", "Time", "Latency (ms)",
path + "-latency-6.png", 6, width, height, stats, 'lat99')
plot("6 Node Performance", "Time", "Throughput (txns/sec)",
path + "-throughput-6.png", 6, width, height, stats, 'tps')
if __name__ == "__main__":
main()
| gpl-3.0 |
gdetor/SI-RF-Structure | Statistics/clear_data.py | 1 | 5369 | # Copyright (c) 2014, Georgios Is. Detorakis (gdetor@gmail.com) and
# Nicolas P. Rougier (nicolas.rougier@inria.fr)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# This file is part of the source code accompany the peer-reviewed article:
# [1] "Structure of Receptive Fields in a Computational Model of Area 3b of
# Primary Sensory Cortex", Georgios Is. Detorakis and Nicolas P. Rougier,
# Frontiers in Computational Neuroscience, 2014.
#
# This script applies all the filters and cleaning techniques to the ncRFs. You
# have to use this script before any further statistical analysis of the data.
import numpy as np
from matplotlib import rc
import matplotlib.pylab as plt
from scipy.stats.stats import pearsonr
from scipy.stats.mstats import gmean
from scipy.ndimage import gaussian_filter
def locate_noise( input ):
n = input.shape[0]
data = input.copy()
count = 0
for i in range( 1,n-1 ):
for j in range( 1,n-1 ):
if data[i,j] != 0:
if data[i+1,j] != 0 and np.sign(data[i+1,j])==np.sign(data[i,j]):
count += 1
if data[i-1,j] != 0 and np.sign(data[i-1,j])==np.sign(data[i,j]):
count += 1
if data[i,j-1] != 0 and np.sign(data[i,j-1])==np.sign(data[i,j]):
count += 1
if data[i,j+1] != 0 and np.sign(data[i,j+1])==np.sign(data[i,j]):
count += 1
if count < 2:
data[i,j] = 0
count = 0
return data
# Computing the area of the receptive fields according to Dicarlo's
# protocol described in article "Structure of Receptive Fields in area 3b...
def clear_data( RFs, n ):
p = 25
Z, T = [], []
Noise = np.load( 'noise.npy' ).reshape(n*n,p,p)
cRFs = np.zeros((n*n,p,p))
for i in range( n ):
for j in range( n ):
RF = RFs[i,j,...]
# WARNING : Centering the RF
s0,s1 = np.unravel_index(np.argmax(RF),RF.shape)
RF = np.roll(RF,13-s0,axis=0)
RF = np.roll(RF,13-s1,axis=1)
# WARNING : Centering the RF
# RF += Noise[i*n+j]
# RF = gaussian_filter( RF, sigma=2.2 )
RF += 1.5*Noise[i*n+j]
RF = gaussian_filter( RF, sigma=1.5 )
abs_max = np.max( np.abs( RF ) )
RF[np.where( ( ( RF < +0.10*abs_max ) & (RF>0) ) | ( ( RF > -0.10*abs_max ) & (RF < 0) ) ) ]=0
RF = locate_noise( RF )
cRFs[i*n+j,...] = RF
exc = 50.0 * ( RF > 0).sum()/( p * p )
inh = 50.0 * ( RF < 0).sum()/( p * p )
Z.append([exc,inh])
Z = np.array(Z)
np.nan_to_num(Z)
print '------ Excitatory ------- Inhibitory -------'
print 'Minimum :', Z[:,0].min(), Z[:,1].min()
print 'Maximum :', Z[:,0].max(), Z[:,1].max()
print 'Mean :', np.mean( Z[:,0] ), np.mean( Z[:,1] )
print 'Log-Mean :', np.mean( np.log10(Z[:,0]) ), np.mean( np.log10(Z[:,1]) )
print 'SD : ', np.std( np.log10(Z[:,0]) ), np.std( np.log10(Z[:,1]) )
print 'GMean :', gmean( Z[:,0] ), gmean( Z[:,1] )
print "Pearson cor: ", pearsonr( Z[:,0], np.abs(Z[:,1]) )
return Z, cRFs
# Computing the SNR of the receptive fields.
def snr( signal, sigma ):
k = signal.shape[0]
# Filtering the input signal
filtered_s = gaussian_filter( signal, sigma )
# Computing background noise
noise = signal - filtered_s
# Computing noise variance
noise_var = np.var( noise )
# Computing signal and noise power
signalPow = np.sum( signal**2 )/k
noisePow = np.sum( noise**2 )/k
# Computing snr and noise index
snr = 10.0 * np.log10( signalPow/noisePow )
noise_index = noise_var/np.abs(signal).max() *100.0
return snr, noise_index, filtered_s
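# Worked example of the formula above (numbers are illustrative): with
# signalPow = 100 and noisePow = 1 the function returns
# snr = 10 * log10(100 / 1) = 20 dB, while noise_index reports the noise
# variance as a percentage of the absolute maximum of the signal.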
# Main :p
if __name__=='__main__':
np.random.seed(137)
RFs = np.load('real-rfs-ref.npy').reshape(32,32,25,25)
n, size, bins = RFs.shape[0], RFs.shape[2], 70
Z, cRFs = clear_data( RFs, n )
np.save('areas-ref', Z)
np.save('cleared-rfs', cRFs)
| gpl-3.0 |
rizac/gfz-reportgen | gfzreport/sphinxbuild/map/__init__.py | 2 | 43603 | '''
This module implements the function `plotmap` which plots scattered points on a map
retrieved using ArgGIS Server REST API. The function is highly customizable and is basically a
wrapper around the `Basemap` library (for the map background)
plus matplotlib utilities (for plotting points, shapes, labels and legend)
Created on Mar 10, 2016
@author: riccardo
'''
import numpy as np
import re
from itertools import izip, chain
from urllib2 import URLError, HTTPError
import socket
import matplotlib.pyplot as plt
import matplotlib.patheffects as PathEffects
from mpl_toolkits.basemap import Basemap
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
def parse_margins(obj, parsefunc=lambda margins: [float(val) for val in margins]):
"""Parses obj returning a 4 element numpy array denoting the top, right, bottom and left
values. This function first converts obj to a 4 element list L, and then
calls `parsefunc`, which by default converts all L values into float
:param obj: either None, a number, a list of numbers (allowed lengths: 1 to 4),
a comma/semicolon/spaces separated string (e.g. "4deg 0.0", "1, 1.2", "2km,4deg", "1 ; 2")
:param parsefunc: a function to be applied to obj converted to list. By default, returns
float(v) for any v in L
:return: a 4 element numpy array of floats denoting the top, right, bottom, left values of
the margins. The idea is the same as css margins, as depicted in the table below.
:Examples:
Denoting `parsefunc` by f:
============= =========================
obj is returns
============= =========================
None [0, 0, 0, 0]
------------- -------------------------
string the list obtained after
splitting string via
regexp where comma,
semicolon and spaces
are valid separators
------------- -------------------------
x or [x] parsefunc([x, x, x, x])
------------- -------------------------
[x, y] parsefunc([x, y ,x, y])
------------- -------------------------
[x, y, z] parsefunc([x, y, z, y])
------------- -------------------------
[x, y, z, t] parsefunc([x, y, z, t])
============= =========================
"""
if obj is None:
margins = [0] * 4
elif hasattr(obj, "__iter__") and not isinstance(obj, str):
# is an iterable not string. Note the if above is py2 py3 compatible
margins = list(obj)
else:
try:
margins = [float(obj)] * 4
except (TypeError, ValueError):
margins = re.compile("(?:\\s*,\\s*|\\s*;\\s*|\\s+)").split(obj)
if len(margins) == 1:
margins *= 4
elif len(margins) == 2:
margins *= 2
elif len(margins) == 3:
margins.append(margins[1])
elif len(margins) != 4:
raise ValueError("unable to parse margins on invalid value '%s'" % obj)
return np.asarray(parsefunc(margins) if hasattr(parsefunc, "__call__") else margins)
# return margins
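# Minimal sketch of the conversions tabulated in the docstring above
# (inputs are illustrative; outputs shown as plain lists for brevity):
#
#   parse_margins(None)       # -> [0.0, 0.0, 0.0, 0.0]
#   parse_margins(1.5)        # -> [1.5, 1.5, 1.5, 1.5]
#   parse_margins("1, 2")     # -> [1.0, 2.0, 1.0, 2.0]
#   parse_margins([1, 2, 3])  # -> [1.0, 2.0, 3.0, 2.0]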
def parse_distance(dist, lat_0=None):
"""Returns the distance in degrees. If dist is in km or m, and lat_0 is not None,
returns w2lon, else h2lat. dist None defaults to 0
:param dist: float, int None, string. If string and has a unit, see above
"""
try:
return 0 if dist is None else float(dist)
except ValueError:
if dist[-3:].lower() == 'deg':
return float(dist[:-3])
elif dist[-2:] == 'km':
dst = 1000 * float(dist[:-2])
elif dist[-1:] == 'm':
dst = float(dist[:-1])
else:
raise
return w2lon(dst, lat_0) if lat_0 is not None else h2lat(dst)
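# Sketch of the unit handling above (inputs illustrative):
#
#   parse_distance(2)              # -> 2.0, already in degrees
#   parse_distance('1.5deg')       # -> 1.5
#   parse_distance('100km')        # -> h2lat(100000.0), no lat_0 given
#   parse_distance('100km', 45.0)  # -> w2lon(100000.0, 45.0)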
def get_lon0_lat0(min_lons, min_lats, max_lons, max_lats):
""" Calculates lat_0, lon_0, i.e., the mid point of the bounding box denoted by the
arguments
:param min_lons: the minimum of longitudes
:param min_lats: the maximum of latitudes
:param max_lons: the minimum of longitudes
:param max_lats: the maximum of latitudes
:return: the 2-element tuple denoting the mid point lon_0, lat_0
"""
lat_0 = max_lats / 2. + min_lats / 2.
lon_0 = max_lons / 2. + min_lons / 2.
if lon_0 > 180: # FIXME: necessary?? see self.get_normalized... above
lon_0 -= 360
return lon_0, lat_0
def getbounds(min_lon, min_lat, max_lon, max_lat, margins):
"""Calculates the bounds given the bounding box identified by the arguments and
given optional margins
:param min_lon: the minimum longitude (numeric, scalar)
:param min_lat: the maximum latitude (numeric, scalar)
:param max_lon: the minimum longitude (numeric, scalar)
:param max_lat: the maximum latitude (numeric, scalar)
:param margins: the margins as a css-like string (with units 'deg', 'km' or 'm'), or as
a 1 to 4 element array of numeric values (in that case denoting degrees).
As in css, a 4 element array denotes the [top, right, bottom, left] values.
None defaults to [0, 0, 0, 0].
:return: the 6-element tuple denoting lon_0, lat_0, min_lon, min_lat, max_lon, max_lat.
where min_lon, min_lat, max_lon, max_lat are the new bounds and lon_0 and lat_0 are
their midpoints (x and y, respectively)
"""
def parsefunc(mrgns):
"""parses mrgns as array of strings into array of floats
"""
return parse_distance(mrgns[0]), parse_distance(mrgns[1], max_lat), \
parse_distance(mrgns[2]), parse_distance(mrgns[3], min_lat)
top, right, btm, left = parse_margins(margins, parsefunc)
min_lon, min_lat, max_lon, max_lat = min_lon-left, min_lat-btm, max_lon+right, max_lat+top
if min_lon == max_lon:
min_lon -= 10 # in degrees
max_lon += 10 # in degrees
if min_lat == max_lat:
min_lat -= 10 # in degrees
max_lat += 10 # in degrees
# minima must be within bounds:
min_lat = max(-90, min_lat)
max_lat = min(90, max_lat)
min_lon = max(-180, min_lon)
max_lon = min(180, max_lon)
lon_0, lat_0 = get_lon0_lat0(min_lon, min_lat, max_lon, max_lat)
return lon_0, lat_0, min_lon, min_lat, max_lon, max_lat
# static constant converter (degree to meters and viceversa) for latitudes
DEG2M_LAT = 2 * np.pi * 6371 * 1000 / 360
def lat2h(distance_in_degrees):
"""converts latitude distance from degrees to height in meters
:param distance_in_degrees: a distance (python scalar or numpy array) along the great circle
espressed in degrees"""
deg2m_lat = DEG2M_LAT # 2 * np.pi * 6371 * 1000 / 360
return distance_in_degrees * deg2m_lat
def h2lat(distance_in_meters):
"""converts latitude distance from height in meters to degrees
:param distance_in_meters: a distance (python scalar or numpy array) along the great circle
expressed in meters"""
deg2m_lat = DEG2M_LAT # deg2m_lat = 2 * np.pi * 6371 * 1000 / 360
return distance_in_meters / deg2m_lat
def lon2w(distance_in_degrees, lat_0):
"""converts longitude distance from degrees to width in meters
:param distance_in_degrees: a distance (python scalar or numpy array)
along the lat_0 circle expressed in degrees
:param lat_0: the latitude (in degrees) of the circle along which
lon2w(distance_in_degrees) must be converted to meters"""
deg2m_lat = DEG2M_LAT
deg2m_lon = deg2m_lat * np.cos(lat_0 / 180 * np.pi)
return distance_in_degrees * deg2m_lon
def w2lon(distance_in_meters, lat_0):
"""converts longitude distance from width in meters to degrees
:param distance_in_meters: a distance (python scalar or numpy array)
along the lat_0 circle expressed in meters
:param lat_0: the latitude (in degrees) of the circle along which
w2lon(distance_in_meters) must be converted to degrees"""
deg2m_lat = DEG2M_LAT # deg2m_lat = 2 * np.pi * 6371 * 1000 / 360
deg2m_lon = deg2m_lat * np.cos(lat_0 / 180 * np.pi)
return distance_in_meters / deg2m_lon
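# Worked example for the conversions above: DEG2M_LAT = 2 * pi * 6371 * 1000 / 360
# ~= 111194.9 m per degree of latitude, so lat2h(1.0) ~= 111.2 km and, at
# lat_0 = 60 degrees, lon2w(1.0, 60.0) ~= 111194.9 * cos(60 deg) ~= 55597 m.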
class MapHandler(object):
"""
Class handling bounds of a map given points (lons and lats)
"""
def __init__(self, lons, lats, map_margins):
"""Initializes a new MapHandler. If figure here is None, you **MUST**
call self.set_fig(fig) to calculate bounds and other stuff
when you have a ready figure"""
self.lons = lons if len(lons) else [0] # FIXME: use numpy arrays!!
self.lats = lats if len(lats) else [0]
self.max_lons, self.min_lons = max(self.lons), min(self.lons)
self.max_lats, self.min_lats = max(self.lats), min(self.lats)
self.lon_0, self.lat_0, self.llcrnrlon, self.llcrnrlat, self.urcrnrlon, self.urcrnrlat = \
getbounds(self.min_lons, self.min_lats, self.max_lons, self.max_lats, map_margins)
def _get_map_dims(self): # , fig_size_in_inches, colorbar=False):
"""Returns the map dimension width, height, in meters"""
max_lons, min_lons = self.urcrnrlon, self.llcrnrlon
max_lats, min_lats = self.urcrnrlat, self.llcrnrlat
height = lat2h(max_lats - min_lats)
width = lon2w(max_lons - min_lons, self.lat_0)
return width, height
def get_parallels(self, max_labels_count=8):
width, height = self._get_map_dims()
lat_0 = self.lat_0
N1 = int(np.ceil(height / max(width, height) * max_labels_count))
parallels = MapHandler._linspace(lat_0 - h2lat(height / 2),
lat_0 + h2lat(height / 2), N1)
return parallels
def get_meridians(self, max_labels_count=8):
width, height = self._get_map_dims()
lon_0 = self.lon_0
lat_0 = self.lat_0
N2 = int(np.ceil(width / max(width, height) * max_labels_count))
meridians = MapHandler._linspace(lon_0 - w2lon(width / 2, lat_0),
lon_0 + w2lon(width / 2, lat_0), N2)
meridians[meridians > 180] -= 360
return meridians
@staticmethod
def _linspace(val1, val2, N):
"""
returns around N 'nice' values between val1 and val2. Copied from obspy.plot_map
"""
dval = val2 - val1
round_pos = int(round(-np.log10(1. * dval / N)))
# Fake negative rounding as not supported by future as of now.
if round_pos < 0:
factor = 10 ** (abs(round_pos))
delta = round(2. * dval / N / factor) * factor / 2
else:
delta = round(2. * dval / N, round_pos) / 2
new_val1 = np.ceil(val1 / delta) * delta
new_val2 = np.floor(val2 / delta) * delta
N = (new_val2 - new_val1) / delta + 1
return np.linspace(new_val1, new_val2, N)
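# Usage sketch for MapHandler (coordinates are illustrative): given a few
# points and a half-degree margin on every side,
#
#   mh = MapHandler([11.5, 12.0, 13.2], [44.0, 45.1, 46.0], '0.5deg')
#   # mh.llcrnrlon, mh.llcrnrlat, mh.urcrnrlon, mh.urcrnrlat: expanded bounds
#   # mh.get_parallels(5), mh.get_meridians(5): at most ~5 'nice' tick values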
def _normalize(obj, size=None, dtype=None):
""""Casts" obj to a numpy array of the given optional size and optional dtype, and returns it.
If size is not None, the array must have length size; if it has length 1 instead, it will be
resized to the specified size, otherwise a ValueError is raised.
If size is None, no resize takes place and the array is returned as it is.
Note: obj=None is converted to the array [None] (see argument ndmin=1), which would not be
the default behaviour of numpy.array
:return: a numpy array resulting from the conversion of obj into an array
"""
x = np.array(obj, ndmin=1) if dtype is None else np.array(obj, ndmin=1, dtype=dtype)
if size is None:
return np.array([]) if obj is None else x # if obj is None x is [None], return [] instead
try:
if len(x) == 1:
x = np.resize(x, size)
elif len(x) != size:
raise ValueError("invalid array length: %d. Expected %d" % (len(x), size))
except (ValueError, TypeError) as _err:
raise ValueError(str(_err))
return x
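# Sketch of _normalize behaviour (inputs illustrative):
#
#   _normalize(3, size=4)        # -> array([3, 3, 3, 3]), scalar broadcast
#   _normalize([1, 2], size=4)   # raises ValueError, length mismatch
#   _normalize(None)             # -> array([]), no size requested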
def torgba(html_str):
"""Converts html_str into a tuple of rgba colors all in [0, 1]
Curiously, matplotlib color functions do not provide this functionality for
'#RGBA' color formats
:param html_str: a valid html string in hexadecimal format.
Can have length 4, 7 or 9 such as #F1a, #fa98e3, #fc456a09
:return: a rgba vector, i.e. a 4-element numpy array of values in [0,1] denoting `html_str`
:raise: ValueError if html_str is invalid
"""
if len(html_str) not in (4, 7, 9) or not html_str[0] == '#':
raise ValueError("'%s' invalid html string" % html_str)
elif len(html_str) == 4:
rgb = [html_str[i:i+1]*2 for i in xrange(1, len(html_str))]
else:
rgb = [html_str[i:i+2] for i in xrange(1, len(html_str), 2)]
if len(rgb) == 3:
rgb += ['FF']
return np.true_divide(np.array([int(r, 16) for r in rgb]), 255)
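# Sketch of torgba conversions (the html strings are illustrative):
#
#   torgba('#fff')       # -> array([1., 1., 1., 1.])
#   torgba('#ff000080')  # -> array([1., 0., 0., ~0.502]), last byte is alpha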
def _shapeargs(lons, lats, labels, sizes, colors, markers, legend_labels):
lons = _normalize(lons, dtype=float) # basically: convert to float array if scalar (size=0)
lats = _normalize(lats, dtype=float) # basically: convert to float array if scalar (size=0)
if len(lons) != len(lats):
raise ValueError('mismatch in lengths: lons (%d) and lats (%d)' % (len(lons), len(lats)))
leng = len(lons)
labels = _normalize(labels, size=leng)
colors = _normalize(colors, size=leng)
markers = _normalize(markers, size=leng)
legend_labels = _normalize(legend_labels, size=leng)
# colors[np.isnan(colors) | (colors <= 0)] = 1.0 # nan colors default to 1 (black?)
sizes = _normalize(sizes, size=leng, dtype=float)
valid_points = np.logical_not(np.isnan(lons) | np.isnan(lats) | (sizes <= 0))
# return all points whose corresponding numeric values are not nan:
return (lons[valid_points],
lats[valid_points],
labels[valid_points],
sizes[valid_points],
colors[valid_points],
markers[valid_points],
legend_labels[valid_points])
# def get_ax_size(ax, fig):
# bbox = ax.get_window_extent().transformed(fig.dpi_scale_trans.inverted())
# return bbox.width, bbox.height
def pix2inch(pix, fig):
"""Converts pixel to inches on a given matplotlib figure"""
return pix / fig.dpi
def inch2pix(inch, fig):
"""Converts inches to pixel on a given matplotlib figure"""
return inch * fig.dpi
def _joinargs(key_prefix, kwargs, **already_supplied_args):
'''updates already_supplied_args with kwargs using a given prefix in kwargs to identify
common keys. Used in plotmap for kwargs'''
key_prefix += "_"
len_prefix = len(key_prefix)
already_supplied_args.update({k[len_prefix:]: v
for k, v in kwargs.iteritems() if k.startswith(key_prefix)})
return already_supplied_args
def _mp_set_custom_props(drawfunc_retval, lines_props, labels_props):
"""Sets custom properties on drawparallels or drawmeridians return function.
drawfunc_retval is a dict of numbers mapped to tuples where the first element is a list of
matplotlib lines, and the second element is a list of matplotlib texts"""
_setprop(chain.from_iterable((lin for lin, lab in drawfunc_retval.itervalues())), lines_props)
_setprop(chain.from_iterable((lab for lin, lab in drawfunc_retval.itervalues())), labels_props)
def _setprop(iterator_of_mp_objects, props):
'''sets the given properties of an iterator of same type matplotlib objects'''
if not props:
return
prp = {}
for obj in iterator_of_mp_objects:
if not prp:
prp = {"set_%s" % name: val for name, val in props.iteritems()
if hasattr(obj, "set_%s" % name)}
for name, val in prp.iteritems():
getattr(obj, name)(val)
# values below CAN be None but CANNOT be arrays containing None's
def plotmap(lons,
lats,
labels=None,
legendlabels=None,
markers="o",
colors="#FF4400",
sizes=20,
cmap=None,
fontsize=None,
fontweight='regular',
fontcolor='k',
labels_h_offset=0,
labels_v_offset=0,
mapmargins='0.5deg',
figmargins=2,
arcgis_service='World_Street_Map',
arcgis_xpixels=1500,
arcgis_dpi=96,
urlfail='ignore',
maxmeridians=5,
maxparallels=5,
legend_pos='bottom',
legend_borderaxespad=1.5,
legend_ncol=1,
title=None,
show=False,
**kwargs): # @UnusedVariable
"""
Makes a scatter plot of points on a map background using ArcGIS REST API.
:param lons: (array-like of length N or scalar) Longitudes of the data points, in degrees
:param lats: (array-like of length N or scalar) Latitudes of the data points, in degrees
:param labels: (array-like of length N or string. Default: None, no labels) Annotations
(labels) for the individual data points on the map. If non-array (e.g. string), the same value
will be applied to all points
:param legendlabels: (array-like of length N or string. Default: None, no legend)
Annotations (labels) for the legend. You can supply a sparse array where only some points
will be displayed on the legend. All points with no legend label will not show up in the
legend
:param sizes: (array-like of length N or number. Default: 20) Sizes (in points^2) of the
individual points in the scatter plot.
:param markers: (array-like of length N,
`MarkerStyle<http://matplotlib.org/api/markers_api.html#matplotlib.markers.MarkerStyle>`_ or
string. Default: 'o' - circle) The markers (shapes) to be drawn for each point on the map.
See `markers <http://matplotlib.org/api/markers_api.html#module-matplotlib.markers>`_ for
more information on the different styles of markers scatter supports. Marker can be either
an instance of the class or the text shorthand for a particular marker.
:param colors: (array-like of length N,
`matplotlib color <http://matplotlib.org/api/colors_api.html>`_, e.g. string.
Default: "#FF4400")
Colors for the markers (fill color). You can set color transparency by supplying a string of 9
characters where the last two characters denote the transparency ('00' fully transparent,
'ff' fully opaque). Note that this is a feature not implemented in `matplotlib` colors, where
transparency is given as the last element of the numeric tuple (r, g, b, a)
:param fontsize: (numeric or None. Default: None) The fontsize for all texts drawn on the
map (labels, axis tick labels, legend). None uses the default figure font size for all. Custom
values for the individual text types (e.g. legend texts vs labels texts) can be supplied
via the `kwargs` argument and a given prefix (see below)
:param fontweight: (string or number. Default: 'regular') The font weight for all texts drawn
on the map (labels, axis tick labels, legend). Accepts the values (see
http://matplotlib.org/api/text_api.html#matplotlib.text.Text.set_weight):
```
[a numeric value in range 0-1000 | 'ultralight' | 'light' |
'normal' | 'regular' | 'book' | 'medium' | 'roman' | 'semibold' | 'demibold' | 'demi' |
'bold' | 'heavy' | 'extra bold' | 'black' ]
```
Custom
values for the individual text types (e.g. legend texts vs labels texts) can be supplied
via the `kwargs` argument and a given prefix (see below)
:param fontcolor: (`matplotlib color <http://matplotlib.org/api/colors_api.html>`_ or
string. Default: 'k', black) The font color for all texts drawn on the
map (labels, axis tick labels, legend). Custom
values for the individual text types (e.g. legend texts vs labels texts) can be supplied
via the `kwargs` argument and a given prefix (see below)
:param labels_h_offset: (string, number. Defaults None=0) The horizontal offset to be applied
to each label on the map relative to its point coordinates. Negative values will shift the
labels westward, positive values eastward. Useful for not overlapping
markers and labels.
If numeric, it is assumed to be the expressed in degrees. Otherwise, you can supply a string
with a number followed by one of the units 'm', 'km' or 'deg' (e.g., '5km', '0.5deg').
Note that this value affects the
`horizontalalignment` and `multialignment` properties of the labels
(for info see http://matplotlib.org/api/text_api.html#matplotlib.text.Text). Supplying
`labels_horizontalalignment` or `labels_ha` as optional argument will override
this behaviour (see `kwargs` below)
:param labels_v_offset: (string, number. Defaults None=0) The vertical offset to be applied
to each label on the map relative to its point coordinates. Negative values will shift the
labels southward, positive values northward. See notes on `labels_h_offset` for details
Note that this value affects the
`verticalalignment` property of the labels
(for info see http://matplotlib.org/api/text_api.html#matplotlib.text.Text). Supplying
`labels_verticalalignment` or `labels_va` as optional argument will override
this behaviour (see `kwargs` below)
:param mapmargins: (array-like of 1,2,3,4 elements, numeric or string, or None=0.
Default: '0.5deg').
The map margins, i.e. how much the map has to 'expand/shrink' in any direction, relative
to the bounding box calculated to include all points.
If array-like, it behaves like the css 'margin' property of html: 4 elements will denote
[top, right, bottom, left], two elements will denote [top/bottom, left/right], three
elements [top, right/left, bottom], a single element array (or a single number or a string)
applies the value to all directions.
Finally, elements of the array must be expressed as the arguments `labels_h_offset` or
`labels_v_offset`: numbers denoting degrees or strings with units 'm', 'km', 'deg'. Negative
values will shrink the map.
If string, the argument will first be split using commas, semicolons or spaces as delimiters
(if no delimiter is found, the string is taken as a single chunk) and converted to an array-like
object.
:param figmargins: (array-like of 1,2,3,4 elements, number or None=0. Default:2) The
figure margins *in font height units* (e.g., 2 means: twice the font height). This argument
behaves exactly as `mapmargins` but expands/shrinks the distances between map and figure
(image) bounds. Useful to include axis tick labels or legend, if they overflow.
Note also that strings
are allowed only if they are parsable to float (e.g. "5,6; -12 1")
:param arcgis_service: (string, default: 'World_Street_Map'). The map image type, or
more technically the service for the map
hosted on ArcGIS server. Other values are 'ESRI_Imagery_World_2D'
(default in
`Basemap.arcgisimage <http://matplotlib.org/basemap/api/basemap_api.html#mpl_toolkits.basemap.Basemap.arcgisimage>`_),
'World_Topo_Map', 'World_Terrain_Base'. For details, see:
http://server.arcgisonline.com/arcgis/rest/services.
:param arcgis_xpixels: (numeric, default: 3000). Requested number of image pixels
in x-direction (default is 400 in
`Basemap.arcgisimage <http://matplotlib.org/basemap/api/basemap_api.html#mpl_toolkits.basemap.Basemap.arcgisimage>`_).
The documentation is quite unclear but this parameter seems to set the zoom of the image. From
this `link <http://basemaptutorial.readthedocs.io/en/latest/backgrounds.html#arcgisimage>`_:
A bigger number will ask a bigger image, so the image will have more detail.
So when the zoom is bigger, `xsize` must be bigger to maintain the resolution
:param urlfail: (string, 'raise' or 'ignore'. Default: 'ignore'). Tells what to do if the
ArcGIS request fails (URLError, no internet connection, etcetera). By default, on failure a raw
map with continents contour, and oceans will be plotted (good for
debug). Otherwise, the exception resulting from the web request is raised
:param maxmeridians: (numeric default: 5). The number of maximum meridians to be drawn. Set to
<=0 to hide meridians. Note that also x-axis labels are drawn.
To further manipulate meridians display, use any argument starting with
'mlabels_', 'mlines_' or 'meridians' (see `kwargs` below). E.g., to show only the labels and not
the lines, supply as argument `meridians_linewidth=0` or 'mlines_linewidth=0'.
:param maxparallels: (numeric default: 5). The number of maximum parallels to be drawn. Set to
<=0 to hide parallels. Note that also y-axis labels are drawn.
To further manipulate parallels display, use any argument starting with
'plabels_', 'plines_' or 'parallels' (see `kwargs` below). E.g., to show only the labels and not
the lines, supply as argument `parallels_linewidth=0` or 'plines_linewidth=0'.
:param legend_pos: (string in ['top'. 'bottom', 'right', 'left'], default='bottom'). The legend
location with respect to the map. It also adjusts the bounding box that the legend will be
anchored to.
For
customizing entirely the legend placement overriding this parameter, provide `legend_loc`
(and optionally `legend_bbox_to_anchor`) in `kwargs` (see below)
:param legend_borderaxespad: (numeric, default 1.5) The pad between the axes and legend border,
in font units
:param legend_ncol: (integer, default=1) The legend number of columns
:param title (string or None. Default: None): Title above plot (Note: not tested)
:param show (boolean, default: False): Whether to show the figure after plotting or not
(Note: not tested). Can be used to do further customization of the plot before showing it.
:param fig: (matplotlib figure or None, default: None). Note: deprecated, pass None as
supplying an already existing figure with other axes might break the figure layout
:param kwargs: any kind of additional argument passed to `matplotlib` and `Basemap` functions
or objects.
The name of the argument must be of the form
```
prefix_propertyname=propertyvalue
```
where prefix indicates the function/object to be called with keyword argument:
```
propertyname=propertyvalue
```
Current supported prefixes are (for available property names see links):
Prefix Passes `propertyname` to
============ ==================================================================================
arcgis `Basemap.arcgisimage <http://matplotlib.org/basemap/api/basemap_api.html#mpl_toolkits.basemap.Basemap.arcgisimage>_
used to retrieve the background map using ArgGIS Server REST API. See also
http://basemaptutorial.readthedocs.io/en/latest/backgrounds.html#arcgisimage
basemap `Basemap <http://matplotlib.org/basemap/api/basemap_api.html#mpl_toolkits.basemap.Basemap>`_
the object responsible of drawing and managing the map. Note that
`basemap_resolution=h` and `basemap_epsg=4326` by default.
labels All `texts <http://matplotlib.org/api/text_api.html#matplotlib.text.Text>`_
used to display the point labels on the map
legend The `legend <http://matplotlib.org/api/legend_api.html#matplotlib.legend.Legend>`_.
See the already implemented arguments `legend_borderaxespad`,
`legend_ncol`
legendlabels All `texts <http://matplotlib.org/api/text_api.html#matplotlib.text.Text>`_
used to display the text labels of the legend
meridians `Basemap.drawmeridians`. For more detailed settings on meridians, see
`mlines` and `mlabels`
parallels `Basemap.drawparallels`. For more detailed settings on parallels, see
`plines` and `plabels`
plines All `lines <http://matplotlib.org/api/lines_api.html#matplotlib.lines.Line2D>`_
used to display the parallels
plabels All `texts <http://matplotlib.org/api/text_api.html#matplotlib.text.Text>`_
used to display the parallels labels on the y axis
mlines All `lines <http://matplotlib.org/api/lines_api.html#matplotlib.lines.Line2D>`_
used to display the meridians
mlabels All `texts <http://matplotlib.org/api/text_api.html#matplotlib.text.Text>`_
used to display the meridians labels on the x axis
============ ==================================================================================
Examples
--------
- `legend_title='abc'` will call `legend(..., title='abc', ...)`
- `labels_path_effects=[PathEffects.withStroke(linewidth=2, foreground='white')]` will set the
a white contour around each label text
- `meridians_labelstyle="+/-"` will call `Basemap.drawmeridians(..., labelstyle="+/-", ...)`
Notes:
------
The objects referenced by `plines`, `plabels`, `mlines`, `mlabels` and `legendlabels`
cannot be initialized directly with the given properties, which will be set after they are
created assuming that for any property `foo` passed as keyword argument in their constructor
there exist a method `set_foo(...)` (which will be called with the given propertyvalue).
This is most likely always true according to matplotlib api, but we cannot assure it works
100% of the times
"""
lons, lats, labels, sizes, colors, markers, legendlabels =\
_shapeargs(lons, lats, labels, sizes, colors, markers, legendlabels)
# convert html strings to tuples of rgba values in [0.1] if the former are in string format,
# because (maybe too old matplotlib version?) colors in the format '#RGBA' are not supported
# Also, if cmap is provided, basemap.scatter calls matplotlib.scatter which
# wants float sequenes in case of color map
if colors.dtype.char in ('U', 'S'): # pylint: disable=no-member
colors = np.array([torgba(c) for c in colors])
fig = plt.figure()
map_ax = fig.add_axes([0, 0, 1, 1]) # set axes size the same as figure
# setup handler for managing basemap coordinates and meridians / parallels calculation:
handler = MapHandler(lons, lats, mapmargins)
kwa = _joinargs('basemap', kwargs,
llcrnrlon=handler.llcrnrlon,
llcrnrlat=handler.llcrnrlat,
urcrnrlon=handler.urcrnrlon,
urcrnrlat=handler.urcrnrlat,
epsg='4326', # 4326, # 3395, # 3857,
resolution='i', # 'h',
ax=map_ax)
bmap = Basemap(**kwa)
try:
kwa = _joinargs("arcgis", kwargs, service=arcgis_service, xpixels=arcgis_xpixels,
dpi=arcgis_dpi)
# set the map image via a map service. In case you need the returned values, note that
# This function returns an ImageAxis (or AxisImage, check matplotlib doc)
bmap.arcgisimage(**kwa)
except (URLError, HTTPError, socket.error) as exc:
# failed, maybe there is not internet connection
if urlfail == 'ignore':
# Print a simple map offline
bmap.drawcoastlines()
watercolor = '#4444bb'
bmap.fillcontinents(color='#eebb66', lake_color=watercolor)
bmap.drawmapboundary(fill_color=watercolor)
else:
raise
# draw meridians and parallels. From basemap.drawmeridians / drawparallels doc:
# returns a dictionary whose keys are the meridian values, and
# whose values are tuples containing lists of the
# matplotlib.lines.Line2D and matplotlib.text.Text instances
# associated with each meridian. Deleting an item from the
# dictionary removes the correpsonding meridian from the plot.
if maxparallels > 0:
kwa = _joinargs("parallels", kwargs, linewidth=1, fontsize=fontsize,
labels=[0, 1, 1, 0], fontweight=fontweight)
parallels = handler.get_parallels(maxparallels)
# Old basemap versions have problems with non-integer parallels.
try:
# Note: drawparallels returns a dict whose values hold the matplotlib
# lines and the text objects representing the tick labels
_dict = bmap.drawparallels(parallels, **kwa)
except KeyError:
parallels = sorted(list(set(map(int, parallels))))
_dict = bmap.drawparallels(parallels, **kwa)
# set custom properties:
kwa_lines = _joinargs("plines", kwargs)
kwa_labels = _joinargs("plabels", kwargs, color=fontcolor)
_mp_set_custom_props(_dict, kwa_lines, kwa_labels)
if maxmeridians > 0:
kwa = _joinargs("meridians", kwargs, linewidth=1, fontsize=fontsize,
labels=[1, 0, 0, 1], fontweight=fontweight)
meridians = handler.get_meridians(maxmeridians)
_dict = bmap.drawmeridians(meridians, **kwa)
# set custom properties:
kwa_lines = _joinargs("mlines", kwargs)
kwa_labels = _joinargs("mlabels", kwargs, color=fontcolor)
_mp_set_custom_props(_dict, kwa_lines, kwa_labels)
# fig.get_axes()[0].tick_params(direction='out', length=15) # does not work, check basemap
fig.bmap = bmap
# compute the native bmap projection coordinates for events.
# from the docs (this is kind of outdated, however leave here for the moment):
# Calling a Basemap class instance with the arguments lon, lat will
# convert lon/lat (in degrees) to x/y map projection
# coordinates (in meters). If optional keyword ``inverse`` is
# True (default is False), the inverse transformation from x/y
# to lon/lat is performed.
# For cylindrical equidistant projection (``cyl``), this
# does nothing (i.e. x,y == lon,lat).
# For non-cylindrical projections, the inverse transformation
# always returns longitudes between -180 and 180 degrees. For
# cylindrical projections (self.projection == ``cyl``,
# ``cea``, ``mill``, ``gall`` or ``merc``)
# the inverse transformation will return longitudes between
# self.llcrnrlon and self.llcrnrlat.
# Input arguments lon, lat can be either scalar floats,
# sequences, or numpy arrays.
# parse hoffset and voffset and assure they are at least arrays of 1 elements
# (for aligning text labels, see below)
hoffset = np.array(parse_distance(labels_h_offset, lats), copy=False, ndmin=1)
voffset = np.array(parse_distance(labels_v_offset), copy=False, ndmin=1)
lbl_lons = lons + hoffset
lbl_lats = lats + voffset
# convert labels coordinates:
xlbl, ylbl = bmap(lbl_lons, lbl_lats)
# plot point labels
max_points = -1 # negative means: plot all
if max_points < 0 or len(lons) < max_points:
# Set alignments which control also the corner point reference when placing labels
# from (FIXME: add ref?)
# horizontalalignment controls whether the x positional argument for the text indicates
# the left, center or right side of the text bounding box.
# verticalalignment controls whether the y positional argument for the text indicates
# the bottom, center or top side of the text bounding box.
# multialignment, for newline separated strings only, controls whether the different lines
# are left, center or right justified
ha = 'left' if hoffset[0] > 0 else 'right' if hoffset[0] < 0 else 'center'
va = 'bottom' if voffset[0] > 0 else 'top' if voffset[0] < 0 else 'center'
ma = ha
kwa = _joinargs("labels", kwargs, fontweight=fontweight, color=fontcolor,
zorder=100, fontsize=fontsize, horizontalalignment=ha,
verticalalignment=va, multialignment=ma)
for name, xpt, ypt in zip(labels, xlbl, ylbl):
# Check if the point can actually be seen with the current bmap
# projection. The bmap object will set the coordinates to very
# large values if it cannot project a point.
if xpt > 1e25:
continue
map_ax.text(xpt, ypt, name, **kwa)
# plot points
x, y = bmap(lons, lats)
# store handles to points, and relative labels, if any
leg_handles, leg_labels = [], []
# bmap.scatter accepts array-like args for everything except markers. Avoid
# redundant loops by iterating only over the distinct markers:
# unique markers (sorted according to their index in markers, not their value):
mrks = markers[np.sort(np.unique(markers, return_index=True)[1])]
for mrk in mrks:
# Note using masks with '==' (numpy==1.11.3):
#
# >>> a = np.array([1,2,3])
# >>> a == 3
# array([False, False, True], dtype=bool) # OK
# >>> a == None
# False # NOT AS EXPECTED!
# >>> np.equal(a, None)
# array([False, False, False], dtype=bool) # OK
#
# (Note also that a == None issues:
# FutureWarning: comparison to `None` will result in an elementwise object
# comparison in the future.)
#
# So the correct way is to write
# mask = np.equal(array, val) if val is None else (a == val)
m_mask = np.equal(markers, mrk) if mrk is None else markers == mrk # see above
__x = x[m_mask]
__y = y[m_mask]
__m = mrk
__s = sizes[m_mask]
__c = colors[m_mask]
__l = legendlabels[m_mask]
# unique legends (sorted according to their index in __l, not their value):
for leg in __l[np.sort(np.unique(__l, return_index=True)[1])]:
l_mask = np.equal(__l, leg) if leg is None else __l == leg # see above
_scatter = bmap.scatter(__x[l_mask],
__y[l_mask],
marker=mrk,
s=__s[l_mask],
c=__c[l_mask],
cmap=cmap,
zorder=10)
if leg:
leg_handles.append(_scatter)
leg_labels.append(leg)
if leg_handles:
# if we provided `legend_loc`, use that:
loc = kwargs.get('legend_loc', None)
bbox_to_anchor = None # defaults in matplotlib legend
# we do have a legend to show. Adjust the legend reference corner:
if loc is None:
if legend_pos == 'bottom':
loc = 'upper center'
bbox_to_anchor = (0.5, -0.05)
elif legend_pos == 'top':
loc = 'lower center'
bbox_to_anchor = (0.5, 1.05)
elif legend_pos == 'left':
loc = 'center right'
bbox_to_anchor = (-0.05, 0.5)
elif legend_pos == 'right':
loc = 'center left'
bbox_to_anchor = (1, 0.5)
else:
raise ValueError('invalid legend_pos value:"%s"' % legend_pos)
# The plt.legend has the prop argument which sets the font properties:
# family, style, variant, weight, stretch, size, fname. See
# http://matplotlib.org/api/font_manager_api.html#matplotlib.font_manager.FontProperties
# However, that property does not allow to set font color. So we
# use the get_text method of Legend. Note that we pass font size *now* even if
# setting it later works as well (the legend frame is resized accordingly)
kwa = _joinargs("legend", kwargs, scatterpoints=1, ncol=legend_ncol, loc=loc,
bbox_to_anchor=bbox_to_anchor, borderaxespad=legend_borderaxespad,
fontsize=fontsize)
# http://stackoverflow.com/questions/17411940/matplotlib-scatter-plot-legend
leg = map_ax.legend(leg_handles, leg_labels, **kwa)
# set properties supplied via 'legend_'
_setprop(leg.get_texts(), _joinargs("legendlabels", kwargs, color=fontcolor))
# re-position the axes. The REAL map aspect ratio seems to be this:
realratio_h_w = bmap.aspect
fig_w, fig_h = fig.get_size_inches()
figratio_h_w = np.true_divide(fig_h, fig_w)
if figratio_h_w >= realratio_h_w:
# we have margins (blank space) above and below
# thus, we assume:
map_w = fig_w
# and we calculate map_h
map_h = map_w * realratio_h_w
# assume there is the same amount of space above and below:
vpad = (fig_h - map_h) / 2.0
# hpad is zero:
hpad = 0
else:
# we have margins (blank space) left and right
# thus, we assume:
map_h = fig_h
# and consequently:
map_w = map_h / realratio_h_w
# assume there is the same amount of space left and right:
hpad = (fig_w - map_w) / 2.0
# vpad is zero:
vpad = 0
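# Worked example (illustrative): for an 8x6 inch figure and a map with
# realratio_h_w = 0.5, figratio_h_w = 6/8 = 0.75 >= 0.5, so map_w = 8,
# map_h = 8 * 0.5 = 4, vpad = (6 - 4) / 2 = 1 inch and hpad = 0.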
# calculate new fig dimensions that EXACTLY bound the map contour
new_fig_w = fig_w - 2 * hpad
new_fig_h = fig_h - 2 * vpad
# now margins:
marginz = parse_margins(figmargins) # margins are in fontheight units. Get font height:
fontsize_inch = 0
if len(np.nonzero(marginz)[0]):
# Calculate the font size in pixels.
# We want to be consistent with matplotlib way of getting fontsize.
# inspecting matplotlib.legend.Legend.draw we end up with:
# 1. Get the renderer
rend = fig.canvas.get_renderer()
# 2. get the fontsize in points. We might use `fontsize` but it might be None and we want
# the default in that case. There are several 'defaults' (rcParams['font.size'],
# rcParams["legend.fontsize"])... we don't care for now; use the first. How to get
# rcParams['font.size']? Either this (see matplotlib.Legend.__init__):
# fontsize_pt = FontProperties(size=fontsize, weight=fontweight).get_size_in_points()
# or simply do:
fontsize_pt = fontsize or rcParams['font.size']
# Now use renderer to convert to pixels:
# For info see matplotlib.text.Text.get_window_extent
fontsize_px = rend.points_to_pixels(fontsize_pt)
# finally inches (fontsize_px is already in pixels, so convert it only once):
fontsize_inch = pix2inch(fontsize_px, fig)
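# Worked example (illustrative, assuming pix2inch divides pixel values by the
# figure dpi): with fontsize_pt = 10 on a 100 dpi figure, points_to_pixels
# gives 10 * 100 / 72 ~= 13.9 px, so fontsize_inch ~= 0.139 inches per
# font-height unit of margin.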
# calculate insets in inches (top right bottom left)
insets_inch = marginz * fontsize_inch
# set to fig dimensions
new_fig_w += insets_inch[1] + insets_inch[3]
new_fig_h += insets_inch[0] + insets_inch[2]
fig.set_size_inches(new_fig_w, new_fig_h, forward=True)
# (forward necessary if fig is in GUI, let's set for safety)
# now the axes which are relative to the figure. Thus first normalize inches:
insets_inch /= [fig_h, fig_w, fig_h, fig_w]
# pos1 = map_ax.get_position() # get the original position
# NOTE: it seems that pos[0], pos[1] indicate the x and y of the LOWER LEFT corner, not
# upper left!
pos2 = [insets_inch[3], insets_inch[2],
1 - (insets_inch[1] + insets_inch[3]),
1 - (insets_inch[0] + insets_inch[2])]
map_ax.set_position(pos2)
if title:
plt.suptitle(title)
if show:
plt.show()
return fig
| gpl-3.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/matplotlib/dates.py | 6 | 52305 | """
Matplotlib provides sophisticated date plotting capabilities, standing on the
shoulders of python :mod:`datetime`, the add-on modules :mod:`pytz` and
:mod:`dateutil`. :class:`datetime` objects are converted to floating point
numbers which represent time in days since 0001-01-01 UTC, plus 1. For
example, 0001-01-01, 06:00 is 1.25, not 0.25. The helper functions
:func:`date2num`, :func:`num2date` and :func:`drange` are used to facilitate
easy conversion to and from :mod:`datetime` and numeric ranges.
.. note::
Like Python's datetime, mpl uses the Gregorian calendar for all
conversions between dates and floating point numbers. This practice
is not universal, and calendar differences can cause confusing
differences between what Python and mpl give as the number of days
since 0001-01-01 and what other software and databases yield. For
example, the US Naval Observatory uses a calendar that switches
from Julian to Gregorian in October, 1582. Hence, using their
calculator, the number of days between 0001-01-01 and 2006-04-01 is
732403, whereas using the Gregorian calendar via the datetime
module we find::
In [31]:date(2006,4,1).toordinal() - date(1,1,1).toordinal()
Out[31]:732401
A wide range of specific and general purpose date tick locators and
formatters are provided in this module. See
:mod:`matplotlib.ticker` for general information on tick locators
and formatters. These are described below.
All the matplotlib date converters, tickers and formatters are
timezone aware, and the default timezone is given by the timezone
parameter in your :file:`matplotlibrc` file. If you leave out a
:class:`tz` timezone instance, the default from your rc file will be
assumed. If you want to use a custom time zone, pass a
:class:`pytz.timezone` instance with the tz keyword argument to
:func:`num2date`, :func:`plot_date`, and any custom date tickers or
locators you create. See `pytz <http://pythonhosted.org/pytz/>`_ for
information on :mod:`pytz` and timezone handling.
The `dateutil module <https://dateutil.readthedocs.io/en/stable/>`_ provides
additional code to handle date ticking, making it easy to place ticks
on any kinds of dates. See examples below.
Date tickers
------------
Most of the date tickers can locate single or multiple values. For
example::
# import constants for the days of the week
from matplotlib.dates import MO, TU, WE, TH, FR, SA, SU
# tick on mondays every week
loc = WeekdayLocator(byweekday=MO, tz=tz)
# tick on mondays and saturdays
loc = WeekdayLocator(byweekday=(MO, SA))
In addition, most of the constructors take an interval argument::
# tick on mondays every second week
loc = WeekdayLocator(byweekday=MO, interval=2)
The rrule locator allows completely general date ticking::
# tick every 5th easter
rule = rrulewrapper(YEARLY, byeaster=1, interval=5)
loc = RRuleLocator(rule)
Here are all the date tickers:
* :class:`MinuteLocator`: locate minutes
* :class:`HourLocator`: locate hours
* :class:`DayLocator`: locate specified days of the month
* :class:`WeekdayLocator`: Locate days of the week, e.g., MO, TU
* :class:`MonthLocator`: locate months, e.g., 7 for july
* :class:`YearLocator`: locate years that are multiples of base
* :class:`RRuleLocator`: locate using a
:class:`matplotlib.dates.rrulewrapper`. The
:class:`rrulewrapper` is a simple wrapper around a
:class:`dateutil.rrule` (`dateutil
<https://dateutil.readthedocs.io/en/stable/>`_) which allows almost
arbitrary date tick specifications. See `rrule example
<../examples/pylab_examples/date_demo_rrule.html>`_.
* :class:`AutoDateLocator`: On autoscale, this class picks the best
:class:`MultipleDateLocator` to set the view limits and the tick
locations.
Date formatters
---------------
Here are all the date formatters:
* :class:`AutoDateFormatter`: attempts to figure out the best format
to use. This is most useful when used with the :class:`AutoDateLocator`.
* :class:`DateFormatter`: use :func:`strftime` format strings
* :class:`IndexDateFormatter`: date plots with implicit *x*
indexing.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import zip
from matplotlib import rcParams
import re
import time
import math
import datetime
import warnings
from dateutil.rrule import (rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY,
MONTHLY, WEEKLY, DAILY, HOURLY, MINUTELY,
SECONDLY)
from dateutil.relativedelta import relativedelta
import dateutil.parser
import numpy as np
import matplotlib
import matplotlib.units as units
import matplotlib.cbook as cbook
import matplotlib.ticker as ticker
__all__ = ('date2num', 'num2date', 'drange', 'epoch2num',
'num2epoch', 'mx2num', 'DateFormatter',
'IndexDateFormatter', 'AutoDateFormatter', 'DateLocator',
'RRuleLocator', 'AutoDateLocator', 'YearLocator',
'MonthLocator', 'WeekdayLocator',
'DayLocator', 'HourLocator', 'MinuteLocator',
'SecondLocator', 'MicrosecondLocator',
'rrule', 'MO', 'TU', 'WE', 'TH', 'FR', 'SA', 'SU',
'YEARLY', 'MONTHLY', 'WEEKLY', 'DAILY',
'HOURLY', 'MINUTELY', 'SECONDLY', 'MICROSECONDLY', 'relativedelta',
'seconds', 'minutes', 'hours', 'weeks')
# Make a simple UTC instance so we don't always have to import
# pytz. From the python datetime library docs:
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return datetime.timedelta(0)
def tzname(self, dt):
return str("UTC")
def dst(self, dt):
return datetime.timedelta(0)
UTC = _UTC()
def _get_rc_timezone():
"""
Retrieve the preferred timezone from the rcParams dictionary.
"""
s = matplotlib.rcParams['timezone']
if s == 'UTC':
return UTC
import pytz
return pytz.timezone(s)
"""
Time-related constants.
"""
EPOCH_OFFSET = float(datetime.datetime(1970, 1, 1).toordinal())
JULIAN_OFFSET = 1721424.5 # Julian date at 0001-01-01
MICROSECONDLY = SECONDLY + 1
HOURS_PER_DAY = 24.
MIN_PER_HOUR = 60.
SEC_PER_MIN = 60.
MONTHS_PER_YEAR = 12.
DAYS_PER_WEEK = 7.
DAYS_PER_MONTH = 30.
DAYS_PER_YEAR = 365.0
MINUTES_PER_DAY = MIN_PER_HOUR * HOURS_PER_DAY
SEC_PER_HOUR = SEC_PER_MIN * MIN_PER_HOUR
SEC_PER_DAY = SEC_PER_HOUR * HOURS_PER_DAY
SEC_PER_WEEK = SEC_PER_DAY * DAYS_PER_WEEK
MUSECONDS_PER_DAY = 1e6 * SEC_PER_DAY
MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY = (
MO, TU, WE, TH, FR, SA, SU)
WEEKDAYS = (MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY)
def _to_ordinalf(dt):
"""
Convert :mod:`datetime` or :mod:`date` to the Gregorian date as UTC float
days, preserving hours, minutes, seconds and microseconds. Return value
is a :func:`float`.
"""
# Convert to UTC
tzi = getattr(dt, 'tzinfo', None)
if tzi is not None:
dt = dt.astimezone(UTC)
tzi = UTC
base = float(dt.toordinal())
# If it's sufficiently datetime-like, it will have a `date()` method
cdate = getattr(dt, 'date', lambda: None)()
if cdate is not None:
# Get a datetime object at midnight UTC
midnight_time = datetime.time(0, tzinfo=tzi)
rdt = datetime.datetime.combine(cdate, midnight_time)
# Append the seconds as a fraction of a day
base += (dt - rdt).total_seconds() / SEC_PER_DAY
return base
# a version of _to_ordinalf that can operate on numpy arrays
_to_ordinalf_np_vectorized = np.vectorize(_to_ordinalf)
def _from_ordinalf(x, tz=None):
"""
Convert a float Gregorian ordinal to a date, preserving hours, minutes,
seconds and microseconds. Return value is a :class:`datetime`.
The input date `x` is a float in ordinal days at UTC, and the output will
be the specified :class:`datetime` object corresponding to that time in
timezone `tz`, or if `tz` is `None`, in the timezone specified in
`rcParams['timezone']`.
"""
if tz is None:
tz = _get_rc_timezone()
ix = int(x)
dt = datetime.datetime.fromordinal(ix).replace(tzinfo=UTC)
remainder = float(x) - ix
# Round down to the nearest microsecond.
dt += datetime.timedelta(microseconds=int(remainder * MUSECONDS_PER_DAY))
# Compensate for rounding errors
if dt.microsecond < 10:
dt = dt.replace(microsecond=0)
elif dt.microsecond > 999990:
dt += datetime.timedelta(microseconds=1e6 - dt.microsecond)
return dt.astimezone(tz)
# a version of _from_ordinalf that can operate on numpy arrays
_from_ordinalf_np_vectorized = np.vectorize(_from_ordinalf)
class strpdate2num(object):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing.
"""
def __init__(self, fmt):
""" fmt: any valid strptime format is supported """
self.fmt = fmt
def __call__(self, s):
"""s : string to be converted
return value: a date2num float
"""
return date2num(datetime.datetime(*time.strptime(s, self.fmt)[:6]))
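# Usage sketch (illustrative, not part of the original module): parse a date
# string with a known format into a matplotlib datenum.
# >>> strpdate2num('%Y-%m-%d')('2006-04-01')
# 732402.0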
class bytespdate2num(strpdate2num):
"""
Use this class to parse date strings to matplotlib datenums when
you know the date format string of the date you are parsing. See
:file:`examples/pylab_examples/load_converter.py`.
"""
def __init__(self, fmt, encoding='utf-8'):
"""
Args:
fmt: any valid strptime format is supported
encoding: encoding to use on byte input (default: 'utf-8')
"""
super(bytespdate2num, self).__init__(fmt)
self.encoding = encoding
def __call__(self, b):
"""
Args:
b: byte input to be converted
Returns:
A date2num float
"""
s = b.decode(self.encoding)
return super(bytespdate2num, self).__call__(s)
# a version of dateutil.parser.parse that can operate on numpy arrays
_dateutil_parser_parse_np_vectorized = np.vectorize(dateutil.parser.parse)
def datestr2num(d, default=None):
"""
Convert a date string to a datenum using
:func:`dateutil.parser.parse`.
Parameters
----------
d : string or sequence of strings
The dates to convert.
default : datetime instance
The default date to use when fields are missing in `d`.
"""
if cbook.is_string_like(d):
dt = dateutil.parser.parse(d, default=default)
return date2num(dt)
else:
if default is not None:
d = [dateutil.parser.parse(s, default=default) for s in d]
d = np.asarray(d)
if not d.size:
return d
return date2num(_dateutil_parser_parse_np_vectorized(d))
def date2num(d):
"""
*d* is either a :class:`datetime` instance or a sequence of datetimes.
Return value is a floating point number (or sequence of floats)
which gives the number of days (fraction part represents hours,
minutes, seconds) since 0001-01-01 00:00:00 UTC, *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
"""
if not cbook.iterable(d):
return _to_ordinalf(d)
else:
d = np.asarray(d)
if not d.size:
return d
return _to_ordinalf_np_vectorized(d)
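# Example (illustrative): 2006-04-01 06:00 is ordinal day 732402 plus a quarter
# of a day.
# >>> import datetime
# >>> date2num(datetime.datetime(2006, 4, 1, 6, 0))
# 732402.25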
def julian2num(j):
"""
Convert a Julian date (or sequence) to a matplotlib date (or sequence).
"""
if cbook.iterable(j):
j = np.asarray(j)
return j - JULIAN_OFFSET
def num2julian(n):
"""
Convert a matplotlib date (or sequence) to a Julian date (or sequence).
"""
if cbook.iterable(n):
n = np.asarray(n)
return n + JULIAN_OFFSET
def num2date(x, tz=None):
"""
*x* is a float value which gives the number of days
(fraction part represents hours, minutes, seconds) since
0001-01-01 00:00:00 UTC *plus* *one*.
The addition of one here is a historical artifact. Also, note
that the Gregorian calendar is assumed; this is not universal
practice. For details, see the module docstring.
Return value is a :class:`datetime` instance in timezone *tz* (default to
rcparams TZ value).
If *x* is a sequence, a sequence of :class:`datetime` objects will
be returned.
"""
if tz is None:
tz = _get_rc_timezone()
if not cbook.iterable(x):
return _from_ordinalf(x, tz)
else:
x = np.asarray(x)
if not x.size:
return x
return _from_ordinalf_np_vectorized(x, tz).tolist()
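# Example (illustrative): num2date inverts date2num (up to timezone handling
# and microsecond rounding).
# >>> num2date(732402.25, tz=UTC).strftime('%Y-%m-%d %H:%M')
# '2006-04-01 06:00'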
def drange(dstart, dend, delta):
"""
Return a date range as float Gregorian ordinals. *dstart* and
*dend* are :class:`datetime` instances. *delta* is a
:class:`datetime.timedelta` instance.
"""
f1 = _to_ordinalf(dstart)
f2 = _to_ordinalf(dend)
step = delta.total_seconds() / SEC_PER_DAY
# calculate the difference between dend and dstart in times of delta
num = int(np.ceil((f2 - f1) / step))
# calculate end of the interval which will be generated
dinterval_end = dstart + num * delta
# ensure that a half-open interval will be generated [dstart, dend)
if dinterval_end >= dend:
# if the endpoint is greater than dend, just subtract one delta
dinterval_end -= delta
num -= 1
f2 = _to_ordinalf(dinterval_end) # new float-endpoint
return np.linspace(f1, f2, num + 1)
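# Example (illustrative): twelve-hourly points over three days give a half-open
# range [dstart, dend) of 6 values.
# >>> import datetime
# >>> d = drange(datetime.datetime(2006, 4, 1), datetime.datetime(2006, 4, 4),
# ...            datetime.timedelta(hours=12))
# >>> len(d)
# 6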
### date tickers and formatters ###
class DateFormatter(ticker.Formatter):
"""
Tick location is seconds since the epoch. Use a :func:`strftime`
format string.
Python only supports :mod:`datetime` :func:`strftime` formatting
for years greater than 1900. Thanks to Andrew Dalke, Dalke
Scientific Software who contributed the :func:`strftime` code
below to include dates earlier than this year.
"""
illegal_s = re.compile(r"((^|[^%])(%%)*%s)")
def __init__(self, fmt, tz=None):
"""
*fmt* is a :func:`strftime` format string; *tz* is the
:class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
if x == 0:
raise ValueError('DateFormatter found a value of x=0, which is '
'an illegal date. This usually occurs because '
'you have not informed the axis that it is '
'plotting dates, e.g., with ax.xaxis_date()')
dt = num2date(x, self.tz)
return self.strftime(dt, self.fmt)
def set_tzinfo(self, tz):
self.tz = tz
def _replace_common_substr(self, s1, s2, sub1, sub2, replacement):
"""Helper function for replacing substrings sub1 and sub2
located at the same indexes in strings s1 and s2 respectively,
with the string replacement. It is expected that sub1 and sub2
have the same length. Returns the pair s1, s2 after the
substitutions.
"""
# Find common indexes of substrings sub1 in s1 and sub2 in s2
# and make substitutions inplace. Because this is inplace,
# it is okay if len(replacement) != len(sub1), len(sub2).
i = 0
while True:
j = s1.find(sub1, i)
if j == -1:
break
i = j + 1
if s2[j:j + len(sub2)] != sub2:
continue
s1 = s1[:j] + replacement + s1[j + len(sub1):]
s2 = s2[:j] + replacement + s2[j + len(sub2):]
return s1, s2
def strftime_pre_1900(self, dt, fmt=None):
"""Call time.strftime for years before 1900 by rolling
forward a multiple of 28 years.
*fmt* is a :func:`strftime` format string.
Dalke: I hope I did this math right. Every 28 years the
calendar repeats, except through century leap years excepting
the 400 year leap years. But only if you're using the Gregorian
calendar.
"""
if fmt is None:
fmt = self.fmt
# Since python's time module's strftime implementation does not
# support %f microsecond (but the datetime module does), use a
# regular expression substitution to replace instances of %f.
# Note that this can be useful since python's floating-point
# precision representation for datetime causes precision to be
# more accurate closer to year 0 (around the year 2000, precision
# can be at 10s of microseconds).
fmt = re.sub(r'((^|[^%])(%%)*)%f',
r'\g<1>{0:06d}'.format(dt.microsecond), fmt)
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to between the years 1973 and 2000
year1 = year + ((2000 - year) // 28) * 28
year2 = year1 + 28
timetuple = dt.timetuple()
# Generate timestamp string for year and year+28
s1 = time.strftime(fmt, (year1,) + timetuple[1:])
s2 = time.strftime(fmt, (year2,) + timetuple[1:])
# Replace instances of respective years (both 2-digit and 4-digit)
# that are located at the same indexes of s1, s2 with dt's year.
# Note that C++'s strftime implementation does not use padded
# zeros or padded whitespace for %y or %Y for years before 100, but
# uses padded zeros for %x. (For example, try the runnable examples
# with .tm_year in the interval [-1900, -1800] on
# http://en.cppreference.com/w/c/chrono/strftime.) For ease of
# implementation, we always use padded zeros for %y, %Y, and %x.
s1, s2 = self._replace_common_substr(s1, s2,
"{0:04d}".format(year1),
"{0:04d}".format(year2),
"{0:04d}".format(dt.year))
s1, s2 = self._replace_common_substr(s1, s2,
"{0:02d}".format(year1 % 100),
"{0:02d}".format(year2 % 100),
"{0:02d}".format(dt.year % 100))
return cbook.unicode_safe(s1)
def strftime(self, dt, fmt=None):
"""Refer to documentation for datetime.strftime.
*fmt* is a :func:`strftime` format string.
Warning: For years before 1900, depending upon the current
locale it is possible that the year displayed with %x might
be incorrect. For years before 100, %y and %Y will yield
zero-padded strings.
"""
if fmt is None:
fmt = self.fmt
fmt = self.illegal_s.sub(r"\1", fmt)
fmt = fmt.replace("%s", "s")
if dt.year >= 1900:
# Note: in python 3.3 this is okay for years >= 1000,
# refer to http://bugs.python.org/issue177742
return cbook.unicode_safe(dt.strftime(fmt))
return self.strftime_pre_1900(dt, fmt)
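# Usage sketch (illustrative): attach a fixed-format DateFormatter to an axis.
# >>> import matplotlib.pyplot as plt
# >>> fig, ax = plt.subplots()
# >>> ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))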
class IndexDateFormatter(ticker.Formatter):
"""
Use with :class:`~matplotlib.ticker.IndexLocator` to cycle format
strings by index.
"""
def __init__(self, t, fmt, tz=None):
"""
*t* is a sequence of dates (floating point days). *fmt* is a
:func:`strftime` format string.
"""
if tz is None:
tz = _get_rc_timezone()
self.t = t
self.fmt = fmt
self.tz = tz
def __call__(self, x, pos=0):
'Return the label for time *x* at position *pos*'
ind = int(np.round(x))
if ind >= len(self.t) or ind <= 0:
return ''
dt = num2date(self.t[ind], self.tz)
return cbook.unicode_safe(dt.strftime(self.fmt))
class AutoDateFormatter(ticker.Formatter):
"""
This class attempts to figure out the best format to use. This is
most useful when used with the :class:`AutoDateLocator`.
The AutoDateFormatter has a scale dictionary that maps the scale
of the tick (the distance in days between one major tick) and a
format string. The default looks like this::
self.scaled = {
DAYS_PER_YEAR: rcParams['date.autoformat.year'],
DAYS_PER_MONTH: rcParams['date.autoformat.month'],
1.0: rcParams['date.autoformat.day'],
1. / HOURS_PER_DAY: rcParams['date.autoformat.hour'],
1. / (MINUTES_PER_DAY): rcParams['date.autoformat.minute'],
1. / (SEC_PER_DAY): rcParams['date.autoformat.second'],
1. / (MUSECONDS_PER_DAY): rcParams['date.autoformat.microsecond'],
}
The algorithm picks the key in the dictionary that is >= the
current scale and uses that format string. You can customize this
dictionary by doing::
>>> locator = AutoDateLocator()
>>> formatter = AutoDateFormatter(locator)
>>> formatter.scaled[1/(24.*60.)] = '%M:%S' # only show min and sec
A custom :class:`~matplotlib.ticker.FuncFormatter` can also be used.
The following example shows how to use a custom format function to strip
trailing zeros from decimal seconds and adds the date to the first
ticklabel::
>>> def my_format_function(x, pos=None):
... x = matplotlib.dates.num2date(x)
... if pos == 0:
... fmt = '%D %H:%M:%S.%f'
... else:
... fmt = '%H:%M:%S.%f'
... label = x.strftime(fmt)
... label = label.rstrip("0")
... label = label.rstrip(".")
... return label
>>> from matplotlib.ticker import FuncFormatter
>>> formatter.scaled[1/(24.*60.)] = FuncFormatter(my_format_function)
"""
# This can be improved by providing some user-level direction on
# how to choose the best format (precedence, etc...)
# Perhaps a 'struct' that has a field for each time-type where a
# zero would indicate "don't show" and a number would indicate
# "show" with some sort of priority. Same priorities could mean
# show all with the same priority.
# Or more simply, perhaps just a format string for each
# possibility...
def __init__(self, locator, tz=None, defaultfmt='%Y-%m-%d'):
"""
Autoformat the date labels. The default format is the one to use
if none of the values in ``self.scaled`` are greater than the unit
returned by ``locator._get_unit()``.
"""
self._locator = locator
self._tz = tz
self.defaultfmt = defaultfmt
self._formatter = DateFormatter(self.defaultfmt, tz)
self.scaled = {DAYS_PER_YEAR: rcParams['date.autoformatter.year'],
DAYS_PER_MONTH: rcParams['date.autoformatter.month'],
1.0: rcParams['date.autoformatter.day'],
1. / HOURS_PER_DAY: rcParams['date.autoformatter.hour'],
1. / (MINUTES_PER_DAY):
rcParams['date.autoformatter.minute'],
1. / (SEC_PER_DAY):
rcParams['date.autoformatter.second'],
1. / (MUSECONDS_PER_DAY):
rcParams['date.autoformatter.microsecond']}
def __call__(self, x, pos=None):
locator_unit_scale = float(self._locator._get_unit())
fmt = self.defaultfmt
# Pick the first scale which is greater than the locator unit.
for possible_scale in sorted(self.scaled):
if possible_scale >= locator_unit_scale:
fmt = self.scaled[possible_scale]
break
if isinstance(fmt, six.string_types):
self._formatter = DateFormatter(fmt, self._tz)
result = self._formatter(x, pos)
elif six.callable(fmt):
result = fmt(x, pos)
else:
raise TypeError('Unexpected type passed to {0!r}.'.format(self))
return result
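# Usage sketch (illustrative): pair an AutoDateFormatter with an AutoDateLocator
# (defined below) on a date-plotting Axes `ax`.
# >>> locator = AutoDateLocator()
# >>> formatter = AutoDateFormatter(locator)
# >>> ax.xaxis.set_major_locator(locator)
# >>> ax.xaxis.set_major_formatter(formatter)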
class rrulewrapper(object):
def __init__(self, freq, **kwargs):
self._construct = kwargs.copy()
self._construct["freq"] = freq
self._rrule = rrule(**self._construct)
def set(self, **kwargs):
self._construct.update(kwargs)
self._rrule = rrule(**self._construct)
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return getattr(self._rrule, name)
class DateLocator(ticker.Locator):
"""
Determines the tick locations when plotting dates.
"""
hms0d = {'byhour': 0, 'byminute': 0, 'bysecond': 0}
def __init__(self, tz=None):
"""
*tz* is a :class:`tzinfo` instance.
"""
if tz is None:
tz = _get_rc_timezone()
self.tz = tz
def set_tzinfo(self, tz):
"""
Set time zone info.
"""
self.tz = tz
def datalim_to_dt(self):
"""
Convert axis data interval to datetime objects.
"""
dmin, dmax = self.axis.get_data_interval()
if dmin > dmax:
dmin, dmax = dmax, dmin
return num2date(dmin, self.tz), num2date(dmax, self.tz)
def viewlim_to_dt(self):
"""
Converts the view interval to datetime objects.
"""
vmin, vmax = self.axis.get_view_interval()
if vmin > vmax:
vmin, vmax = vmax, vmin
return num2date(vmin, self.tz), num2date(vmax, self.tz)
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return 1
def nonsingular(self, vmin, vmax):
"""
Given the proposed upper and lower extent, adjust the range
if it is too close to being singular (i.e. a range of ~0).
"""
unit = self._get_unit()
interval = self._get_interval()
if abs(vmax - vmin) < 1e-6:
vmin -= 2 * unit * interval
vmax += 2 * unit * interval
return vmin, vmax
class RRuleLocator(DateLocator):
# use the dateutil rrule instance
def __init__(self, o, tz=None):
DateLocator.__init__(self, tz)
self.rule = o
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
delta = relativedelta(vmax, vmin)
# We need to cap at the endpoints of valid datetime
try:
start = vmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = vmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dates = self.rule.between(vmin, vmax, True)
if len(dates) == 0:
return date2num([vmin, vmax])
return self.raise_if_exceeds(date2num(dates))
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
freq = self.rule._rrule._freq
return self.get_unit_generic(freq)
@staticmethod
def get_unit_generic(freq):
if freq == YEARLY:
return DAYS_PER_YEAR
elif freq == MONTHLY:
return DAYS_PER_MONTH
elif freq == WEEKLY:
return DAYS_PER_WEEK
elif freq == DAILY:
return 1.0
elif freq == HOURLY:
return 1.0 / HOURS_PER_DAY
elif freq == MINUTELY:
return 1.0 / MINUTES_PER_DAY
elif freq == SECONDLY:
return 1.0 / SEC_PER_DAY
else:
# error
return -1 # or should this just return '1'?
def _get_interval(self):
return self.rule._rrule._interval
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
delta = relativedelta(dmax, dmin)
# We need to cap at the endpoints of valid datetime
try:
start = dmin - delta
except ValueError:
start = _from_ordinalf(1.0)
try:
stop = dmax + delta
except ValueError:
# The magic number!
stop = _from_ordinalf(3652059.9999999)
self.rule.set(dtstart=start, until=stop)
dmin, dmax = self.datalim_to_dt()
vmin = self.rule.before(dmin, True)
if not vmin:
vmin = dmin
vmax = self.rule.after(dmax, True)
if not vmax:
vmax = dmax
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class AutoDateLocator(DateLocator):
"""
On autoscale, this class picks the best
:class:`DateLocator` to set the view limits and the tick
locations.
"""
def __init__(self, tz=None, minticks=5, maxticks=None,
interval_multiples=False):
"""
*minticks* is the minimum number of ticks desired, which is used to
select the type of ticking (yearly, monthly, etc.).
*maxticks* is the maximum number of ticks desired, which controls
any interval between ticks (ticking every other, every 3, etc.).
For really fine-grained control, this can be a dictionary mapping
individual rrule frequency constants (YEARLY, MONTHLY, etc.)
to their own maximum number of ticks. This can be used to keep
the number of ticks appropriate to the format chosen in
:class:`AutoDateFormatter`. Any frequency not specified in this
dictionary is given a default value.
*tz* is a :class:`tzinfo` instance.
*interval_multiples* is a boolean that indicates whether ticks
should be chosen to be multiple of the interval. This will lock
ticks to 'nicer' locations. For example, this will force the
ticks to be at hours 0,6,12,18 when hourly ticking is done at
6 hour intervals.
The AutoDateLocator has an interval dictionary that maps the
frequency of the tick (a constant from dateutil.rrule) and a
multiple allowed for that ticking. The default looks like this::
self.intervald = {
YEARLY : [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY : [1, 2, 3, 4, 6],
DAILY : [1, 2, 3, 7, 14],
HOURLY : [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000],
}
The interval is used to specify multiples that are appropriate for
the frequency of ticking. For instance, every 7 days is sensible
for daily ticks, but for minutes/seconds, 15 or 30 make sense.
You can customize this dictionary by doing::
locator = AutoDateLocator()
locator.intervald[HOURLY] = [3] # only show every 3 hours
"""
DateLocator.__init__(self, tz)
self._locator = YearLocator()
self._freq = YEARLY
self._freqs = [YEARLY, MONTHLY, DAILY, HOURLY, MINUTELY,
SECONDLY, MICROSECONDLY]
self.minticks = minticks
self.maxticks = {YEARLY: 11, MONTHLY: 12, DAILY: 11, HOURLY: 12,
MINUTELY: 11, SECONDLY: 11, MICROSECONDLY: 8}
if maxticks is not None:
try:
self.maxticks.update(maxticks)
except TypeError:
# Assume we were given an integer. Use this as the maximum
# number of ticks for every frequency and create a
# dictionary for this
self.maxticks = dict(zip(self._freqs,
[maxticks] * len(self._freqs)))
self.interval_multiples = interval_multiples
self.intervald = {
YEARLY: [1, 2, 4, 5, 10, 20, 40, 50, 100, 200, 400, 500,
1000, 2000, 4000, 5000, 10000],
MONTHLY: [1, 2, 3, 4, 6],
DAILY: [1, 2, 3, 7, 14, 21],
HOURLY: [1, 2, 3, 4, 6, 12],
MINUTELY: [1, 5, 10, 15, 30],
SECONDLY: [1, 5, 10, 15, 30],
MICROSECONDLY: [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000,
5000, 10000, 20000, 50000, 100000, 200000, 500000,
1000000]}
self._byranges = [None, range(1, 13), range(1, 32),
range(0, 24), range(0, 60), range(0, 60), None]
def __call__(self):
'Return the locations of the ticks'
self.refresh()
return self._locator()
def tick_values(self, vmin, vmax):
return self.get_locator(vmin, vmax).tick_values(vmin, vmax)
def nonsingular(self, vmin, vmax):
# whatever is thrown at us, we can scale the unit.
# But by default, expand a singular range to an ~4 year period.
if vmin == vmax:
vmin = vmin - DAYS_PER_YEAR * 2
vmax = vmax + DAYS_PER_YEAR * 2
return vmin, vmax
def set_axis(self, axis):
DateLocator.set_axis(self, axis)
self._locator.set_axis(axis)
def refresh(self):
'Refresh internal information based on current limits.'
dmin, dmax = self.viewlim_to_dt()
self._locator = self.get_locator(dmin, dmax)
def _get_unit(self):
if self._freq in [MICROSECONDLY]:
return 1. / MUSECONDS_PER_DAY
else:
return RRuleLocator.get_unit_generic(self._freq)
def autoscale(self):
'Try to choose the view limits intelligently.'
dmin, dmax = self.datalim_to_dt()
self._locator = self.get_locator(dmin, dmax)
return self._locator.autoscale()
def get_locator(self, dmin, dmax):
'Pick the best locator based on a distance.'
delta = relativedelta(dmax, dmin)
tdelta = dmax - dmin
# take absolute difference
if dmin > dmax:
delta = -delta
tdelta = -tdelta
# The following uses a mix of calls to relativedelta and timedelta
# methods because there is incomplete overlap in the functionality of
# these similar functions, and it's best to avoid doing our own math
# whenever possible.
numYears = float(delta.years)
numMonths = (numYears * MONTHS_PER_YEAR) + delta.months
numDays = tdelta.days # Avoids estimates of days/month, days/year
numHours = (numDays * HOURS_PER_DAY) + delta.hours
numMinutes = (numHours * MIN_PER_HOUR) + delta.minutes
numSeconds = np.floor(tdelta.total_seconds())
numMicroseconds = np.floor(tdelta.total_seconds() * 1e6)
nums = [numYears, numMonths, numDays, numHours, numMinutes,
numSeconds, numMicroseconds]
use_rrule_locator = [True] * 6 + [False]
# Default setting of bymonth, etc. to pass to rrule
# [unused (for year), bymonth, bymonthday, byhour, byminute,
# bysecond, unused (for microseconds)]
byranges = [None, 1, 1, 0, 0, 0, None]
# Loop over all the frequencies and try to find one that gives at
# least a minticks tick positions. Once this is found, look for
# an interval from a list specific to that frequency that gives no
# more than maxticks tick positions. Also, set up some ranges
# (bymonth, etc.) as appropriate to be passed to rrulewrapper.
for i, (freq, num) in enumerate(zip(self._freqs, nums)):
# If this particular frequency doesn't give enough ticks, continue
if num < self.minticks:
# Since we're not using this particular frequency, set
# the corresponding by_ to None so the rrule can act as
# appropriate
byranges[i] = None
continue
# Find the first available interval that doesn't give too many
# ticks
for interval in self.intervald[freq]:
if num <= interval * (self.maxticks[freq] - 1):
break
else:
# We went through the whole loop without breaking, default to
# the last interval in the list and raise a warning
warnings.warn('AutoDateLocator was unable to pick an '
'appropriate interval for this date range. '
'It may be necessary to add an interval value '
"to the AutoDateLocator's intervald dictionary."
' Defaulting to {0}.'.format(interval))
# Set some parameters as appropriate
self._freq = freq
if self._byranges[i] and self.interval_multiples:
byranges[i] = self._byranges[i][::interval]
interval = 1
else:
byranges[i] = self._byranges[i]
# We found what frequency to use
break
else:
raise ValueError('No sensible date limit could be found in the '
'AutoDateLocator.')
if use_rrule_locator[i]:
_, bymonth, bymonthday, byhour, byminute, bysecond, _ = byranges
rrule = rrulewrapper(self._freq, interval=interval,
dtstart=dmin, until=dmax,
bymonth=bymonth, bymonthday=bymonthday,
byhour=byhour, byminute=byminute,
bysecond=bysecond)
locator = RRuleLocator(rrule, self.tz)
else:
locator = MicrosecondLocator(interval, tz=self.tz)
locator.set_axis(self.axis)
locator.set_view_interval(*self.axis.get_view_interval())
locator.set_data_interval(*self.axis.get_data_interval())
return locator
class YearLocator(DateLocator):
"""
Make ticks on a given day of each year that is a multiple of base.
Examples::
# Tick every year on Jan 1st
locator = YearLocator()
# Tick every 5 years on July 4th
locator = YearLocator(5, month=7, day=4)
"""
def __init__(self, base=1, month=1, day=1, tz=None):
"""
Mark years that are multiple of base on a given month and day
(default jan 1).
"""
DateLocator.__init__(self, tz)
self.base = ticker.Base(base)
self.replaced = {'month': month,
'day': day,
'hour': 0,
'minute': 0,
'second': 0,
'tzinfo': tz
}
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
ymin = self.base.le(vmin.year)
ymax = self.base.ge(vmax.year)
ticks = [vmin.replace(year=ymin, **self.replaced)]
while 1:
dt = ticks[-1]
if dt.year >= ymax:
return date2num(ticks)
year = dt.year + self.base.get_base()
ticks.append(dt.replace(year=year, **self.replaced))
def autoscale(self):
"""
Set the view limits to include the data range.
"""
dmin, dmax = self.datalim_to_dt()
ymin = self.base.le(dmin.year)
ymax = self.base.ge(dmax.year)
vmin = dmin.replace(year=ymin, **self.replaced)
vmax = dmax.replace(year=ymax, **self.replaced)
vmin = date2num(vmin)
vmax = date2num(vmax)
return self.nonsingular(vmin, vmax)
class MonthLocator(RRuleLocator):
"""
Make ticks on occurrences of each month, e.g., 1, 3, 12.
"""
def __init__(self, bymonth=None, bymonthday=1, interval=1, tz=None):
"""
Mark every month in *bymonth*; *bymonth* can be an int or
sequence. Default is ``range(1,13)``, i.e. every month.
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bymonth is None:
bymonth = range(1, 13)
elif isinstance(bymonth, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonth = [x.item() for x in bymonth.astype(int)]
rule = rrulewrapper(MONTHLY, bymonth=bymonth, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class WeekdayLocator(RRuleLocator):
"""
Make ticks on occurrences of each weekday.
"""
def __init__(self, byweekday=1, interval=1, tz=None):
"""
Mark every weekday in *byweekday*; *byweekday* can be a number or
sequence.
Elements of *byweekday* must be one of MO, TU, WE, TH, FR, SA,
SU, the constants from :mod:`dateutil.rrule`, which have been
imported into the :mod:`matplotlib.dates` namespace.
*interval* specifies the number of weeks to skip. For example,
``interval=2`` plots every second week.
"""
if isinstance(byweekday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
[x.item() for x in byweekday.astype(int)]
rule = rrulewrapper(DAILY, byweekday=byweekday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class DayLocator(RRuleLocator):
"""
Make ticks on occurrences of each day of the month. For example,
1, 15, 30.
"""
def __init__(self, bymonthday=None, interval=1, tz=None):
"""
Mark every day in *bymonthday*; *bymonthday* can be an int or
sequence.
Default is to tick every day of the month: ``bymonthday=range(1,32)``
"""
if not interval == int(interval) or interval < 1:
raise ValueError("interval must be an integer greater than 0")
if bymonthday is None:
bymonthday = range(1, 32)
elif isinstance(bymonthday, np.ndarray):
# This fixes a bug in dateutil <= 2.3 which prevents the use of
# numpy arrays in (among other things) the bymonthday, byweekday
# and bymonth parameters.
bymonthday = [x.item() for x in bymonthday.astype(int)]
rule = rrulewrapper(DAILY, bymonthday=bymonthday,
interval=interval, **self.hms0d)
RRuleLocator.__init__(self, rule, tz)
class HourLocator(RRuleLocator):
"""
Make ticks on occurrences of each hour.
"""
def __init__(self, byhour=None, interval=1, tz=None):
"""
Mark every hour in *byhour*; *byhour* can be an int or sequence.
Default is to tick every hour: ``byhour=range(24)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byhour is None:
byhour = range(24)
rule = rrulewrapper(HOURLY, byhour=byhour, interval=interval,
byminute=0, bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class MinuteLocator(RRuleLocator):
"""
Make ticks on occurrences of each minute.
"""
def __init__(self, byminute=None, interval=1, tz=None):
"""
Mark every minute in *byminute*; *byminute* can be an int or
sequence. Default is to tick every minute: ``byminute=range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if byminute is None:
byminute = range(60)
rule = rrulewrapper(MINUTELY, byminute=byminute, interval=interval,
bysecond=0)
RRuleLocator.__init__(self, rule, tz)
class SecondLocator(RRuleLocator):
"""
Make ticks on occurrences of each second.
"""
def __init__(self, bysecond=None, interval=1, tz=None):
"""
Mark every second in *bysecond*; *bysecond* can be an int or
sequence. Default is to tick every second: ``bysecond = range(60)``
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second occurrence.
"""
if bysecond is None:
bysecond = range(60)
rule = rrulewrapper(SECONDLY, bysecond=bysecond, interval=interval)
RRuleLocator.__init__(self, rule, tz)
class MicrosecondLocator(DateLocator):
"""
Make ticks on occurrences of each microsecond.
"""
def __init__(self, interval=1, tz=None):
"""
*interval* is the interval between each iteration. For
example, if ``interval=2``, mark every second microsecond.
"""
self._interval = interval
self._wrapped_locator = ticker.MultipleLocator(interval)
self.tz = tz
def set_axis(self, axis):
self._wrapped_locator.set_axis(axis)
return DateLocator.set_axis(self, axis)
def set_view_interval(self, vmin, vmax):
self._wrapped_locator.set_view_interval(vmin, vmax)
return DateLocator.set_view_interval(self, vmin, vmax)
def set_data_interval(self, vmin, vmax):
self._wrapped_locator.set_data_interval(vmin, vmax)
return DateLocator.set_data_interval(self, vmin, vmax)
def __call__(self):
# if no data have been set, this will tank with a ValueError
try:
dmin, dmax = self.viewlim_to_dt()
except ValueError:
return []
return self.tick_values(dmin, dmax)
def tick_values(self, vmin, vmax):
nmin, nmax = date2num((vmin, vmax))
nmin *= MUSECONDS_PER_DAY
nmax *= MUSECONDS_PER_DAY
ticks = self._wrapped_locator.tick_values(nmin, nmax)
ticks = [tick / MUSECONDS_PER_DAY for tick in ticks]
return ticks
def _get_unit(self):
"""
Return how many days a unit of the locator is; used for
intelligent autoscaling.
"""
return 1. / MUSECONDS_PER_DAY
def _get_interval(self):
"""
Return the number of units for each tick.
"""
return self._interval
def _close_to_dt(d1, d2, epsilon=5):
"""
Assert that datetimes *d1* and *d2* are within *epsilon* microseconds.
"""
delta = d2 - d1
mus = abs(delta.total_seconds() * 1e6)
assert mus < epsilon
def _close_to_num(o1, o2, epsilon=5):
"""
Assert that float ordinals *o1* and *o2* are within *epsilon*
microseconds.
"""
delta = abs((o2 - o1) * MUSECONDS_PER_DAY)
assert delta < epsilon
def epoch2num(e):
"""
Convert an epoch or sequence of epochs to the new date format,
that is, days since 0001.
"""
return EPOCH_OFFSET + np.asarray(e) / SEC_PER_DAY
def num2epoch(d):
"""
Convert days since 0001 to epoch. *d* can be a number or sequence.
"""
return (np.asarray(d) - EPOCH_OFFSET) * SEC_PER_DAY
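# Example (illustrative): the Unix epoch 1970-01-01 corresponds to ordinal day
# 719163, so the two functions are inverses.
# >>> epoch2num(0.0)
# 719163.0
# >>> num2epoch(719163.0)
# 0.0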
def mx2num(mxdates):
"""
Convert mx :class:`datetime` instance (or sequence of mx
instances) to the new date format.
"""
scalar = False
if not cbook.iterable(mxdates):
scalar = True
mxdates = [mxdates]
ret = epoch2num([m.ticks() for m in mxdates])
if scalar:
return ret[0]
else:
return ret
def date_ticker_factory(span, tz=None, numticks=5):
"""
Create a date locator with *numticks* (approx) and a date formatter
for *span* in days. Return value is (locator, formatter).
"""
if span == 0:
span = 1 / HOURS_PER_DAY
mins = span * MINUTES_PER_DAY
hrs = span * HOURS_PER_DAY
days = span
wks = span / DAYS_PER_WEEK
months = span / DAYS_PER_MONTH # Approx
years = span / DAYS_PER_YEAR # Approx
if years > numticks:
locator = YearLocator(int(years / numticks), tz=tz) # define
fmt = '%Y'
elif months > numticks:
locator = MonthLocator(tz=tz)
fmt = '%b %Y'
elif wks > numticks:
locator = WeekdayLocator(tz=tz)
fmt = '%a, %b %d'
elif days > numticks:
locator = DayLocator(interval=int(math.ceil(days / numticks)), tz=tz)
fmt = '%b %d'
elif hrs > numticks:
locator = HourLocator(interval=int(math.ceil(hrs / numticks)), tz=tz)
fmt = '%H:%M\n%b %d'
elif mins > numticks:
locator = MinuteLocator(interval=int(math.ceil(mins / numticks)),
tz=tz)
fmt = '%H:%M:%S'
else:
locator = MinuteLocator(tz=tz)
fmt = '%H:%M:%S'
formatter = DateFormatter(fmt, tz=tz)
return locator, formatter
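# Example (illustrative): a ~60 day span picks weekday ticking.
# >>> loc, fmt = date_ticker_factory(60)
# >>> fmt.fmt
# '%a, %b %d'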
def seconds(s):
"""
Return seconds as days.
"""
return float(s) / SEC_PER_DAY
def minutes(m):
"""
Return minutes as days.
"""
return float(m) / MINUTES_PER_DAY
def hours(h):
"""
Return hours as days.
"""
return h / HOURS_PER_DAY
def weeks(w):
"""
Return weeks as days.
"""
return w * DAYS_PER_WEEK
class DateConverter(units.ConversionInterface):
"""
Converter for datetime.date and datetime.datetime data,
or for date/time data represented as it would be converted
by :func:`date2num`.
The 'unit' tag for such data is None or a tzinfo instance.
"""
@staticmethod
def axisinfo(unit, axis):
"""
Return the :class:`~matplotlib.units.AxisInfo` for *unit*.
*unit* is a tzinfo instance or None.
The *axis* argument is required but not used.
"""
tz = unit
majloc = AutoDateLocator(tz=tz)
majfmt = AutoDateFormatter(majloc, tz=tz)
datemin = datetime.date(2000, 1, 1)
datemax = datetime.date(2010, 1, 1)
return units.AxisInfo(majloc=majloc, majfmt=majfmt, label='',
default_limits=(datemin, datemax))
@staticmethod
def convert(value, unit, axis):
"""
If *value* is not already a number or sequence of numbers,
convert it with :func:`date2num`.
The *unit* and *axis* arguments are not used.
"""
if units.ConversionInterface.is_numlike(value):
return value
return date2num(value)
@staticmethod
def default_units(x, axis):
"""
Return the tzinfo instance of *x* or of its first element, or None
"""
if isinstance(x, np.ndarray):
x = x.ravel()
try:
x = cbook.safe_first_element(x)
except (TypeError, StopIteration):
pass
try:
return x.tzinfo
except AttributeError:
pass
return None
units.registry[datetime.date] = DateConverter()
units.registry[datetime.datetime] = DateConverter()
| gpl-3.0 |
RachitKansal/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
bhargav/scikit-learn | doc/conf.py | 26 | 8446 | # -*- coding: utf-8 -*-
#
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from __future__ import print_function
import sys
import os
from sklearn.externals.six import u
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinxext'))
from github_link import make_linkcode_resolve
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst',
'sphinx.ext.autodoc', 'sphinx.ext.autosummary',
'sphinx.ext.pngmath', 'numpy_ext.numpydoc',
'sphinx.ext.linkcode',
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u('scikit-learn')
copyright = u('2010 - 2015, scikit-learn developers (BSD License)')
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
version = sklearn.__version__
# The full version, including alpha/beta/rc tags.
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be
# searched for source files.
exclude_trees = ['_build', 'templates', 'includes']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'scikit-learn'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'oldversion': False, 'collapsiblesidebar': True,
'google_analytics': True, 'surveybanner': False,
'sprintbanner': True}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'scikit-learn'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/scikit-learn-logo-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['images']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'scikit-learndoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [('index', 'user_guide.tex', u('scikit-learn user guide'),
u('scikit-learn developers'), 'manual'), ]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}\usepackage{morefloats}
\usepackage{enumitem} \setlistdepth{10}
"""
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctest_flags = True
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
# to hide/show the prompt in code examples:
app.add_javascript('js/copybutton.js')
app.connect('autodoc-process-docstring', generate_example_rst)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve('sklearn',
u'https://github.com/scikit-learn/'
'scikit-learn/blob/{revision}/'
'{package}/{path}#L{lineno}')
| bsd-3-clause |
JamesSample/ecosystem_services_impacts | Code/01_es_lu_cc.py | 1 | 21539 | #------------------------------------------------------------------------------
# Name: 01_es_lu_cc.py
# Purpose: Processing for the CREW project on ES, LUC and CC.
#
# Author: James Sample
#
# Created: 14/01/2015
# Copyright: (c) James Sample and JHI, 2015
# License: https://github.com/JamesSample/ecosystem_services_impacts/blob/master/LICENSE
#------------------------------------------------------------------------------
""" Processes the Future Flows (FF) climate data and estimate climate and land
use change effects on Ecosystem Services (ES). Reads workshop outputs and
performs the following steps:
1. For each ES, reads monthly rainfall and ET grids for the months
specified for both baseline and future periods. For the seasons of
interest, calculates the % change in rainfall and ET between
baseline and future.
2. Combines the rainfall and ET percentage changes into a qualitative
grid of change in runoff.
3. Estimates impacts grids for each ES for CC only, LUC only and CC &
LUC combined.
Inputs grids are supplied in HDF5 file format.
"""
import pandas as pd, h5py, numpy as np, matplotlib, matplotlib.pyplot as plt
import os, sys
from mpl_toolkits.axes_grid1 import ImageGrid
from osgeo import gdal, gdalconst, osr
def read_array_from_h5(h5, variable, model, year, month):
""" Read an array from a specified location in an H5 file.
Args:
h5: The open HDF5 file object
variable: The variable of interest ('rainfall' or 'pet')
model: The code for the climate model of interest (string)
year: Year (integer)
month: Month (integer)
Returns:
array
"""
dset_path = r'/ff_data/%s/%s/%s_%s' % (model, variable, variable, year)
data = h5.get(dset_path)[:,:,month-1].astype(float)
# Set NoData to NaN
data[data==-99] = np.nan
# Convert units
data = data/100
return data
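# Illustrative usage sketch (added for clarity; not part of the original
# script). It only defines a helper, so nothing is opened at import time.
# The file path and model code are the same ones used further down in the
# script; the chosen year and month are arbitrary examples.
def _demo_read_array_from_h5():
    demo_h5 = h5py.File(r'D:\WBM_Development_2014\WBM_2014_Monthly_Input_File.h5', 'r')
    try:
        # January 1961 rainfall grid for climate model 'afixa'
        rn_jan_1961 = read_array_from_h5(demo_h5, 'rainfall', 'afixa', 1961, 1)
    finally:
        demo_h5.close()
    return rn_jan_1961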
def avg_rain_et(h5, st_yr, end_yr, months):
""" Calculate average rainfall and ET grids for the specified years and
months.
Args:
h5: The open HDF5 file object
st_yr: Start year for period of interest (integer)
end_yr: End year for period of interest (integer)
months: List of months of interest (integers)
Returns:
Tuple of arrays (average rainfall, average PET)
"""
# Empty arrays to store rainfall and ET totals
rn_tot = np.zeros((715, 485))
et_tot = np.zeros((715, 485))
# Total number of years to average over
years = end_yr + 1 - st_yr
# Loop over rainfall and ET
for year in range(st_yr, end_yr+1):
for month in months:
# Read rainfall and ET grids
rn = read_array_from_h5(h5, 'rainfall', model, year, month)
et = read_array_from_h5(h5, 'pet', model, year, month)
# Add to totals
rn_tot += rn
et_tot += et
# Average
rn_av = rn_tot/years
et_av = et_tot/years
return (rn_av, et_av)
def plot_avg_grids(base_rn_av, base_et_av, fut_rn_av, fut_et_av):
""" Plot the average rainfall and ET grids. Used for testing.
Args:
base_rn_av: Average rainfall grid for baseline period.
base_et_av: Average PET grid for baseline period.
fut_rn_av: Average rainfall grid for future period.
fut_et_av: Average PET grid for future period.
Returns:
None. Displays maps of each grid using same colour scale.
"""
# Get min and max values from grids
rnmin = min(np.nanmin(base_rn_av), np.nanmin(fut_rn_av))
rnmax = max(np.nanmax(base_rn_av), np.nanmax(fut_rn_av))
etmin = min(np.nanmin(base_et_av), np.nanmin(fut_et_av))
etmax = max(np.nanmax(base_et_av), np.nanmax(fut_et_av))
# Plot
fig = plt.figure()
grid = ImageGrid(fig, 111,
nrows_ncols = (1, 4),
axes_pad=0.5,
cbar_mode='each')
im0 = grid[0].imshow(base_rn_av, vmin=rnmin, vmax=rnmax,
interpolation='nearest')
grid.cbar_axes[0].colorbar(im0)
im1 = grid[1].imshow(fut_rn_av, vmin=rnmin, vmax=rnmax,
interpolation='nearest')
grid.cbar_axes[1].colorbar(im1)
im2 = grid[2].imshow(base_et_av, vmin=etmin, vmax=etmax,
interpolation='nearest')
grid.cbar_axes[2].colorbar(im2)
im3 = grid[3].imshow(fut_et_av, vmin=etmin, vmax=etmax,
interpolation='nearest')
grid.cbar_axes[3].colorbar(im3)
plt.show()
def plot_reclassified_grid(array, out_path, sup_title='Main title',
title='Sub-title'):
""" Plot and save the reclassified grid.
Args:
array: Grid of integers in range -2 to +2
out_path: Output file path (PNG or PDF)
sup_title: Main title for plot (string)
title: Sub-title for plot (string)
Returns:
None. Saves a plot to the specified path.
"""
# Make a color map of fixed colors
cmap = matplotlib.colors.ListedColormap(['Red', 'Orange', 'LimeGreen',
'DeepSkyBlue', 'Blue'])
bounds=[-2.5, -1.5, -0.5, 0.5, 1.5, 2.5]
norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N)
# Create axes for plot (A4 size)
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(8.3,11.7))
# Plot the array, using the colours specified
img = axes.imshow(array, interpolation='nearest', origin='upper',
cmap=cmap, norm=norm)
# Add labels to plot
plt.title(title)
plt.suptitle(sup_title, fontsize=16, y=0.95)
plt.ylabel('Northing')
plt.xlabel('Easting')
plt.grid(True)
# Reformat the axis labels (mainly change the Y values into northings)
axes.set_yticks([35, 135, 235, 335, 435, 535, 635, 735])
axes.set_yticklabels([1200, 1100, 1000, 900, 800, 700, 600, 500])
axes.set_xticks([100, 200, 300, 400])
# Add axes for the color bar
cax = fig.add_axes([0.2, 0.785, 0.02, 0.10])
# Add the colour bar and set labels
cbar = fig.colorbar(img, cax=cax, cmap=cmap, norm=norm, boundaries=bounds,
ticks=[-2.2,-1.2,-0.2,0.8,1.8])
cbar.set_ticklabels(['Large decrease',
'Small decrease',
'Neutral',
'Small increase',
'Large increase'], update_ticks=True)
# Make the cbar ticks invisible
ticks = cbar.ax.get_yticklines()
for tick in ticks:
plt.setp(tick, alpha=0)
cbar_labels = plt.getp(cbar.ax.axes, 'yticklabels')
plt.setp(cbar_labels, fontsize=10)
# Save fig
plt.savefig(out_path, dpi=300)
## plt.show()
plt.clf()
plt.close()
def reclass_rn_et_grid(array):
""" Take an array of percentage changes and reclassify it according to:
% change | Class
x<=-15 | -2
-15<x<=-5 | -1
-5<x<=5 | 0
5<x<=15 | +1
15<x | +2
Args:
array: Array of percentage changes to be reclassified.
Returns:
Reclassified array
"""
# Create copy of array for reclass values
rc = array.copy()
rc[array<=-15] = -2
rc[(-15<array) & (array<=-5)] = -1
rc[(-5<array) & (array<=5)] = 0
rc[(5<array) & (array<=15)] = 1
rc[15<array] = 2
return rc
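# Illustrative sketch (added for clarity; not part of the original script):
# the five classes produced by reclass_rn_et_grid for a small hypothetical
# array of percentage changes. Defined only; never called at import time.
def _demo_reclass_rn_et_grid():
    pct_change = np.array([[-20.0, -10.0, 0.0],
                           [7.5, 20.0, 3.0]])
    # Expected classes: [[-2, -1, 0], [1, 2, 0]]
    return reclass_rn_et_grid(pct_change)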
def reclass_ro(matrix_path, rn, et):
""" Generate reclassification matrix for runoff based on reclassified
change grids for rainfall and PET and the runoff reclassification
matrix from the workshop.
Args:
matrix_path: Path to CSV file representing runoff matrix.
rn: Reclassified rainfall grid from reclass_rn_et_grid
et: Reclassified PET grid from reclass_rn_et_grid
Returns:
Array (grid of integers representing change in runoff)
"""
# Read matrix
df = pd.read_csv(matrix_path, index_col=0)
# Grid of NaNs with correct shape
ro = rn.copy()*np.nan
# Loop over indices
for x, y in np.ndindex(ro.shape):
# Get values for change in rainfall and ET
et_ch = et[x, y]
rn_ch = rn[x, y]
# If both are not nan, reclassify
if (np.isfinite(et_ch) and np.isfinite(rn_ch)):
rc_val = df.ix[int(et_ch), str(int(rn_ch))]
ro[x, y] = rc_val
return ro
def reclass_es_ro(es_idx, ro):
""" Reclassify the runoff grid to estimate effects of runoff change on each
ES.
Args:
es_idx: The ID of the ES of interest in data frame ro_df
ro: The runoff change grid from reclass_ro
Returns:
Array (grid of integers representing change in ES)
"""
# Make a copy of the ro grid to update
es = ro.copy()
# Reclassify
for chng in [-2, -1, 0, 1, 2]:
es[ro==chng] = ro_df.ix[es_idx, 'RO_%d' % chng]
return es
def read_ascii(ascii_path,
xmin=0,
xmax=485000,
ymin=520000,
ymax=1235000,
exptd_rows=715,
exptd_cols=485,
exptd_px_wd=1000,
exptd_px_ht=-1000,
exptd_ndv=-9999):
""" Read an ASCII grid file, clip it to the specified bounding box and
return a numpy array.
Args:
ascii_path: Path to the ASCII grid file to read.
xmin: Minimum Easting in OSGB1936 metres.
xmax: Maximum Easting in OSGB1936 metres.
ymin: Minimum Northing in OSGB1936 metres.
ymax: Maximum Northing in OSGB1936 metres.
exptd_rows: No. of rows expected in file.
exptd_cols: No. of columns expected in file.
exptd_px_wd: Cell width.
exptd_px_ht: Cell height.
exptd_ndv: No data value.
Returns:
Array (floats).
"""
# Register drivers
gdal.AllRegister()
# Process the file with GDAL
ds = gdal.Open(ascii_path, gdalconst.GA_ReadOnly)
if ds is None:
print 'Could not open ' + ascii_path
sys.exit(1)
# In order to select the first cell correctly, choose a point just within
# the top left corner of the specified bounding box.
x = xmin + 10
y = ymax - 10
# Dataset properties
geotransform = ds.GetGeoTransform()
originX = geotransform[0]
originY = geotransform[3]
pixelWidth = geotransform[1]
pixelHeight = geotransform[5]
# Calculate number of rows and cols to return
rows = abs(int((ymax-ymin)/pixelHeight))
cols = int((xmax-xmin)/pixelWidth)
# Select starting pixel
xOffset = int((x - originX) / pixelWidth)
yOffset = int((y - originY) / pixelHeight)
band = ds.GetRasterBand(1)
no_data_val = band.GetNoDataValue()
# Simple checking
assert rows == exptd_rows
assert cols == exptd_cols
assert pixelWidth == exptd_px_wd
assert pixelHeight == exptd_px_ht
assert no_data_val == exptd_ndv
# Read the data to an array
data = band.ReadAsArray(xOffset, yOffset, cols, rows)
# Close the dataset
ds = None
return data.astype(float)
def process_land_use_change(lu_mat_path, base, fut, esid, codes_df):
""" Estimate land use change (LUC) only effects for the specified ES.
Args:
lu_mat_path: Excel file containing land use matrices from the workshop.
base: Baseline land use grid.
fut: Future land use grid.
esid: ES ID from land use matrices Excel file
codes_df: Land use code look-up table (as data frame)
Returns:
Array (grid of integers representing change in ES)
"""
# Read matrix for this ES
lu_mat = pd.read_excel(lu_mat_path, sheetname='Land Use')
# Get row for start of matrix
st_row = (lu_mat['ES_ID']==esid).nonzero()[0][0] + 2
# Read matrix of interest
lu_mat = pd.read_excel(lu_mat_path, sheetname='Land Use', skiprows=st_row,
skip_footer=(120-6-st_row), parse_cols='C:I',
index_col=0)
# Perform reclassification
# Grid of NaNs with correct shape
rc = base.copy()*np.nan
# Loop over indices
for x, y in np.ndindex(base.shape):
# Get values for baseline and future LU
base_lu = base[x, y]
fut_lu = fut[x, y]
# If both are not nan, reclassify
if (np.isfinite(base_lu) and np.isfinite(fut_lu)):
# Get the base and fut LU as a string
base_str = codes_df.ix[int(base_lu)]['LU_Class']
fut_str = codes_df.ix[int(fut_lu)]['LU_Class']
rc_val = lu_mat.ix[base_str, fut_str]
rc[x, y] = rc_val
return rc
def process_land_use_and_climate_change(lucc_mat_path, lugrid, ccgrid, esid):
""" Estimate combined land use and climate change effects for the specified
ES.
Args:
lucc_mat_path: Excel file containing matrices from the workshop.
lugrid: The grid of land use change effects.
ccgrid: The grid of climate change effects.
esid: ES ID from workshop matrices Excel file.
Returns:
Array (grid of integers representing change in ES)
"""
# Read matrix for this ES
lucc_mat = pd.read_excel(lucc_mat_path, sheetname='CC_LU')
# Get row for start of matrix
st_row = (lucc_mat['ES_ID']==esid).nonzero()[0][0] + 2
# Read matrix of interest
lucc_mat = pd.read_excel(lucc_mat_path, sheetname='CC_LU', skiprows=st_row,
skip_footer=(108-5-st_row), parse_cols='C:I',
index_col=0)
# Perform reclassification
# Grid of NaNs with correct shape
rc = lugrid.copy()*np.nan
# Loop over indices
for x, y in np.ndindex(lugrid.shape):
# Get values for baseline and future LU
lu = lugrid[x, y]
cc = ccgrid[x, y]
# If both are not nan, reclassify
if (np.isfinite(lu) and np.isfinite(cc)):
# Look up the combined land use / climate change effect
rc_val = lucc_mat.ix[int(lu), int(cc)]
rc[x, y] = rc_val
return rc
def array_to_gtiff(out_path, data_array, ndv=-9999, xmin=0, ymax=1235000,
cell_size=1000):
""" Convert numpy array to 16-bit integer GeoTiff.
Args:
out_path: The .tif file to be created.
data_array: The (integer) data array to save.
ndv: No data value.
xmin: Minimum x (Easting) co-ordinate, in OSGB1936 metres
ymax: Maximum y (Northing) co-ordinate, in OSGB1936 metres
cell_size: Cell size (metres)
Returns:
None. Array is saved to specified path.
"""
# Copy data_array so that it is not modified
data = data_array.copy()
# Convert NaNs to NDV
data[np.isnan(data)] = ndv
# Get array shape
cols = data.shape[1]
rows = data.shape[0]
# Get driver
driver = gdal.GetDriverByName('GTiff') # NB can't directly create ArcInfo ASCII grids in this way
# Create a new raster data source
out_ds = driver.Create(out_path, cols, rows, 1, gdal.GDT_Int16)
# Get spatial ref details
srs = osr.SpatialReference()
srs.ImportFromEPSG(27700) # From EPSG for OSGB36 grid
# Write metadata
out_ds.SetGeoTransform((xmin, cell_size, 0.0, ymax, 0.0, -1*cell_size)) #(xmin, cellsize, 0, ymax, 0, -cellsize)
out_ds.SetProjection(srs.ExportToWkt())
out_band = out_ds.GetRasterBand(1)
out_band.SetNoDataValue(ndv)
out_band.WriteArray(data)
# Tidy up
del out_ds, out_band
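# Illustrative sketch (added for clarity; not part of the original script):
# writing a tiny hypothetical grid with array_to_gtiff. The output file name
# is an assumption; the helper is only defined here, so nothing is written
# at import time.
def _demo_array_to_gtiff():
    demo_grid = np.array([[1.0, 2.0],
                          [np.nan, 4.0]])
    # NaNs are replaced by the -9999 NoData value and values are stored as
    # 16-bit integers in OSGB1936 (EPSG:27700) with 1 km cells.
    array_to_gtiff('demo_grid.tif', demo_grid)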
# #############################################################################
# User input
# Climate data
ff_h5_path = r'D:\WBM_Development_2014\WBM_2014_Monthly_Input_File.h5'
# Runoff matrices
ro_path = r'D:\Eco_Services_Impacts\Matrices_Development\03_Group_1_Matrices\Runoff_Impacts_Grp1.csv'
ro_matrix_15 = r'D:\Eco_Services_Impacts\Matrices_Development\02_Common_Matrices\Runoff_Matrix_15pct.csv'
# Land use data
base_path = r'D:\Eco_Services_Impacts\Land_Use\baseline_lu_lcm07.txt'
fut_path = r'D:\Eco_Services_Impacts\Land_Use\future_lu_2050.txt'
# Land use matrices
lu_classes_path = r'D:\Eco_Services_Impacts\Land_Use\Land_Use_Classes.csv'
lu_matrices_path = r'D:\Eco_Services_Impacts\Matrices_Development\03_Group_1_Matrices\Land_Use_Matrices_Grp1.xlsx'
# Land use and climate combined matrices
lucc_matrices_path = r'D:\Eco_Services_Impacts\Matrices_Development\03_Group_1_Matrices\Climate_And_Land_Use_Matrices_Grp1.xlsx'
# Output folders
out_pdf_fold = r'D:\Eco_Services_Impacts\Model_Output\02_Group_1_Output\PDF'
out_array_fold = r'D:\Eco_Services_Impacts\Model_Output\02_Group_1_Output\GeoTiffs'
# Time periods to compare
base_st_yr, base_end_yr = 1961, 1990
fut_st_yr, fut_end_yr = 2041, 2070
# Future Flows models of interest
models = ['afixa', 'afixc', 'afixl', 'afixm', 'afixo', 'afixh',
'afixi', 'afixj', 'afixk', 'afgcx', 'afixq']
# #############################################################################
# Read LU grids
base = read_ascii(base_path)
base[base==-9999] = np.nan
fut = read_ascii(fut_path)
fut[fut==-9999] = np.nan
# Read LU class codes
codes_df = pd.read_csv(lu_classes_path, index_col=0)
# Read the runoff matrices
ro_df = pd.read_csv(ro_path, index_col=0)
# Open H5 file
h5 = h5py.File(ff_h5_path, 'r')
# Iterate over each ES
for idx in ro_df.index:
print '\nProcessing land use change impacts for %s.' % ro_df.ix[idx, 'ES']
# 1. Process land use change only
luc = process_land_use_change(lu_matrices_path, base, fut, idx, codes_df)
# Prepare to save
out_name = 'ES%02d_LUC' % idx
# Save array
out_array = os.path.join(out_array_fold, '%s.tif' % out_name)
array_to_gtiff(out_array, luc)
# Save PDF
out_pdf = os.path.join(out_pdf_fold, '%s.pdf' % out_name)
plot_reclassified_grid(luc, out_pdf,
sup_title='Change in %s' % ro_df.ix[idx, 'ES'],
title='(land use change only)' )
# 2. Process climate change only
# Get the relevant months for this ES
months = [int(i) for i in ro_df.ix[idx, 'Key_Months'].split(',')]
# Loop over climate models of interest
for model in models:
print ('Processing climate change impacts for '
'%s (model %s).' % (ro_df.ix[idx, 'ES'], model))
# 2.1. Baseline
base_rn_av, base_et_av = avg_rain_et(h5, base_st_yr, base_end_yr,
months)
# 2.2. Future
fut_rn_av, fut_et_av = avg_rain_et(h5, fut_st_yr, fut_end_yr,
months)
# Plot
# plot_avg_grids(base_rn_av, base_et_av, fut_rn_av, fut_et_av)
# Calculate % change
rn_pct = 100*(fut_rn_av - base_rn_av)/base_rn_av
et_pct = 100*(fut_et_av - base_et_av)/base_et_av
# Reclassify
rn_rc = reclass_rn_et_grid(rn_pct)
et_rc = reclass_rn_et_grid(et_pct)
# plot_reclassified_grid(rn_rc)
# plot_reclassified_grid(et_rc)
# Generate runoff grid
ro = reclass_ro(ro_matrix_15, rn_rc, et_rc)
# # Plot runoff grid
# plot_reclassified_grid(ro,
# sup_title='Change in runoff',
# title='(Model %s; %s)' % (model, months))
# Reclass ro grid to estimate ES impact
es = reclass_es_ro(idx, ro)
# Prepare to save
out_name = 'ES%02d_%s' % (idx, model)
# Save array
out_array = os.path.join(out_array_fold, '%s.tif' % out_name)
array_to_gtiff(out_array, es)
# Save PDF
out_pdf = os.path.join(out_pdf_fold, '%s.pdf' % out_name)
plot_reclassified_grid(es, out_pdf,
sup_title='Change in %s' % ro_df.ix[idx, 'ES'],
title='(climate model %s only)' % model)
# 3. Process combined land use and climate effects
print ('Processing climate and land use change impacts for '
'%s (model %s).' % (ro_df.ix[idx, 'ES'], model))
# Reclassify to get CC and LUC effects
cc_lu = process_land_use_and_climate_change(lucc_matrices_path, luc,
es, idx)
# Prepare to save
out_name = 'ES%02d_LUC_%s' % (idx, model)
# Save array
out_array = os.path.join(out_array_fold, '%s.tif' % out_name)
array_to_gtiff(out_array, cc_lu)
# Save PDF
out_pdf = os.path.join(out_pdf_fold, '%s.pdf' % out_name)
plot_reclassified_grid(cc_lu, out_pdf,
sup_title='Change in %s' % ro_df.ix[idx, 'ES'],
title='(climate and land use change together)')
# Close H5 file
h5.close()
print '\nFinished.'
| mit |
sniemi/SamPy | sandbox/src1/examples/multi_image.py | 1 | 1769 | #!/usr/bin/env python
'''
Make a set of images with a single colormap, norm, and colorbar.
It also illustrates colorbar tick labelling with a multiplier.
'''
from matplotlib.pyplot import figure, show, sci
from matplotlib import cm, colors
from matplotlib.font_manager import FontProperties
from numpy import amin, amax, ravel
from numpy.random import rand
Nr = 3
Nc = 2
fig = figure()
cmap = cm.cool
figtitle = 'Multiple images'
t = fig.text(0.5, 0.95, figtitle,
horizontalalignment='center',
fontproperties=FontProperties(size=16))
cax = fig.add_axes([0.2, 0.08, 0.6, 0.04])
w = 0.4
h = 0.22
ax = []
images = []
vmin = 1e40
vmax = -1e40
for i in range(Nr):
for j in range(Nc):
pos = [0.075 + j*1.1*w, 0.18 + i*1.2*h, w, h]
a = fig.add_axes(pos)
if i > 0:
a.set_xticklabels([])
# Make some fake data with a range that varies
# somewhat from one plot to the next.
data =((1+i+j)/10.0)*rand(10,20)*1e-6
dd = ravel(data)
# Manually find the min and max of all colors for
# use in setting the color scale.
vmin = min(vmin, amin(dd))
vmax = max(vmax, amax(dd))
images.append(a.imshow(data, cmap=cmap))
ax.append(a)
# Set the first image as the master, with all the others
# observing it for changes in cmap or norm.
norm = colors.Normalize(vmin=vmin, vmax=vmax)
for i, im in enumerate(images):
im.set_norm(norm)
if i > 0:
images[0].add_observer(im)
# The colorbar is also based on this master image.
fig.colorbar(images[0], cax, orientation='horizontal')
# We need the following only if we want to run this
# script interactively and be able to change the colormap.
sci(images[0])
show()
| bsd-2-clause |
suranap/qiime | qiime/quality_scores_plot.py | 9 | 6918 | #!/usr/bin/env python
# File created Sept 29, 2010
from __future__ import division
__author__ = "William Walters"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["William Walters", "Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.9.1-dev"
__maintainer__ = "William Walters"
__email__ = "William.A.Walters@colorado.edu"
from matplotlib import use
use('Agg', warn=False)
from skbio.parse.sequences import parse_fasta
from numpy import arange, std, average
from pylab import plot, savefig, xlabel, ylabel, text, \
hist, figure, legend, title, show, xlim, ylim, xticks, yticks,\
scatter, subplot
from matplotlib.font_manager import fontManager, FontProperties
from qiime.util import gzip_open
from qiime.parse import parse_qual_score
def bin_qual_scores(qual_scores):
""" Bins qual score according to nucleotide position
qual_scores: Dict of label: numpy array of base scores
"""
qual_bins = []
qual_lens = []
for l in qual_scores.values():
qual_lens.append(len(l))
max_seq_size = max(qual_lens)
for base_position in range(max_seq_size):
qual_bins.append([])
for scores in qual_scores.values():
# Add score if exists in base position, otherwise skip
try:
qual_bins[base_position].append(scores[base_position])
except IndexError:
continue
return qual_bins
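# Illustrative sketch (added for clarity; not part of the original module):
# binning a tiny hypothetical qual_scores dict. Reads of different lengths
# contribute only to the positions they cover, e.g. the result here is
# [[30, 32], [28, 27], [25]] (order within a bin follows dict iteration).
def _demo_bin_qual_scores():
    demo_scores = {'read1': [30, 28, 25],
                   'read2': [32, 27]}
    return bin_qual_scores(demo_scores)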
def get_qual_stats(qual_bins, score_min):
""" Generates bins of averages, std devs, total NT from quality bins"""
ave_bins = []
std_dev_bins = []
total_bases_bins = []
found_first_poor_qual_pos = False
suggested_trunc_pos = None
for base_position in qual_bins:
total_bases_bins.append(len(base_position))
std_dev_bins.append(std(base_position))
ave_bins.append(average(base_position))
if not found_first_poor_qual_pos:
if average(base_position) < score_min:
suggested_trunc_pos = qual_bins.index(base_position)
found_first_poor_qual_pos = True
return ave_bins, std_dev_bins, total_bases_bins, suggested_trunc_pos
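# Illustrative sketch (added for clarity; not part of the original module):
# with score_min=25 the per-position averages are [31.0, 27.0, 21.0], so the
# suggested truncation position is 2, the first position whose mean drops
# below the threshold. Defined only; never called at import time.
def _demo_get_qual_stats():
    demo_bins = [[30, 32], [26, 28], [20, 22]]
    return get_qual_stats(demo_bins, 25)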
def plot_qual_report(ave_bins,
std_dev_bins,
total_bases_bins,
score_min,
output_dir):
""" Plots, saves graph showing quality score averages, stddev.
Additionally, the total nucleotide count for each position is shown on
a second subplot
ave_bins: list with average quality score for each base position
std_dev_bins: list with standard deviation for each base position
total_bases_bins: list with total counts of bases for each position
score_min: lowest value that a given base call can be and still be
acceptable. Used to generate a dotted line on the graph for easy assay
of the poor scoring positions.
output_dir: output directory
"""
t = arange(0, len(ave_bins), 1)
std_dev_plus = []
std_dev_minus = []
for n in range(len(ave_bins)):
std_dev_plus.append(ave_bins[n] + std_dev_bins[n])
std_dev_minus.append(ave_bins[n] - std_dev_bins[n])
figure_num = 0
f = figure(figure_num, figsize=(8, 10))
figure_title = "Quality Scores Report"
f.text(.5, .93, figure_title, horizontalalignment='center', size="large")
subplot(2, 1, 1)
plot(t, ave_bins, linewidth=2.0, color="black")
plot(t, std_dev_plus, linewidth=0.5, color="red")
dashed_line = [score_min] * len(ave_bins)
l, = plot(dashed_line, '--', color='gray')
plot(t, std_dev_minus, linewidth=0.5, color="red")
legend(
('Quality Score Average',
'Std Dev',
'Score Threshold'),
loc='lower left')
xlabel("Nucleotide Position")
ylabel("Quality Score")
subplot(2, 1, 2)
plot(t, total_bases_bins, linewidth=2.0, color="blue")
xlabel("Nucleotide Position")
ylabel("Nucleotide Counts")
outfile_name = output_dir + "/quality_scores_plot.pdf"
savefig(outfile_name)
def write_qual_report(ave_bins,
std_dev_bins,
total_bases_bins,
output_dir,
suggested_trunc_pos):
""" Writes data in bins to output text file
ave_bins: list with average quality score for each base position
std_dev_bins: list with standard deviation for each base position
total_bases_bins: list with total counts of bases for each position
output_dir: output directory
suggested_trunc_pos: Position where average quality score dropped below
the score minimum (25 by default)
"""
outfile_name = output_dir + "/quality_bins.txt"
outfile = open(outfile_name, "w")
outfile.write("# Suggested nucleotide truncation position (None if " +
"quality score average did not drop below the score minimum threshold)" +
": %s\n" % suggested_trunc_pos)
outfile.write("# Average quality score bins\n")
outfile.write(",".join(str("%2.3f" % ave) for ave in ave_bins) + "\n")
outfile.write("# Standard deviation bins\n")
outfile.write(",".join(str("%2.3f" % std) for std in std_dev_bins) + "\n")
outfile.write("# Total bases per nucleotide position bins\n")
outfile.write(",".join(str("%d" %
total_bases) for total_bases in total_bases_bins))
def generate_histogram(qual_fp,
output_dir,
score_min=25,
verbose=True,
qual_parser=parse_qual_score):
""" Main program function for generating quality score histogram
qual_fp: quality score filepath
output_dir: output directory
score_min: minimum score to be considered a reliable base call, used
to generate dotted line on histogram for easy visualization of poor
quality scores.
qual_parser : function to apply to extract quality scores
"""
if qual_fp.endswith('.gz'):
qual_lines = gzip_open(qual_fp)
else:
qual_lines = open(qual_fp, "U")
qual_scores = qual_parser(qual_lines)
# Sort bins according to base position
qual_bins = bin_qual_scores(qual_scores)
# Get average, std dev, and total nucleotide counts for each base position
ave_bins, std_dev_bins, total_bases_bins, suggested_trunc_pos =\
get_qual_stats(qual_bins, score_min)
plot_qual_report(ave_bins, std_dev_bins, total_bases_bins, score_min,
output_dir)
# Save values to output text file
write_qual_report(ave_bins, std_dev_bins, total_bases_bins, output_dir,
suggested_trunc_pos)
if verbose:
print "Suggested nucleotide truncation position (None if quality " +\
"score average did not fall below the minimum score parameter): %s\n" %\
suggested_trunc_pos
| gpl-2.0 |
jeffery-do/Vizdoombot | doom/lib/python3.5/site-packages/numpy/core/function_base.py | 23 | 6891 | from __future__ import division, absolute_import, print_function
__all__ = ['logspace', 'linspace']
from . import numeric as _nx
from .numeric import result_type, NaN, shares_memory, MAY_SHARE_BOUNDS, TooHardError
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
"""
Return evenly spaced numbers over a specified interval.
Returns `num` evenly spaced samples, calculated over the
interval [`start`, `stop`].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : scalar
The starting value of the sequence.
stop : scalar
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num : int, optional
Number of samples to generate. Default is 50. Must be non-negative.
endpoint : bool, optional
If True, `stop` is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (`samples`, `step`), where `step` is the spacing
between samples.
dtype : dtype, optional
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
.. versionadded:: 1.9.0
Returns
-------
samples : ndarray
There are `num` equally spaced samples in the closed interval
``[start, stop]`` or the half-open interval ``[start, stop)``
(depending on whether `endpoint` is True or False).
step : float
Only returned if `retstep` is True
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
logspace : Samples uniformly distributed in log space.
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([ 2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([ 2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
num = int(num)
if num < 0:
raise ValueError("Number of samples, %s, must be non-negative." % num)
div = (num - 1) if endpoint else num
# Convert float/complex array scalars to float, gh-3504
start = start * 1.
stop = stop * 1.
dt = result_type(start, stop, float(num))
if dtype is None:
dtype = dt
y = _nx.arange(0, num, dtype=dt)
delta = stop - start
if num > 1:
step = delta / div
if step == 0:
# Special handling for denormal numbers, gh-5437
y /= div
y = y * delta
else:
# One might be tempted to use faster, in-place multiplication here,
# but this prevents step from overriding what class is produced,
# and thus prevents, e.g., use of Quantities; see gh-7142.
y = y * step
else:
# 0 and 1 item long sequences have an undefined step
step = NaN
# Multiply with delta to allow possible override of output class.
y = y * delta
y += start
if endpoint and num > 1:
y[-1] = stop
if retstep:
return y.astype(dtype, copy=False), step
else:
return y.astype(dtype, copy=False)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
"""
Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
Parameters
----------
start : float
``base ** start`` is the starting value of the sequence.
stop : float
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length ``num``) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
... # doctest: +SKIP
>>> power(base, y).astype(dtype)
... # doctest: +SKIP
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.443469 , 464.15888336, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([ 100. , 177.827941 , 316.22776602, 562.34132519])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([ 4. , 5.0396842 , 6.34960421, 8. ])
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 10
>>> x1 = np.logspace(0.1, 1, N, endpoint=True)
>>> x2 = np.logspace(0.1, 1, N, endpoint=False)
>>> y = np.zeros(N)
>>> plt.plot(x1, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2, y + 0.5, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
"""
y = linspace(start, stop, num=num, endpoint=endpoint)
if dtype is None:
return _nx.power(base, y)
return _nx.power(base, y).astype(dtype)
| mit |
yilei0620/3D_Conditional_Gan | GenSample_obj.py | 1 | 4544 | import sys
sys.path.append('..')
import os
import json
from time import time
import numpy as np
from sklearn.externals import joblib
import scipy
from scipy import io
# from matplotlib import pyplot as plt
# from sklearn.externals import joblib
import theano
import theano.tensor as T
from lib import activations
from lib import updates
from lib import inits
from lib.rng import py_rng, np_rng
from lib.ops import batchnorm, conv_cond_concat, conv, dropout
from lib.theano_utils import floatX, sharedX
from lib.data_utils import OneHot, shuffle, iter_data
from lib.metrics import nnc_score, nnd_score
from load import load_shapenet_train, load_shapenet_test
relu = activations.Rectify()
sigmoid = activations.Sigmoid()
lrelu = activations.LeakyRectify()
bce = T.nnet.binary_crossentropy
parameters = {'objectNumber': 2, 'Nz' : 200, 'Channel' :(1,64,128,256,512), 'kernal':(4,4,4,4), 'batchsize': 50, 'Convlayersize':(64,32,16,8,4), 'Genlrt' : 0.001, 'Discrimlrt' : 0.00001 , 'beta' : 0.5, 'l2':2.5e-5, 'Genk' : 2 , 'niter':50, 'niter_decay' : 150}
for p in parameters:
tmp = p + " = parameters[p]"
exec(tmp)
# print conditional,type(batchsize),Channel[-1],kernal
gifn = inits.Normal(scale=0.02)
difn = inits.Normal(scale=0.02)
## filter_shape: (output channels, input channels, filter height, filter width, filter depth)
## load the parameters
# gen_params = [gw1, gw2, gw3, gw4, gw5, gwx]
# discrim_params = [dw1, dw2, dw3, dw4, dw5, dwy]
temp = joblib.load('models%d/50_gen_params.jl'%objectNumber)
gw1 = sharedX(temp[0])
gg1 = sharedX(temp[1])
gb1 = sharedX(temp[2])
gw2 = sharedX(temp[3])
gg2 = sharedX(temp[4])
gb2 = sharedX(temp[5])
gw3 = sharedX(temp[6])
gg3 = sharedX(temp[7])
gb3 = sharedX(temp[8])
gw4 = sharedX(temp[9])
gg4 = sharedX(temp[10])
gb4 = sharedX(temp[11])
gwx = sharedX(temp[12])
gen_params = [gw1, gg1, gb1, gw2, gg2, gb2, gw3, gg3, gb3, gw4 ,gg4, gb4, gwx]
##
def gen(Z, w1, g1, b1, w2, g2, b2, w3, g3, b3, w4, g4, b4, wx):
Gl1 = relu(batchnorm(T.dot(Z, w1), g=g1, b=b1))
Gl1 = Gl1.reshape((Gl1.shape[0],Channel[-1],Convlayersize[-1],Convlayersize[-1],Convlayersize[-1]))
input_shape = (None , None,Convlayersize[-1],Convlayersize[-1],Convlayersize[-1])
filter_shape = (Channel[-1] , Channel[-2], kernal[-1], kernal[-1], kernal[-1])
Gl2 = relu(batchnorm(conv(Gl1,w2,filter_shape = filter_shape, input_shape = input_shape, conv_mode = 'deconv'),g = g2, b = b2))
input_shape = (None , None,Convlayersize[-2],Convlayersize[-2],Convlayersize[-2])
filter_shape = (Channel[-2] , Channel[-3], kernal[-2], kernal[-2], kernal[-2])
Gl3 = relu(batchnorm(conv(Gl2,w3,filter_shape = filter_shape, input_shape = input_shape, conv_mode = 'deconv'),g = g3, b = b3))
input_shape = (None , None,Convlayersize[-3],Convlayersize[-3],Convlayersize[-3])
filter_shape = (Channel[-3] , Channel[-4], kernal[-3], kernal[-3], kernal[-3])
Gl4 = relu(batchnorm(conv(Gl3,w4,filter_shape = filter_shape, input_shape = input_shape, conv_mode = 'deconv'),g = g4, b= b4))
input_shape = (None, None, Convlayersize[-4],Convlayersize[-4],Convlayersize[-4])
filter_shape = (Channel[-4], Channel[-5], kernal[-4], kernal[-4], kernal[-4])
GlX = sigmoid(conv(Gl4,wx,filter_shape = filter_shape, input_shape = input_shape, conv_mode = 'deconv'))
return GlX
X = T.tensor5()
Z = T.matrix()
gX = gen(Z, *gen_params)
print 'COMPILING'
t = time()
# _train_g = theano.function([X, Z, Y], cost, updates=g_updates)
# _train_d = theano.function([X, Z, Y], cost, updates=d_updates)
_gen = theano.function([Z], gX)
print '%.2f seconds to compile theano functions'%(time()-t)
# trX, trY, ntrain = load_shapenet_train()
n = 10
nbatch = 10
rng = np.random.RandomState(int(time()))
# sample_ymb = floatX(np.asarray(np.eye(3)))
z_dist = scipy.io.loadmat('Z_dist_class2.mat')
z_mean = z_dist['mean']
z_mean = np.reshape(z_mean,(Nz,1))
z_std = z_dist['std']
z_std = np.reshape(z_std,(Nz,1))
def gen_z(z_dist,nbatch):
ret = np.zeros((nbatch,Nz))
for j in xrange(Nz):
z_tmp = np_rng.normal(z_mean[j],z_std[j],nbatch)
ret[:,j] = z_tmp
# print ret
return ret
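# Illustrative sketch (added for clarity; not part of the original script):
# gen_z draws each of the Nz latent dimensions independently from a normal
# distribution whose mean/std were loaded above from Z_dist_class2.mat.
# Defined only; never called at import time.
def _demo_gen_z():
    demo_batch = gen_z(z_dist, 5)
    # One row per sample: shape (5, Nz)
    return demo_batch.shape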
try:
os.mkdir('Gen_models%d'%objectNumber)
except:
pass
for j in xrange(n/nbatch):
sample_zmb = floatX(gen_z(z_dist,nbatch))
samples = np.asarray(_gen(sample_zmb))
for i in xrange(nbatch):
io.savemat('Gen_models%d/Gen_example_%d.mat'%(objectNumber,nbatch*j+i),{'instance':samples[i,:,:,:],'Z':sample_zmb[i,:]})
# niter = 1
# niter_decay = 1
| mit |
nmayorov/scikit-learn | examples/applications/plot_outlier_detection_housing.py | 243 | 5577 | """
====================================
Outlier detection on a real data set
====================================
This example illustrates the need for robust covariance estimation
on a real data set. It is useful both for outlier detection and for
a better understanding of the data structure.
We selected two sets of two variables from the Boston housing data set
as an illustration of what kind of analysis can be done with several
outlier detection tools. For the purpose of visualization, we are working
with two-dimensional examples, but one should be aware that things are
not so trivial in high-dimension, as it will be pointed out.
In both examples below, the main result is that the empirical covariance
estimate, as a non-robust one, is highly influenced by the heterogeneous
structure of the observations. Although the robust covariance estimate is
able to focus on the main mode of the data distribution, it sticks to the
assumption that the data should be Gaussian distributed, yielding some biased
estimation of the data structure, but yet accurate to some extent.
The One-Class SVM algorithm does not assume any parametric form for the data
distribution and can therefore model its complex shape much better.
First example
-------------
The first example illustrates how robust covariance estimation can help
concentrating on a relevant cluster when another one exists. Here, many
observations are confounded into one and break down the empirical covariance
estimation.
Of course, some screening tools would have pointed out the presence of two
clusters (Support Vector Machines, Gaussian Mixture Models, univariate
outlier detection, ...). But had it been a high-dimensional example, none
of these could be applied that easily.
Second example
--------------
The second example shows the ability of the Minimum Covariance Determinant
robust estimator of covariance to concentrate on the main mode of the data
distribution: the location seems to be well estimated, although the covariance
is hard to estimate due to the banana-shaped distribution. Anyway, we can
get rid of some outlying observations.
The One-Class SVM is able to capture the real data structure, but the
difficulty is to adjust its kernel bandwidth parameter so as to obtain
a good compromise between the shape of the data scatter matrix and the
risk of over-fitting the data.
"""
print(__doc__)
# Author: Virgile Fritsch <virgile.fritsch@inria.fr>
# License: BSD 3 clause
import numpy as np
from sklearn.covariance import EllipticEnvelope
from sklearn.svm import OneClassSVM
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.datasets import load_boston
# Get data
X1 = load_boston()['data'][:, [8, 10]] # two clusters
X2 = load_boston()['data'][:, [5, 12]] # "banana"-shaped
# Define "classifiers" to be used
classifiers = {
"Empirical Covariance": EllipticEnvelope(support_fraction=1.,
contamination=0.261),
"Robust Covariance (Minimum Covariance Determinant)":
EllipticEnvelope(contamination=0.261),
"OCSVM": OneClassSVM(nu=0.261, gamma=0.05)}
colors = ['m', 'g', 'b']
legend1 = {}
legend2 = {}
# Learn a frontier for outlier detection with several classifiers
xx1, yy1 = np.meshgrid(np.linspace(-8, 28, 500), np.linspace(3, 40, 500))
xx2, yy2 = np.meshgrid(np.linspace(3, 10, 500), np.linspace(-5, 45, 500))
for i, (clf_name, clf) in enumerate(classifiers.items()):
plt.figure(1)
clf.fit(X1)
Z1 = clf.decision_function(np.c_[xx1.ravel(), yy1.ravel()])
Z1 = Z1.reshape(xx1.shape)
legend1[clf_name] = plt.contour(
xx1, yy1, Z1, levels=[0], linewidths=2, colors=colors[i])
plt.figure(2)
clf.fit(X2)
Z2 = clf.decision_function(np.c_[xx2.ravel(), yy2.ravel()])
Z2 = Z2.reshape(xx2.shape)
legend2[clf_name] = plt.contour(
xx2, yy2, Z2, levels=[0], linewidths=2, colors=colors[i])
legend1_values_list = list( legend1.values() )
legend1_keys_list = list( legend1.keys() )
# Plot the results (= shape of the data points cloud)
plt.figure(1) # two clusters
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X1[:, 0], X1[:, 1], color='black')
bbox_args = dict(boxstyle="round", fc="0.8")
arrow_args = dict(arrowstyle="->")
plt.annotate("several confounded points", xy=(24, 19),
xycoords="data", textcoords="data",
xytext=(13, 10), bbox=bbox_args, arrowprops=arrow_args)
plt.xlim((xx1.min(), xx1.max()))
plt.ylim((yy1.min(), yy1.max()))
plt.legend((legend1_values_list[0].collections[0],
legend1_values_list[1].collections[0],
legend1_values_list[2].collections[0]),
(legend1_keys_list[0], legend1_keys_list[1], legend1_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("accessibility to radial highways")
plt.xlabel("pupil-teacher ratio by town")
legend2_values_list = list( legend2.values() )
legend2_keys_list = list( legend2.keys() )
plt.figure(2) # "banana" shape
plt.title("Outlier detection on a real data set (boston housing)")
plt.scatter(X2[:, 0], X2[:, 1], color='black')
plt.xlim((xx2.min(), xx2.max()))
plt.ylim((yy2.min(), yy2.max()))
plt.legend((legend2_values_list[0].collections[0],
legend2_values_list[1].collections[0],
legend2_values_list[2].collections[0]),
(legend2_keys_list[0], legend2_keys_list[1], legend2_keys_list[2]),
loc="upper center",
prop=matplotlib.font_manager.FontProperties(size=12))
plt.ylabel("% lower status of the population")
plt.xlabel("average number of rooms per dwelling")
plt.show()
| bsd-3-clause |
moutai/scikit-learn | examples/neural_networks/plot_mlp_training_curves.py | 56 | 3596 | """
========================================================
Compare Stochastic learning strategies for MLPClassifier
========================================================
This example visualizes some training loss curves for different stochastic
learning strategies, including SGD and Adam. Because of time-constraints, we
use several small datasets, for which L-BFGS might be more suitable. The
general trend shown in these examples seems to carry over to larger datasets,
however.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn import datasets
# different learning rate schedules and momentum parameters
params = [{'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': 0,
'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': 0,
'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'algorithm': 'adam'}]
labels = ["constant learning-rate", "constant with momentum",
"constant with Nesterov's momentum",
"inv-scaling learning-rate", "inv-scaling with momentum",
"inv-scaling with Nesterov's momentum", "adam"]
plot_args = [{'c': 'red', 'linestyle': '-'},
{'c': 'green', 'linestyle': '-'},
{'c': 'blue', 'linestyle': '-'},
{'c': 'red', 'linestyle': '--'},
{'c': 'green', 'linestyle': '--'},
{'c': 'blue', 'linestyle': '--'},
{'c': 'black', 'linestyle': '-'}]
def plot_on_dataset(X, y, ax, name):
# for each dataset, plot learning for each learning strategy
print("\nlearning on dataset %s" % name)
ax.set_title(name)
X = MinMaxScaler().fit_transform(X)
mlps = []
if name == "digits":
# digits is larger but converges fairly quickly
max_iter = 15
else:
max_iter = 400
for label, param in zip(labels, params):
print("training: %s" % label)
mlp = MLPClassifier(verbose=0, random_state=0,
max_iter=max_iter, **param)
mlp.fit(X, y)
mlps.append(mlp)
print("Training set score: %f" % mlp.score(X, y))
print("Training set loss: %f" % mlp.loss_)
for mlp, label, args in zip(mlps, labels, plot_args):
ax.plot(mlp.loss_curve_, label=label, **args)
fig, axes = plt.subplots(2, 2, figsize=(15, 10))
# load / generate some toy datasets
iris = datasets.load_iris()
digits = datasets.load_digits()
data_sets = [(iris.data, iris.target),
(digits.data, digits.target),
datasets.make_circles(noise=0.2, factor=0.5, random_state=1),
datasets.make_moons(noise=0.3, random_state=0)]
for ax, data, name in zip(axes.ravel(), data_sets, ['iris', 'digits',
'circles', 'moons']):
plot_on_dataset(*data, ax=ax, name=name)
fig.legend(ax.get_lines(), labels=labels, ncol=3, loc="upper center")
plt.show()
| bsd-3-clause |
garywu/pypedream | pypedream/plot/_filt.py | 1 | 2685 | import numpy
has_matplotlib = True
try:
from matplotlib import pyplot, figure
except ImportError:
has_matplotlib = False
from dagpype._core import filters
def _make_relay_call(fn, name):
def new_fn(*args, **kwargs):
@filters
def _dagpype_internal_fn_act(target):
try:
while True:
target.send((yield))
except GeneratorExit:
fn(*args, **kwargs)
target.close()
return _dagpype_internal_fn_act
new_fn.__name__ = name
new_fn.__doc__ = """
Convenience filter utility for corresponding function in pyplot.
Example:
>>> source([1, 2, 3, 4]) | plot.xlabel('x') | plot.ylabel('y') | plot.title('xy') | (plot.plot() | plot.savefig('foo.png'))
"""
return new_fn
_try_fns = [
'annotate',
'arrow',
'autogen_docstring',
'autoscale',
'autumn',
'axes',
'axhline',
'axhspan',
'axis',
'axvline',
'axvspan',
'barbs',
'bone',
'box',
'broken_barh',
'cla',
'clabel',
'clf',
'clim',
'cm',
'cohere',
'colorbar',
'colormaps',
'colors',
'connect',
'cool',
'copper',
'csd',
'dedent',
'delaxes',
'docstring',
'draw',
'figaspect',
'figimage',
'figlegend',
'figtext',
'figure',
'fill',
'fill_between',
'fill_betweenx',
'flag',
'gca',
'gcf',
'gci',
'get',
'gray',
'grid',
'hold',
'hot',
'hsv',
'jet',
'locator_params',
'margins',
'minorticks_off',
'minorticks_on',
'normalize',
'over',
'pcolor',
'pcolormesh',
'pink',
'plotfile',
'plotting',
'polar',
'prism',
'psd',
'quiver',
'quiverkey',
'rc',
'register_cmap',
'rgrids',
'sca',
'sci',
'set_cmap',
'setp',
'silent_list',
'specgram',
'spectral',
'spring',
'spy',
'stem',
'step',
'subplot',
'subplot2grid',
'subplot_tool',
'subplots',
'subplots_adjust',
'summer',
'suptitle',
'table',
'text',
'thetagrids',
'tick_params',
'ticklabel_format',
'tight_layout',
'title',
'tricontour',
'tricontourf',
'tripcolor',
'triplot',
'twinx',
'twiny',
'winter',
'xlabel',
'xlim',
'xscale',
'xticks',
'ylabel',
'ylim',
'yscale',
'yticks']
_fns = []
if has_matplotlib:
for fn in _try_fns:
try:
exec('%s = _make_relay_call(pyplot.%s, "%s")' % (fn, fn, fn))
_fns.append(fn)
except AttributeError:
pass
| bsd-3-clause |
nliolios24/textrank | share/doc/networkx-1.9.1/examples/graph/unix_email.py | 62 | 2683 | #!/usr/bin/env python
"""
Create a directed graph, allowing multiple edges and self loops, from
a unix mailbox. The nodes are email addresses with links
that point from the sender to the receivers. The edge data
is a Python email.Message object which contains all of
the email message data.
This example shows the power of a MultiDiGraph to hold edge data
of arbitrary Python objects (in this case email Message objects).
By default, load the sample unix email mailbox called "unix_email.mbox".
You can load your own mailbox by naming it on the command line, eg
python unixemail.py /var/spool/mail/username
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2005 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import email
from email.utils import getaddresses,parseaddr
import mailbox
import sys
# unix mailbox recipe
# see http://www.python.org/doc/current/lib/module-mailbox.html
def msgfactory(fp):
try:
return email.message_from_file(fp)
except email.Errors.MessageParseError:
# Don't return None since that will stop the mailbox iterator
return ''
if __name__ == '__main__':
import networkx as nx
try:
import matplotlib.pyplot as plt
except:
pass
if len(sys.argv)==1:
filePath = "unix_email.mbox"
else:
filePath = sys.argv[1]
mbox = mailbox.mbox(filePath, msgfactory) # parse unix mailbox
G=nx.MultiDiGraph() # create empty graph
# parse each messages and build graph
for msg in mbox: # msg is python email.Message.Message object
(source_name,source_addr) = parseaddr(msg['From']) # sender
# get all recipients
# see http://www.python.org/doc/current/lib/module-email.Utils.html
tos = msg.get_all('to', [])
ccs = msg.get_all('cc', [])
resent_tos = msg.get_all('resent-to', [])
resent_ccs = msg.get_all('resent-cc', [])
all_recipients = getaddresses(tos + ccs + resent_tos + resent_ccs)
# now add the edges for this mail message
for (target_name,target_addr) in all_recipients:
G.add_edge(source_addr,target_addr,message=msg)
# print edges with message subject
for (u,v,d) in G.edges_iter(data=True):
print("From: %s To: %s Subject: %s"%(u,v,d['message']["Subject"]))
try: # draw
pos=nx.spring_layout(G,iterations=10)
nx.draw(G,pos,node_size=0,alpha=0.4,edge_color='r',font_size=16)
plt.savefig("unix_email.png")
plt.show()
except: # matplotlib not available
pass
| mit |
prabhjyotsingh/incubator-zeppelin | flink/interpreter/src/main/resources/python/zeppelin_pyflink.py | 10 | 2806 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyflink.common import *
from pyflink.dataset import *
from pyflink.datastream import *
from pyflink.table import *
from pyflink.table.catalog import *
from pyflink.table.descriptors import *
from pyflink.table.window import *
from pyflink.table.udf import *
import pyflink
from py4j.java_gateway import java_import
intp = gateway.entry_point
pyflink.java_gateway._gateway = gateway
pyflink.java_gateway.import_flink_view(gateway)
pyflink.java_gateway.install_exception_handler()
b_env = pyflink.dataset.ExecutionEnvironment(intp.getJavaExecutionEnvironment())
s_env = StreamExecutionEnvironment(intp.getJavaStreamExecutionEnvironment())
if intp.isFlink110():
bt_env = BatchTableEnvironment(intp.getJavaBatchTableEnvironment("blink"), True)
bt_env_2 = BatchTableEnvironment(intp.getJavaBatchTableEnvironment("flink"), False)
st_env = StreamTableEnvironment(intp.getJavaStreamTableEnvironment("blink"), True)
st_env_2 = StreamTableEnvironment(intp.getJavaStreamTableEnvironment("flink"), False)
else:
bt_env = BatchTableEnvironment(intp.getJavaBatchTableEnvironment("blink"))
bt_env_2 = BatchTableEnvironment(intp.getJavaBatchTableEnvironment("flink"))
st_env = StreamTableEnvironment(intp.getJavaStreamTableEnvironment("blink"))
st_env_2 = StreamTableEnvironment(intp.getJavaStreamTableEnvironment("flink"))
from zeppelin_context import PyZeppelinContext
#TODO(zjffdu) merge it with IPyFlinkZeppelinContext
class PyFlinkZeppelinContext(PyZeppelinContext):
def __init__(self, z, gateway):
super(PyFlinkZeppelinContext, self).__init__(z, gateway)
def show(self, obj, **kwargs):
from pyflink.table import Table
if isinstance(obj, Table):
if 'stream_type' in kwargs:
self.z.show(obj._j_table, kwargs['stream_type'], kwargs)
else:
print(self.z.showData(obj._j_table))
else:
super(PyFlinkZeppelinContext, self).show(obj, **kwargs)
z = __zeppelin__ = PyFlinkZeppelinContext(intp.getZeppelinContext(), gateway)
__zeppelin__._setup_matplotlib()
| apache-2.0 |
prasunroypr/digit-recognizer | source/defs.py | 1 | 6607 | ################################################################################
"""
Functions for Digit Recognition
Created on Wed Jun 01 00:00:00 2016
@author: Prasun Roy
@e-mail: prasunroy.pr@gmail.com
"""
################################################################################
# import modules
import matplotlib.pyplot as pplt
import numpy as np
import os
import pandas as pd
import skimage.feature as skim
import sklearn.preprocessing as pp
import time
from conf import _config
from conf import _configinfo
################################################################################
def _fscale(data, split=False, load=False, verbose=False):
# initialize scaler
scaler = pp.MinMaxScaler()
# initialize variables
config = _configinfo()
sdpath = config['root_data_path'] + 'scaled.npy'
# scale data
if verbose: print('scaling features............... ', end = '')
data = np.array(data, dtype='float64')
if load and os.path.isfile(sdpath):
m = np.load(sdpath)[0]
r = np.load(sdpath)[1]
r[r==0] = 1
data = (data - m) / r
elif split:
train = data[:config['train_d']]
valid = data[config['train_d']:]
scaler.fit(train)
m = scaler.data_min_
r = scaler.data_range_
train = scaler.transform(train)
valid = scaler.transform(valid)
data = np.vstack((train, valid))
else:
data = scaler.fit_transform(data)
m = scaler.data_min_
r = scaler.data_range_
if verbose: print('done')
# save scaled config
if not load: np.save(sdpath, np.vstack((m, r)))
# return scaled data
return data
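# Illustrative sketch (added for clarity; not part of the original module):
# _fscale min-max scales each feature column to [0, 1]. Calling it also
# requires the project config (root_data_path) to exist, because the scaling
# parameters are written to disk; the helper below is only defined here,
# never called.
def _demo_fscale():
    demo = np.array([[0, 10], [5, 20], [10, 30]])
    # Expected result: [[0.0, 0.0], [0.5, 0.5], [1.0, 1.0]]
    return _fscale(demo)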
################################################################################
def _haar(data, load=True, save=False, verbose=False):
return data
################################################################################
def _hogs(data, load=True, save=False, verbose=False):
# initialize config
config = _config()
# initialize variables
datapath = config['hogs_data_path']
data_hog = []
# load hog data if exists
if load and os.path.isfile(datapath):
if verbose: print('loading descriptors............ ', end = '')
data_hog = np.load(datapath)
if verbose: print('done')
# calculate hog data otherwise
else:
# initialize variables
ix = config['shape_x']
iy = config['shape_y']
bn = config['bins_n']
cx = config['cell_x']
cy = config['cell_y']
bw = config['blok_w']
bh = config['blok_h']
# perform hog
t_beg = time.time()
size = data.shape[0]
loop = 0
for image in data:
if verbose: print('\rextracting descriptors......... %d%%'
%(loop*100//size), end = '')
desc = skim.hog(image.reshape(ix, iy), orientations=bn,
pixels_per_cell=(cx, cy), cells_per_block=(bw, bh))
data_hog.append(desc)
loop = loop + 1
data_hog = np.array(data_hog, dtype='float64')
t_end = time.time()
if verbose: print('\rextracting descriptors......... done @ %8.2f sec'
%(t_end - t_beg))
# save data
if save:
if verbose: print('saving descriptors............. ', end = '')
np.save(datapath, data_hog)
if verbose: print('done')
# return hog
return data_hog
################################################################################
def _sift(data, load=True, save=False, verbose=False):
return data
################################################################################
def _surf(data, load=True, save=False, verbose=False):
return data
################################################################################
def _plot(classifier, train, valid, step=None, save=False, verbose=False):
# initialize config
config = _config()
# initialize variables
if step is None: step = config['steps_d']
plot_figs_head = config['classifier'] + '-' + config['preprocess']
plot_data_path = config['plot_data_path']
plot_figs_path = config['plot_figs_path']
m_train = train.shape[0]
m_valid = valid.shape[0]
X_valid = valid[:, 1:]
y_valid = valid[:, 0]
error_train = []
error_valid = []
sizes_train = []
# calculate data for plot
for i in range(0, m_train, step):
if verbose: print('\rgenerating plot................ %d%%'
%(i*100//m_train), end = '')
# randomly shuffle training data
np.random.shuffle(train)
# select subset of randomized training data
X_train = train[:i+step, 1:]
y_train = train[:i+step, 0]
# train classifier with selected data
classifier.fit(X_train, y_train)
# cross-validate classifier
p_train = classifier.predict(X_train)
p_valid = classifier.predict(X_valid)
# estimate errors
error_train.append(sum(y_train != p_train) / len(y_train))
error_valid.append(sum(y_valid != p_valid) / m_valid)
sizes_train.append(i+step)
error_train = np.array(error_train, dtype='float64')
error_valid = np.array(error_valid, dtype='float64')
sizes_train = np.array(sizes_train, dtype='uint32')
if verbose: print('\rgenerating plot................ done')
# plot data
pplt.plot(sizes_train, error_train, 'rs-', label='training error')
pplt.plot(sizes_train, error_valid, 'gs-', label='cross-validation error')
pplt.title(plot_figs_head.upper()+' Learning Curve')
pplt.xlabel('number of training instances')
pplt.ylabel('classification error')
pplt.legend()
xmin,xmax = pplt.xlim()
ymin,ymax = pplt.ylim()
pplt.axis([xmin, xmax+step, ymin, ymax+0.01])
pplt.grid(True)
# save data
if save:
if verbose: print('saving plot.................... ', end = '')
data = pd.DataFrame({'x1_TrainSizes':sizes_train,
'y1_TrainError':error_train,
'y2_ValidError':error_valid})
data.to_csv(plot_data_path, index=False)
pplt.savefig(plot_figs_path)
if verbose: print('done')
# display plot
pplt.show()
################################################################################
| gpl-3.0 |
endolith/scikit-image | skimage/feature/tests/test_util.py | 35 | 2818 | import numpy as np
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
from numpy.testing import assert_equal, assert_raises
from skimage.feature.util import (FeatureDetector, DescriptorExtractor,
_prepare_grayscale_input_2D,
_mask_border_keypoints, plot_matches)
def test_feature_detector():
assert_raises(NotImplementedError, FeatureDetector().detect, None)
def test_descriptor_extractor():
assert_raises(NotImplementedError, DescriptorExtractor().extract,
None, None)
def test_prepare_grayscale_input_2D():
assert_raises(ValueError, _prepare_grayscale_input_2D, np.zeros((3, 3, 3)))
assert_raises(ValueError, _prepare_grayscale_input_2D, np.zeros((3, 1)))
assert_raises(ValueError, _prepare_grayscale_input_2D, np.zeros((3, 1, 1)))
img = _prepare_grayscale_input_2D(np.zeros((3, 3)))
img = _prepare_grayscale_input_2D(np.zeros((3, 3, 1)))
img = _prepare_grayscale_input_2D(np.zeros((1, 3, 3)))
def test_mask_border_keypoints():
keypoints = np.array([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4]])
assert_equal(_mask_border_keypoints((10, 10), keypoints, 0),
[1, 1, 1, 1, 1])
assert_equal(_mask_border_keypoints((10, 10), keypoints, 2),
[0, 0, 1, 1, 1])
assert_equal(_mask_border_keypoints((4, 4), keypoints, 2),
[0, 0, 1, 0, 0])
assert_equal(_mask_border_keypoints((10, 10), keypoints, 5),
[0, 0, 0, 0, 0])
assert_equal(_mask_border_keypoints((10, 10), keypoints, 4),
[0, 0, 0, 0, 1])
@np.testing.decorators.skipif(plt is None)
def test_plot_matches():
fig, ax = plt.subplots(nrows=1, ncols=1)
shapes = (((10, 10), (10, 10)),
((10, 10), (12, 10)),
((10, 10), (10, 12)),
((10, 10), (12, 12)),
((12, 10), (10, 10)),
((10, 12), (10, 10)),
((12, 12), (10, 10)))
keypoints1 = 10 * np.random.rand(10, 2)
keypoints2 = 10 * np.random.rand(10, 2)
idxs1 = np.random.randint(10, size=10)
idxs2 = np.random.randint(10, size=10)
matches = np.column_stack((idxs1, idxs2))
for shape1, shape2 in shapes:
img1 = np.zeros(shape1)
img2 = np.zeros(shape2)
plot_matches(ax, img1, img2, keypoints1, keypoints2, matches)
plot_matches(ax, img1, img2, keypoints1, keypoints2, matches,
only_matches=True)
plot_matches(ax, img1, img2, keypoints1, keypoints2, matches,
keypoints_color='r')
plot_matches(ax, img1, img2, keypoints1, keypoints2, matches,
matches_color='r')
if __name__ == '__main__':
from numpy import testing
testing.run_module_suite()
| bsd-3-clause |
alexsavio/scikit-learn | examples/model_selection/plot_roc_crossval.py | 21 | 3477 | """
=============================================================
Receiver Operating Characteristic (ROC) with cross validation
=============================================================
Example of Receiver Operating Characteristic (ROC) metric to evaluate
classifier output quality using cross-validation.
ROC curves typically feature true positive rate on the Y axis, and false
positive rate on the X axis. This means that the top left corner of the plot is
the "ideal" point - a false positive rate of zero, and a true positive rate of
one. This is not very realistic, but it does mean that a larger area under the
curve (AUC) is usually better.
The "steepness" of ROC curves is also important, since it is ideal to maximize
the true positive rate while minimizing the false positive rate.
This example shows the ROC response of different datasets, created from K-fold
cross-validation. Taking all of these curves, it is possible to calculate the
mean area under curve, and see the variance of the curve when the
training set is split into different subsets. This roughly shows how the
classifier output is affected by changes in the training data, and how
different the splits generated by K-fold cross-validation are from one another.
.. note::
See also :func:`sklearn.metrics.auc_score`,
:func:`sklearn.model_selection.cross_val_score`,
:ref:`sphx_glr_auto_examples_model_selection_plot_roc.py`,
"""
print(__doc__)
import numpy as np
from scipy import interp
import matplotlib.pyplot as plt
from itertools import cycle
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import StratifiedKFold
###############################################################################
# Data IO and generation
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
X, y = X[y != 2], y[y != 2]
n_samples, n_features = X.shape
# Add noisy features
random_state = np.random.RandomState(0)
X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
###############################################################################
# Classification and ROC analysis
# Run classifier with cross-validation and plot ROC curves
cv = StratifiedKFold(n_splits=6)
classifier = svm.SVC(kernel='linear', probability=True,
random_state=random_state)
mean_tpr = 0.0
mean_fpr = np.linspace(0, 1, 100)
colors = cycle(['cyan', 'indigo', 'seagreen', 'yellow', 'blue', 'darkorange'])
lw = 2
i = 0
for (train, test), color in zip(cv.split(X, y), colors):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
mean_tpr += interp(mean_fpr, fpr, tpr)
mean_tpr[0] = 0.0
roc_auc = auc(fpr, tpr)
plt.plot(fpr, tpr, lw=lw, color=color,
label='ROC fold %d (area = %0.2f)' % (i, roc_auc))
i += 1
plt.plot([0, 1], [0, 1], linestyle='--', lw=lw, color='k',
label='Luck')
mean_tpr /= cv.get_n_splits(X, y)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
plt.plot(mean_fpr, mean_tpr, color='g', linestyle='--',
label='Mean ROC (area = %0.2f)' % mean_auc, lw=lw)
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
| bsd-3-clause |
chaowu2009/stereo-vo | tools/capture_TwoCameras_saveImagesOnly.py | 1 | 2289 | import numpy as np
import cv2
import time
import matplotlib.pylab as plt
"""
Make sure that you hold the checkerboard horizontally (more checkers horizontally than vertically).
In order to get a good calibration you will need to move the checkerboard around in the camera frame such that:
the checkerboard is detected at the left and right edges of the field of view (X calibration)
the checkerboard is detected at the top and bottom edges of the field of view (Y calibration)
the checkerboard is detected at various angles to the camera ("Skew")
the checkerboard fills the entire field of view (Size calibration)
checkerboard tilted to the left, right, top and bottom (X,Y, and Size calibration)
"""
left = 1
right = 2
time_in_ms= 1000/100
#folder = "/home/cwu/Downloads/";
folder = "/home/hillcrest/project/stereo-calibration/calib_imgs/ARC/"
folder = "/home/hillcrest/project/stereo-calibration/calib_imgs/ARC/"
#folder = "D:/vision/stereo-calibration/calib_imgs/ARC/"
fp = open(folder + "timeStamp.txt","w")
WIDTH = 1280
HEIGHT = 720
WIDTH = 640
HEIGHT = 480
for counter in range(1,31):
millis = int(round(time.time() * 1000))
cap1 = cv2.VideoCapture(left)
cap1.set(cv2.CAP_PROP_FRAME_WIDTH,WIDTH)
cap1.set(cv2.CAP_PROP_FRAME_HEIGHT,HEIGHT)
cv2.waitKey(100)
ret, frame1 = cap1.read()
cap1.release()
cap2 = cv2.VideoCapture(right)
cap2.set(cv2.CAP_PROP_FRAME_WIDTH,WIDTH)
cap2.set(cv2.CAP_PROP_FRAME_HEIGHT,HEIGHT)
cv2.waitKey(100)
ret, frame2 = cap2.read()
cap2.release()
#frame1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
#frame2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
# Display the resulting frame
plt.subplot(121)
plt.imshow(frame1)
plt.title('left')
plt.subplot(122)
plt.imshow(frame2)
plt.title('right')
plt.show()
print('another capture', counter)
cv2.waitKey(100)
cv2.imwrite(folder + "img_left/left_" + str(counter) + ".jpg", frame1)
cv2.waitKey(time_in_ms)
cv2.imwrite(folder + "img_right/right_" + str(counter) + ".jpg", frame2)
fp.write(str(counter)+ ","+ str(millis) + "\n")
print("the ", counter, " pairs")
cv2.destroyAllWindows()
fp.close()
print('All Done \n')
| mit |
MartialD/hyperspy | hyperspy/drawing/tiles.py | 4 | 2899 | # -*- coding: utf-8 -*-
# Copyright 2007-2011 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from hyperspy.drawing.figure import BlittedFigure
from hyperspy.drawing import utils
class HistogramTilePlot(BlittedFigure):
def __init__(self):
self.figure = None
self.title = ''
self.ax = None
def create_axis(self, ncols=1, nrows=1, number=1, title=''):
ax = self.figure.add_subplot(ncols, nrows, number)
ax.set_title(title)
ax.hspy_fig = self
return ax
def plot(self, db, **kwargs):
if self.figure is None:
self.create_figure()
ncomps = len(db)
if not ncomps:
return
else:
self.update(db, **kwargs)
def update(self, db, **kwargs):
ncomps = len(db)
# get / set axes
i = -1
for c_n, v in db.items():
i += 1
ncols = len(v)
istart = ncols * i
j = 0
for p_n, (hist, bin_edges) in v.items():
j += 1
mask = hist > 0
if np.any(mask):
title = c_n + ' ' + p_n
ax = self.create_axis(ncomps, ncols, istart + j, title)
self.ax = ax
# remove previous
while ax.patches:
ax.patches[0].remove()
# set new; only draw non-zero height bars
ax.bar(
bin_edges[
:-1][mask],
hist[mask],
np.diff(bin_edges)[mask],
# animated=True,
**kwargs)
width = bin_edges[-1] - bin_edges[0]
ax.set_xlim(
bin_edges[0] - width * 0.1, bin_edges[-1] + width * 0.1)
ax.set_ylim(0, np.max(hist) * 1.1)
# ax.set_title(c_n + ' ' + p_n)
self.figure.canvas.draw_idle()
def close(self):
try:
plt.close(self.figure)
except BaseException:
pass
self.figure = None
| gpl-3.0 |
huytd/dejavu | dejavu/fingerprint.py | 1 | 6020 | import numpy as np
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import (generate_binary_structure,
iterate_structure, binary_erosion)
import hashlib
from operator import itemgetter
IDX_FREQ_I = 0
IDX_TIME_J = 1
######################################################################
# Sampling rate, related to the Nyquist conditions, which affects
# the range frequencies we can detect.
DEFAULT_FS = 44100
######################################################################
# Size of the FFT window, affects frequency granularity
DEFAULT_WINDOW_SIZE = 4096
######################################################################
# Ratio by which each sequential window overlaps the last and the
# next window. Higher overlap will allow a higher granularity of offset
# matching, but potentially more fingerprints.
DEFAULT_OVERLAP_RATIO = 0.5
######################################################################
# Degree to which a fingerprint can be paired with its neighbors --
# higher will cause more fingerprints, but potentially better accuracy.
DEFAULT_FAN_VALUE = 15
######################################################################
# Minimum amplitude in spectrogram in order to be considered a peak.
# This can be raised to reduce number of fingerprints, but can negatively
# affect accuracy.
DEFAULT_AMP_MIN = 10
######################################################################
# Number of cells around an amplitude peak in the spectrogram in order
# for Dejavu to consider it a spectral peak. Higher values mean less
# fingerprints and faster matching, but can potentially affect accuracy.
PEAK_NEIGHBORHOOD_SIZE = 20
######################################################################
# Thresholds on how close or far fingerprints can be in time in order
# to be paired as a fingerprint. If your max is too low, higher values of
# DEFAULT_FAN_VALUE may not perform as expected.
MIN_HASH_TIME_DELTA = 0
MAX_HASH_TIME_DELTA = 200
######################################################################
# If True, will sort peaks temporally for fingerprinting;
# not sorting will cut down number of fingerprints, but potentially
# affect performance.
PEAK_SORT = True
######################################################################
# Number of bits to throw away from the front of the SHA1 hash in the
# fingerprint calculation. The more you throw away, the less storage, but
# potentially higher collisions and misclassifications when identifying songs.
FINGERPRINT_REDUCTION = 20
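######################################################################
# Worked example (orientation only, derived from the defaults above):
#   frequency resolution per FFT bin = DEFAULT_FS / DEFAULT_WINDOW_SIZE
#                                    = 44100 / 4096 ~= 10.8 Hz
#   hop between spectrogram columns  = DEFAULT_WINDOW_SIZE * (1 - DEFAULT_OVERLAP_RATIO)
#                                    = 2048 samples ~= 46 ms
# so MAX_HASH_TIME_DELTA = 200 columns spans roughly 9 seconds of audio.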
def fingerprint(channel_samples, Fs=DEFAULT_FS,
wsize=DEFAULT_WINDOW_SIZE,
wratio=DEFAULT_OVERLAP_RATIO,
fan_value=DEFAULT_FAN_VALUE,
amp_min=DEFAULT_AMP_MIN):
"""
FFT the channel, log transform output, find local maxima, then return
locally sensitive hashes.
"""
# FFT the signal and extract frequency components
arr2D = mlab.specgram(
channel_samples,
NFFT=wsize,
Fs=Fs,
window=mlab.window_hanning,
noverlap=int(wsize * wratio))[0]
# apply log transform since specgram() returns linear array
arr2D = 10 * np.log10(arr2D)
arr2D[arr2D == -np.inf] = 0 # replace infs with zeros
# find local maxima
local_maxima = get_2D_peaks(arr2D, plot=False, amp_min=amp_min)
# return hashes
return generate_hashes(local_maxima, fan_value=fan_value)
def get_2D_peaks(arr2D, plot=False, amp_min=DEFAULT_AMP_MIN):
# http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.iterate_structure.html#scipy.ndimage.morphology.iterate_structure
struct = generate_binary_structure(2, 1)
neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)
    # find local maxima using our filter shape
local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
background = (arr2D == 0)
eroded_background = binary_erosion(background, structure=neighborhood,
border_value=1)
# Boolean mask of arr2D with True at peaks
    detected_peaks = local_max & ~eroded_background
# extract peaks
amps = arr2D[detected_peaks]
j, i = np.where(detected_peaks)
# filter peaks
amps = amps.flatten()
peaks = zip(i, j, amps)
peaks_filtered = [x for x in peaks if x[2] > amp_min] # freq, time, amp
# get indices for frequency and time
frequency_idx = [x[1] for x in peaks_filtered]
time_idx = [x[0] for x in peaks_filtered]
if plot:
# scatter of the peaks
fig, ax = plt.subplots()
ax.imshow(arr2D)
ax.scatter(time_idx, frequency_idx)
ax.set_xlabel('Time')
ax.set_ylabel('Frequency')
ax.set_title("Spectrogram")
plt.gca().invert_yaxis()
plt.show()
return zip(frequency_idx, time_idx)
def generate_hashes(peaks, fan_value=DEFAULT_FAN_VALUE):
"""
Hash list structure:
sha1_hash[0:20] time_offset
[(e05b341a9b77a51fd26, 32), ... ]
"""
fingerprinted = set() # to avoid rehashing same pairs
if PEAK_SORT:
peaks.sort(key=itemgetter(1))
for i in range(len(peaks)):
for j in range(1, fan_value):
if (i + j) < len(peaks) and not (i, i + j) in fingerprinted:
freq1 = peaks[i][IDX_FREQ_I]
freq2 = peaks[i + j][IDX_FREQ_I]
t1 = peaks[i][IDX_TIME_J]
t2 = peaks[i + j][IDX_TIME_J]
t_delta = t2 - t1
if t_delta >= MIN_HASH_TIME_DELTA and t_delta <= MAX_HASH_TIME_DELTA:
h = hashlib.sha1(
"%s|%s|%s" % (str(freq1), str(freq2), str(t_delta)))
yield (h.hexdigest()[0:FINGERPRINT_REDUCTION], t1)
# ensure we don't repeat hashing
fingerprinted.add((i, i + j))
| mit |
pypot/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 204 | 4452 | """
============================
Faces dataset decompositions
============================
This example applies to :ref:`olivetti_faces` different unsupervised
matrix decomposition (dimension reduction) methods from the module
:py:mod:`sklearn.decomposition` (see the documentation chapter
:ref:`decompositions`) .
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - RandomizedPCA',
decomposition.RandomizedPCA(n_components=n_components, whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', beta=5.0,
tol=5e-3, sparseness='components'),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
if hasattr(estimator, 'noise_variance_'):
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
| bsd-3-clause |
hahnicity/ace | chapter1/problem3.py | 1 | 1222 | """
Problem 3.
calculate the time series
yt = 5 + .05 * t + Et (Where E is epsilon)
for years 1960, 1961, ..., 2001 assuming Et independently and
identically distributed with mean 0 and sigma 0.2.
"""
from random import uniform
from matplotlib.pyplot import plot, show
from numpy import array, polyfit, poly1d
def create_distribution(size):
"""
Create a distribution, identically distributed, with mean 0 and
sigma 0.2
"""
# Shit it's way easier to just do some uniform distribution
# This is a bit over my head, and not possible for me without
# pen and paper
return array([uniform(-0.2, .2) for _ in xrange(size)])
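
def create_normal_distribution(size, sigma=0.2):
    """
    Hedged alternative (not used above): the module docstring asks for i.i.d.
    noise with mean 0 and sigma 0.2. A Gaussian draw gives that directly,
    whereas uniform(-0.2, 0.2) has a standard deviation of only
    0.4 / sqrt(12) ~= 0.115.
    """
    from random import gauss
    return array([gauss(0, sigma) for _ in xrange(size)])
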
def create_time_series(start_year, end_year):
"""
Create the time series, yt, then perform a regress on yt, plot yt and the
its trendline
"""
t_array = array(range(start_year, end_year + 1))
epsilon_t = create_distribution(len(t_array))
yt = array([5 + .05 * t_i + epsilon_t[i] for i, t_i in enumerate(t_array)])
fit = polyfit(t_array, yt, 1)
fit_func = poly1d(fit)
plot(t_array, yt, "yo", t_array, fit_func(t_array), "--k")
show()
def main():
create_time_series(1960, 2001)
if __name__ == "__main__":
main()
| unlicense |
gnu-sandhi/sandhi | modules/gr36/gnuradio-core/src/examples/pfb/interpolate.py | 17 | 8253 | #!/usr/bin/env python
#
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blks2
import sys, time
try:
import scipy
from scipy import fftpack
except ImportError:
print "Error: Program requires scipy (see: www.scipy.org)."
sys.exit(1)
try:
import pylab
from pylab import mlab
except ImportError:
print "Error: Program requires matplotlib (see: matplotlib.sourceforge.net)."
sys.exit(1)
class pfb_top_block(gr.top_block):
def __init__(self):
gr.top_block.__init__(self)
self._N = 100000 # number of samples to use
self._fs = 2000 # initial sampling rate
self._interp = 5 # Interpolation rate for PFB interpolator
self._ainterp = 5.5 # Resampling rate for the PFB arbitrary resampler
# Frequencies of the signals we construct
freq1 = 100
freq2 = 200
# Create a set of taps for the PFB interpolator
# This is based on the post-interpolation sample rate
self._taps = gr.firdes.low_pass_2(self._interp, self._interp*self._fs, freq2+50, 50,
attenuation_dB=120, window=gr.firdes.WIN_BLACKMAN_hARRIS)
# Create a set of taps for the PFB arbitrary resampler
# The filter size is the number of filters in the filterbank; 32 will give very low side-lobes,
# and larger numbers will reduce these even farther
# The taps in this filter are based on a sampling rate of the filter size since it acts
# internally as an interpolator.
flt_size = 32
self._taps2 = gr.firdes.low_pass_2(flt_size, flt_size*self._fs, freq2+50, 150,
attenuation_dB=120, window=gr.firdes.WIN_BLACKMAN_hARRIS)
# Calculate the number of taps per channel for our own information
tpc = scipy.ceil(float(len(self._taps)) / float(self._interp))
print "Number of taps: ", len(self._taps)
print "Number of filters: ", self._interp
print "Taps per channel: ", tpc
# Create a couple of signals at different frequencies
self.signal1 = gr.sig_source_c(self._fs, gr.GR_SIN_WAVE, freq1, 0.5)
self.signal2 = gr.sig_source_c(self._fs, gr.GR_SIN_WAVE, freq2, 0.5)
self.signal = gr.add_cc()
self.head = gr.head(gr.sizeof_gr_complex, self._N)
# Construct the PFB interpolator filter
self.pfb = blks2.pfb_interpolator_ccf(self._interp, self._taps)
# Construct the PFB arbitrary resampler filter
self.pfb_ar = blks2.pfb_arb_resampler_ccf(self._ainterp, self._taps2, flt_size)
self.snk_i = gr.vector_sink_c()
#self.pfb_ar.pfb.print_taps()
#self.pfb.pfb.print_taps()
# Connect the blocks
self.connect(self.signal1, self.head, (self.signal,0))
self.connect(self.signal2, (self.signal,1))
self.connect(self.signal, self.pfb)
self.connect(self.signal, self.pfb_ar)
self.connect(self.signal, self.snk_i)
# Create the sink for the interpolated signals
self.snk1 = gr.vector_sink_c()
self.snk2 = gr.vector_sink_c()
self.connect(self.pfb, self.snk1)
self.connect(self.pfb_ar, self.snk2)
def main():
tb = pfb_top_block()
tstart = time.time()
tb.run()
tend = time.time()
print "Run time: %f" % (tend - tstart)
if 1:
fig1 = pylab.figure(1, figsize=(12,10), facecolor="w")
fig2 = pylab.figure(2, figsize=(12,10), facecolor="w")
fig3 = pylab.figure(3, figsize=(12,10), facecolor="w")
Ns = 10000
Ne = 10000
fftlen = 8192
winfunc = scipy.blackman
# Plot input signal
fs = tb._fs
d = tb.snk_i.data()[Ns:Ns+Ne]
sp1_f = fig1.add_subplot(2, 1, 1)
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_in = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_in = scipy.arange(-fs/2.0, fs/2.0, fs/float(X_in.size))
p1_f = sp1_f.plot(f_in, X_in, "b")
sp1_f.set_xlim([min(f_in), max(f_in)+1])
sp1_f.set_ylim([-200.0, 50.0])
sp1_f.set_title("Input Signal", weight="bold")
sp1_f.set_xlabel("Frequency (Hz)")
sp1_f.set_ylabel("Power (dBW)")
Ts = 1.0/fs
Tmax = len(d)*Ts
t_in = scipy.arange(0, Tmax, Ts)
x_in = scipy.array(d)
sp1_t = fig1.add_subplot(2, 1, 2)
p1_t = sp1_t.plot(t_in, x_in.real, "b-o")
#p1_t = sp1_t.plot(t_in, x_in.imag, "r-o")
sp1_t.set_ylim([-2.5, 2.5])
sp1_t.set_title("Input Signal", weight="bold")
sp1_t.set_xlabel("Time (s)")
sp1_t.set_ylabel("Amplitude")
# Plot output of PFB interpolator
fs_int = tb._fs*tb._interp
sp2_f = fig2.add_subplot(2, 1, 1)
d = tb.snk1.data()[Ns:Ns+(tb._interp*Ne)]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_int/2.0, fs_int/2.0, fs_int/float(X_o.size))
p2_f = sp2_f.plot(f_o, X_o, "b")
sp2_f.set_xlim([min(f_o), max(f_o)+1])
sp2_f.set_ylim([-200.0, 50.0])
sp2_f.set_title("Output Signal from PFB Interpolator", weight="bold")
sp2_f.set_xlabel("Frequency (Hz)")
sp2_f.set_ylabel("Power (dBW)")
Ts_int = 1.0/fs_int
Tmax = len(d)*Ts_int
t_o = scipy.arange(0, Tmax, Ts_int)
x_o1 = scipy.array(d)
sp2_t = fig2.add_subplot(2, 1, 2)
p2_t = sp2_t.plot(t_o, x_o1.real, "b-o")
#p2_t = sp2_t.plot(t_o, x_o.imag, "r-o")
sp2_t.set_ylim([-2.5, 2.5])
sp2_t.set_title("Output Signal from PFB Interpolator", weight="bold")
sp2_t.set_xlabel("Time (s)")
sp2_t.set_ylabel("Amplitude")
# Plot output of PFB arbitrary resampler
fs_aint = tb._fs * tb._ainterp
sp3_f = fig3.add_subplot(2, 1, 1)
d = tb.snk2.data()[Ns:Ns+(tb._interp*Ne)]
X,freq = mlab.psd(d, NFFT=fftlen, noverlap=fftlen/4, Fs=fs,
window = lambda d: d*winfunc(fftlen),
scale_by_freq=True)
X_o = 10.0*scipy.log10(abs(fftpack.fftshift(X)))
f_o = scipy.arange(-fs_aint/2.0, fs_aint/2.0, fs_aint/float(X_o.size))
p3_f = sp3_f.plot(f_o, X_o, "b")
sp3_f.set_xlim([min(f_o), max(f_o)+1])
sp3_f.set_ylim([-200.0, 50.0])
sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
sp3_f.set_xlabel("Frequency (Hz)")
sp3_f.set_ylabel("Power (dBW)")
Ts_aint = 1.0/fs_aint
Tmax = len(d)*Ts_aint
t_o = scipy.arange(0, Tmax, Ts_aint)
x_o2 = scipy.array(d)
sp3_f = fig3.add_subplot(2, 1, 2)
p3_f = sp3_f.plot(t_o, x_o2.real, "b-o")
p3_f = sp3_f.plot(t_o, x_o1.real, "m-o")
#p3_f = sp3_f.plot(t_o, x_o2.imag, "r-o")
sp3_f.set_ylim([-2.5, 2.5])
sp3_f.set_title("Output Signal from PFB Arbitrary Resampler", weight="bold")
sp3_f.set_xlabel("Time (s)")
sp3_f.set_ylabel("Amplitude")
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
tbtraltaa/medianshape | medianshape/simplicial/surfgen.py | 1 | 10038 | # encoding: utf-8
'''
2D surface embedded in 3D
-------------------------
'''
from __future__ import absolute_import
import importlib
import os
import numpy as np
from medianshape.simplicial import pointgen3d, mesh, utils
from medianshape.simplicial.meshgen import meshgen2d
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from medianshape.viz import plot2d, plot3d
from distmesh.plotting import axes_simpplot3d
from medianshape.simplicial.utils import boundary_points
def func(x, y, sign=1):
'''
:math:`\sin\pi x \cos \pi y`.
'''
return np.sin(np.pi*x)*np.cos(np.pi*y)
def sample_surf(scale, step=0.2):
'''
Returns a tuple X, Y, Z of a surface for an experiment.
'''
x = y = np.arange(-4.0, 4.0, step)
X, Y = np.meshgrid(x, y)
from matplotlib.mlab import bivariate_normal
'''
Z1 = bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
#Z3 = bivariate_normal(X, Y, 1, 1, -2, -2)
Z = Z2 - Z1
'''
# Ups
ZU1 = bivariate_normal(X,Y, 1.5, 1, 0,-2)
ZU2 = bivariate_normal(X, Y, 1.5, 1.5, 4, 1)
ZU3 = bivariate_normal(X, Y, 1, 1, -4, 1)
#ZU4 = bivariate_normal(X, Y, 1.5, 1.5, -4, -4)
#ZU5 = bivariate_normal(X, Y, 1, 1, 4, -4)
ZU4 = bivariate_normal(X, Y, 4, 0.5, 0, -4)
# Downs
ZD1 = bivariate_normal(X, Y, 1.5, 1, 0, 1)
ZD2 = bivariate_normal(X, Y, 1.5, 1.5, -4, -2)
ZD3 = bivariate_normal(X, Y, 1, 1, 4, -2)
ZD4 = bivariate_normal(X, Y, 4, 1, 0, 4)
Z1 = ZU1 + ZU2 + ZU3 - ZD1 - ZD2 - ZD3 - ZD4
Zmax1 = np.abs(np.amax(Z1))
Z1 = Z1/Zmax1 * scale[2]
# Visualization
fig = plt.figure()
ax = fig.gca(projection="3d")
surf = ax.plot_surface(X, Y, Z1, rstride=1, cstride=1, cmap=cm.winter,
linewidth=0, antialiased=False)
plt.show()
# Ups
ZU1 = bivariate_normal(X,Y, 2, 1, 1,1)
ZU2 = bivariate_normal(X, Y, 3, 1, -2, 4)
ZU3 = bivariate_normal(X, Y, 1.5, 1.5, -2, -2)
#ZU4 = bivariate_normal(X, Y, 1.5, 1.5, -4, -4)
#ZU5 = bivariate_normal(X, Y, 1, 1, 4, -4)
ZU4 = bivariate_normal(X, Y, 2, 2, 3, -4)
# Downs
ZD1 = bivariate_normal(X, Y, 1, 2, 4, 2)
ZD2 = bivariate_normal(X, Y, 1.5, 1.5, -2, 2)
ZD3 = bivariate_normal(X, Y, 1.5, 1.5, 1, -2)
ZD4 = bivariate_normal(X, Y, 4, 1, 0, -4)
Z2 = ZU1 + ZU2 + ZU3 - ZD1 - ZD2 - ZD3 - ZD4
Zmax2 = np.abs(np.amax(Z2))
Z2 = Z2/Zmax2 * scale[2]
X = X * scale[0]/4.0
Y = Y * scale[1]/4.0
# Visualization
fig = plt.figure()
ax = fig.gca(projection="3d")
surf = ax.plot_surface(X, Y, Z2, rstride=1, cstride=1, cmap=cm.winter,
linewidth=0, antialiased=False)
plt.show()
return X, Y, Z1, Z2
def interpolate_surf(points, values, ipoints, method = "nearest"):
    '''
    Used to interpolate a sample surface to a surface in a mesh.
    '''
    from scipy.interpolate import griddata
    return griddata(points, values, ipoints, method=method)
def surfgen_shared_boundary(bbox=[-10,-10,-10, 10,10,10], l=3):
'''
Generates two surfaces in 3D with shared boundary for an experiment.
Writes the two surface as .poly file for tetgen.
'''
# Generating point grids for two surfaces
xmin = bbox[0]
xmax = bbox[3]
ymin = bbox[1]
ymax = bbox[4]
zmin = bbox[2]
zmax = bbox[5]
Xmin, Ymin, Zmin, Xmax, Ymax, Zmax = np.array(bbox)*0.8
X, Y, Z1, Z2 = sample_surf([Xmax, Ymax, zmax*0.3], step=0.8)
Z1 = Z1 + zmax*0.4
Z2 = Z2 - zmax*0.4
#Symmertic surfs
#Z2 = -Z1 - zmax*0.4
'''
# Plotting the two surfaces
fig = plt.figure()
ax = fig.gca(projection="3d")
surf = ax.scatter(X, Y, Z1.reshape(-1,1), color='b')
surf = ax.scatter(X, Y, Z2.reshape(-1,1), color='r')
plt.show()
'''
mesh = meshgen2d([Xmin, Ymin, Xmax, Ymax], l, include_corners=True)
sample_points = np.hstack((X.reshape(-1,1), Y.reshape(-1,1)))
# Interpolating the surface mesh into two different surfaces
# similar to the the sample surfaces generated before
Z1 = interpolate_surf(sample_points, Z1.reshape(-1,1), mesh.points)
Z2 = interpolate_surf(sample_points, Z2.reshape(-1,1), mesh.points)
# Integrating two surfaces
points1 = np.hstack((mesh.points, Z1))
print points1.shape
points2 = np.hstack((mesh.points, Z2))
print points2.shape
corners = utils.boundary_points(bbox)
midcorners = utils.mid_corners(bbox)
offset1 = len(corners) +len(midcorners) + 1
offset2 = len(corners) + len(midcorners) + len(points1) + 1
points = np.concatenate((corners, midcorners, points1, points2), axis=0)
print points.shape
triangles1 = mesh.simplices + offset1
triangles2 = mesh.simplices + offset2
# Adding the indices of the points as the last column of the coordainate list
Xmin_s1 = np.argwhere(points1[:,0]==Xmin)
Xmin_s1_points = np.hstack((points1[Xmin_s1.reshape(-1,)], Xmin_s1))
# Sorting the indices such that the points are in increasing order of its y-component
Xmin_s1 = (Xmin_s1_points[:,3][np.argsort(Xmin_s1_points[:,1])] + offset1).astype(int)
Xmin_s2 = np.argwhere(points2[:,0]==Xmin)
Xmin_s2_points = np.hstack((points2[Xmin_s2.reshape(-1,)], Xmin_s2))
Xmin_s2 = (Xmin_s2_points[:,3][np.argsort(Xmin_s2_points[:,1])] + offset2).astype(int)
Xmax_s1 = np.argwhere(points1[:,0]==Xmax)
Xmax_s1_points = np.hstack((points1[Xmax_s1.reshape(-1,)], Xmax_s1))
Xmax_s1 = (Xmax_s1_points[:,3][np.argsort(Xmax_s1_points[:,1])] + offset1).astype(int)
Xmax_s2 = np.argwhere(points2[:,0]==Xmax)
Xmax_s2_points = np.hstack((points2[Xmax_s2.reshape(-1,)], Xmax_s2))
Xmax_s2 = (Xmax_s2_points[:,3][np.argsort(Xmax_s2_points[:,1])] + offset2).astype(int)
Ymin_s1 = np.argwhere(points1[:,1]==Ymin)
Ymin_s1_points = np.hstack((points1[Ymin_s1.reshape(-1,)], Ymin_s1))
Ymin_s1 = (Ymin_s1_points[:,3][np.argsort(Ymin_s1_points[:,0])] + offset1).astype(int)
Ymin_s2 = np.argwhere(points2[:,1]==Ymin)
Ymin_s2_points = np.hstack((points2[Ymin_s2.reshape(-1,)], Ymin_s2))
Ymin_s2 = (Ymin_s2_points[:,3][np.argsort(Ymin_s2_points[:,0])] + offset2).astype(int)
Ymax_s1 = np.argwhere(points1[:,1]==Ymax)
Ymax_s1_points = np.hstack((points1[Ymax_s1.reshape(-1,)], Ymax_s1))
Ymax_s1 = (Ymax_s1_points[:,3][np.argsort(Ymax_s1_points[:,0])] + offset1).astype(int)
Ymax_s2 = np.argwhere(points2[:,1]==Ymax)
Ymax_s2_points = np.hstack((points2[Ymax_s2.reshape(-1,)], Ymax_s2))
Ymax_s2 = (Ymax_s2_points[:,3][np.argsort(Ymax_s2_points[:,0])] + offset2).astype(int)
for i in range(len(Xmin_s1)-1):
triangles1 = np.vstack((triangles1, [9, Xmin_s1[i], Xmin_s1[i+1]]))
triangles1 = np.vstack((triangles1, [9, Xmin_s1[-1], 12]))
for i in range(len(Xmin_s2)-1):
triangles2 = np.vstack((triangles2, [9, Xmin_s2[i], Xmin_s2[i+1]]))
triangles2 = np.vstack((triangles2, [9, Xmin_s2[-1], 12]))
for i in range(len(Xmax_s1)-1):
triangles1 = np.vstack((triangles1, [10, Xmax_s1[i], Xmax_s1[i+1]]))
triangles1 = np.vstack((triangles1, [10, Xmax_s1[-1], 11]))
for i in range(len(Xmax_s2)-1):
triangles2 = np.vstack((triangles2, [10, Xmax_s2[i], Xmax_s2[i+1]]))
triangles2 = np.vstack((triangles2, [10, Xmax_s2[-1], 11]))
for i in range(len(Ymin_s1)-1):
triangles1 = np.vstack((triangles1, [9, Ymin_s1[i], Ymin_s1[i+1]]))
triangles1 = np.vstack((triangles1, [9, Ymin_s1[-1], 10]))
for i in range(len(Ymin_s2)-1):
triangles2 = np.vstack((triangles2, [9, Ymin_s2[i], Ymin_s2[i+1]]))
triangles2 = np.vstack((triangles2, [9, Ymin_s2[-1], 10]))
for i in range(len(Ymax_s1)-1):
triangles1 = np.vstack((triangles1, [12, Ymax_s1[i], Ymax_s1[i+1]]))
triangles1 = np.vstack((triangles1, [12, Ymax_s1[-1], 11]))
for i in range(len(Ymax_s2)-1):
triangles2 = np.vstack((triangles2, [12, Ymax_s2[i], Ymax_s2[i+1]]))
triangles2 = np.vstack((triangles2, [12, Ymax_s2[-1], 11]))
triangles = np.vstack((triangles1, triangles2))
# Preparing PLC and save it to .poly file for tetgen
with open( os.environ['HOME'] +'/mediansurf.poly', 'w') as f:
f.write("#Part 1 - the node list\n")
f.write("#%d nodes in 3d, no attributes, no boundary marker\n"%points.shape[0])
f.write('%d %d %d %d\n'%(points.shape[0], 3, 0,0))
for i, p in enumerate(points):
f.write("%d %f %f %f\n"%(i+1, p[0], p[1], p[2]))
# Each 4 sides has 3 polygons
# Top and bottom
# Each triangle of the two surfaces are facets
fn = 6 + len(triangles)
f.write("#Part 2 - the facet list.\n")
f.write("#%d facets with boundary markers\n"%fn)
f.write('%d %d\n'%(fn, 1))
f.write("#Boundary facet list.\n")
f.write("%d %d %d\n"%(1, 0, 1))
f.write("4 1 2 3 4\n")
f.write("%d %d %d\n"%(1, 0, 1))
f.write("4 5 6 7 8\n")
#xmin side
f.write("2 0 1\n")
f.write("4 1 4 8 5\n")
f.write("2 9 12\n")
#ymin side
f.write("2 0 1\n")
f.write("4 1 2 6 5\n")
f.write("2 9 10\n")
#xmax side
f.write("2 0 1\n")
f.write("4 2 3 7 6\n")
f.write("2 10 11\n")
#ymax side
f.write("2 0 1\n")
f.write("4 3 4 8 7\n")
f.write("2 11 12\n")
f.write("#Facet list of surface1.\n")
for t in triangles1:
f.write("%d %d %d\n"%(1, 0, -1))
f.write("%d %d %d %d\n"%(3, t[0], t[1], t[2]))
f.write("#Facet list of surface2.\n")
for t in triangles2:
f.write("%d %d %d\n"%(1, 0, -2))
f.write("%d %d %d %d\n"%(3, t[0], t[1], t[2]))
f.write("#Part 3 - the hole list.\n")
f.write('%d\n'%0)
f.write("#Part 4 - the region list.\n")
f.write('%d\n'%0)
if __name__ == "__main__":
surfgen_shared_boundary()
| gpl-3.0 |
suraj-jayakumar/lstm-rnn-ad | src/testdata/random_data_time_series/generate_data.py | 1 | 1042 | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 23 11:15:12 2016
@author: suraj
"""
import random
import numpy as np
import pickle
import matplotlib.pyplot as plt
attachRateList = []
for i in range(3360):
attachRateList.append(random.uniform(4,6))
attachRateList = np.array(attachRateList)
encoded_attach_rate_list = np.fft.fft(attachRateList)
day_number_list = [i%7 for i in range(3360)]
encoded_day_number_list = np.fft.fft(day_number_list)
time_number_list = [i%96 for i in range(3360)]
encoded_time_number_list = np.fft.fft(time_number_list)
final_list_x = np.array([[encoded_day_number_list.real[i],encoded_day_number_list.imag[i],encoded_time_number_list.real[i],encoded_time_number_list.imag[i],encoded_attach_rate_list.real[i],encoded_attach_rate_list.imag[i]] for i in range(3360)])
final_list_y = [ (encoded_attach_rate_list[i].real,encoded_attach_rate_list[i].imag) for i in range(len(encoded_attach_rate_list)) ]
pickle.dump(final_list_x,open('x_att.p','wb'))
pickle.dump(final_list_y,open('y_att.p','wb'))
| apache-2.0 |
hackthemarket/pystrat | sim.py | 1 | 10697 | # simple trading strategy simulator
import pandas as pd
from pandas.tools.plotting import autocorrelation_plot
from pandas.tools.plotting import scatter_matrix
import numpy as np
from scipy import stats
import sklearn
from sklearn import preprocessing as pp
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import interactive
interactive(True)
import sys
import time
import logging as log
log.basicConfig(level=log.DEBUG)
import glob
import os.path
import pickle
import logging as log
log.basicConfig(level=log.DEBUG)
import random
import pdb
pd.set_option('display.width',500)
# define constant friction function
DefaultBPS = 10
def FrictionInBps(U, cfg, kvargs):
""" default FrictionInBps function just returns default,
but the interface receives all strategy info after
strategy is run, so one can create more realistic
impact models """
return DefaultBPS
""" default simulator cfg dictionary.
default keys/values:
FrictionInBps - function that takes same args as strategy.
by default, returns DefaultBps.
InitBal - in $s
Reinvest - should we reinvest our winnings or constantly assume we have InitBal?
Verbose
"""
DEF_SIM_CFG= { 'FrictionInBps': FrictionInBps,
'Verbose' : True,
'InitBal' : 1e7,
'Reinvest' : True }
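# Hedged usage sketch (names below are illustrative, not part of this module):
# a custom cfg can swap in a different impact model while keeping the same
# interface, e.g. a scalar friction that grows with how much the book trades:
#
#   def turnover_friction(U, cfg, kvargs):
#       traded_dlrs = (U.Trade_Qty.abs() * U.Close * U.Multiplier).sum()
#       return DefaultBPS + min(40.0, 1e-6 * traded_dlrs)
#
#   my_cfg = dict(DEF_SIM_CFG, FrictionInBps=turnover_friction, InitBal=5e6)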
# columns in prepped univ
SIM_COLS = ["Sym","Product","Instrument",
"Multiplier","Expiry","Strike",
"Open","High","Low","Close","Volume"]
SIM_COLS_OUT = ["Prev_Weight", "Weight", "Prev_Qty", "Qty",
"Trade_Qty", "Trade_Fric", "PNL", "NET_PNL"]
SIM_COL_BALS =[ "NAV","Friction","PNL","NET_PNL", "Longs","Shorts",
"Long_Dlrs","Short_Dlrs","Num_Trades","Turnover","NET_Return"]
def squarem( df, sym='Sym', min_pct=.9 ) :
# sim_squarem solves the common problem in which you have a large table of
# data grouped by symbols, some of which have missing data. You want to
# 'square' the data such that any symbol which is missing 'too much' data
# is expunged and the remaining data is filled appropriately, leaving you
# with a dataset which has the same # of observations for each symbol.
#
bysyms = df.groupby(sym).size()
idx = df.index.unique()
onumsyms = len(bysyms)
    minlen = int(round(len(idx) * min_pct))
keep = bysyms[bysyms > minlen]
u = df[ df[sym].isin(keep.index) ]
numsyms = len(keep)
    log.info('Got rid of %d/%d symbols', (onumsyms - numsyms), onumsyms)
u.replace(0,np.nan,inplace=True)
u.replace([np.inf, -np.inf], np.nan,inplace=True)
u.sort_index(inplace=True)
uidx = u.index.unique()
# groupby and reindex magic
z = u.groupby(sym).apply(
lambda x: x.reindex(uidx).ffill()).reset_index(0,drop=True)
# badz = z[z.isnull().any(axis=1)]
# if len(badz.index) > 0 :
# badtimes = badz.index.unique().values
# z.drop( badtimes, inplace=True )
# for dt in badtimes:
# log.info('removed %s for NaNs',pd.to_datetime(str(dt)).strftime(
# '%Y-%m-%d'))
return z
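# Hedged usage sketch: given a long table P indexed by Date with a 'Sym' column,
#   P_sq = squarem(P)
# drops thinly-populated symbols and forward-fills the rest, so every remaining
# symbol has one row per remaining timestamp.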
def prep_univ( dateTime, symbol,
open, high, low, close, volume,
product, instrument='STK', multiplier=1.0,expiry=None,
strike=None,adv_days=20,sd_days=20, open2close_returns=True,
scaleAndCenter=False, **more_cols) :
# constructs universe appropriate for use with simulator; any additional columns
# passed-in via ellipsis will be added to table as named
#
U = pd.DataFrame({'Sym': symbol,
'Product' : product, 'Instrument':instrument,
'Multiplier': 1.0, 'Expiry': None, 'Strike':None,
'Open':open,'High':high, 'Low':low, 'Close':close,
'Volume':volume }, index=dateTime )
U = U[ SIM_COLS ]
if len(more_cols) > 0:
U = pd.concat( [U, pd.DataFrame(more_cols)], axis=1 )
U.reset_index( inplace=True)
U.sort_values(['Sym','Date'],inplace=True)
U.Date = pd.to_datetime(U.Date)
U.set_index('Date',inplace=True)
if scaleAndCenter :
log.debug('prep_univ: scaling & centering')
raw_scaled = U.groupby('Sym').transform(
lambda x : (x - x.mean())/x.std())
U = pd.concat([ u.Sym, raw_scaled], axis=1)
# calculate adv, returns, fwd_returns & change in volume
U['ADV'] = U.groupby('Sym')['Volume'].apply(
pd.rolling_mean, adv_days, 1).shift()
U['DeltaV'] = U.groupby('Sym')['Volume'].transform(
lambda x : np.log(x / x.shift()) )
U['Return'] = U.groupby('Sym')['Close'].transform(
lambda x : np.log(x / x.shift()) )
U['Fwd_Close'] = U.groupby('Sym')['Close'].shift(-1)
U['Fwd_Return'] = U.groupby('Sym')['Close'].transform(
lambda x : np.log(x / x.shift()).shift(-1) ) # fwd.returns
U['SD'] = U.groupby('Sym')['Return'].apply(
pd.rolling_std, sd_days, 1).shift()
if open2close_returns:
U['Fwd_Open'] = U.groupby('Sym')['Open'].shift(-1)
U['Fwd_COReturn'] = np.divide(np.add( U.Fwd_Open, -U.Close ),U.Close)
U.ffill(inplace=True)
U.sort_index(inplace=True)
return U
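# Hedged usage sketch (column names are illustrative; a real table must supply
# the same fields):
#   U = prep_univ(px.Date, px.Sym, px.Open, px.High, px.Low, px.Close,
#                 px.Volume, product='Equity')
#   Z, B = sim(U, sim_FUN=best_strat, kvargs={'num_names': 10})
#   sr = sharpe(B.NET_Return)   # annualized Sharpe of daily net returns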
# simple, default strategy: equal weight universe on daily basis
def eq_wt( U, cfg, kvargs ) :
#pdb.set_trace()
U.Weight = 1/float(len(U.index))
return U
# given today's Universe U and Yesterday's Y, set U's
# Prev_Weight and Prev_Qty to Y's Weight & Qty
# TODO: clean-up
def _getprevs( U, Y ) :
# TODO: surely there's a cleaner way to do this...
wts = Y.reset_index()[['Sym','Weight']]
wts.columns = ['Sym','Prev_Weight']
pwts = U[['Sym']].merge( wts, on = 'Sym' )['Prev_Weight']
U.Prev_Weight=pwts.values
qts = Y.reset_index()[['Sym','Qty']]
qts.columns = ['Sym','Prev_Qty']
pqts = U[['Sym']].merge( qts, on = 'Sym' )['Prev_Qty']
U.Prev_Qty=pqts.values
# functor to run strategy each day and update tbls ...
# TODO: clean-up
def __sim ( U, FUN, cfg, B, kvargs) :
# run sim to set weights
U = FUN( U, cfg, kvargs)
# set prev values for weight & qty...
Y = kvargs.pop('_Y', None)
if Y is not None and not np.all(Y.index==U.index):
_getprevs(U,Y)
loop = 1 + int(kvargs.pop('_L'))
else:
loop = 0
kvargs['_L'] = loop
kvargs['_Y'] = U
bb = B.iloc[loop]
# fill-out trade details
NAV = bb.NAV
tospend = NAV/U.Weight
U.Qty = np.round((NAV*U.Weight) / (U.Multiplier*U.Close))
U.Trade_Qty = U.Qty - U.Prev_Qty
fbps = 1e-4 * cfg['FrictionInBps'](U,cfg,kvargs)
U.Trade_Fric = U.Trade_Qty * U.Close * U.Multiplier * fbps
U.PNL = (U.Fwd_Close - U.Close) * U.Qty * U.Multiplier
U.NET_PNL = U.PNL - U.Trade_Fric
# today's balances are based on yesterday's posns...
longs = U[U.Qty > 0]
shorts = U[U.Qty < 0]
trades = U[U.Trade_Qty != 0]
bb.Friction = U.Trade_Fric.sum()
bb.PNL = U.PNL.sum()
bb.NET_PNL = U.NET_PNL.sum()
bb.Longs = len(longs.index)
bb.Shorts = len(shorts.index)
bb.Long_Dlrs = (longs.Close * longs.Multiplier * longs.Qty).sum()
bb.Short_Dlrs = (shorts.Close * shorts.Multiplier * shorts.Qty).sum()
bb.Num_Trades = len(trades.index)
bb.Turnover = (trades.Close * trades.Multiplier
* trades.Trade_Qty.abs()).sum()/NAV
if loop > 0 :
yb = B.iloc[loop-1]
ynav = yb.NAV
tnav = ynav + yb.NET_PNL
bb.NAV = tnav
bb.NET_Return = (tnav-ynav)/ynav
B.iloc[loop] = bb
# pdb.set_trace()
return U
def sim( univ, sim_FUN=eq_wt, cfg=DEF_SIM_CFG.copy(), kvargs={} ) :
""" simulator: runs simulation and returns a table of activity and balances.
args:
univ - historical data that's been produced by prep_univ
sim_FUN - strategy function. by default, equal weights univ.
cfg - cfg info. by default
kvargs - strat-specific extra data in a dict
"""
#
t0 = time.time()
all_times = univ.index.unique().values
# prepare writable/output side of universe
W = pd.DataFrame( columns=SIM_COLS_OUT, index = univ.index).fillna(0.0)
U = pd.concat( [univ, W], axis=1 )
# create balances table: one per day
B = pd.DataFrame( columns = SIM_COL_BALS, index = all_times ).fillna(0.0)
B.NAV = cfg['InitBal']
# 'daily' loop
Z = U.groupby(U.index).apply( __sim, FUN=sim_FUN,
cfg=cfg, B=B, kvargs=kvargs )
log.info('ran over %d days and %d rows in %d secs', len(all_times),
len(U.index),time.time()-t0)
# summarize results a bit more...?
#ts=xts(B$Net.Return,order.by=B$DateTime)
# return universe and balances
#list(U=U,B=B, ts=ts)
return Z, B
def sharpe(Returns) :
return np.sqrt(252) * np.mean(Returns)/np.std(Returns)
def random_strat( U, cfg, kvargs ) :
# random portfolio strategy: picks 'num_names' randomly
nnames = kvargs.get('num_names',10)
names = random.sample(U.Sym, nnames )
U.Weight = np.where( U.Sym.isin( names ), 1/float(nnames), 0 )
return U
def best_strat( U, cfg, kvargs ) :
# portfolio strategy: picks 'num_names' based on trailing return
nnames = kvargs.get('num_names',10)
#pdb.set_trace()
best = U.sort_values('Return',ascending=False,
na_position='last')['Sym'].head(10).values
U.Weight = np.where( U.Sym.isin( best ), 1/float(nnames), 0 )
return U
def worst_strat( U, cfg, kvargs ) :
# portfolio strategy: picks 'num_names' based on trailing return
nnames = kvargs.get('num_names',10)
#pdb.set_trace()
worst = U.sort_values('Return',ascending=True,
na_position='last')['Sym'].head(10).values
U.Weight = np.where( U.Sym.isin( worst ), 1/float(nnames), 0 )
return U
def rtest(U,FUN=random_strat, runs=10):
# run given strat repeatedly, plotting NAVs and Returning them
# nb: this only makes sense if the strategy is random...
# run random_strat 'runs' times and plot NAVs
N = None
for i in range(runs) :
_,b = sim( U, sim_FUN=FUN )
n = pd.DataFrame(b.NAV)
N = n if N is None else pd.concat([N,n],axis=1)
N.plot(legend=False)
return N
def sim_test():
# dev driver
f = 'U.pkl'
P = pickle.load(open(f))
log.info('loaded <%s>',f)
P.describe()
U = P[P.index >= '2005-01-01']
U.describe()
import sim
_,B = sim.sim(U)
#plot NAV
B.NAV.plot(title='Equal Weight Everyone')
return B
| gpl-3.0 |
Gabriel-p/mcs_rot_angles | aux_modules/validation_set.py | 1 | 10176 |
import os
from astropy.io import ascii
from astropy.table import Table
from astropy.coordinates import Distance, Angle, SkyCoord
from astropy import units as u
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import sys
# Change path so that we can import functions from the 'modules/' folder.
sys.path.insert(0, sys.path[0].replace('aux_', ''))
import readData
import MCs_data
def zDist(N):
"""
This function generates a uniform spread of vertical distances, in the
range (-z_dist, +z_dist).
"""
# Define maximum vertical distance (in parsec)
z_dist = 5000.
# Generate N random z' vertical distances, in parsec.
# To generate the *same* values each time the code is executed, fix the
# random seed to any integer value.
# np.random.seed(12345)
z_prime = np.random.uniform(-z_dist, z_dist, N)
return z_prime
def invertDist(incl, theta, ra_0, dec_0, D_0, ra, dec, z_prime):
"""
Inverted distance in parsecs (D) from Eq (7) in
van der Marel & Cioni (2001) using Eqs (1), (2), (3).
"""
# Express everything in radians.
incl, theta = np.deg2rad(incl), np.deg2rad(theta)
ra_0, dec_0, ra, dec = ra_0.rad, dec_0.rad, np.deg2rad(ra), np.deg2rad(dec)
# cos(rho)
A = np.cos(dec) * np.cos(dec_0) * np.cos(ra - ra_0) +\
np.sin(dec) * np.sin(dec_0)
# sin(rho) * cos(phi)
B = -np.cos(dec) * np.sin(ra - ra_0)
# sin(rho) * sin(phi)
C = np.sin(dec) * np.cos(dec_0) -\
np.cos(dec) * np.sin(dec_0) * np.cos(ra - ra_0)
# Eq (7)
D = (z_prime - D_0.value * np.cos(incl)) /\
(np.sin(incl) * (C * np.cos(theta) - B * np.sin(theta)) -
A * np.cos(incl))
return D
def rho_phi(ra, dec, glx_ctr):
"""
Obtain the angular distance between (ra, dec) coordinates and the center
of the galaxy (rho), and its position angle (phi).
"""
# Store clusters' (ra, dec) coordinates in degrees.
coords = SkyCoord(list(zip(*[ra, dec])), unit=(u.deg, u.deg))
rho = coords.separation(glx_ctr)
# Position angle between center and coordinates. This is the angle between
# the positive y axis (North) counter-clockwise towards the negative x
# axis (East).
Phi = glx_ctr.position_angle(coords)
# This is the angle measured counter-clockwise from the x positive axis
# (West).
phi = Phi + Angle('90d')
return rho, phi
def xyz_coords(rho, phi, D_0, r_dist):
'''
Obtain coordinates in the (x,y,z) system of van der Marel & Cioni (2001),
Eq (5).
Values (x, y,z) returned in Kpc.
'''
d_kpc = Distance((10**(0.2 * (np.asarray(r_dist) + 5.))) / 1000.,
unit=u.kpc)
x = d_kpc * np.sin(rho.radian) * np.cos(phi.radian)
y = d_kpc * np.sin(rho.radian) * np.sin(phi.radian)
z = D_0.kpc * u.kpc - d_kpc * np.cos(rho.radian)
x, y, z = x.value, y.value, z.value
return np.array([x, y, z])
def outData(gal, gal_data, dist_mod, e_dm):
"""
Write data to output 'xxx_input_synth.dat' file ('xxx' stands for the
processed galaxy.)
"""
data = Table(
[gal_data['Name'], gal_data['ra'], gal_data['dec'], dist_mod, e_dm,
gal_data['log(age)']],
names=['Name', 'ra', 'dec', 'dist_mod', 'e_dm', 'log(age)'])
with open(gal.lower() + "_input_synth.dat", 'w') as f:
ascii.write(data, f, format='fixed_width', delimiter=' ')
def inv_trans_eqs(x_p, y_p, z_p, theta, inc):
"""
Inverse set of equations. Transform inclined plane system (x',y',z')
into face on sky system (x,y,z).
"""
x = x_p * np.cos(theta) - y_p * np.cos(inc) * np.sin(theta) -\
z_p * np.sin(inc) * np.sin(theta)
y = x_p * np.sin(theta) + y_p * np.cos(inc) * np.cos(theta) +\
z_p * np.sin(inc) * np.cos(theta)
z = -1. * y_p * np.sin(inc) + z_p * np.cos(inc)
return x, y, z
def make_plot(gal_name, incl, theta, cl_xyz, dm):
"""
Original link for plotting intersecting planes:
http://stackoverflow.com/a/14825951/1391441
"""
# Make plot.
fig = plt.figure()
ax = Axes3D(fig)
# Placement 0, 0 is the bottom left, 1, 1 is the top right.
ax.text2D(
0.4, 0.95, r"${}:\;(\Theta, i) = ({}, {})$".format(
gal_name, theta - 90., incl),
transform=ax.transAxes, fontsize=15, color='red')
# Express in radians for calculations.
incl, theta = np.deg2rad(incl), np.deg2rad(theta)
# Plot clusters.
x_cl, y_cl, z_cl = cl_xyz
SC = ax.scatter(x_cl, z_cl, y_cl, c=dm, s=50)
min_X, max_X = min(x_cl) - 2., max(x_cl) + 2.
min_Y, max_Y = min(y_cl) - 2., max(y_cl) + 2.
min_Z, max_Z = min(z_cl) - 2., max(z_cl) + 2.
# x,y plane.
X, Y = np.meshgrid([min_X, max_X], [min_Y, max_Y])
Z = np.zeros((2, 2))
# Plot x,y plane.
ax.plot_surface(X, Z, Y, color='gray', alpha=.1, linewidth=0, zorder=1)
# Axis of x,y plane.
# x axis.
ax.plot([min_X, max_X], [0., 0.], [0., 0.], ls='--', c='k', zorder=4)
# Arrow head pointing in the positive x direction.
ax.quiver(max_X, 0., 0., max_X, 0., 0., arrow_length_ratio=.5,
length=.1, color='k')
ax.text(max_X, 0., -.5, 'x', 'x')
# y axis.
ax.plot([0., 0.], [0., 0.], [0., max_Y], ls='--', c='k')
# Arrow head pointing in the positive y direction.
ax.quiver(0., 0., max_Y, 0., 0., max_Y, arrow_length_ratio=.8,
length=.1, color='k')
ax.plot([0., 0.], [0., 0.], [min_Y, 0.], ls='--', c='k')
ax.text(-.5, 0., max_Y, 'y', 'y')
#
# A plane is a*x+b*y+c*z+d=0, [a,b,c] is the normal.
a, b, c, d = -1. * np.sin(theta) * np.sin(incl),\
np.cos(theta) * np.sin(incl), np.cos(incl), 0.
# print('a/c,b/c,1,d/c:', a / c, b / c, 1., d / c)
# Rotated plane.
X2_t, Y2_t = np.meshgrid([min_X, max_X], [0, max_Y])
Z2_t = (-a * X2_t - b * Y2_t) / c
X2_b, Y2_b = np.meshgrid([min_X, max_X], [min_Y, 0])
Z2_b = (-a * X2_b - b * Y2_b) / c
# Top half of first x',y' inclined plane.
ax.plot_surface(X2_t, Z2_t, Y2_t, color='red', alpha=.1, lw=0, zorder=3)
# Bottom half of inclined plane.
ax.plot_surface(X2_t, Z2_b, Y2_b, color='red', alpha=.1, lw=0, zorder=-1)
# Axis of x',y' plane.
# x' axis.
x_min, y_min, z_min = inv_trans_eqs(min_X, 0., 0., theta, incl)
x_max, y_max, z_max = inv_trans_eqs(max_X, 0., 0., theta, incl)
ax.plot([x_min, x_max], [z_min, z_max], [y_min, y_max], ls='--', c='b')
# Arrow head pointing in the positive x' direction.
ax.quiver(x_max, z_max, y_max, x_max, z_max, y_max, length=0.1,
arrow_length_ratio=.7)
ax.text(x_max, z_max, y_max - .5, "x'", 'x', color='b')
# y' axis.
x_min, y_min, z_min = inv_trans_eqs(0., min_Y, 0., theta, incl)
x_max, y_max, z_max = inv_trans_eqs(0., max_Y, 0., theta, incl)
ax.plot([x_min, x_max], [z_min, z_max], [y_min, y_max], ls='--', c='g')
# Arrow head pointing in the positive y' direction.
ax.quiver(x_max, z_max, y_max, x_max, z_max, y_max, length=0.1,
arrow_length_ratio=.9, color='g')
ax.text(x_max - .5, z_max, y_max, "y'", 'y', color='g')
# # z' axis.
# x_min, y_min, z_min = inv_trans_eqs(0., 0, min_Z, theta, incl)
# x_max, y_max, z_max = inv_trans_eqs(0., 0, max_Z, theta, incl)
# ax.plot([x_min, x_max], [z_min, z_max], [y_min, y_max], ls='--', c='y')
# # Arrow head pointing in the positive z' direction.
# ax.quiver(x_max, z_max, y_max, x_max, z_max, y_max, length=0.1,
# arrow_length_ratio=.9, color='y')
# ax.text(x_max - .5, z_max, y_max, "z'", 'z', color='y')
ax.set_xlabel('x (Kpc)')
ax.set_ylabel('z (Kpc)')
ax.set_ylim(max_Y, min_Y)
ax.set_zlabel('y (Kpc)')
plt.colorbar(SC, shrink=0.9, aspect=25)
ax.axis('equal')
ax.axis('tight')
# This controls the initial orientation of the displayed 3D plot.
# ‘elev’ stores the elevation angle in the z plane. ‘azim’ stores the
# azimuth angle in the x,y plane.
ax.view_init(elev=0., azim=-90.)
plt.show()
# plt.savefig()
def main():
"""
"""
# Define inclination angles (i, Theta) (SMC first, LMC second).
# 'Theta' is the PA (position angle) measured from the North (positive
# y axis in van der Marel et al. 2002, Fig 3)
rot_angles = ((60, 150.), (30, 140.))
# Root path.
r_path = os.path.realpath(__file__)[:-30]
# Read input data for both galaxies from file (smc_data, lmc_data)
gal_data = readData.main(r_path)
for gal, gal_name in enumerate(['SMC', 'LMC']):
print("Generating data for {}".format(gal_name))
incl, Theta = rot_angles[gal]
# 'theta' is the position angle measured from the West (positive
# x axis), used by Eq (7) in van der Marel & Cioni (2001).
theta = Theta + 90.
# Center coordinates and distance for this galaxy.
gal_center, D_0, e_gal_dist = MCs_data.MCs_data(gal)
ra_0, dec_0 = gal_center.ra, gal_center.dec
# Center coordinates for observed clusters in this galaxy.
ra, dec = gal_data[gal]['ra'], gal_data[gal]['dec']
# Generate N random vertical distances (z'), in parsec.
z_prime = zDist(len(ra))
# Distance to clusters in parsecs.
D = invertDist(incl, theta, ra_0, dec_0, D_0, ra, dec, z_prime)
# Convert to distance moduli.
dist_mod = np.round(-5. + 5. * np.log10(D), 2)
# This line below uses the actual distance moduli found by ASteCA.
# dist_mod = gal_data[gal]['dist_mod']
# Random errors for distance moduli.
e_dm = np.round(np.random.uniform(.03, .09, len(ra)), 2)
# Store data in output file.
outData(gal_name, gal_data[gal], dist_mod, e_dm)
print("Output data stored")
# Obtain angular projected distance and position angle for the
# clusters in the galaxy.
rho, phi = rho_phi(ra, dec, gal_center)
cl_xyz = xyz_coords(rho, phi, D_0, dist_mod)
make_plot(gal_name, incl, theta, cl_xyz, dist_mod)
print("Plot saved.")
if __name__ == '__main__':
main()
| gpl-3.0 |
annahs/atmos_research | LEO_calc_coating_from_meas_scat_amp_and_write_to_db.py | 1 | 3857 | import sys
import os
import datetime
import pickle
import numpy as np
import matplotlib.pyplot as plt
from pprint import pprint
import sqlite3
import calendar
from datetime import datetime
#id INTEGER PRIMARY KEY AUTOINCREMENT,
#sp2b_file TEXT,
#file_index INT,
#instr TEXT,
#instr_locn TEXT,
#particle_type TEXT,
#particle_dia FLOAT,
#unix_ts_utc FLOAT,
#actual_scat_amp FLOAT,
#actual_peak_pos INT,
#FF_scat_amp FLOAT,
#FF_peak_pos INT,
#FF_gauss_width FLOAT,
#zeroX_to_peak FLOAT,
#LF_scat_amp FLOAT,
#incand_amp FLOAT,
#lag_time_fit_to_incand FLOAT,
#LF_baseline_pct_diff FLOAT,
#rBC_mass_fg FLOAT,
#coat_thickness_nm FLOAT,
#coat_thickness_from_actual_scat_amp FLOAT
#UNIQUE (sp2b_file, file_index, instr)
#connect to database
conn = sqlite3.connect('C:/projects/dbs/SP2_data.db')
c = conn.cursor()
c2 = conn.cursor()
instrument = 'UBCSP2'
instrument_locn = 'WHI'
type_particle = 'incand'
start_date = '20110105'
end_date = '20120601'
lookup_file = 'C:/Users/Sarah Hanna/Documents/Data/WHI long term record/coatings/lookup_tables/coating_lookup_table_WHI_2012_UBCSP2-nc(2p26,1p26).lupckl'
rBC_density = 1.8
incand_sat = 3750
lookup = open(lookup_file, 'r')
lookup_table = pickle.load(lookup)
lookup.close()
c.execute('''SELECT * FROM SP2_coating_analysis''')
names = [description[0] for description in c.description]
pprint(names)
begin_data = calendar.timegm(datetime.strptime(start_date,'%Y%m%d').timetuple())
end_data = calendar.timegm(datetime.strptime(end_date,'%Y%m%d').timetuple())
def get_rBC_mass(incand_pk_ht, year):
if year == 2012:
rBC_mass = 0.003043*incand_pk_ht + 0.24826 #AD corrected linear calibration for UBCSP2 at WHI 2012
if year == 2010:
rBC_mass = 0.01081*incand_pk_ht - 0.32619 #AD corrected linear calibration for ECSP2 at WHI 2010
return rBC_mass
def get_coating_thickness(BC_VED,scat_amp,coating_lookup_table):
	#get the coating thicknesses from the lookup table, which is a dictionary of dictionaries: the 1st level is keyed by BC core size and the 2nd level is keyed by calculated scattering amplitude
core_diameters = sorted(coating_lookup_table.keys())
prev_diameter = core_diameters[0]
for core_diameter in core_diameters:
if core_diameter > BC_VED:
core_dia_to_use = prev_diameter
break
prev_diameter = core_diameter
	#now get the coating thickness for this scat_amp; this is the coating thickness based on the raw scattering maximum
scattering_amps = sorted(coating_lookup_table[core_dia_to_use].keys())
prev_amp = scattering_amps[0]
for scattering_amp in scattering_amps:
if scat_amp < scattering_amp:
scat_amp_to_use = prev_amp
break
prev_amp = scattering_amp
scat_coating_thickness = coating_lookup_table[core_dia_to_use].get(scat_amp_to_use, np.nan) # returns value for the key, or none
return scat_coating_thickness
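# Example usage (purely illustrative values; assumes the lookup table covers them):
# coat_nm = get_coating_thickness(180., 1200., lookup_table)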
LOG_EVERY_N = 10000
i = 0
for row in c.execute('''SELECT incand_amp, LF_scat_amp, unix_ts_utc, sp2b_file, file_index, instr FROM SP2_coating_analysis
WHERE instr=? and instr_locn=? and particle_type=? and incand_amp<? and unix_ts_utc>=? and unix_ts_utc<?''',
(instrument,instrument_locn,type_particle,incand_sat,begin_data,end_data)):
incand_amp = row[0]
LF_amp = row[1]
event_time = datetime.utcfromtimestamp(row[2])
file = row[3]
index = row[4]
instrt = row[5]
rBC_mass = get_rBC_mass(incand_amp, event_time.year)
if rBC_mass >= 0.25:
rBC_VED = (((rBC_mass/(10**15*rBC_density))*6/3.14159)**(1/3.0))*10**7 #VED in nm with 10^15fg/g and 10^7nm/cm
coat_th = get_coating_thickness(rBC_VED,LF_amp,lookup_table)
else:
rBC_VED = None
coat_th = None
c2.execute('''UPDATE SP2_coating_analysis SET coat_thickness_from_actual_scat_amp=? WHERE sp2b_file=? and file_index=? and instr=?''', (coat_th, file,index,instrt))
i+=1
if (i % LOG_EVERY_N) == 0:
print 'record: ', i
conn.commit()
conn.close()
| mit |
bongtrop/peach | tutorial/neural-networks/linear-prediction.py | 6 | 3386 | ################################################################################
# Peach - Computational Intelligence for Python
# Jose Alexandre Nalon
#
# This file: tutorial/linear-prediction.py
# Using neural networks to predict number sequences
################################################################################
# A neural network can be used to predict future values of a sequence of
# numbers. Wold's decomposition theorem establishes that any stationary sequence
# can be split into a regular, predictable part and an innovation process (which
# is discrete white noise, and thus unpredictable). The goal of this tutorial is
# to show how to use the neural network implementation of Peach to do this.
# We import numpy for arrays and peach for the library. Actually, peach also
# imports the numpy module, but we want numpy in a separate namespace:
from numpy import *
import random
import peach as p
# First, we create the network, with only one layer with only one neuron in it.
# The neuron has many inputs and only one output. The activation function is the
# identity. This kind of neuron is usually known as ADALINE (Adaptive Linear
# Neuron, later Adaptive Linear Element). We use as learning algorithm the LMS
# algorithm.
N = 32
nn = p.FeedForward((N, 1), phi=p.Identity, lrule=p.LMS(0.05))
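# For reference, the LMS update being applied is (assuming peach follows the
# standard form, up to a factor-of-2 convention): w <- w + mu*(d - y)*x, with
# error e = d - y and step size mu = 0.05 as passed above.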
# The lists below will track the values of the sequence being predicted and of
# the error for plotting.
xlog = [ ]
ylog = [ ]
elog = [ ]
error = 1.
i = 0
x = zeros((N, 1), dtype=float) # Input is a column-vector.
while i < 2000 and error > 1.e-10:
    # The sequence we will predict is the one generated by a cosine. The next
    # value of the function is the desired output of the neuron. The neuron will
    # use past values to predict the unknown value. To spice things up, we add
    # some Gaussian noise (which can actually help convergence).
d = cos(2.*pi/128. * i) + random.gauss(0., 0.01)
# Here, we activate the network to calculate the prediction.
y = nn(x)[0, 0] # Notice that we need to access the output
    error = abs(d - y)          # as a vector, since that's how the NN works.
nn.learn(x, d)
# We store the results to plot later.
xlog.append(d)
ylog.append(y)
elog.append(error)
# Here, we apply a delay in the sequence by shifting every value one
# position back. We are using N (=32) samples to make the prediction, but
# the code here makes no distinction and could be used with any number of
# coefficients in the prediction. The last value of the sequence is put in
# the [0] position of the vector.
x[1:] = x[:-1]
x[0] = d
i = i + 1
# If the system has the plot package matplotlib, this tutorial tries to plot
# and save the convergence of synaptic weights and error. The plot is saved in
# the file ``linear-prediction.png``.
try:
import pylab
pylab.subplot(211)
pylab.hold(True)
pylab.grid(True)
pylab.plot(array(xlog), 'b--')
pylab.plot(array(ylog), 'g')
pylab.plot(array(elog), 'r:')
pylab.legend([ "$x$", "$y$", "$error$" ])
pylab.subplot(212)
pylab.grid(True)
pylab.stem(arange(0, N), reshape(nn[0].weights, (N,)), "k-", "ko", "k-")
pylab.xlim([0, N-1])
pylab.savefig("linear-prediction.png")
except ImportError:
print "After %d iterations:" % (len(elog),)
print nn[0].weights | lgpl-2.1 |
cs207-project/TimeSeries | procs/_corr.py | 1 | 4794 | import numpy.fft as nfft
import numpy as np
import timeseries as ts
from scipy.stats import norm
# import pyfftw
import sys
#sys.path.append("/Users/yuhantang/CS207/TimeSeries/procs")
from .interface import *
def createfromlist(l):
d = new_darray(len(l))
for i in range(0,len(l)):
darray_set(d,i,l[i])
return d
def tsmaker(m, s, j):
meta={}
meta['order'] = int(np.random.choice([-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]))
meta['blarg'] = int(np.random.choice([1, 2]))
t = np.arange(0.0, 1.0, 0.01)
v = norm.pdf(t, m, s) + j*np.random.randn(100)
return meta, ts.TimeSeries(t, v)
def random_ts(a):
t = np.arange(0.0, 1.0, 0.01)
v = a*np.random.random(100)
return ts.TimeSeries(t, v)
def stand(x, m, s):
return (x-m)/s
def ccor(ts1, ts2):
"given two standardized time series, compute their cross-correlation using FFT"
	# Get the next power of 2, e.g. 110 -> 128
	next_2 = int(2**np.ceil(np.log2(len(ts1.values()))))
#
ts1_value = ts1.values()
ts2_value = ts2.values()
ts1_container,ts2_container = [],[]
ts1_zero_container = [0]*len(ts1.values())
ts2_zero_container = [0]*len(ts2.values())
ts1_c_array,ts2_c_array = [None]*(len(ts1.values())*2),[None]*(len(ts2.values())*2)
ts1_c_array[::2] = ts1_value
ts1_c_array[1::2] = ts1_zero_container
ts2_c_array[::2] = ts2_value
ts2_c_array[1::2] = ts2_zero_container
for i in range(len(ts1_c_array)+1,next_2*2):
ts1_c_array.append(np.double(0))
for i in range(len(ts2_c_array)+1,next_2*2):
ts2_c_array.append(np.double(0))
ts1_c_array.insert(0,0)
ts2_c_array.insert(0,0)
ts1_c_array = createfromlist(np.double(ts1_c_array))
ts2_c_array = createfromlist(np.double(ts2_c_array))
four1(ts1_c_array,next_2,1)
four1(ts2_c_array,next_2,1)
for i in range(len(ts2.values())*2+1):
ts1_container.append(darray_get(ts1_c_array,i))
for j in range(len(ts1.values())*2+1):
ts2_container.append(darray_get(ts2_c_array,j))
ts1_fft = np.asarray(ts1_container[1::2]) + 1j * np.asarray(ts1_container[2::2])
ts2_fft = np.asarray(ts2_container[1::2]) + 1j * np.asarray(ts2_container[2::2])
ts1_fft = ts1_fft[:len(ts1)+1]
ts2_fft = ts2_fft[:len(ts2)+1]
# ifft part
ts1_ts2_conj = ts1_fft * np.conj(ts2_fft)
ts1_ts2_ifft_container = [0]*len(ts1_ts2_conj)*2
ts1_ts2_ifft_container[::2] = ts1_ts2_conj.real
ts1_ts2_ifft_container[1::2] = ts1_ts2_conj.imag
for i in range(len(ts1_ts2_conj)+1, next_2 *2):
ts1_ts2_ifft_container.append(0)
ts1_ts2_ifft_container.insert(0,0)
ts1_ts2_ifft_container = createfromlist(ts1_ts2_ifft_container)
four1(ts1_ts2_ifft_container, next_2, -1)
ts1_ts2_ifft_container_python = []
for i in range(len(ts1_ts2_conj)*2+1):
ts1_ts2_ifft_container_python.append(darray_get(ts1_ts2_ifft_container,i))
ccor_value = np.asarray(ts1_ts2_ifft_container_python[1::2])
return 1/len(ts1) * ccor_value
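# A minimal pure-NumPy sketch of the same quantity (a cross-check only, and it
# assumes equal-length, already-standardized series): the circular
# cross-correlation is ifft(fft(x) * conj(fft(y))) / n.
def _ccor_numpy(x_vals, y_vals):
    n = len(x_vals)
    return np.real(nfft.ifft(nfft.fft(x_vals) * np.conj(nfft.fft(y_vals)))) / n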
def max_corr_at_phase(ts1, ts2):
ccorts = ccor(ts1, ts2)
idx = np.argmax(ccorts)
maxcorr = ccorts[idx]
return idx, maxcorr
#The equation for the kernelized cross correlation is given at
#http://www.cs.tufts.edu/~roni/PUB/ecml09-tskernels.pdf
#normalize the kernel there by np.sqrt(K(x,x)K(y,y)) so that the correlation
#of a time series with itself is 1.
def kernel_corr(ts1, ts2, mult=1):
"compute a kernelized correlation so that we can get a real distance"
#your code here.
cross_correlation = ccor(ts1, ts2) * mult
corr_ts1, corr_ts2 = ccor(ts1, ts1) * mult, ccor(ts2, ts2) * mult
return np.sum(np.exp(cross_correlation))/np.sqrt(np.sum(np.exp(corr_ts1))*np.sum(np.exp(corr_ts2)))
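# In symbols, kernel_corr above computes (with `mult` playing the role of the
# gamma parameter in the paper linked above):
#   k(x, y) = sum_s exp(gamma*ccor_s(x, y))
#             / sqrt(sum_s exp(gamma*ccor_s(x, x)) * sum_s exp(gamma*ccor_s(y, y)))
# so that k(x, x) = 1 for any standardized series x.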
#this is for a quick and dirty test of these functions
#you might need to add procs to pythonpath for this to work
if __name__ == "__main__":
print("HI")
_, t1 = tsmaker(0.5, 0.1, 0.01)
_, t2 = tsmaker(0.5, 0.1, 0.01)
print(t1.mean(), t1.std(), t2.mean(), t2.std())
import matplotlib.pyplot as plt
plt.plot(t1)
plt.plot(t2)
plt.show()
standts1 = stand(t1, t1.mean(), t1.std())
standts2 = stand(t2, t2.mean(), t2.std())
#print(type(standts1),'this is the type=================*********')
#assert 1 == 2
idx, mcorr = max_corr_at_phase(standts1, standts2)
print(idx, mcorr)
sumcorr = kernel_corr(standts1, standts2, mult=10)
print(sumcorr)
t3 = random_ts(2)
t4 = random_ts(3)
plt.plot(t3)
plt.plot(t4)
plt.show()
standts3 = stand(t3, t3.mean(), t3.std())
standts4 = stand(t4, t4.mean(), t4.std())
idx, mcorr = max_corr_at_phase(standts3, standts4)
print(idx, mcorr)
sumcorr = kernel_corr(standts3, standts4, mult=10)
print(sumcorr)
| mit |
benjaminoh1/tensorflowcookbook | Chapter 07/bag_of_words.py | 1 | 6082 | # Working with Bag of Words
#---------------------------------------
#
# In this example, we will download and preprocess the ham/spam
# text data. We will then use a one-hot-encoding to make a
# bag of words set of features to use in logistic regression.
#
# We will use these one-hot-vectors for logistic regression to
# predict if a text is spam or ham.
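# For example (illustrative only): with the vocabulary {'free': 0, 'win': 1, 'hi': 2},
# the text 'win free free' becomes the count vector [2, 1, 0].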
import tensorflow as tf
import matplotlib.pyplot as plt
import os
import numpy as np
import csv
import string
import requests
import io
from zipfile import ZipFile
from tensorflow.contrib import learn
from tensorflow.python.framework import ops
ops.reset_default_graph()
# Start a graph session
sess = tf.Session()
# Check if data was downloaded, otherwise download it and save for future use
save_file_name = os.path.join('temp','temp_spam_data.csv')
if os.path.isfile(save_file_name):
text_data = []
with open(save_file_name, 'r') as temp_output_file:
reader = csv.reader(temp_output_file)
for row in reader:
text_data.append(row)
else:
zip_url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/00228/smsspamcollection.zip'
r = requests.get(zip_url)
z = ZipFile(io.BytesIO(r.content))
file = z.read('SMSSpamCollection')
# Format Data
text_data = file.decode()
text_data = text_data.encode('ascii',errors='ignore')
text_data = text_data.decode().split('\n')
text_data = [x.split('\t') for x in text_data if len(x)>=1]
# And write to csv
with open(save_file_name, 'w') as temp_output_file:
writer = csv.writer(temp_output_file)
writer.writerows(text_data)
texts = [x[1] for x in text_data]
target = [x[0] for x in text_data]
# Relabel 'spam' as 1, 'ham' as 0
target = [1 if x=='spam' else 0 for x in target]
# Normalize text
# Lower case
texts = [x.lower() for x in texts]
# Remove punctuation
texts = [''.join(c for c in x if c not in string.punctuation) for x in texts]
# Remove numbers
texts = [''.join(c for c in x if c not in '0123456789') for x in texts]
# Trim extra whitespace
texts = [' '.join(x.split()) for x in texts]
# Plot histogram of text lengths
text_lengths = [len(x.split()) for x in texts]
text_lengths = [x for x in text_lengths if x < 50]
plt.hist(text_lengths, bins=25)
plt.title('Histogram of # of Words in Texts')
# Choose max text word length at 25
sentence_size = 25
min_word_freq = 3
# Setup vocabulary processor
vocab_processor = learn.preprocessing.VocabularyProcessor(sentence_size, min_frequency=min_word_freq)
# Have to fit_transform to get the number of unique words.
vocab_processor.fit_transform(texts)
embedding_size = len(vocab_processor.vocabulary_)
# Split up data set into train/test
train_indices = np.random.choice(len(texts), round(len(texts)*0.8), replace=False)
test_indices = np.array(list(set(range(len(texts))) - set(train_indices)))
texts_train = [x for ix, x in enumerate(texts) if ix in train_indices]
texts_test = [x for ix, x in enumerate(texts) if ix in test_indices]
target_train = [x for ix, x in enumerate(target) if ix in train_indices]
target_test = [x for ix, x in enumerate(target) if ix in test_indices]
# Setup Index Matrix for one-hot-encoding
identity_mat = tf.diag(tf.ones(shape=[embedding_size]))
# Create variables for logistic regression
A = tf.Variable(tf.random_normal(shape=[embedding_size,1]))
b = tf.Variable(tf.random_normal(shape=[1,1]))
# Initialize placeholders
x_data = tf.placeholder(shape=[sentence_size], dtype=tf.int32)
y_target = tf.placeholder(shape=[1, 1], dtype=tf.float32)
# Text-Vocab Embedding
x_embed = tf.nn.embedding_lookup(identity_mat, x_data)
x_col_sums = tf.reduce_sum(x_embed, 0)
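# Note: embedding_lookup on an identity matrix is just one-hot encoding of each
# word index, so summing over the sentence axis above yields the bag-of-words
# count vector for the sentence.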
# Declare model operations
x_col_sums_2D = tf.expand_dims(x_col_sums, 0)
model_output = tf.add(tf.matmul(x_col_sums_2D, A), b)
# Declare loss function (Cross Entropy loss)
loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(model_output, y_target))
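# For reference, sigmoid_cross_entropy_with_logits evaluates the numerically
# stable form max(x, 0) - x*z + log(1 + exp(-abs(x))) for logits x and labels z.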
# Prediction operation
prediction = tf.sigmoid(model_output)
# Declare optimizer
my_opt = tf.train.GradientDescentOptimizer(0.001)
train_step = my_opt.minimize(loss)
# Initialize Variables
init = tf.initialize_all_variables()
sess.run(init)
# Start Logistic Regression
print('Starting Training Over {} Sentences.'.format(len(texts_train)))
loss_vec = []
train_acc_all = []
train_acc_avg = []
for ix, t in enumerate(vocab_processor.fit_transform(texts_train)):
y_data = [[target_train[ix]]]
sess.run(train_step, feed_dict={x_data: t, y_target: y_data})
temp_loss = sess.run(loss, feed_dict={x_data: t, y_target: y_data})
loss_vec.append(temp_loss)
if (ix+1)%10==0:
print('Training Observation #' + str(ix+1) + ': Loss = ' + str(temp_loss))
# Keep trailing average of past 50 observations accuracy
# Get prediction of single observation
[[temp_pred]] = sess.run(prediction, feed_dict={x_data:t, y_target:y_data})
# Get True/False if prediction is accurate
train_acc_temp = target_train[ix]==np.round(temp_pred)
train_acc_all.append(train_acc_temp)
if len(train_acc_all) >= 50:
train_acc_avg.append(np.mean(train_acc_all[-50:]))
# Get test set accuracy
print('Getting Test Set Accuracy For {} Sentences.'.format(len(texts_test)))
test_acc_all = []
for ix, t in enumerate(vocab_processor.fit_transform(texts_test)):
y_data = [[target_test[ix]]]
if (ix+1)%50==0:
print('Test Observation #' + str(ix+1))
# Keep trailing average of past 50 observations accuracy
# Get prediction of single observation
[[temp_pred]] = sess.run(prediction, feed_dict={x_data:t, y_target:y_data})
# Get True/False if prediction is accurate
test_acc_temp = target_test[ix]==np.round(temp_pred)
test_acc_all.append(test_acc_temp)
print('\nOverall Test Accuracy: {}'.format(np.mean(test_acc_all)))
# Plot training accuracy over time
plt.plot(range(len(train_acc_avg)), train_acc_avg, 'k-', label='Train Accuracy')
plt.title('Avg Training Acc Over Past 50 Generations')
plt.xlabel('Generation')
plt.ylabel('Training Accuracy')
plt.show() | mit |
SP2RC-Coding-Club/Codes | 13_07_2017/3D_slab_modes.py | 1 | 35096 |
#import pdb # pause code for debugging at pdb.set_trace()
import numpy as np
import toolbox as tool
import slab_functions as sf
from pysac.plot.mayavi_seed_streamlines import SeedStreamline
import matplotlib.pyplot as plt
from mayavi import mlab
import gc
#import move_seed_points as msp
import mayavi_plotting_functions as mpf
import dispersion_diagram
import img2vid as i2v
from functools import partial
import os
# ================================
# Preamble: set mode options and view parameters
# ================================
# What mode do you want? OPTIONS:
mode_options = ['slow-kink-surf', 'slow-saus-surf', 'slow-saus-body-3',
'slow-kink-body-3', 'slow-saus-body-2', 'slow-kink-body-2',
'slow-saus-body-1', 'slow-kink-body-1', 'fast-saus-body-1',
'fast-kink-body-1', 'fast-saus-body-2', 'fast-kink-body-2',
'fast-saus-body-3', 'fast-kink-body-3', 'fast-kink-surf',
'fast-saus-surf', 'shear-alfven', 'shear-alfven-broadband']
# Which angle shall we view from? OPTIONS:
view_options = ['front', 'front-parallel', 'top', 'top-parallel', 'front-top',
'front-side', 'front-top-side']
# Uniform lighting?
#uniform_light = True
uniform_light = False
show_density = False
show_density_pert = False
show_mag = False
show_mag_scale = False
show_mag_fade = False
show_mag_vec = False
show_vel_front = False
show_vel_front_pert = False
show_vel_top = False
show_vel_top_pert = False
show_disp_top = False
show_disp_front = False
show_axes = False
show_axis_labels = False
show_mini_axis = False
show_boundary = False
# Uncomment the parametrer you would like to see
# No density perturbations or vel/disp pert for alfven modes.
#show_density = True
#show_density_pert = True
show_mag = True
#show_mag_scale = True #must also have show_mag = True
#show_mag_fade = True
#show_mag_vec = True
#show_vel_front = True
#show_vel_front_pert = True
#show_vel_top = True
#show_vel_top_pert = True
#show_disp_top = True
#show_disp_front = True
show_axes = True
#show_axis_labels = True
show_mini_axis = True
show_boundary = True
# Visualisation modules in string form for file-names
vis_modules = [show_density, show_density_pert, show_mag, show_mag_scale,
show_mag_fade, show_mag_vec, show_vel_front, show_vel_front_pert,
show_vel_top, show_vel_top_pert, show_disp_top, show_disp_front]
vis_modules_strings = ['show_density', 'show_density_pert', 'show_mag', 'show_mag_scale',
'show_mag_fade', 'show_mag_vec', 'show_vel_front', 'show_vel_front_pert',
'show_vel_top', 'show_vel_top_pert', 'show_disp_top', 'show_disp_front']
vis_mod_string = ''
for i, j in enumerate(vis_modules):
if vis_modules[i]:
vis_mod_string = vis_mod_string + vis_modules_strings[i][5:] + '_'
# Set to True if you would like the dispersion diagram with chosen mode highlighted.
show_dispersion = False
#show_dispersion = True
# Wanna see the animation? Of course you do
#show_animation = False
show_animation = True
# Basic plot to see which eigensolutions have been found.
show_quick_plot = False
#show_quick_plot = True
# Video resolution
#res = (1920,1080) # There is a problem with this resolution- height must be odd number - Mayavi bug apparently
res = tuple(101 * np.array((16,9)))
#res = tuple(51 * np.array((16,9)))
#res = tuple(21 * np.array((16,9)))
number_of_frames = 1
# Frames per second of output video
fps = 20
#save_images = False
save_images = True
make_video = False
#make_video = True
# Where should I save the animation images/videos?
os.path.abspath(os.curdir)
os.chdir('..')
save_directory = os.path.join(os.path.abspath(os.curdir), '3D_vis_animations')
# Where should I save the dispersion diagrams?
save_dispersion_diagram_directory = os.path.join(os.path.abspath(os.curdir), '3D_vis_dispersion_diagrams')
# ================================
# Visualisation set-up
# ================================
# Variable definitions (for reference):
# x = k*x
# y = k*y
# z = k*z
# W = omega/k
# K = k*x_0
# t = omega*t
# Loop through selected modes
for mode_ind in [0]:#range(8,14): # for all others. REMEMBER SBB parameters
#for mode_ind in [14,15]: #for fast body surf. REMEMBER SBS parameters
#for mode_ind in [16, 17]:
#for mode_ind in [13]: #for an individual mode
#for mode_ind in range(2,14):
if mode_ind not in range(len(mode_options)):
raise NameError('Mode not in mode_options')
# (note that fast surface modes, i.e. 14 and 15, can only be
# found with SBS parameters in slab_functions...)
mode = mode_options[mode_ind]
# Specify oscillation parameters
if 'slow' in mode and 'surf' in mode or 'alfven' in mode:
K = 2.
elif 'slow' in mode and 'body' in mode:
K = 8.
elif 'fast' in mode and 'body-1' in mode:
K = 8.
elif 'fast' in mode and 'body-2' in mode:
K = 15.
elif 'fast' in mode and 'body-3' in mode:
K = 22.
elif 'fast' in mode and 'surf' in mode:
K = 8.
else:
raise NameError('Mode not found')
# Specify density ratio R1 := rho_1 / rho_0
    # R1 = 1.5 # Higher density on left than right
# R1 = 1.8
# R1 = 1.9 # Disp_diagram will only work for R1=1.5, 1.8, 2.0
R1 = 2. # Symmetric slab
# Reduce number of variables in dispersion relation
disp_rel_partial = partial(sf.disp_rel_asym, R1=R1)
# find eigenfrequencies W (= omega/k) within the range Wrange for the given parameters.
Wrange1 = np.linspace(0., sf.cT, 11)
Wrange2 = np.linspace(sf.cT, sf.c0, 401)
Wrange3 = np.linspace(sf.c0, sf.c2, 11)
Woptions_slow_surf = np.real(tool.point_find(disp_rel_partial, np.array(K), Wrange1, args=None).transpose())
Woptions_slow_body = np.real(tool.point_find(disp_rel_partial, np.array(K), Wrange2, args=None).transpose())
Woptions_fast = np.real(tool.point_find(disp_rel_partial, np.array(K), Wrange3, args=None).transpose())
# Remove W values that are very close to characteristic speeds - these are spurious solutions
tol = 1e-2
indices_to_rm = []
for i, w in enumerate(Woptions_slow_surf):
spurious_roots_diff = abs(np.array([w, w - sf.c0, w - sf.c1(R1), w - sf.c2, w - sf.vA]))
if min(spurious_roots_diff) < tol or w < 0 or w > sf.cT:
indices_to_rm.append(i)
Woptions_slow_surf = np.delete(Woptions_slow_surf, indices_to_rm)
Woptions_slow_surf.sort()
indices_to_rm = []
for i, w in enumerate(Woptions_slow_body):
spurious_roots_diff = abs(np.array([w, w - sf.c0, w - sf.c1(R1), w - sf.c2, w - sf.vA]))
if min(spurious_roots_diff) < tol or w < sf.cT or w > sf.c0:
indices_to_rm.append(i)
Woptions_slow_body = np.delete(Woptions_slow_body, indices_to_rm)
Woptions_slow_body.sort()
indices_to_rm = []
for i, w in enumerate(Woptions_fast):
spurious_roots_diff = abs(np.array([w, w - sf.c0, w - sf.c1(R1), w - sf.c2, w - sf.vA]))
        if min(spurious_roots_diff) < tol or w < sf.c0 or w > min(sf.c1(R1), sf.c2):
indices_to_rm.append(i)
Woptions_fast = np.delete(Woptions_fast, indices_to_rm)
Woptions_fast.sort()
# remove any higher order slow body modes - we only want to do the first 3 saus/kink
if len(Woptions_slow_body) > 6:
Woptions_slow_body = np.delete(Woptions_slow_body, range(len(Woptions_slow_body) - 6))
Woptions = np.concatenate((Woptions_slow_surf, Woptions_slow_body, Woptions_fast))
# set W to be the eigenfrequency for the requested mode
if 'fast-saus-body' in mode or 'fast-kink-surf' in mode:
W = Woptions_fast[-2]
elif 'fast-kink-body' in mode or 'fast-saus-surf' in mode:
W = Woptions_fast[-1]
elif 'slow' in mode and 'surf' in mode:
W = Woptions_slow_surf[mode_ind]
elif 'slow' in mode and 'body' in mode:
W = Woptions_slow_body[mode_ind-2]
if 'alfven' in mode:
W = sf.vA
else:
W = np.real(W)
# Quick plot to see if we are hitting correct mode
if show_quick_plot:
plt.plot([K] * len(Woptions), Woptions, '.')
plt.plot(K+0.5, W, 'go')
plt.xlim([0,23])
plt.show()
# ================================
# Dispersion diagram
# ================================
if show_dispersion:
if 'alfven' in mode:
            raise NameError('Dispersion plot requested for an alfven mode. Cannot do that.')
dispersion_diagram.dispersion_diagram(mode_options, mode,
disp_rel_partial, K, W, R1)
# plt.tight_layout() # seems to make it chop the sides off with this
plt.savefig(os.path.join(save_dispersion_diagram_directory, 'R1_' + str(R1) + '_' + mode + '.png') )
plt.close()
# ================================
# Animation
# ================================
if show_animation:
print('Starting ' + mode)
# set grid parameters
xmin = -2.*K
xmax = 2.*K
ymin = 0.
ymax = 4.
zmin = 0.
zmax = 2*np.pi
# You can change ny but be careful changing nx, nz.
nx = 300#100 #100 #300 gives us reduced bouncing of field lines for the same video size, but there is significant computational cost.
ny = 300#100 #100 #100#20 #100
nz = 300#100 #100
nt = number_of_frames
if nz % nt != 0:
print("nt doesnt divide nz so there may be a problem with chopping in z direction for each time step")
t_start = 0.
t_end = zmax
t = t_start
xvals = np.linspace(xmin, xmax, nx)
yvals = np.linspace(ymin, ymax, ny)
zvals = np.linspace(zmin, zmax, nz, endpoint=False) # A fudge to give the height as exactly one wavelength
x_spacing = max(nx, ny, nz) / nx
y_spacing = max(nx, ny, nz) / ny
z_spacing = max(nx, ny, nz) / nz
# For masking points for plotting vector fields- have to do it manually due to Mayavi bug
mod = int(4 * nx / 100)
mod_y = int(np.ceil(mod / y_spacing))
# Get the data xi=displacement, v=velocity, b=mag field
if show_disp_top or show_disp_front:
xixvals = np.real(np.repeat(sf.xix(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
xizvals = np.real(np.repeat(sf.xiz(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
xiyvals = np.real(np.repeat(sf.xiy(mode, xvals, zvals, t, W, K)[:, :, np.newaxis], ny, axis=2))
if show_vel_front or show_vel_top:
vxvals = np.real(np.repeat(sf.vx(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
vzvals = np.real(np.repeat(sf.vz(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
vyvals = np.real(np.repeat(sf.vy(mode, xvals, zvals, t, K)[:, :, np.newaxis], ny, axis=2))
if show_vel_front_pert or show_vel_top_pert:
vxvals = np.real(np.repeat(sf.vx_pert(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
vzvals = np.real(np.repeat(sf.vz_pert(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
vyvals = np.zeros_like(vxvals)
# Axis is defined on the mag field so we have to set up this data
bxvals = np.real(np.repeat(sf.bx(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
byvals = np.real(np.repeat(sf.by(mode, xvals, zvals, t, K)[:, :, np.newaxis], ny, axis=2))
bz_eq3d = np.repeat(sf.bz_eq(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2)
bzvals = np.real(np.repeat(-sf.bz(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2) +
bz_eq3d)
# displacement at the right and left boundaries
if show_boundary:
xix_boundary_r_vals = np.real(np.repeat(K + sf.xix_boundary(mode, zvals, t, W, K, R1, boundary='r')[:, np.newaxis], ny, axis=1))
xix_boundary_l_vals = np.real(np.repeat(-K + sf.xix_boundary(mode, zvals, t, W, K, R1, boundary='l')[:, np.newaxis], ny, axis=1))
if show_density:
rho_vals = np.real(np.repeat(sf.rho(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
if show_density_pert:
rho_vals = np.real(np.repeat(sf.rho_pert(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
bxvals_t = bxvals
byvals_t = byvals
bzvals_t = bzvals
if show_disp_top or show_disp_front:
xixvals_t = xixvals
xiyvals_t = xiyvals
xizvals_t = xizvals
if show_vel_top or show_vel_top_pert or show_vel_front or show_vel_front_pert:
vxvals_t = vxvals
vyvals_t = vyvals
vzvals_t = vzvals
if show_boundary:
xix_boundary_r_vals_t = xix_boundary_r_vals
xix_boundary_l_vals_t = xix_boundary_l_vals
if show_density or show_density_pert:
rho_vals_t = rho_vals
# ================================
# Starting figure and visualisation modules
# ================================
zgrid_zy, ygrid_zy = np.mgrid[0:nz:(nz)*1j,
0:ny:(ny)*1j]
fig = mlab.figure(size=res) # (1920, 1080) for 1080p , tuple(101 * np.array((16,9))) #16:9 aspect ratio for video upload
# Spacing of grid so that we can display a visualisation cube without having the same number of grid points in each dimension
spacing = np.array([x_spacing, z_spacing, y_spacing])
if show_density or show_density_pert:
# Scalar field density
rho = mlab.pipeline.scalar_field(rho_vals_t, name="density", figure=fig)
rho.spacing = spacing
mpf.volume_red_blue(rho, rho_vals_t)
#Masking points
if show_mag_vec:
bxvals_mask_front_t, byvals_mask_front_t, bzvals_mask_front_t = mpf.mask_points(bxvals_t, byvals_t, bzvals_t,
'front', mod, mod_y)
if show_disp_top:
xixvals_mask_top_t, xiyvals_mask_top_t, xizvals_mask_top_t = mpf.mask_points(xixvals_t, xiyvals_t, xizvals_t,
'top', mod, mod_y)
if show_disp_front:
xixvals_mask_front_t, xiyvals_mask_front_t, xizvals_mask_front_t = mpf.mask_points(xixvals_t, xiyvals_t, xizvals_t,
'front', mod, mod_y)
if show_vel_top or show_vel_top_pert:
vxvals_mask_top_t, vyvals_mask_top_t, vzvals_mask_top_t = mpf.mask_points(vxvals_t, vyvals_t, vzvals_t,
'top', mod, mod_y)
if show_vel_front or show_vel_front_pert:
vxvals_mask_front_t, vyvals_mask_front_t, vzvals_mask_front_t = mpf.mask_points(vxvals_t, vyvals_t, vzvals_t,
'front', mod, mod_y)
xgrid, zgrid, ygrid = np.mgrid[0:nx:(nx)*1j,
0:nz:(nz)*1j,
0:ny:(ny)*1j]
field = mlab.pipeline.vector_field(bxvals_t, bzvals_t, byvals_t, name="B field",
figure=fig, scalars=zgrid)
field.spacing = spacing
if show_axes:
mpf.axes_no_label(field)
if show_mini_axis:
mpf.mini_axes()
if uniform_light:
#uniform lighting, but if we turn shading of volumes off, we are ok without
mpf.uniform_lighting(fig)
#Black background
mpf.background_colour(fig, (0., 0., 0.))
scalefactor = 8. * nx / 100. # scale factor for direction field vectors
# Set up visualisation modules
if show_mag_vec:
bdirfield_front = mlab.pipeline.vector_field(bxvals_mask_front_t, bzvals_mask_front_t,
byvals_mask_front_t, name="B field front",
figure=fig)
bdirfield_front.spacing = spacing
mpf.vector_cut_plane(bdirfield_front, 'front', nx, ny, nz,
y_spacing, scale_factor=scalefactor)
if show_vel_top or show_vel_top_pert:
vdirfield_top = mlab.pipeline.vector_field(vxvals_mask_top_t, np.zeros_like(vxvals_mask_top_t),
vyvals_mask_top_t, name="V field top",
figure=fig)
vdirfield_top.spacing = spacing
mpf.vector_cut_plane(vdirfield_top, 'top', nx, ny, nz,
y_spacing, scale_factor=scalefactor)
if show_vel_front or show_vel_front_pert:
vdirfield_front = mlab.pipeline.vector_field(vxvals_mask_front_t, vzvals_mask_front_t,
vyvals_mask_front_t, name="V field front",
figure=fig)
vdirfield_front.spacing = spacing
mpf.vector_cut_plane(vdirfield_front,'front', nx, ny, nz,
y_spacing, scale_factor=scalefactor)
if show_disp_top:
xidirfield_top = mlab.pipeline.vector_field(xixvals_mask_top_t, np.zeros_like(xixvals_mask_top_t),
xiyvals_mask_top_t, name="Xi field top",
figure=fig)
xidirfield_top.spacing = spacing
mpf.vector_cut_plane(xidirfield_top, 'top', nx, ny, nz,
y_spacing, scale_factor=scalefactor)
if show_disp_front:
xidirfield_front = mlab.pipeline.vector_field(xixvals_mask_front_t, xizvals_mask_front_t,
xiyvals_mask_front_t, name="Xi field front",
figure=fig)
xidirfield_front.spacing = spacing
mpf.vector_cut_plane(xidirfield_front, 'front', nx, ny, nz,
y_spacing, scale_factor=scalefactor)
# Loop through time
for t_ind in range(nt):
if t_ind == 0:
bxvals_t = bxvals
byvals_t = byvals
bzvals_t = bzvals
if show_disp_top or show_disp_front:
xixvals_t = xixvals
xiyvals_t = xiyvals
xizvals_t = xizvals
if show_vel_top or show_vel_top_pert or show_vel_front or show_vel_front_pert:
vxvals_t = vxvals
vyvals_t = vyvals
vzvals_t = vzvals
if show_boundary:
xix_boundary_r_vals_t = xix_boundary_r_vals
xix_boundary_l_vals_t = xix_boundary_l_vals
if show_density or show_density_pert:
rho_vals_t = rho_vals
else:
bxvals = np.real(np.repeat(sf.bx(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2))
byvals = np.real(np.repeat(sf.by(mode, xvals, zvals, t, K)[:, :, np.newaxis], ny, axis=2))
bz_eq3d = np.repeat(sf.bz_eq(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2)
bzvals = np.real(np.repeat(-sf.bz(mode, xvals, zvals, t, W, K, R1)[:, :, np.newaxis], ny, axis=2) +
bz_eq3d)
bxvals_t = bxvals
byvals_t = byvals
bzvals_t = bzvals
# Update mag field data
field.mlab_source.set(u=bxvals_t, v=bzvals_t, w=byvals_t)
# Update mag field visualisation module
if show_mag_vec:
bxvals_mask_front_t, byvals_mask_front_t, bzvals_mask_front_t = mpf.mask_points(bxvals_t, byvals_t, bzvals_t,
'front', mod, mod_y)
bdirfield_front.mlab_source.set(u=bxvals_mask_front_t, v=bzvals_mask_front_t, w=byvals_mask_front_t)
# Update displacement field data
if show_disp_top or show_disp_front:
xixvals_split = np.split(xixvals, [nz - (nz / nt) * t_ind], axis=1)
xiyvals_split = np.split(xiyvals, [nz - (nz / nt) * t_ind], axis=1)
xizvals_split = np.split(xizvals, [nz - (nz / nt) * t_ind], axis=1)
xixvals_t = np.concatenate((xixvals_split[1], xixvals_split[0]), axis=1)
xiyvals_t = np.concatenate((xiyvals_split[1], xiyvals_split[0]), axis=1)
xizvals_t = np.concatenate((xizvals_split[1], xizvals_split[0]), axis=1)
# Update displacement field visualisation module
if show_disp_top:
xixvals_mask_top_t, xiyvals_mask_top_t, xizvals_mask_top_t = mpf.mask_points(xixvals_t, xiyvals_t, xizvals_t,
'top', mod, mod_y)
xidirfield_top.mlab_source.set(u=xixvals_mask_top_t, v=np.zeros_like(xixvals_mask_top_t), w=xiyvals_mask_top_t)
if show_disp_front:
xixvals_mask_front_t, xiyvals_mask_front_t, xizvals_mask_front_t = mpf.mask_points(xixvals_t, xiyvals_t, xizvals_t,
'front', mod, mod_y)
xidirfield_front.mlab_source.set(u=xixvals_mask_front_t, v=xizvals_mask_front_t, w=xiyvals_mask_front_t)
# Update velocity field data
if show_vel_top or show_vel_top_pert or show_vel_front or show_vel_front_pert:
vxvals_split = np.split(vxvals, [nz - (nz / nt) * t_ind], axis=1)
vyvals_split = np.split(vyvals, [nz - (nz / nt) * t_ind], axis=1)
vzvals_split = np.split(vzvals, [nz - (nz / nt) * t_ind], axis=1)
vxvals_t = np.concatenate((vxvals_split[1], vxvals_split[0]), axis=1)
vyvals_t = np.concatenate((vyvals_split[1], vyvals_split[0]), axis=1)
vzvals_t = np.concatenate((vzvals_split[1], vzvals_split[0]), axis=1)
# Update velocity field visualisation module
if show_vel_top or show_vel_top_pert:
vxvals_mask_top_t, vyvals_mask_top_t, vzvals_mask_top_t = mpf.mask_points(vxvals_t, vyvals_t, vzvals_t,
'top', mod, mod_y)
vdirfield_top.mlab_source.set(u=vxvals_mask_top_t, v=np.zeros_like(vxvals_mask_top_t), w=vyvals_mask_top_t)
if show_vel_front or show_vel_front_pert:
vxvals_mask_front_t, vyvals_mask_front_t, vzvals_mask_front_t = mpf.mask_points(vxvals_t, vyvals_t, vzvals_t,
'front', mod, mod_y)
vdirfield_front.mlab_source.set(u=vxvals_mask_front_t, v=vzvals_mask_front_t, w=vyvals_mask_front_t)
# Update boundary displacement data
if show_boundary:
xix_boundary_r_vals_split = np.split(xix_boundary_r_vals, [nz - (nz / nt) * t_ind], axis=0)
xix_boundary_l_vals_split = np.split(xix_boundary_l_vals, [nz - (nz / nt) * t_ind], axis=0)
xix_boundary_r_vals_t = np.concatenate((xix_boundary_r_vals_split[1], xix_boundary_r_vals_split[0]), axis=0)
xix_boundary_l_vals_t = np.concatenate((xix_boundary_l_vals_split[1], xix_boundary_l_vals_split[0]), axis=0)
# Update density data
if show_density or show_density_pert:
rho_vals_split = np.split(rho_vals, [nz - (nz / nt) * t_ind], axis=1)
rho_vals_t = np.concatenate((rho_vals_split[1], rho_vals_split[0]), axis=1)
rho.mlab_source.set(scalars=rho_vals_t)
# Boundary data - Letting mayavi know where to plot the boundary
if show_boundary:
ext_min_r = ((nx) * (xix_boundary_r_vals_t.min() - xmin) / (xmax - xmin)) * x_spacing
ext_max_r = ((nx) * (xix_boundary_r_vals_t.max() - xmin) / (xmax - xmin)) * x_spacing
ext_min_l = ((nx) * (xix_boundary_l_vals_t.min() - xmin) / (xmax - xmin)) * x_spacing
ext_max_l = ((nx) * (xix_boundary_l_vals_t.max() - xmin) / (xmax - xmin)) * x_spacing
#Make field lines
if show_mag:
# move seed points up with phase speed. - Bit of a fudge.
# Create an array of points for which we want mag field seeds
nx_seed = 9
ny_seed = 13
start_x = 30. * nx / 100.
end_x = nx+1 - start_x
start_y = 1.
            if ny == 20: # so that the lines don't go right up to the edge of the box
end_y = ny - 1.
elif ny == 100:
end_y = ny - 2.
elif ny == 300:
end_y = ny - 6.
else:
end_y = ny - 1
seeds=[]
dx_res = (end_x - start_x) / (nx_seed-1)
dy_res = (end_y - start_y) / (ny_seed-1)
for j in range(ny_seed):
for i in range(nx_seed):
x = start_x + (i * dx_res) * x_spacing
y = start_y + (j * dy_res) * y_spacing
z = 1. + (t_start + t_ind*(t_end - t_start)/nt)/zmax * nz
seeds.append((x,z,y))
if 'alfven' in mode:
for i in range(nx_seed):
del seeds[0]
del seeds[-1]
# Remove previous field lines - field lines cannot be updated, just the data that they are built from
if t_ind != 0:
field_lines.remove() # field_lines is defined in first go through loop
field_lines = SeedStreamline(seed_points=seeds)
# Field line visualisation tinkering
field_lines.stream_tracer.integration_direction='both'
field_lines.streamline_type = 'tube'
field_lines.stream_tracer.maximum_propagation = nz * 2
field_lines.tube_filter.number_of_sides = 20
field_lines.tube_filter.radius = 0.7 * max(nx, ny, nz) / 100.
field_lines.tube_filter.capping = True
field_lines.actor.property.opacity = 1.0
field.add_child(field_lines)
module_manager = field_lines.parent
# Colormap of magnetic field strength plotted on the field lines
if show_mag_scale:
module_manager.scalar_lut_manager.lut_mode = 'coolwarm'
module_manager.scalar_lut_manager.data_range=[7,18]
else:
mag_lut = module_manager.scalar_lut_manager.lut.table.to_array()
mag_lut[:,0] = [220]*256
mag_lut[:,1] = [20]*256
mag_lut[:,2] = [20]*256
module_manager.scalar_lut_manager.lut.table = mag_lut
if show_mag_fade:
mpf.colormap_fade(module_manager, fade_value=20)
# Which views do you want to show? Options are defined at the start
views_selected = [0]#[0,1,4,5,6] #range(7) #[2,3]
for view_ind, view_selected in enumerate(views_selected):
view = view_options[view_selected]
# Display boundary - cannot be updated each time
if show_boundary:
# Boundaries should look different depending on view
if view == 'front-parallel':
#remove previous boundaries
if t != 0 or view_ind != 0:
boundary_r.remove()
boundary_l.remove()
# Make a fading colormap by changing opacity at ends
lut = np.reshape(np.array([150, 150, 150, 255]*256), (256,4))
fade_value = 125
lut[:fade_value,-1] = np.linspace(0, 255, fade_value)
lut[-fade_value:,-1] = np.linspace(255, 0, fade_value)
# Set up boundary visualisation
boundary_r = mlab.mesh(xix_boundary_r_vals_t, zgrid_zy, ygrid_zy,
extent=[ext_min_r, ext_max_r, 1, nz, 0, (ny-1) * y_spacing],
opacity=1., representation='wireframe',
line_width=12., scalars=zgrid_zy)
boundary_l = mlab.mesh(xix_boundary_l_vals_t, zgrid_zy, ygrid_zy,
extent=[ext_min_l, ext_max_l, 1, nz, 0, (ny-1) * y_spacing],
opacity=1., representation='wireframe',
line_width=12., scalars=zgrid_zy)
# Boundary color and other options
boundary_r.module_manager.scalar_lut_manager.lut.table = lut
boundary_l.module_manager.scalar_lut_manager.lut.table = lut
boundary_r.actor.property.lighting = False
boundary_r.actor.property.shading = False
boundary_l.actor.property.lighting = False
boundary_l.actor.property.shading = False
else:
#remove previous boundaries
if t != 0 or view_ind != 0:
boundary_r.remove()
boundary_l.remove()
# Make a fading colormap by changing opacity at ends
lut = np.reshape(np.array([150, 150, 150, 255]*256), (256,4))
fade_value = 20
lut[:fade_value,-1] = np.linspace(0, 255, fade_value)
lut[-fade_value:,-1] = np.linspace(255, 0, fade_value)
# Set up boundary visualisation
boundary_r = mlab.mesh(xix_boundary_r_vals_t, zgrid_zy, ygrid_zy,
extent=[ext_min_r, ext_max_r, 1, nz, 0, (ny-1) * y_spacing],
opacity=0.7, scalars=zgrid_zy)
boundary_l = mlab.mesh(xix_boundary_l_vals_t, zgrid_zy, ygrid_zy,
extent=[ext_min_l, ext_max_l, 1, nz, 0, (ny-1) * y_spacing],
opacity=0.7, scalars=zgrid_zy)
# Boundary color and other options
boundary_r.module_manager.scalar_lut_manager.lut.table = lut
boundary_l.module_manager.scalar_lut_manager.lut.table = lut
boundary_r.actor.property.lighting = False
boundary_r.actor.property.shading = False
boundary_l.actor.property.lighting = False
boundary_l.actor.property.shading = False
# Set viewing angle - For some unknown reason we must redefine the camera position each time.
# This is something to do with the boundaries being replaced each time.
mpf.view_position(fig, view, nx, ny, nz)
if save_images:
prefix = 'R1_'+str(R1) + '_' + mode + '_' + vis_mod_string + view + '_'# + '_norho_'
mlab.savefig(os.path.join(save_directory, prefix + str(t_ind+1) + '.png'))
if t_ind == nt - 1:
if make_video:
i2v.image2video(filepath=save_directory, prefix=prefix,
output_name=prefix+'video', out_extension='mp4',
fps=fps, n_loops=4, delete_images=True,
delete_old_videos=True, res=res[1])
# Log: to keep us updated with progress
if t_ind % 5 == 4:
print('Finished frame number ' + str(t_ind + 1) + ' out of ' + str(number_of_frames))
#Release some memory after each time step
gc.collect()
#step t forward
t = t + (t_end - t_start) / nt
        # Close the Mayavi window each time if we want to make a video
if make_video:
mlab.close(fig)
print('Finished ' + mode) | mit |
iShoto/testpy | codes/20200104_metric_learning_mnist/src/train_mnist_original_center.py | 1 | 5545 | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import torch.optim.lr_scheduler as lr_scheduler
from torch.autograd.function import Function
import torchvision
import os
import matplotlib.pyplot as plt
import argparse
from tqdm import trange
import numpy as np
from sklearn.metrics import classification_report
from losses import CenterLoss
from mnist_net import Net
import mnist_loader
# cf. https://cpp-learning.com/center-loss/
def main():
args = parse_args()
# Dataset
train_loader, test_loader, classes = mnist_loader.load_dataset(args.dataset_dir, img_show=True)
# Device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Model
model = Net().to(device)
print(model)
# Loss
nllloss = nn.NLLLoss().to(device) # CrossEntropyLoss = log_softmax + NLLLoss
loss_weight = 1
centerloss = CenterLoss(10, 2).to(device)
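	# Assuming the standard center-loss formulation (Wen et al., 2016),
	# CenterLoss(10, 2) keeps one learnable 2-D center per class and returns
	# roughly 0.5 * sum_i ||feat_i - center_{y_i}||^2, which is added to the
	# NLL term with weight loss_weight below.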
# Optimizer
dnn_optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=0.0005)
sheduler = lr_scheduler.StepLR(dnn_optimizer, 20, gamma=0.8)
center_optimizer = optim.SGD(centerloss.parameters(), lr =0.5)
print('Start training...')
for epoch in range(100):
# Update parameters.
epoch += 1
sheduler.step()
# Train and test a model.
train_acc, train_loss, feat, labels = train(device, train_loader, model, nllloss, loss_weight, centerloss, dnn_optimizer, center_optimizer)
test_acc, test_loss = test(device, test_loader, model, nllloss, loss_weight, centerloss)
stdout_temp = 'Epoch: {:>3}, train acc: {:<8}, train loss: {:<8}, test acc: {:<8}, test loss: {:<8}'
print(stdout_temp.format(epoch, train_acc, train_loss, test_acc, test_loss))
# Visualize features of each class.
vis_img_path = args.vis_img_path_temp.format(str(epoch).zfill(3))
visualize(feat.data.cpu().numpy(), labels.data.cpu().numpy(), epoch, vis_img_path)
# Save a trained model.
model_path = args.model_path_temp.format(str(epoch).zfill(3))
torch.save(model.state_dict(), model_path)
def train(device, train_loader, model, nllloss, loss_weight, centerloss, dnn_optimizer, center_optimizer):
running_loss = 0.0
pred_list = []
label_list = []
ip1_loader = []
idx_loader = []
model.train()
for i,(imgs, labels) in enumerate(train_loader):
# Set batch data.
imgs, labels = imgs.to(device), labels.to(device)
# Predict labels.
ip1, pred = model(imgs)
# Calculate loss.
loss = nllloss(pred, labels) + loss_weight * centerloss(labels, ip1)
# Initilize gradient.
dnn_optimizer.zero_grad()
center_optimizer.zero_grad()
# Calculate gradient.
loss.backward()
# Update parameters.
dnn_optimizer.step()
center_optimizer.step()
# For calculation.
running_loss += loss.item()
pred_list += [int(p.argmax()) for p in pred]
label_list += [int(l) for l in labels]
# For visualization.
ip1_loader.append(ip1)
idx_loader.append((labels))
# Calculate training accurary and loss.
	result = classification_report(label_list, pred_list, output_dict=True)
train_acc = round(result['weighted avg']['f1-score'], 6)
train_loss = round(running_loss / len(train_loader.dataset), 6)
	# Concatenate features and labels.
feat = torch.cat(ip1_loader, 0)
labels = torch.cat(idx_loader, 0)
return train_acc, train_loss, feat, labels
def test(device, test_loader, model, nllloss, loss_weight, centerloss):
model = model.eval()
	# Prediction
running_loss = 0.0
pred_list = []
label_list = []
for i,(imgs, labels) in enumerate(test_loader):
with torch.no_grad():
# Set batch data.
imgs, labels = imgs.to(device), labels.to(device)
# Predict labels.
ip1, pred = model(imgs)
# Calculate loss.
loss = nllloss(pred, labels) + loss_weight * centerloss(labels, ip1)
# Append predictions and labels.
running_loss += loss.item()
pred_list += [int(p.argmax()) for p in pred]
label_list += [int(l) for l in labels]
# Calculate accuracy.
	result = classification_report(label_list, pred_list, output_dict=True)
test_acc = round(result['weighted avg']['f1-score'], 6)
test_loss = round(running_loss / len(test_loader.dataset), 6)
return test_acc, test_loss
def visualize(feat, labels, epoch, vis_img_path):
colors = ['#ff0000', '#ffff00', '#00ff00', '#00ffff', '#0000ff',
'#ff00ff', '#990000', '#999900', '#009900', '#009999']
plt.figure()
for i in range(10):
plt.plot(feat[labels==i, 0], feat[labels==i, 1], '.', color=colors[i])
plt.legend(['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'], loc='best')
plt.xlim(left=-8, right=8)
plt.ylim(bottom=-8, top=8)
plt.text(-7.8, 7.3, "epoch=%d" % epoch)
plt.savefig(vis_img_path)
plt.clf()
def parse_args():
arg_parser = argparse.ArgumentParser(description="parser for focus one")
arg_parser.add_argument("--dataset_dir", type=str, default='../inputs/')
arg_parser.add_argument("--model_dir", type=str, default='../outputs/models/checkpoints/')
arg_parser.add_argument("--model_path_temp", type=str, default='../outputs/models/checkpoints/mnist_original_softmax_center_epoch_{}.pth')
arg_parser.add_argument("--vis_img_dir", type=str, default='../outputs/visual/')
arg_parser.add_argument("--vis_img_path_temp", type=str, default='../outputs/visual/epoch_{}.png')
args = arg_parser.parse_args()
os.makedirs(args.dataset_dir, exist_ok=True)
os.makedirs(args.model_dir, exist_ok=True)
os.makedirs(args.vis_img_dir, exist_ok=True)
return args
if __name__ == "__main__":
main()
| mit |
reinaH/osf.io | scripts/analytics/email_invites.py | 55 | 1332 | # -*- coding: utf-8 -*-
import os
import matplotlib.pyplot as plt
from framework.mongo import database
from website import settings
from utils import plot_dates, mkdirp
user_collection = database['user']
FIG_PATH = os.path.join(settings.ANALYTICS_PATH, 'figs', 'features')
mkdirp(FIG_PATH)
def analyze_email_invites():
invited = user_collection.find({'unclaimed_records': {'$ne': {}}})
dates_invited = [
user['date_registered']
for user in invited
]
if not dates_invited:
return
fig = plot_dates(dates_invited)
    plt.title('email invitations ({} total)'.format(len(dates_invited)))
plt.savefig(os.path.join(FIG_PATH, 'email-invites.png'))
plt.close()
def analyze_email_confirmations():
confirmed = user_collection.find({
'unclaimed_records': {'$ne': {}},
'is_claimed': True,
})
dates_confirmed = [
user['date_confirmed']
for user in confirmed
]
if not dates_confirmed:
return
fig = plot_dates(dates_confirmed)
    plt.title('confirmed email invitations ({} total)'.format(len(dates_confirmed)))
plt.savefig(os.path.join(FIG_PATH, 'email-invite-confirmations.png'))
plt.close()
def main():
analyze_email_invites()
analyze_email_confirmations()
if __name__ == '__main__':
main()
| apache-2.0 |
eramirem/astroML | book_figures/chapter9/fig_photoz_tree.py | 3 | 3637 | """
Photometric Redshifts by Decision Trees
---------------------------------------
Figure 9.14
Photometric redshift estimation using decision-tree regression. The data is
described in Section 1.5.5. The training set consists of u, g , r, i, z
magnitudes of 60,000 galaxies from the SDSS spectroscopic sample.
Cross-validation is performed on an additional 6000 galaxies. The left panel
shows training error and cross-validation error as a function of the maximum
depth of the tree. For a number of nodes N > 13, overfitting is evident.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from astroML.datasets import fetch_sdss_specgals
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Fetch data and prepare it for the computation
data = fetch_sdss_specgals()
# put magnitudes in a matrix
mag = np.vstack([data['modelMag_%s' % f] for f in 'ugriz']).T
z = data['z']
# train on ~60,000 points
mag_train = mag[::10]
z_train = z[::10]
# test on ~6,000 separate points
mag_test = mag[1::100]
z_test = z[1::100]
#------------------------------------------------------------
# Compute the cross-validation scores for several tree depths
depth = np.arange(1, 21)
rms_test = np.zeros(len(depth))
rms_train = np.zeros(len(depth))
i_best = 0
z_fit_best = None
for i, d in enumerate(depth):
clf = DecisionTreeRegressor(max_depth=d, random_state=0)
clf.fit(mag_train, z_train)
z_fit_train = clf.predict(mag_train)
z_fit = clf.predict(mag_test)
rms_train[i] = np.mean(np.sqrt((z_fit_train - z_train) ** 2))
rms_test[i] = np.mean(np.sqrt((z_fit - z_test) ** 2))
if rms_test[i] <= rms_test[i_best]:
i_best = i
z_fit_best = z_fit
best_depth = depth[i_best]
#------------------------------------------------------------
# Plot the results
fig = plt.figure(figsize=(5, 2.5))
fig.subplots_adjust(wspace=0.25,
left=0.1, right=0.95,
bottom=0.15, top=0.9)
# first panel: cross-validation
ax = fig.add_subplot(121)
ax.plot(depth, rms_test, '-k', label='cross-validation')
ax.plot(depth, rms_train, '--k', label='training set')
ax.set_xlabel('depth of tree')
ax.set_ylabel('rms error')
ax.yaxis.set_major_locator(plt.MultipleLocator(0.01))
ax.set_xlim(0, 21)
ax.set_ylim(0.009, 0.04)
ax.legend(loc=1)
# second panel: best-fit results
ax = fig.add_subplot(122)
ax.scatter(z_test, z_fit_best, s=1, lw=0, c='k')
ax.plot([-0.1, 0.4], [-0.1, 0.4], ':k')
ax.text(0.04, 0.96, "depth = %i\nrms = %.3f" % (best_depth, rms_test[i_best]),
ha='left', va='top', transform=ax.transAxes)
ax.set_xlabel(r'$z_{\rm true}$')
ax.set_ylabel(r'$z_{\rm fit}$')
ax.set_xlim(-0.02, 0.4001)
ax.set_ylim(-0.02, 0.4001)
ax.xaxis.set_major_locator(plt.MultipleLocator(0.1))
ax.yaxis.set_major_locator(plt.MultipleLocator(0.1))
plt.show()
| bsd-2-clause |
vitale232/ves | ves/VESinverse_vectorized.py | 1 | 12839 | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 28 16:32:48 2016
@author: jclark
this code uses the Ghosh method to determine the apparent resistivities
for a layered earth model. Either schlumberger or Wenner configurations
can be used
"""
import numpy as np
import random
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
plt.style.use('bmh')
import sys
# Schlumberger filter
fltr1 = [0., .00046256, -.0010907, .0017122, -.0020687,
.0043048, -.0021236, .015995, .017065, .098105, .21918, .64722,
1.1415, .47819, -3.515, 2.7743, -1.201, .4544, -.19427, .097364,
-.054099, .031729, -.019109, .011656, -.0071544, .0044042,
-.002715, .0016749, -.0010335, .00040124]
#Wenner Filter
fltr2 = [0., .000238935, .00011557, .00017034, .00024935,
.00036665, .00053753, .0007896, .0011584, .0017008, .0024959,
.003664, .0053773, .007893, .011583, .016998, .024934, .036558,
.053507, .078121, .11319, .16192, .22363, .28821, .30276, .15523,
-.32026, -.53557, .51787, -.196, .054394, -.015747, .0053941,
-.0021446, .000665125]
print(len(fltr1))
print(len(fltr2))
#I know there must be a better method to assign lists. And probably numpy
#arrays would be best. But my Python wasn't up to it. If the last letter
#is an 'l' that means it is a log10 of the value
# 65 is completely arbitrary
p = [0] * 20 # earth layer parameters?
r = [0] * 65 # apparent resistivty?
rl = [0] * 65 # np.log(r) ?
t = [0] * 50 #
b = [0] * 65 #
asav = [0] * 65 # voltage spacing in meters?
asavl = [0] * 65 # np.log(asav)
adatl = [0] * 65 # interpolated voltage spacing ( np.log(10) / 6 )?
rdatl = [0] * 65 # np.log()
# adat = [0] * 65 # voltage spacing input
# rdat = [0] * 65 # apparent res input
pkeep = [0] * 65 # earth parameters after applying equations?
rkeep = [0] * 65 # r after applying equations?
rkeepl = [0] * 65 # np.log()!
pltanswer = [0] * 65
pltanswerl = [0] * 65
pltanswerkeep = [0] * 65
pltanswerkeepl = [0] * 65
rl = [0] * 65
small = [0] * 65
xlarge = [0] * 65
x=[0] * 100
y = [0] * 100
y2 = [0] * 100
u = [0] * 5000
new_x = [0] * 1000
new_y = [0] * 1000
ndat = 13
# hard-coded data input - spacing and apparent resistivities measured
# in the field
adat = [0., 0.55, 0.95, 1.5, 2.5, 3., 4.5, 5.5, 9., 12., 20., 30., 70.]
rdat = [0., 125., 110., 95., 40., 24., 15., 10.5, 8., 6., 6.5, 11., 25.]
one30 = 1.e30 # What's the purpose of this and should it be user input?
rms = one30 # Just a starting value for rmserror?
errmin = 1.e10 # Should this be user input?
# INPUT
array_spacing = 'wenner' # either 'schlumberger' or 'wenner'
nLayers = 3 #number of layers
n = 2 * nLayers - 1 # What does n represent? number of parameters
spac = 0.2 # smallest electrode spacing - should this come from the input file?
m = 20 # number of points where resistivity is calculated
spac = np.log(spac)
delx = np.log(10.0) / 6. # I take it this is the sample interval on the log scale?
# this is where the range in parameters should be input from a GUI
# I'm hard coding this in for now
#enter thickness range for each layer and then resistivity range.
#for 3 layers small[1] and small[2] are low end of thickness range
# small[3], small[4] and small[5] are the low end of resistivities
# I think I have it coded up that these are getting grabbed from the rectangles currently.
# Is that the best way to go?
small[1] = 1.
small[2] = 10.
small[3] = 20.
small[4] = 2.
small[5] = 500.
xlarge[1] = 5
xlarge[2] = 75.
xlarge[3] = 200.
xlarge[4] = 100
xlarge[5] = 3000.
iter_ = 10000 #number of iterations for the Monte Carlo guesses. to be input on GUI
# Is 10000 the most reasonable default, or should I play with it?
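# --- illustrative sketch (not part of the original script) -------------------
# For nLayers = 3 the model vector p holds n = 2*nLayers - 1 = 5 unknowns:
# p[1] and p[2] are the thicknesses of the two finite layers, while p[3],
# p[4] and p[5] are the three resistivities (the bottom layer is a half-space,
# so it has no thickness).  Each Monte Carlo trial draws every parameter
# uniformly inside its [small[i], xlarge[i]] bracket, exactly as the main loop
# at the bottom of this file does.  The helper below is a hypothetical,
# never-called illustration of that draw.
def _draw_random_model():
    trial = [0.0] * (n + 1)
    for i in range(1, n + 1):
        trial[i] = (xlarge[i] - small[i]) * random.random() + small[i]
    return trial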
def readData(adat, rdat, ndat, return_indexed=False):
#normally this is where the data would be read from the csv file
# but now I'm just hard coding it in as global lists
for i in range(1, ndat):
adatl[i] = np.log10(adat[i])
rdatl[i] = np.log10(rdat[i])
if return_indexed:
return adatl[:ndat], rdatl[:ndat]
else:
return adatl, rdatl
def transf(y, i):
# these lines apparently find the computer precision ep
ep = 1.0
ep = ep / 2.0
fctr = ep + 1.
while fctr > 1.:
ep = ep / 2.0
fctr = ep + 1.
u = 1. / np.exp(y) # y = spac - 19. * delx - 0.13069
t[1] = p[n]
for j in range(2, nLayers + 1, 1):
pwr = -2. * u * p[nLayers + 1 - j]
if pwr < np.log(2. * ep):
pwr = np.log(2. * ep)
a = np.exp(pwr)
b = (1. - a) / (1. + a)
rs = p[n + 1 - j]
tpr = b * rs
t[j] = (tpr + t[j - 1]) / (1. + tpr * t[j - 1] / (rs * rs))
r[i] = t[nLayers]
return
def filters(b, k):
for i in range(1, m + 1):
re = 0.
for j in range(1, k + 1):
re = re + b[j] * r[i + k - j] # include ranges of thickness, res . push button for rmse error, observed data
        # surf thickness .2 - 100
# res 2-3000 # could use huge ranges at cost of time
r[i] = re
return
def rmsfit():
if array_spacing.lower() == 'wenner':
y = spac - 19. * delx - 0.13069
mum1 = m + 28
for i in range(1, mum1 + 1):
transf(y, i)
y = y + delx
filters(fltr1, 29)
elif array_spacing.lower() == 'schlumberger':
s = np.log(2.)
y = spac - 10.8792495 * delx
mum2 = m + 33
for i in range(1, mum2 + 1):
transf(y, i)
a = r[i]
y1 = y + s
transf(y1, i)
r[i] = 2. * a - r[i]
y = y + delx
filters(fltr2, 34)
else:
print("\nType of survey not indicated.")
raise SystemExit('Exiting.\n\n Take better care next time.')
x = spac
#print("A-Spacing App. Resistivity")
for i in range(1, m + 1):
a = np.exp(x)
asav[i] = a
asavl[i] = np.log10(a)
rl[i] = np.log10(r[i])
x = x + delx
#print("%7.2f %9.3f " % ( asav[i], r[i]))
rms = error()
return rms
def error(): # simple rms error calc
sumerror = 0.
#pltanswer = [0]*64
spline(m, one30, one30, asavl, rl, y2) # So this calculates the predicted fit?
# and essentially operates on the list in place?
for i in range(1, ndat): # So you always skip the value 0? due to -inf returns?
        ans = splint(m, adatl[i], asavl, rl, y2) # Then this calculates the error?
sumerror = sumerror + (rdatl[i] - ans) * (rdatl[i] - ans)
#print(i,sum1,rdat[i],rdatl[i],ans)
pltanswerl[i] = ans
pltanswer[i] = np.power(10, ans)
rms = np.sqrt(sumerror / (ndat - 1))
# check the spline routine
# for i in range(1,m+1,1):
# anstest = splint(m, asavl[i],asavl,rl,y2)
# print( asavl[i], rl[i], anstest)
#print(' rms = ', rms)
    # if you really want to get a good idea of all predictions from the Monte Carlo run
# perform the following plot (caution - change iter to a smaller number)
#plt.loglog(adat[1:ndat],pltanswer[1:ndat])
return rms
# my code to do a spline fit to predicted data at the nice spacing of Ghosh
# use splint to determine the spline interpolated prediction at the
# spacing where the measured resistivity was taken - to compare observation
# to prediction
def spline(n, yp1, ypn, x=[] ,y=[] ,y2=[]):
"""Still struggling to understand the general operation of this function."""
u = [0] * 1000
one29 = 0.99e30
#print(x,y)
if yp1 > one29:
y2[0] = 0.
u[0] = 0.
else:
y2[0] = -0.5
u[0] = (3. / (x[1] - x[0])) * ((y[1] - y[0]) / (x[1] - x[0]) - yp1)
for i in range(1, n):
#print(i,x[i])
sig = (x[i] - x[i-1]) / (x[i+1] - x[i-1])
p=sig * y2[i - 1] + 2.
y2[i] = (sig-1.) / p
        u[i] = ((6. * ((y[i+1] - y[i]) / (x[i+1] - x[i]) -
                       (y[i] - y[i-1]) / (x[i] - x[i-1])) / (x[i+1] - x[i-1]) -
                 sig * u[i-1]) / p)
if ypn > one29:
qn = 0.
un = 0.
else:
qn = 0.5
un = (3. / (x[n] - x[n - 1])) * (ypn - (y[n] - y[n - 1]) / (x[n] - x[n - 1]))
y2[n] = (un - qn * u[n - 1]) / (qn * y2[n - 1] + 1.)
for k in range(n-1, -1, -1):
y2[k] = y2[k] * y2[k + 1] + u[k]
return
def splint(n, x ,xa=[], ya=[], y2a=[]): # Is this function the T function?
"""Still struggling to understand the general operation of this function."""
klo = 0
khi = n
while khi - klo > 1:
k = int((khi + klo) // 2)
if xa[k] > x:
khi = k
else:
klo = k
h = xa[khi] - xa[klo]
if abs(h) < 1e-20:
print(" bad xa input")
#print(x,xa[khi],xa[klo])
a = (xa[khi] - x) / h
b = (x - xa[klo]) / h
y = (a * ya[klo] + b * ya[khi] + ((a * a * a - a) * y2a[klo] +
(b * b * b - b) * y2a[khi]) * (h * h) /6.)
#print("x= ", x,"y= ", y, " ya= ", ya[khi]," y2a= ", y2a[khi], " h= ",h)
return y
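# --- illustrative sketch (not part of the original script) -------------------
# spline() and splint() above follow the classic natural-cubic-spline pair:
# spline() precomputes the second derivatives y2[] of the tabulated curve and
# splint() evaluates the interpolant at an arbitrary abscissa via bisection.
# The hypothetical helper below (never called by the inversion) shows the call
# pattern on a toy y = x**2 table; passing one30 for both end slopes requests
# the "natural" end conditions, just as error() does.
def _spline_demo():
    npts = 10                                  # knots live in indices 0..npts
    xa = [float(i) for i in range(npts + 1)]
    ya = [xi * xi for xi in xa]
    y2 = [0.0] * (npts + 1)
    spline(npts, one30, one30, xa, ya, y2)     # fills y2 in place
    return splint(npts, 2.5, xa, ya, y2)       # close to 6.25 away from the ends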
#main here
if __name__ == '__main__':
adatl, rdatl = readData(adat, rdat, ndat, return_indexed=False)
print(adat[1:ndat],rdat[1:ndat])
print('log stufffff')
print(adatl[1:ndat], rdatl[1:ndat]) # is this to skip 0?
    #enter thickness range for each layer and then resistivity range.
#for 3 layers small[1] and small[2] are low end of thickness range
# small[3], small[4] and small[5] are the low end of resistivities
for iloop in range(1, int(iter_/2) + 1):
#print( ' iloop is ', iloop)
for i in range(1, n + 1): # number of parameters + 1
randNumber = random.random() # IS this just to add noise to the model?
# #print(randNumber, ' random')
# print(xlarge)
# print(small)
# s = input('')
# print('xlarge[i]: {}, small[i]: {}'.format(xlarge[i], small[i]))
p[i] = (xlarge[i] - small[i]) * randNumber + small[i]
# print(p)
print('\n')
print(p)
# s = input('')
rms = rmsfit()
if rms < errmin:
print('rms ', rms, ' errmin ', errmin)
for i in range(1, n + 1):
pkeep[i] = p[i]
for i in range(1, m + 1):
rkeep[i] = r[i]
rkeepl[i] = rl[i]
for i in range(1, ndat + 1):
pltanswerkeepl[i] = pltanswerl[i]
pltanswerkeep[i] = pltanswer[i]
errmin = rms
#output the best fitting earth model
print(' Layer ', ' Thickness ', ' Res_ohm-m ')
for i in range(1,nLayers,1):
print(i, pkeep[i], pkeep[nLayers+i-1])
print( nLayers, ' Infinite ', pkeep[n])
for i in range(1,m+1, 1):
asavl[i] = np.log10(asav[i])
#output the error of fit
print( ' RMS error ', errmin)
print( ' Spacing', ' Res_pred ', ' Log10_spacing ', ' Log10_Res_pred ')
for i in range(1,m+1,1):
#print(asav[i], rkeep[i], asavl[i], rkeepl[i])
print("%7.2f %9.3f %9.3f %9.3f" % ( asav[i], rkeep[i],
asavl[i], rkeepl[i]))
print('plot a lot')
plt.loglog(asav[1:m],rkeep[1:m],'-') # resistivity prediction curve
plt.loglog(adat[1:ndat],pltanswerkeep[1:ndat], 'ro') # predicted data red dots
s=7
plt.loglog(adat[1:ndat],rdat[1:ndat],'bo',markersize=s) #original data blue dots
plt.show()
plt.grid(True)
sys.exit(0)
| lgpl-3.0 |
PTDreamer/dRonin | python/ins/cins.py | 11 | 3838 |
from sympy import symbols, lambdify, sqrt
from sympy import MatrixSymbol, Matrix
from numpy import cos, sin, power
from sympy.matrices import *
from quaternions import *
import numpy
import ins
# this is the set of (currently) recommend INS settings. modified from
# https://raw.githubusercontent.com/wiki/TauLabs/TauLabs/files/htfpv-sparky-nav_20130527.uav
default_mag_var = numpy.array([10.0, 10.0, 100.0])
default_gyro_var = numpy.array([1e-5, 1e-5, 1e-4])
default_accel_var = numpy.array([0.01, 0.01, 0.01])
default_baro_var = 0.1
default_gps_var=numpy.array([1e-3,1e-2,10])
class CINS:
GRAV = 9.805
def __init__(self):
""" Creates the CINS class.
Important variables are
* X - the vector of state variables
* Xd - the vector of state derivatives for state and inputs
* Y - the vector of outputs for current state value
"""
self.state = []
def configure(self, mag_var=None, gyro_var=None, accel_var=None, baro_var=None, gps_var=None):
""" configure the INS parameters """
if mag_var is not None:
ins.configure(mag_var=mag_var)
if gyro_var is not None:
ins.configure(gyro_var=gyro_var)
if accel_var is not None:
ins.configure(accel_var=accel_var)
if baro_var is not None:
ins.configure(baro_var=baro_var)
if gps_var is not None:
ins.configure(gps_var=gps_var)
def prepare(self):
""" prepare the C INS wrapper
"""
self.state = ins.init()
self.configure(
mag_var=default_mag_var,
gyro_var=default_gyro_var,
accel_var=default_accel_var,
baro_var=default_baro_var,
gps_var=default_gps_var
)
def predict(self, gyros, accels, dT = 1.0/666.0):
""" Perform the prediction step
"""
self.state = ins.prediction(gyros, accels, dT)
def correction(self, pos=None, vel=None, mag=None, baro=None):
""" Perform the INS correction based on the provided corrections
"""
sensors = 0
Z = numpy.zeros((10,),numpy.float64)
# the masks must match the values in insgps.h
if pos is not None:
sensors = sensors | 0x0003
Z[0] = pos[0]
Z[1] = pos[1]
if vel is not None:
sensors = sensors | 0x0038
Z[3] = vel[0]
Z[4] = vel[1]
Z[5] = vel[2]
if mag is not None:
sensors = sensors | 0x01C0
Z[6] = mag[0]
Z[7] = mag[1]
Z[8] = mag[2]
if baro is not None:
sensors = sensors | 0x0200
Z[9] = baro
self.state = ins.correction(Z, sensors)
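# --- illustrative sketch (not part of the original wrapper) ------------------
# The `sensors` bit mask assembled in correction() above selects which rows of
# the measurement vector Z the C filter should use: bits 0-1 for horizontal
# position, bits 3-5 for velocity, bits 6-8 for the magnetometer and bit 9 for
# the barometer (the values must match insgps.h, as noted in the code).  The
# hypothetical helper below only shows how the masks combine; it is never
# called from this module.
def _example_sensor_mask(use_pos=True, use_vel=False, use_mag=True, use_baro=True):
    mask = 0
    if use_pos:
        mask |= 0x0003   # north/east position
    if use_vel:
        mask |= 0x0038   # three velocity components
    if use_mag:
        mask |= 0x01C0   # three magnetometer components
    if use_baro:
        mask |= 0x0200   # barometric altitude
    return mask          # e.g. pos + mag + baro -> 0x03C3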
def test():
""" test the INS with simulated data
"""
from numpy import cos, sin
import matplotlib.pyplot as plt
fig, ax = plt.subplots(2,2)
    sim = CINS()
sim.prepare()
dT = 1.0 / 666.0
STEPS = 100000
history = numpy.zeros((STEPS,16))
history_rpy = numpy.zeros((STEPS,3))
times = numpy.zeros((STEPS,1))
for k in range(STEPS):
ROLL = 0.1
YAW = 0.2
        sim.predict(gyros=[0, 0, YAW], accels=[0, CINS.GRAV * sin(ROLL), -CINS.GRAV * cos(ROLL)], dT=dT)
history[k,:] = sim.state
history_rpy[k,:] = quat_rpy(sim.state[6:10])
times[k] = k * dT
angle = 0*numpy.pi/3 + YAW * dT * k # radians
height = 1.0 * k * dT
if True and k % 60 == 59:
sim.correction(pos=[[10],[5],[-height]])
if True and k % 60 == 59:
sim.correction(vel=[[0],[0],[-1]])
if k % 20 == 8:
sim.correction(baro=[height])
if True and k % 20 == 15:
sim.correction(mag=[[400 * cos(angle)], [-400 * sin(angle)], [1600]])
if k % 1000 == 0:
ax[0][0].cla()
ax[0][0].plot(times[0:k:4],history[0:k:4,0:3])
ax[0][0].set_title('Position')
ax[0][1].cla()
ax[0][1].plot(times[0:k:4],history[0:k:4,3:6])
ax[0][1].set_title('Velocity')
plt.sca(ax[0][1])
plt.ylim(-2,2)
ax[1][0].cla()
ax[1][0].plot(times[0:k:4],history_rpy[0:k:4,:])
ax[1][0].set_title('Attitude')
ax[1][1].cla()
ax[1][1].plot(times[0:k:4],history[0:k:4,10:])
ax[1][1].set_title('Biases')
plt.draw()
fig.show()
plt.show()
if __name__ == '__main__':
test() | gpl-3.0 |
q1ang/scikit-learn | examples/decomposition/plot_pca_vs_fa_model_selection.py | 142 | 4467 | """
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances the low rank models are more likely than shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
print(__doc__)
# Authors: Alexandre Gramfort
# Denis A. Engemann
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.covariance import ShrunkCovariance, LedoitWolf
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
###############################################################################
# Create the data
n_samples, n_features, rank = 1000, 50, 10
sigma = 1.
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)
# Adding homoscedastic noise
X_homo = X + sigma * rng.randn(n_samples, n_features)
# Adding heteroscedastic noise
sigmas = sigma * rng.rand(n_features) + sigma / 2.
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
###############################################################################
# Fit the models
n_components = np.arange(0, n_features, 5) # options for n_components
def compute_scores(X):
pca = PCA()
fa = FactorAnalysis()
pca_scores, fa_scores = [], []
for n in n_components:
pca.n_components = n
fa.n_components = n
pca_scores.append(np.mean(cross_val_score(pca, X)))
fa_scores.append(np.mean(cross_val_score(fa, X)))
return pca_scores, fa_scores
def shrunk_cov_score(X):
shrinkages = np.logspace(-2, 0, 30)
cv = GridSearchCV(ShrunkCovariance(), {'shrinkage': shrinkages})
return np.mean(cross_val_score(cv.fit(X).best_estimator_, X))
def lw_score(X):
return np.mean(cross_val_score(LedoitWolf(), X))
for X, title in [(X_homo, 'Homoscedastic Noise'),
(X_hetero, 'Heteroscedastic Noise')]:
pca_scores, fa_scores = compute_scores(X)
n_components_pca = n_components[np.argmax(pca_scores)]
n_components_fa = n_components[np.argmax(fa_scores)]
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("best n_components by PCA CV = %d" % n_components_pca)
print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
print("best n_components by PCA MLE = %d" % n_components_pca_mle)
plt.figure()
plt.plot(n_components, pca_scores, 'b', label='PCA scores')
plt.plot(n_components, fa_scores, 'r', label='FA scores')
plt.axvline(rank, color='g', label='TRUTH: %d' % rank, linestyle='-')
plt.axvline(n_components_pca, color='b',
label='PCA CV: %d' % n_components_pca, linestyle='--')
plt.axvline(n_components_fa, color='r',
label='FactorAnalysis CV: %d' % n_components_fa, linestyle='--')
plt.axvline(n_components_pca_mle, color='k',
label='PCA MLE: %d' % n_components_pca_mle, linestyle='--')
# compare with other covariance estimators
plt.axhline(shrunk_cov_score(X), color='violet',
label='Shrunk Covariance MLE', linestyle='-.')
plt.axhline(lw_score(X), color='orange',
                label='LedoitWolf MLE', linestyle='-.')
plt.xlabel('nb of components')
plt.ylabel('CV scores')
plt.legend(loc='lower right')
plt.title(title)
plt.show()
| bsd-3-clause |
RomainBrault/scikit-learn | examples/linear_model/plot_sgd_weighted_samples.py | 344 | 1458 | """
=====================
SGD: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
y = [1] * 10 + [-1] * 10
sample_weight = 100 * np.abs(np.random.randn(20))
# and assign a bigger weight to the first 10 samples
sample_weight[:10] *= 10
# plot the weighted data points
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9,
cmap=plt.cm.bone)
## fit the unweighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid'])
## fit the weighted model
clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100)
clf.fit(X, y, sample_weight=sample_weight)
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed'])
plt.legend([no_weights.collections[0], samples_weights.collections[0]],
["no weights", "with weights"], loc="lower left")
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
chanceraine/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/legend.py | 69 | 30705 | """
Place a legend on the axes at location loc. Labels are a
sequence of strings and loc can be a string or an integer
specifying the legend location
The location codes are
'best' : 0, (only implemented for axis legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
Return value is a sequence of text, line instances that make
up the legend
"""
from __future__ import division
import warnings
import numpy as np
from matplotlib import rcParams
from matplotlib.artist import Artist
from matplotlib.cbook import is_string_like, iterable, silent_list, safezip
from matplotlib.font_manager import FontProperties
from matplotlib.lines import Line2D
from matplotlib.patches import Patch, Rectangle, Shadow, FancyBboxPatch
from matplotlib.collections import LineCollection, RegularPolyCollection
from matplotlib.transforms import Bbox
from matplotlib.offsetbox import HPacker, VPacker, PackerBase, TextArea, DrawingArea
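# --- illustrative sketch (not part of matplotlib itself) ---------------------
# A typical user-level call that ends up constructing the Legend class defined
# below; the location may be one of the string/integer codes listed in the
# module docstring or a tuple of axes coordinates.  The helper is only a usage
# reminder and is never called from this module.
def _legend_usage_example():
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot([0, 1], [0, 1], label='rising')
    ax.plot([0, 1], [1, 0], label='falling')
    ax.legend(loc='upper center', ncol=2, fancybox=True, shadow=True)
    return fig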
class Legend(Artist):
"""
Place a legend on the axes at location loc. Labels are a
sequence of strings and loc can be a string or an integer
specifying the legend location
The location codes are::
'best' : 0, (only implemented for axis legends)
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
    loc can be a tuple of normalized coordinate values with
    respect to its parent.
Return value is a sequence of text, line instances that make
up the legend
"""
codes = {'best' : 0, # only implemented for axis legends
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
}
zorder = 5
def __str__(self):
return "Legend"
def __init__(self, parent, handles, labels,
loc = None,
numpoints = None, # the number of points in the legend line
markerscale = None, # the relative size of legend markers vs. original
scatterpoints = 3, # TODO: may be an rcParam
scatteryoffsets=None,
prop = None, # properties for the legend texts
# the following dimensions are in axes coords
pad = None, # deprecated; use borderpad
labelsep = None, # deprecated; use labelspacing
handlelen = None, # deprecated; use handlelength
handletextsep = None, # deprecated; use handletextpad
axespad = None, # deprecated; use borderaxespad
                 # spacing & pad defined as a fraction of the font-size
borderpad = None, # the whitespace inside the legend border
labelspacing=None, #the vertical space between the legend entries
handlelength=None, # the length of the legend handles
handletextpad=None, # the pad between the legend handle and text
borderaxespad=None, # the pad between the axes and legend border
columnspacing=None, # spacing between columns
ncol=1, # number of columns
mode=None, # mode for horizontal distribution of columns. None, "expand"
fancybox=None, # True use a fancy box, false use a rounded box, none use rc
shadow = None,
):
"""
- *parent* : the artist that contains the legend
- *handles* : a list of artists (lines, patches) to add to the legend
- *labels* : a list of strings to label the legend
Optional keyword arguments:
================ ==================================================================
Keyword Description
================ ==================================================================
loc a location code or a tuple of coordinates
numpoints the number of points in the legend line
prop the font property
markerscale the relative size of legend markers vs. original
fancybox if True, draw a frame with a round fancybox. If None, use rc
shadow if True, draw a shadow behind legend
scatteryoffsets a list of yoffsets for scatter symbols in legend
borderpad the fractional whitespace inside the legend border
labelspacing the vertical space between the legend entries
handlelength the length of the legend handles
handletextpad the pad between the legend handle and text
borderaxespad the pad between the axes and legend border
columnspacing the spacing between columns
================ ==================================================================
The dimensions of pad and spacing are given as a fraction of the
fontsize. Values from rcParams will be used if None.
"""
from matplotlib.axes import Axes # local import only to avoid circularity
from matplotlib.figure import Figure # local import only to avoid circularity
Artist.__init__(self)
if prop is None:
self.prop=FontProperties(size=rcParams["legend.fontsize"])
else:
self.prop=prop
self.fontsize = self.prop.get_size_in_points()
propnames=['numpoints', 'markerscale', 'shadow', "columnspacing",
"scatterpoints"]
localdict = locals()
for name in propnames:
if localdict[name] is None:
value = rcParams["legend."+name]
else:
value = localdict[name]
setattr(self, name, value)
# Take care the deprecated keywords
deprecated_kwds = {"pad":"borderpad",
"labelsep":"labelspacing",
"handlelen":"handlelength",
"handletextsep":"handletextpad",
"axespad":"borderaxespad"}
        # convert values of deprecated keywords (given in axes coords)
        # to new values in a fraction of the font size
# conversion factor
bbox = parent.bbox
axessize_fontsize = min(bbox.width, bbox.height)/self.fontsize
for k, v in deprecated_kwds.items():
# use deprecated value if not None and if their newer
# counter part is None.
if localdict[k] is not None and localdict[v] is None:
warnings.warn("Use '%s' instead of '%s'." % (v, k),
DeprecationWarning)
setattr(self, v, localdict[k]*axessize_fontsize)
continue
# Otherwise, use new keywords
if localdict[v] is None:
setattr(self, v, rcParams["legend."+v])
else:
setattr(self, v, localdict[v])
del localdict
self._ncol = ncol
if self.numpoints <= 0:
raise ValueError("numpoints must be >= 0; it was %d"% numpoints)
# introduce y-offset for handles of the scatter plot
if scatteryoffsets is None:
self._scatteryoffsets = np.array([3./8., 4./8., 2.5/8.])
else:
self._scatteryoffsets = np.asarray(scatteryoffsets)
reps = int(self.numpoints / len(self._scatteryoffsets)) + 1
self._scatteryoffsets = np.tile(self._scatteryoffsets, reps)[:self.scatterpoints]
# _legend_box is an OffsetBox instance that contains all
# legend items and will be initialized from _init_legend_box()
# method.
self._legend_box = None
if isinstance(parent,Axes):
self.isaxes = True
self.set_figure(parent.figure)
elif isinstance(parent,Figure):
self.isaxes = False
self.set_figure(parent)
else:
raise TypeError("Legend needs either Axes or Figure as parent")
self.parent = parent
if loc is None:
loc = rcParams["legend.loc"]
if not self.isaxes and loc in [0,'best']:
loc = 'upper right'
if is_string_like(loc):
if loc not in self.codes:
if self.isaxes:
warnings.warn('Unrecognized location "%s". Falling back on "best"; '
'valid locations are\n\t%s\n'
% (loc, '\n\t'.join(self.codes.keys())))
loc = 0
else:
warnings.warn('Unrecognized location "%s". Falling back on "upper right"; '
'valid locations are\n\t%s\n'
% (loc, '\n\t'.join(self.codes.keys())))
loc = 1
else:
loc = self.codes[loc]
if not self.isaxes and loc == 0:
warnings.warn('Automatic legend placement (loc="best") not implemented for figure legend. '
'Falling back on "upper right".')
loc = 1
self._loc = loc
self._mode = mode
# We use FancyBboxPatch to draw a legend frame. The location
# and size of the box will be updated during the drawing time.
self.legendPatch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=self.fontsize,
snap=True
)
# The width and height of the legendPatch will be set (in the
# draw()) to the length that includes the padding. Thus we set
# pad=0 here.
if fancybox is None:
fancybox = rcParams["legend.fancybox"]
if fancybox == True:
self.legendPatch.set_boxstyle("round",pad=0,
rounding_size=0.2)
else:
self.legendPatch.set_boxstyle("square",pad=0)
self._set_artist_props(self.legendPatch)
self._drawFrame = True
# init with null renderer
self._init_legend_box(handles, labels)
self._last_fontsize_points = self.fontsize
def _set_artist_props(self, a):
"""
set the boilerplate props for artists added to axes
"""
a.set_figure(self.figure)
for c in self.get_children():
c.set_figure(self.figure)
a.set_transform(self.get_transform())
def _findoffset_best(self, width, height, xdescent, ydescent, renderer):
"Heper function to locate the legend at its best position"
ox, oy = self._find_best_position(width, height, renderer)
return ox+xdescent, oy+ydescent
def _findoffset_loc(self, width, height, xdescent, ydescent, renderer):
"Heper function to locate the legend using the location code"
if iterable(self._loc) and len(self._loc)==2:
# when loc is a tuple of axes(or figure) coordinates.
fx, fy = self._loc
bbox = self.parent.bbox
x, y = bbox.x0 + bbox.width * fx, bbox.y0 + bbox.height * fy
else:
bbox = Bbox.from_bounds(0, 0, width, height)
x, y = self._get_anchored_bbox(self._loc, bbox, self.parent.bbox, renderer)
return x+xdescent, y+ydescent
def draw(self, renderer):
"Draw everything that belongs to the legend"
if not self.get_visible(): return
self._update_legend_box(renderer)
renderer.open_group('legend')
# find_offset function will be provided to _legend_box and
# _legend_box will draw itself at the location of the return
# value of the find_offset.
if self._loc == 0:
_findoffset = self._findoffset_best
else:
_findoffset = self._findoffset_loc
def findoffset(width, height, xdescent, ydescent):
return _findoffset(width, height, xdescent, ydescent, renderer)
self._legend_box.set_offset(findoffset)
fontsize = renderer.points_to_pixels(self.fontsize)
        # if mode == expand, set the width of the legend_box to the
        # width of the parent (minus pads)
if self._mode in ["expand"]:
pad = 2*(self.borderaxespad+self.borderpad)*fontsize
self._legend_box.set_width(self.parent.bbox.width-pad)
if self._drawFrame:
# update the location and size of the legend
bbox = self._legend_box.get_window_extent(renderer)
self.legendPatch.set_bounds(bbox.x0, bbox.y0,
bbox.width, bbox.height)
self.legendPatch.set_mutation_scale(fontsize)
if self.shadow:
shadow = Shadow(self.legendPatch, 2, -2)
shadow.draw(renderer)
self.legendPatch.draw(renderer)
self._legend_box.draw(renderer)
renderer.close_group('legend')
def _approx_text_height(self, renderer=None):
"""
Return the approximate height of the text. This is used to place
the legend handle.
"""
if renderer is None:
return self.fontsize
else:
return renderer.points_to_pixels(self.fontsize)
def _init_legend_box(self, handles, labels):
"""
        Initialize the legend_box. The legend_box is an instance of
the OffsetBox, which is packed with legend handles and
texts. Once packed, their location is calculated during the
drawing time.
"""
fontsize = self.fontsize
# legend_box is a HPacker, horizontally packed with
# columns. Each column is a VPacker, vertically packed with
# legend items. Each legend item is HPacker packed with
# legend handleBox and labelBox. handleBox is an instance of
# offsetbox.DrawingArea which contains legend handle. labelBox
# is an instance of offsetbox.TextArea which contains legend
# text.
text_list = [] # the list of text instances
handle_list = [] # the list of text instances
label_prop = dict(verticalalignment='baseline',
horizontalalignment='left',
fontproperties=self.prop,
)
labelboxes = []
for l in labels:
textbox = TextArea(l, textprops=label_prop,
multilinebaseline=True, minimumdescent=True)
text_list.append(textbox._text)
labelboxes.append(textbox)
handleboxes = []
# The approximate height and descent of text. These values are
# only used for plotting the legend handle.
height = self._approx_text_height() * 0.7
descent = 0.
# each handle needs to be drawn inside a box of (x, y, w, h) =
        # (0, -descent, width, height). And their coordinates should
# be given in the display coordinates.
# NOTE : the coordinates will be updated again in
# _update_legend_box() method.
# The transformation of each handle will be automatically set
        # to self.get_transform(). If the artist does not use its
        # default transform (e.g., Collections), you need to
# manually set their transform to the self.get_transform().
for handle in handles:
if isinstance(handle, RegularPolyCollection):
npoints = self.scatterpoints
else:
npoints = self.numpoints
if npoints > 1:
# we put some pad here to compensate the size of the
# marker
xdata = np.linspace(0.3*fontsize,
(self.handlelength-0.3)*fontsize,
npoints)
xdata_marker = xdata
elif npoints == 1:
xdata = np.linspace(0, self.handlelength*fontsize, 2)
xdata_marker = [0.5*self.handlelength*fontsize]
if isinstance(handle, Line2D):
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline = Line2D(xdata, ydata)
legline.update_from(handle)
self._set_artist_props(legline) # after update
legline.set_clip_box(None)
legline.set_clip_path(None)
legline.set_drawstyle('default')
legline.set_marker('None')
handle_list.append(legline)
legline_marker = Line2D(xdata_marker, ydata[:len(xdata_marker)])
legline_marker.update_from(handle)
self._set_artist_props(legline_marker)
legline_marker.set_clip_box(None)
legline_marker.set_clip_path(None)
legline_marker.set_linestyle('None')
# we don't want to add this to the return list because
# the texts and handles are assumed to be in one-to-one
                # correspondence.
legline._legmarker = legline_marker
elif isinstance(handle, Patch):
p = Rectangle(xy=(0., 0.),
width = self.handlelength*fontsize,
height=(height-descent),
)
p.update_from(handle)
self._set_artist_props(p)
p.set_clip_box(None)
p.set_clip_path(None)
handle_list.append(p)
elif isinstance(handle, LineCollection):
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline = Line2D(xdata, ydata)
self._set_artist_props(legline)
legline.set_clip_box(None)
legline.set_clip_path(None)
lw = handle.get_linewidth()[0]
dashes = handle.get_dashes()[0]
color = handle.get_colors()[0]
legline.set_color(color)
legline.set_linewidth(lw)
legline.set_dashes(dashes)
handle_list.append(legline)
elif isinstance(handle, RegularPolyCollection):
#ydata = self._scatteryoffsets
ydata = height*self._scatteryoffsets
size_max, size_min = max(handle.get_sizes()),\
min(handle.get_sizes())
# we may need to scale these sizes by "markerscale"
# attribute. But other handle types does not seem
# to care about this attribute and it is currently ignored.
if self.scatterpoints < 4:
sizes = [.5*(size_max+size_min), size_max,
size_min]
else:
sizes = (size_max-size_min)*np.linspace(0,1,self.scatterpoints)+size_min
p = type(handle)(handle.get_numsides(),
rotation=handle.get_rotation(),
sizes=sizes,
offsets=zip(xdata_marker,ydata),
transOffset=self.get_transform(),
)
p.update_from(handle)
p.set_figure(self.figure)
p.set_clip_box(None)
p.set_clip_path(None)
handle_list.append(p)
else:
handle_list.append(None)
handlebox = DrawingArea(width=self.handlelength*fontsize,
height=height,
xdescent=0., ydescent=descent)
handle = handle_list[-1]
handlebox.add_artist(handle)
if hasattr(handle, "_legmarker"):
handlebox.add_artist(handle._legmarker)
handleboxes.append(handlebox)
        # We calculate the number of rows in each column. The first
        # (num_largecol) columns will have (nrows+1) rows, and the remaining
        # (num_smallcol) columns will have (nrows) rows.
nrows, num_largecol = divmod(len(handleboxes), self._ncol)
num_smallcol = self._ncol-num_largecol
# starting index of each column and number of rows in it.
largecol = safezip(range(0, num_largecol*(nrows+1), (nrows+1)),
[nrows+1] * num_largecol)
smallcol = safezip(range(num_largecol*(nrows+1), len(handleboxes), nrows),
[nrows] * num_smallcol)
handle_label = safezip(handleboxes, labelboxes)
columnbox = []
for i0, di in largecol+smallcol:
# pack handleBox and labelBox into itemBox
itemBoxes = [HPacker(pad=0,
sep=self.handletextpad*fontsize,
children=[h, t], align="baseline")
for h, t in handle_label[i0:i0+di]]
# minimumdescent=False for the text of the last row of the column
itemBoxes[-1].get_children()[1].set_minimumdescent(False)
# pack columnBox
columnbox.append(VPacker(pad=0,
sep=self.labelspacing*fontsize,
align="baseline",
children=itemBoxes))
if self._mode == "expand":
mode = "expand"
else:
mode = "fixed"
sep = self.columnspacing*fontsize
self._legend_box = HPacker(pad=self.borderpad*fontsize,
sep=sep, align="baseline",
mode=mode,
children=columnbox)
self._legend_box.set_figure(self.figure)
self.texts = text_list
self.legendHandles = handle_list
def _update_legend_box(self, renderer):
"""
Update the dimension of the legend_box. This is required
        because the paddings, the handle size, etc. depend on the dpi
of the renderer.
"""
# fontsize in points.
fontsize = renderer.points_to_pixels(self.fontsize)
if self._last_fontsize_points == fontsize:
# no update is needed
return
# each handle needs to be drawn inside a box of
# (x, y, w, h) = (0, -descent, width, height).
        # And their coordinates should be given in the display coordinates.
# The approximate height and descent of text. These values are
# only used for plotting the legend handle.
height = self._approx_text_height(renderer) * 0.7
descent = 0.
for handle in self.legendHandles:
if isinstance(handle, RegularPolyCollection):
npoints = self.scatterpoints
else:
npoints = self.numpoints
if npoints > 1:
# we put some pad here to compensate the size of the
# marker
xdata = np.linspace(0.3*fontsize,
(self.handlelength-0.3)*fontsize,
npoints)
xdata_marker = xdata
elif npoints == 1:
xdata = np.linspace(0, self.handlelength*fontsize, 2)
xdata_marker = [0.5*self.handlelength*fontsize]
if isinstance(handle, Line2D):
legline = handle
ydata = ((height-descent)/2.)*np.ones(xdata.shape, float)
legline.set_data(xdata, ydata)
legline_marker = legline._legmarker
legline_marker.set_data(xdata_marker, ydata[:len(xdata_marker)])
elif isinstance(handle, Patch):
p = handle
p.set_bounds(0., 0.,
self.handlelength*fontsize,
(height-descent),
)
elif isinstance(handle, RegularPolyCollection):
p = handle
ydata = height*self._scatteryoffsets
p.set_offsets(zip(xdata_marker,ydata))
# correction factor
cor = fontsize / self._last_fontsize_points
# helper function to iterate over all children
def all_children(parent):
yield parent
for c in parent.get_children():
for cc in all_children(c): yield cc
#now update paddings
for box in all_children(self._legend_box):
if isinstance(box, PackerBase):
box.pad = box.pad * cor
box.sep = box.sep * cor
elif isinstance(box, DrawingArea):
box.width = self.handlelength*fontsize
box.height = height
box.xdescent = 0.
box.ydescent=descent
self._last_fontsize_points = fontsize
def _auto_legend_data(self):
"""
Returns list of vertices and extents covered by the plot.
        Returns a three-element list: [vertices, bboxes, lines].
First element is a list of (x, y) vertices (in
display-coordinates) covered by all the lines and line
collections, in the legend's handles.
Second element is a list of bounding boxes for all the patches in
the legend's handles.
"""
assert self.isaxes # should always hold because function is only called internally
ax = self.parent
vertices = []
bboxes = []
lines = []
for handle in ax.lines:
assert isinstance(handle, Line2D)
path = handle.get_path()
trans = handle.get_transform()
tpath = trans.transform_path(path)
lines.append(tpath)
for handle in ax.patches:
assert isinstance(handle, Patch)
if isinstance(handle, Rectangle):
transform = handle.get_data_transform()
bboxes.append(handle.get_bbox().transformed(transform))
else:
transform = handle.get_transform()
bboxes.append(handle.get_path().get_extents(transform))
return [vertices, bboxes, lines]
def draw_frame(self, b):
'b is a boolean. Set draw frame to b'
self._drawFrame = b
def get_children(self):
'return a list of child artists'
children = []
if self._legend_box:
children.append(self._legend_box)
return children
def get_frame(self):
'return the Rectangle instance used to frame the legend'
return self.legendPatch
def get_lines(self):
'return a list of lines.Line2D instances in the legend'
return [h for h in self.legendHandles if isinstance(h, Line2D)]
def get_patches(self):
'return a list of patch instances in the legend'
return silent_list('Patch', [h for h in self.legendHandles if isinstance(h, Patch)])
def get_texts(self):
'return a list of text.Text instance in the legend'
return silent_list('Text', self.texts)
def get_window_extent(self):
        'return the extent of the legend'
return self.legendPatch.get_window_extent()
def _get_anchored_bbox(self, loc, bbox, parentbbox, renderer):
"""
Place the *bbox* inside the *parentbbox* according to a given
location code. Return the (x,y) coordinate of the bbox.
- loc: a location code in range(1, 11).
This corresponds to the possible values for self._loc, excluding "best".
- bbox: bbox to be placed, display coodinate units.
- parentbbox: a parent box which will contain the bbox. In
display coordinates.
"""
assert loc in range(1,11) # called only internally
BEST, UR, UL, LL, LR, R, CL, CR, LC, UC, C = range(11)
anchor_coefs={UR:"NE",
UL:"NW",
LL:"SW",
LR:"SE",
R:"E",
CL:"W",
CR:"E",
LC:"S",
UC:"N",
C:"C"}
c = anchor_coefs[loc]
fontsize = renderer.points_to_pixels(self.fontsize)
container = parentbbox.padded(-(self.borderaxespad) * fontsize)
anchored_box = bbox.anchored(c, container=container)
return anchored_box.x0, anchored_box.y0
def _find_best_position(self, width, height, renderer, consider=None):
"""
Determine the best location to place the legend.
`consider` is a list of (x, y) pairs to consider as a potential
lower-left corner of the legend. All are display coords.
"""
assert self.isaxes # should always hold because function is only called internally
verts, bboxes, lines = self._auto_legend_data()
bbox = Bbox.from_bounds(0, 0, width, height)
consider = [self._get_anchored_bbox(x, bbox, self.parent.bbox, renderer) for x in range(1, len(self.codes))]
#tx, ty = self.legendPatch.get_x(), self.legendPatch.get_y()
candidates = []
for l, b in consider:
legendBox = Bbox.from_bounds(l, b, width, height)
badness = 0
badness = legendBox.count_contains(verts)
badness += legendBox.count_overlaps(bboxes)
for line in lines:
if line.intersects_bbox(legendBox):
badness += 1
ox, oy = l, b
if badness == 0:
return ox, oy
candidates.append((badness, (l, b)))
# rather than use min() or list.sort(), do this so that we are assured
# that in the case of two equal badnesses, the one first considered is
# returned.
        # NOTE: list.sort() is stable. But leave as it is for now. -JJL
minCandidate = candidates[0]
for candidate in candidates:
if candidate[0] < minCandidate[0]:
minCandidate = candidate
ox, oy = minCandidate[1]
return ox, oy
| agpl-3.0 |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/util/testing.py | 3 | 92623 | from __future__ import division
# pylint: disable-msg=W0402
import re
import string
import sys
import tempfile
import warnings
import inspect
import os
import subprocess
import locale
import traceback
from datetime import datetime
from functools import wraps, partial
from contextlib import contextmanager
from distutils.version import LooseVersion
from numpy.random import randn, rand
import numpy as np
import pandas as pd
from pandas.core.dtypes.missing import array_equivalent
from pandas.core.dtypes.common import (
is_datetimelike_v_numeric,
is_datetimelike_v_object,
is_number, is_bool,
needs_i8_conversion,
is_categorical_dtype,
is_interval_dtype,
is_sequence,
is_list_like)
from pandas.io.formats.printing import pprint_thing
from pandas.core.algorithms import take_1d
import pandas.compat as compat
from pandas.compat import (
filter, map, zip, range, unichr, lrange, lmap, lzip, u, callable, Counter,
raise_with_traceback, httplib, is_platform_windows, is_platform_32bit,
StringIO, PY3
)
from pandas.core.computation import expressions as expr
from pandas import (bdate_range, CategoricalIndex, Categorical, IntervalIndex,
DatetimeIndex, TimedeltaIndex, PeriodIndex, RangeIndex,
Index, MultiIndex,
Series, DataFrame, Panel, Panel4D)
from pandas._libs import testing as _testing
from pandas.io.common import urlopen
try:
import pytest
slow = pytest.mark.slow
except ImportError:
# Should be ok to just ignore. If you actually need
# slow then you'll hit an import error long before getting here.
pass
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, compat.ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('always', _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('ignore', _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option('^display.', silent=True)
def round_trip_pickle(obj, path=None):
"""
Pickle an object and then read it again.
Parameters
----------
obj : pandas object
The object to pickle and then re-read.
path : str, default None
The path where the pickled object is written and then read.
Returns
-------
round_trip_pickled_object : pandas object
The original object that was pickled and then re-read.
"""
if path is None:
path = u('__%s__.pickle' % rands(10))
with ensure_clean(path) as path:
pd.to_pickle(obj, path)
return pd.read_pickle(path)
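# --- illustrative sketch (not part of the public pandas API) -----------------
# round_trip_pickle() above is the building block most pickle round-trip tests
# lean on: the object is written to a temporary file that ensure_clean()
# removes afterwards, read back, and compared with the original.  Hypothetical
# usage, never called from this module:
def _round_trip_pickle_demo():
    df = pd.DataFrame({'a': [1, 2, 3]})
    result = round_trip_pickle(df)
    assert result.equals(df)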
def round_trip_pathlib(writer, reader, path=None):
"""
    Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip('pathlib').Path
if path is None:
path = '___pathlib___'
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path=None):
"""
    Write an object to file specified by a py.path LocalPath and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip('py.path').local
if path is None:
path = '___localpath___'
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
def assert_almost_equal(left, right, check_exact=False,
check_dtype='equiv', check_less_precise=False,
**kwargs):
"""
Check that the left and right objects are approximately equal.
Parameters
----------
left : object
right : object
    check_exact : bool, default False
        Whether to compare numbers exactly.
    check_dtype : bool or {'equiv'}, default 'equiv'
        Check dtype if both a and b are the same type.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
"""
if isinstance(left, pd.Index):
return assert_index_equal(left, right, check_exact=check_exact,
exact=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.Series):
return assert_series_equal(left, right, check_exact=check_exact,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.DataFrame):
return assert_frame_equal(left, right, check_exact=check_exact,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
else:
# other sequences
if check_dtype:
if is_number(left) and is_number(right):
# do not compare numeric classes, like np.float64 and float
pass
elif is_bool(left) and is_bool(right):
# do not compare bool classes, like np.bool_ and bool
pass
else:
if (isinstance(left, np.ndarray) or
isinstance(right, np.ndarray)):
obj = 'numpy array'
else:
obj = 'Input'
assert_class_equal(left, right, obj=obj)
return _testing.assert_almost_equal(
left, right,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
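# --- illustrative sketch (not part of the public pandas API) -----------------
# assert_almost_equal() above dispatches on the type of ``left``: Index,
# Series and DataFrame inputs are routed to their dedicated assert_*_equal
# helpers, everything else falls through to the cython comparison in _testing.
# The hypothetical helper below shows the tolerance behaviour and is never
# called from this module.
def _assert_almost_equal_demo():
    s1 = pd.Series([1.0, 2.0])
    s2 = pd.Series([1.0 + 1e-8, 2.0])
    assert_almost_equal(s1, s2)                           # tiny float noise is tolerated
    assert_almost_equal(s1, s2, check_less_precise=True)  # 3-digit comparison is looser still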
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
err_msg = "{0} Expected type {1}, found {2} instead"
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(err_msg.format(cls_name, cls, type(left)))
if not isinstance(right, cls):
raise AssertionError(err_msg.format(cls_name, cls, type(right)))
def assert_dict_equal(left, right, compare_keys=True):
_check_isinstance(left, right, dict)
return _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p=0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
RANDU_CHARS = np.array(list(u("").join(map(unichr, lrange(1488, 1488 + 26))) +
string.digits), dtype=(np.unicode_, 1))
def rands_array(nchars, size, dtype='O'):
"""Generate an array of byte strings."""
retval = (np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def randu_array(nchars, size, dtype='O'):
"""Generate an array of unicode strings."""
retval = (np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return ''.join(np.random.choice(RANDS_CHARS, nchars))
def randu(nchars):
"""
Generate one random unicode string.
See `randu_array` if you want to create an array of random unicode strings.
"""
return ''.join(np.random.choice(RANDU_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import get_fignums, close as _close
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
def _skip_if_32bit():
import pytest
if is_platform_32bit():
pytest.skip("skipping for 32 bit")
def _skip_module_if_no_mpl():
import pytest
mpl = pytest.importorskip("matplotlib")
mpl.use("Agg", warn=False)
def _skip_if_no_mpl():
try:
import matplotlib as mpl
mpl.use("Agg", warn=False)
except ImportError:
import pytest
pytest.skip("matplotlib not installed")
def _skip_if_mpl_1_5():
import matplotlib as mpl
v = mpl.__version__
if v > LooseVersion('1.4.3') or v[0] == '0':
import pytest
pytest.skip("matplotlib 1.5")
else:
mpl.use("Agg", warn=False)
def _skip_if_no_scipy():
try:
import scipy.stats # noqa
except ImportError:
import pytest
pytest.skip("no scipy.stats module")
try:
import scipy.interpolate # noqa
except ImportError:
import pytest
pytest.skip('scipy.interpolate missing')
try:
import scipy.sparse # noqa
except ImportError:
import pytest
pytest.skip('scipy.sparse missing')
def _check_if_lzma():
try:
return compat.import_lzma()
except ImportError:
return False
def _skip_if_no_lzma():
import pytest
return _check_if_lzma() or pytest.skip('need backports.lzma to run')
def _skip_if_no_xarray():
try:
import xarray
except ImportError:
import pytest
pytest.skip("xarray not installed")
v = xarray.__version__
if v < LooseVersion('0.7.0'):
import pytest
pytest.skip("xarray not version is too low: {0}".format(v))
def _skip_if_no_pytz():
try:
import pytz # noqa
except ImportError:
import pytest
pytest.skip("pytz not installed")
def _skip_if_no_dateutil():
try:
import dateutil # noqa
except ImportError:
import pytest
pytest.skip("dateutil not installed")
def _skip_if_windows_python_3():
if PY3 and is_platform_windows():
import pytest
pytest.skip("not used on python 3/win32")
def _skip_if_windows():
if is_platform_windows():
import pytest
pytest.skip("Running on Windows")
def _skip_if_no_pathlib():
try:
from pathlib import Path # noqa
except ImportError:
import pytest
pytest.skip("pathlib not available")
def _skip_if_no_localpath():
try:
from py.path import local as LocalPath # noqa
except ImportError:
import pytest
pytest.skip("py.path not installed")
def _incompat_bottleneck_version(method):
""" skip if we have bottleneck installed
    and it's >= 1.0
as we don't match the nansum/nanprod behavior for all-nan
ops, see GH9422
"""
if method not in ['sum', 'prod']:
return False
try:
import bottleneck as bn
return bn.__version__ >= LooseVersion('1.0')
except ImportError:
return False
def skip_if_no_ne(engine='numexpr'):
from pandas.core.computation.expressions import (
_USE_NUMEXPR,
_NUMEXPR_INSTALLED)
if engine == 'numexpr':
if not _USE_NUMEXPR:
import pytest
pytest.skip("numexpr enabled->{enabled}, "
"installed->{installed}".format(
enabled=_USE_NUMEXPR,
installed=_NUMEXPR_INSTALLED))
def _skip_if_has_locale():
import locale
lang, _ = locale.getlocale()
if lang is not None:
import pytest
pytest.skip("Specific locale is set {0}".format(lang))
def _skip_if_not_us_locale():
import locale
lang, _ = locale.getlocale()
if lang != 'en_US':
import pytest
pytest.skip("Specific locale is set {0}".format(lang))
def _skip_if_no_mock():
try:
import mock # noqa
except ImportError:
try:
from unittest import mock # noqa
except ImportError:
import nose
raise nose.SkipTest("mock is not installed")
def _skip_if_no_ipython():
try:
import IPython # noqa
except ImportError:
import nose
raise nose.SkipTest("IPython not installed")
# -----------------------------------------------------------------------------
# locale utilities
def check_output(*popenargs, **kwargs):
# shamelessly taken from Python 2.7 source
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
*popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def _default_locale_getter():
try:
raw_locales = check_output(['locale -a'], shell=True)
except subprocess.CalledProcessError as e:
raise type(e)("%s, the 'locale -a' command cannot be found on your "
"system" % e)
return raw_locales
def get_locales(prefix=None, normalize=True,
locale_getter=_default_locale_getter):
"""Get all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to get all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Call ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
locale_getter : callable
The function to use to retrieve the current locales. This should return
a string with each locale separated by a newline character.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
On error will return None (no locale available, e.g. Windows)
"""
try:
raw_locales = locale_getter()
except:
return None
try:
# raw_locales is "\n" separated list of locales
# it may contain non-decodable parts, so split
# extract what we can and then rejoin.
raw_locales = raw_locales.split(b'\n')
out_locales = []
for x in raw_locales:
if PY3:
out_locales.append(str(
x, encoding=pd.options.display.encoding))
else:
out_locales.append(str(x))
except TypeError:
pass
if prefix is None:
return _valid_locales(out_locales, normalize)
found = re.compile('%s.*' % prefix).findall('\n'.join(out_locales))
return _valid_locales(found, normalize)
@contextmanager
def set_locale(new_locale, lc_var=locale.LC_ALL):
"""Context manager for temporarily setting a locale.
Parameters
----------
new_locale : str or tuple
A string of the form <language_country>.<encoding>. For example to set
the current locale to US English with a UTF8 encoding, you would pass
"en_US.UTF-8".
Notes
-----
This is useful when you want to run a particular block of code under a
particular locale, without globally setting the locale. This probably isn't
thread-safe.
"""
current_locale = locale.getlocale()
try:
locale.setlocale(lc_var, new_locale)
try:
normalized_locale = locale.getlocale()
except ValueError:
yield new_locale
else:
if all(lc is not None for lc in normalized_locale):
yield '.'.join(normalized_locale)
else:
yield new_locale
finally:
locale.setlocale(lc_var, current_locale)
def _can_set_locale(lc):
"""Check to see if we can set a locale without throwing an exception.
Parameters
----------
lc : str
The locale to attempt to set.
Returns
-------
isvalid : bool
Whether the passed locale can be set
"""
try:
with set_locale(lc):
pass
    except locale.Error:  # horrible name for an Exception subclass
return False
else:
return True
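# --- illustrative sketch (not part of the public pandas API) -----------------
# set_locale() is meant to wrap a block of locale-sensitive test code; the
# previous locale is always restored on exit, and _can_set_locale() above is
# the guard to use before relying on a locale that may not be installed.  The
# locale name below is an assumption and the helper is never called from this
# module.
def _set_locale_demo():
    if _can_set_locale('de_DE.UTF-8'):
        with set_locale('de_DE.UTF-8', locale.LC_ALL) as normalized:
            return normalized   # e.g. 'de_DE.UTF-8' while inside the block
    return None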
def _valid_locales(locales, normalize):
"""Return a list of normalized locales that do not throw an ``Exception``
when set.
Parameters
----------
locales : str
A string where each locale is separated by a newline.
normalize : bool
Whether to call ``locale.normalize`` on each locale.
Returns
-------
valid_locales : list
A list of valid locales.
"""
if normalize:
normalizer = lambda x: locale.normalize(x.strip())
else:
normalizer = lambda x: x.strip()
return list(filter(_can_set_locale, map(normalizer, locales)))
# -----------------------------------------------------------------------------
# Stdout / stderr decorators
def capture_stdout(f):
"""
Decorator to capture stdout in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stdout.
Returns
-------
f : callable
The decorated test ``f``, which captures stdout.
Examples
--------
>>> from pandas.util.testing import capture_stdout
>>>
>>> import sys
>>>
>>> @capture_stdout
... def test_print_pass():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stdout
... def test_print_fail():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stdout = StringIO()
f(*args, **kwargs)
finally:
sys.stdout = sys.__stdout__
return wrapper
def capture_stderr(f):
"""
Decorator to capture stderr in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stderr.
Returns
-------
f : callable
The decorated test ``f``, which captures stderr.
Examples
--------
>>> from pandas.util.testing import capture_stderr
>>>
>>> import sys
>>>
>>> @capture_stderr
... def test_stderr_pass():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stderr
... def test_stderr_fail():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stderr = StringIO()
f(*args, **kwargs)
finally:
sys.stderr = sys.__stderr__
return wrapper
# -----------------------------------------------------------------------------
# Console debugging tools
def debug(f, *args, **kwargs):
from pdb import Pdb as OldPdb
try:
from IPython.core.debugger import Pdb
kw = dict(color_scheme='Linux')
except ImportError:
Pdb = OldPdb
kw = {}
pdb = Pdb(**kw)
return pdb.runcall(f, *args, **kwargs)
def pudebug(f, *args, **kwargs):
import pudb
return pudb.runcall(f, *args, **kwargs)
def set_trace():
from IPython.core.debugger import Pdb
try:
Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
    except Exception:
from pdb import Pdb as OldPdb
OldPdb().set_trace(sys._getframe().f_back)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False):
"""Gets a temporary path and agrees to remove on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
"""
filename = filename or ''
fd = None
if return_filelike:
f = tempfile.TemporaryFile(suffix=filename)
try:
yield f
finally:
f.close()
else:
# don't generate tempfile if using a path with directory specified
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(suffix=filename)
except UnicodeEncodeError:
import pytest
pytest.skip('no unicode file names on this system')
try:
yield filename
finally:
try:
os.close(fd)
except Exception as e:
print("Couldn't close file descriptor: %d (file: %s)" %
(fd, filename))
try:
if os.path.exists(filename):
os.remove(filename)
except Exception as e:
print("Exception on removing file: %s" % e)
def get_data_path(f=''):
"""Return the path of a data file, these are relative to the current test
directory.
"""
# get our callers file
_, filename, _, _, _, _ = inspect.getouterframes(inspect.currentframe())[1]
base_dir = os.path.abspath(os.path.dirname(filename))
return os.path.join(base_dir, 'data', f)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2):
"""Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(left, right, exact='equiv', check_names=True,
check_less_precise=False, check_exact=True,
check_categorical=True, obj='Index'):
"""Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
    exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_exact : bool, default True
        Whether to compare numbers exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message
"""
def _check_types(l, r, obj='Index'):
if exact:
assert_class_equal(left, right, exact=exact, obj=obj)
assert_attr_equal('dtype', l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ('string', 'unicode'):
assert r.inferred_type in ('string', 'unicode')
else:
assert_attr_equal('inferred_type', l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
labels = index.labels[level]
filled = take_1d(unique.values, labels, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
raise_assert_detail(obj, '{0} levels are different'.format(obj),
'{0}, {1}'.format(left.nlevels, left),
'{0}, {1}'.format(right.nlevels, right))
# length comparison
if len(left) != len(right):
raise_assert_detail(obj, '{0} length are different'.format(obj),
'{0}, {1}'.format(len(left), left),
'{0}, {1}'.format(len(right), right))
    # MultiIndex special comparison to produce more friendly error messages
if left.nlevels > 1:
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = 'MultiIndex level [{0}]'.format(level)
assert_index_equal(llevel, rlevel,
exact=exact, check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact, obj=lobj)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
if check_exact:
if not left.equals(right):
diff = np.sum((left.values != right.values)
.astype(int)) * 100.0 / len(left)
msg = '{0} values are different ({1} %)'\
.format(obj, np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(left.values, right.values,
check_less_precise=check_less_precise,
check_dtype=exact,
obj=obj, lobj=left, robj=right)
# metadata comparison
if check_names:
assert_attr_equal('names', left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal('freq', left, right, obj=obj)
if (isinstance(left, pd.IntervalIndex) or
isinstance(right, pd.IntervalIndex)):
assert_attr_equal('closed', left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{0} category'.format(obj))
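# Hedged usage sketch: typical assert_index_equal calls. With the default
# exact='equiv', RangeIndex(3) and Index([0, 1, 2]) compare as equivalent; the
# indexes built here are illustrative assumptions.
def _example_assert_index_equal():  # pragma: no cover
    assert_index_equal(pd.RangeIndex(3), pd.Index([0, 1, 2]))
    # strict class checking would raise for the same pair:
    # assert_index_equal(pd.RangeIndex(3), pd.Index([0, 1, 2]), exact=True)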
def assert_class_equal(left, right, exact=True, obj='Input'):
"""checks classes are equal."""
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
try:
return x.__class__.__name__
except AttributeError:
return repr(type(x))
if exact == 'equiv':
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = set([type(left).__name__, type(right).__name__])
if len(types - set(['Int64Index', 'RangeIndex'])):
msg = '{0} classes are not equivalent'.format(obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
elif exact:
if type(left) != type(right):
msg = '{0} classes are different'.format(obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
def assert_attr_equal(attr, left, right, obj='Attributes'):
"""checks attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (is_number(left_attr) and np.isnan(left_attr) and
is_number(right_attr) and np.isnan(right_attr)):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
raise_assert_detail(obj, 'Attribute "{0}" are different'.format(attr),
left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = ('one of \'objs\' is not a matplotlib Axes instance, '
'type encountered {0!r}')
assert isinstance(el, (plt.Axes, dict)), msg.format(
el.__class__.__name__)
else:
assert isinstance(objs, (plt.Artist, tuple, dict)), \
('objs is neither an ndarray of Artist instances nor a '
'single Artist instance, tuple, or dict, "objs" is a {0!r} '
''.format(objs.__class__.__name__))
def isiterable(obj):
return hasattr(obj, '__iter__')
def is_sorted(seq):
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
return assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(left, right, check_dtype=True,
obj='Categorical', check_category_order=True):
"""Test that Categoricals are equivalent.
Parameters
----------
left, right : Categorical
Categoricals to compare
check_dtype : bool, default True
Check that integer dtype of the codes are the same
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories,
obj='{0}.categories'.format(obj))
assert_numpy_array_equal(left.codes, right.codes,
check_dtype=check_dtype,
obj='{0}.codes'.format(obj))
else:
assert_index_equal(left.categories.sort_values(),
right.categories.sort_values(),
obj='{0}.categories'.format(obj))
assert_index_equal(left.categories.take(left.codes),
right.categories.take(right.codes),
obj='{0}.values'.format(obj))
assert_attr_equal('ordered', left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None):
if isinstance(left, np.ndarray):
left = pprint_thing(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
msg = """{0} are different
{1}
[left]: {2}
[right]: {3}""".format(obj, message, left, right)
if diff is not None:
msg = msg + "\n[diff]: {diff}".format(diff=diff)
raise AssertionError(msg)
def assert_numpy_array_equal(left, right, strict_nan=False,
check_dtype=True, err_msg=None,
obj='numpy array', check_same=None):
""" Checks that 'np.ndarray' is equivalent
Parameters
----------
left : np.ndarray or iterable
right : np.ndarray or iterable
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype: bool, default True
check dtype if both a and b are np.ndarray
err_msg : str, default None
If provided, used as assertion message
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area
"""
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, 'base', None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == 'same':
if left_base is not right_base:
msg = "%r is not %r" % (left_base, right_base)
raise AssertionError(msg)
elif check_same == 'copy':
if left_base is right_base:
msg = "%r is %r" % (left_base, right_base)
raise AssertionError(msg)
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(obj, '{0} shapes are different'
.format(obj), left.shape, right.shape)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = '{0} values are different ({1} %)'\
.format(obj, np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal('dtype', left, right, obj=obj)
return True
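# Hedged usage sketch: value comparison plus the optional memory-sharing check.
# The arrays are illustrative assumptions.
def _example_assert_numpy_array_equal():  # pragma: no cover
    arr = np.array([1, 2, 3])
    assert_numpy_array_equal(arr, arr.copy())                     # equal values
    assert_numpy_array_equal(arr, arr.copy(), check_same='copy')  # distinct memory
    assert_numpy_array_equal(arr, arr.view(), check_same='same')  # shared memory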
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_series_type=True,
check_less_precise=False,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
obj='Series'):
"""Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_exact : bool, default False
        Whether to compare numbers exactly.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message
"""
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
# ToDo: There are some tests using rhs is sparse
# lhs is dense. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
raise_assert_detail(obj, 'Series length are different',
'{0}, {1}'.format(len(left), left.index),
'{0}, {1}'.format(len(right), right.index))
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{0}.index'.format(obj))
if check_dtype:
assert_attr_equal('dtype', left, right)
if check_exact:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype,
obj='{0}'.format(obj),)
elif check_datetimelike_compat:
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
if (is_datetimelike_v_numeric(left, right) or
is_datetimelike_v_object(left, right) or
needs_i8_conversion(left) or
needs_i8_conversion(right)):
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left.values).equals(Index(right.values)):
msg = '[datetimelike_compat=True] {0} is not equal to {1}.'
raise AssertionError(msg.format(left.values, right.values))
else:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype)
elif is_interval_dtype(left) or is_interval_dtype(right):
# TODO: big hack here
l = pd.IntervalIndex(left)
r = pd.IntervalIndex(right)
assert_index_equal(l, r, obj='{0}.index'.format(obj))
else:
_testing.assert_almost_equal(left.get_values(), right.get_values(),
check_less_precise=check_less_precise,
check_dtype=check_dtype,
obj='{0}'.format(obj))
# metadata comparison
if check_names:
assert_attr_equal('name', left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{0} category'.format(obj))
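# Hedged usage sketch: exact vs. tolerant Series comparison. The data and the
# tiny perturbation are illustrative assumptions.
def _example_assert_series_equal():  # pragma: no cover
    s1 = Series([0.1, 0.2, 0.3], name='x')
    s2 = s1 + 1e-9
    # the default comparison uses assert_almost_equal, so this passes
    assert_series_equal(s1, s2)
    # an exact comparison of the same data would raise:
    # assert_series_equal(s1, s2, check_exact=True)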
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_column_type='equiv',
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
obj='DataFrame'):
"""Check that left and right DataFrame are equal.
Parameters
----------
left : DataFrame
right : DataFrame
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
    check_index_type : bool / string {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical.
    check_column_type : bool / string {'equiv'}, default 'equiv'
        Whether to check the columns class, dtype and inferred_type
        are identical.
    check_frame_type : bool, default True
        Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_names : bool, default True
Whether to check the Index names attribute.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
        Whether to compare numbers exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If true, ignore the order of rows & columns
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message
"""
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
# ToDo: There are some tests using rhs is SparseDataFrame
# lhs is DataFrame. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(obj,
'DataFrame shape mismatch',
'({0}, {1})'.format(*left.shape),
'({0}, {1})'.format(*right.shape))
if check_like:
left, right = left.reindex_like(right), right
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{0}.index'.format(obj))
# column comparison
assert_index_equal(left.columns, right.columns, exact=check_column_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{0}.columns'.format(obj))
# compare by blocks
if by_blocks:
rblocks = right.blocks
lblocks = left.blocks
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(lblocks[dtype], rblocks[dtype],
check_dtype=check_dtype, obj='DataFrame.blocks')
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol, rcol, check_dtype=check_dtype,
check_index_type=check_index_type,
check_less_precise=check_less_precise,
check_exact=check_exact, check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
obj='DataFrame.iloc[:, {0}]'.format(i))
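# Hedged usage sketch: check_like ignores row/column order, which is handy for
# groupby or merge results. The frames are illustrative assumptions.
def _example_assert_frame_equal():  # pragma: no cover
    df1 = DataFrame({'a': [1, 2], 'b': [3.0, 4.0]}, index=['x', 'y'])
    df2 = df1.loc[['y', 'x'], ['b', 'a']]
    assert_frame_equal(df1, df2, check_like=True)
    # with check_like=False (the default) the same pair would raise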
def assert_panelnd_equal(left, right,
check_dtype=True,
check_panel_type=False,
check_less_precise=False,
assert_func=assert_frame_equal,
check_names=False,
by_blocks=False,
obj='Panel'):
"""Check that left and right Panels are equal.
Parameters
----------
left : Panel (or nd)
right : Panel (or nd)
check_dtype : bool, default True
Whether to check the Panel dtype is identical.
check_panel_type : bool, default False
Whether to check the Panel class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
assert_func : function for comparing data
check_names : bool, default True
Whether to check the Index names attribute.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
obj : str, default 'Panel'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
if check_panel_type:
assert_class_equal(left, right, obj=obj)
for axis in left._AXIS_ORDERS:
left_ind = getattr(left, axis)
right_ind = getattr(right, axis)
assert_index_equal(left_ind, right_ind, check_names=check_names)
if by_blocks:
rblocks = right.blocks
lblocks = left.blocks
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
array_equivalent(lblocks[dtype].values, rblocks[dtype].values)
else:
# can potentially be slow
for i, item in enumerate(left._get_axis(0)):
assert item in right, "non-matching item (right) '%s'" % item
litem = left.iloc[i]
ritem = right.iloc[i]
assert_func(litem, ritem, check_less_precise=check_less_precise)
for i, item in enumerate(right._get_axis(0)):
assert item in left, "non-matching item (left) '%s'" % item
# TODO: strangely check_names fails in py3 ?
_panel_frame_equal = partial(assert_frame_equal, check_names=False)
assert_panel_equal = partial(assert_panelnd_equal,
assert_func=_panel_frame_equal)
assert_panel4d_equal = partial(assert_panelnd_equal,
assert_func=assert_panel_equal)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(left, right, check_dtype=True):
"""Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
check_dtype : bool, default True
Whether to check the data dtype is identical.
"""
_check_isinstance(left, right, pd.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values,
check_dtype=check_dtype)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
if not left.sp_index.equals(right.sp_index):
raise_assert_detail('SparseArray.index', 'index are not equal',
left.sp_index, right.sp_index)
assert_attr_equal('fill_value', left, right)
if check_dtype:
assert_attr_equal('dtype', left, right)
assert_numpy_array_equal(left.values, right.values,
check_dtype=check_dtype)
def assert_sp_series_equal(left, right, check_dtype=True, exact_indices=True,
check_series_type=True, check_names=True,
obj='SparseSeries'):
"""Check that the left and right SparseSeries are equal.
Parameters
----------
left : SparseSeries
right : SparseSeries
check_dtype : bool, default True
Whether to check the Series dtype is identical.
exact_indices : bool, default True
check_series_type : bool, default True
Whether to check the SparseSeries class is identical.
check_names : bool, default True
Whether to check the SparseSeries name attribute.
obj : str, default 'SparseSeries'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
_check_isinstance(left, right, pd.SparseSeries)
if check_series_type:
assert_class_equal(left, right, obj=obj)
assert_index_equal(left.index, right.index,
obj='{0}.index'.format(obj))
assert_sp_array_equal(left.block.values, right.block.values)
if check_names:
assert_attr_equal('name', left, right)
if check_dtype:
assert_attr_equal('dtype', left, right)
assert_numpy_array_equal(left.values, right.values)
def assert_sp_frame_equal(left, right, check_dtype=True, exact_indices=True,
check_frame_type=True, obj='SparseDataFrame'):
"""Check that the left and right SparseDataFrame are equal.
Parameters
----------
left : SparseDataFrame
right : SparseDataFrame
check_dtype : bool, default True
Whether to check the Series dtype is identical.
exact_indices : bool, default True
SparseSeries SparseIndex objects must be exactly the same,
otherwise just compare dense representations.
check_frame_type : bool, default True
Whether to check the SparseDataFrame class is identical.
obj : str, default 'SparseDataFrame'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
_check_isinstance(left, right, pd.SparseDataFrame)
if check_frame_type:
assert_class_equal(left, right, obj=obj)
assert_index_equal(left.index, right.index,
obj='{0}.index'.format(obj))
assert_index_equal(left.columns, right.columns,
obj='{0}.columns'.format(obj))
for col, series in compat.iteritems(left):
assert (col in right)
# trade-off?
if exact_indices:
assert_sp_series_equal(series, right[col],
check_dtype=check_dtype)
else:
assert_series_equal(series.to_dense(), right[col].to_dense(),
check_dtype=check_dtype)
assert_attr_equal('default_fill_value', left, right, obj=obj)
# do I care?
# assert(left.default_kind == right.default_kind)
for col in right:
assert (col in left)
def assert_sp_list_equal(left, right):
assert isinstance(left, pd.SparseList)
assert isinstance(right, pd.SparseList)
assert_sp_array_equal(left.to_array(), right.to_array())
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, "Did not contain item: '%r'" % k
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
assert elem1 is not elem2, ("Expected object %r and "
"object %r to be different "
"objects, were same."
% (type(elem1), type(elem2)))
def getCols(k):
return string.ascii_uppercase[:k]
def getArangeMat():
return np.arange(N * K).reshape((N, K))
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(np.random.choice(x, k), name=name)
def makeIntervalIndex(k=10, name=None):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(lrange(k), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2**63 + i for i in lrange(k)], name=name)
def makeRangeIndex(k=10, name=None):
return RangeIndex(0, k, 1, name=name)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq='B', name=None):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name)
def makeTimedeltaIndex(k=10, freq='D', name=None):
return TimedeltaIndex(start='1 day', periods=k, freq=freq, name=name)
def makePeriodIndex(k=10, name=None):
dt = datetime(2000, 1, 1)
dr = PeriodIndex(start=dt, periods=k, freq='B', name=name)
return dr
def all_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the various
index classes.
Parameters
----------
k: length of each of the index instances
"""
all_make_index_funcs = [makeIntIndex, makeFloatIndex, makeStringIndex,
makeUnicodeIndex, makeDateIndex, makePeriodIndex,
makeTimedeltaIndex, makeBoolIndex,
makeCategoricalIndex]
for make_index_func in all_make_index_funcs:
yield make_index_func(k=k)
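# Hedged usage sketch: exercising a property across every index flavour the
# generator yields; the length and round-trip checks are illustrative
# assumptions.
def _example_all_index_generator():  # pragma: no cover
    for idx in all_index_generator(k=5):
        assert len(idx) == 5
        assert_index_equal(idx, idx.copy())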
def all_timeseries_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the classes
    which represent time series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeObjectSeries(name=None):
dateIndex = makeDateIndex(N)
dateIndex = Index(dateIndex, dtype=object)
index = makeStringIndex(N)
return Series(dateIndex, index=index, name=name)
def getSeriesData():
index = makeStringIndex(N)
return dict((c, Series(randn(N), index=index)) for c in getCols(K))
def makeTimeSeries(nper=None, freq='B', name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq='B'):
return dict((c, makeTimeSeries(nper, freq)) for c in getCols(K))
def getPeriodData(nper=None):
return dict((c, makePeriodSeries(nper)) for c in getCols(K))
# make frame
def makeTimeDataFrame(nper=None, freq='B'):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(['a', 'b', 'c', 'd', 'e'])
data = {
'A': [0., 1., 2., 3., 4.],
'B': [0., 1., 0., 1., 0.],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': bdate_range('1/1/2009', periods=5)
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makePanel(nper=None):
cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]
data = dict((c, makeTimeDataFrame(nper)) for c in cols)
return Panel.fromDict(data)
def makePeriodPanel(nper=None):
cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]
data = dict((c, makePeriodFrame(nper)) for c in cols)
return Panel.fromDict(data)
def makePanel4D(nper=None):
with warnings.catch_warnings(record=True):
d = dict(l1=makePanel(nper), l2=makePanel(nper),
l3=makePanel(nper))
return Panel4D(d)
def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None,
idx_type=None):
"""Create an index/multindex with given dimensions, levels, names, etc'
nentries - number of entries in index
nlevels - number of levels (> 1 produces multindex)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. if True will use default
names, if false will use no names, if a list is given, the name of
each level in the index will be taken from the list.
ndupe_l - (Optional), list of ints, the number of rows for which the
        label will be repeated at the corresponding level; you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a datetime index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert (is_sequence(ndupe_l) and len(ndupe_l) <= nlevels)
assert (names is None or names is False or
            names is True or len(names) == nlevels)
assert idx_type is None or \
(idx_type in ('i', 'f', 's', 'u', 'dt', 'p', 'td') and nlevels == 1)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
    # make the singleton case uniform
if isinstance(names, compat.string_types) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = dict(i=makeIntIndex, f=makeFloatIndex,
s=makeStringIndex, u=makeUnicodeIndex,
dt=makeDateIndex, td=makeTimedeltaIndex,
p=makePeriodIndex).get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError('"%s" is not a legal value for `idx_type`, use '
'"i"/"f"/"s"/"u"/"dt/"p"/"td".' % idx_type)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all([x > 0 for x in ndupe_l])
tuples = []
for i in range(nlevels):
def keyfunc(x):
import re
            numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return lmap(int, numeric_tuple)
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
cnt = Counter()
for j in range(div_factor):
label = prefix + '_l%d_g' % i + str(j)
cnt[label] = ndupe_l[i]
# cute Counter trick
result = list(sorted(cnt.elements(), key=keyfunc))[:nentries]
tuples.append(result)
tuples = lzip(*tuples)
# convert tuples to index
if nentries == 1:
index = Index(tuples[0], name=names[0])
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
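# Hedged usage sketch: a 2-level MultiIndex with duplicated first-level labels
# and a plain DatetimeIndex; the sizes and names are illustrative assumptions.
def _example_makeCustomIndex():  # pragma: no cover
    mi = makeCustomIndex(nentries=6, nlevels=2, ndupe_l=[3],
                         names=['grp', 'obs'])
    dti = makeCustomIndex(nentries=4, nlevels=1, idx_type='dt')
    return mi, dti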
def makeCustomDataframe(nrows, ncols, c_idx_names=True, r_idx_names=True,
c_idx_nlevels=1, r_idx_nlevels=1, data_gen_f=None,
c_ndupe_l=None, r_ndupe_l=None, dtype=None,
c_idx_type=None, r_idx_type=None):
"""
nrows, ncols - number of data rows/cols
    c_idx_names, r_idx_names - False/True/list of strings, yields no names,
default names or uses the provided names for the levels of the
corresponding index. You can provide a single string when
c_idx_nlevels ==1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
    data_gen_f - a function f(row,col) which returns the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
        have more control in conjunction with a custom `data_gen_f`
r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a timedelta index.
if unspecified, string labels will be generated.
Examples:
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
    # 4-level multiindex on rows with names provided, 2-level multiindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FI","FO","FAM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or \
(r_idx_type in ('i', 'f', 's',
'u', 'dt', 'p', 'td') and r_idx_nlevels == 1)
assert c_idx_type is None or \
(c_idx_type in ('i', 'f', 's',
'u', 'dt', 'p', 'td') and c_idx_nlevels == 1)
columns = makeCustomIndex(ncols, nlevels=c_idx_nlevels, prefix='C',
names=c_idx_names, ndupe_l=c_ndupe_l,
idx_type=c_idx_type)
index = makeCustomIndex(nrows, nlevels=r_idx_nlevels, prefix='R',
names=r_idx_names, ndupe_l=r_ndupe_l,
idx_type=r_idx_type)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: "R%dC%d" % (r, c)
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1. / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingCustomDataframe(nrows, ncols, density=.9, random_state=None,
c_idx_names=True, r_idx_names=True,
c_idx_nlevels=1, r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None, r_ndupe_l=None, dtype=None,
c_idx_type=None, r_idx_type=None):
"""
Parameters
----------
    density : float, optional
Float in (0, 1) that gives the percentage of non-missing numbers in
the DataFrame.
random_state : {np.random.RandomState, int}, optional
Random number generator or random seed.
See makeCustomDataframe for descriptions of the rest of the parameters.
"""
df = makeCustomDataframe(nrows, ncols, c_idx_names=c_idx_names,
r_idx_names=r_idx_names,
c_idx_nlevels=c_idx_nlevels,
r_idx_nlevels=r_idx_nlevels,
data_gen_f=data_gen_f,
c_ndupe_l=c_ndupe_l, r_ndupe_l=r_ndupe_l,
dtype=dtype, c_idx_type=c_idx_type,
r_idx_type=r_idx_type)
i, j = _create_missing_idx(nrows, ncols, density, random_state)
df.values[i, j] = np.nan
return df
def makeMissingDataframe(density=.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density,
random_state=random_state)
df.values[i, j] = np.nan
return df
def add_nans(panel):
I, J, N = panel.shape
for i, item in enumerate(panel.items):
dm = panel[item]
for j, col in enumerate(dm.columns):
dm[col][:i + j] = np.NaN
return panel
def add_nans_panel4d(panel4d):
for l, label in enumerate(panel4d.labels):
panel = panel4d[label]
add_nans(panel)
return panel4d
class TestSubDict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
# Dependency checker when running tests.
#
# Copied this from nipy/nipype
# Copyright of respective developers, License: BSD-3
def skip_if_no_package(pkg_name, min_version=None, max_version=None,
app='pandas', checker=LooseVersion):
"""Check that the min/max version of the required package is installed.
If the package check fails, the test is automatically skipped.
Parameters
----------
pkg_name : string
Name of the required package.
min_version : string, optional
Minimal version number for required package.
max_version : string, optional
Max version number for required package.
app : string, optional
Application that is performing the check. For instance, the
name of the tutorial being executed that depends on specific
packages.
checker : object, optional
The class that will perform the version checking. Default is
distutils.version.LooseVersion.
Examples
--------
    skip_if_no_package('numpy', '1.3')
"""
import pytest
if app:
msg = '%s requires %s' % (app, pkg_name)
else:
msg = 'module requires %s' % pkg_name
if min_version:
msg += ' with version >= %s' % (min_version,)
if max_version:
msg += ' with version < %s' % (max_version,)
try:
mod = __import__(pkg_name)
except ImportError:
mod = None
try:
have_version = mod.__version__
except AttributeError:
pytest.skip('Cannot find version for %s' % pkg_name)
if min_version and checker(have_version) < checker(min_version):
pytest.skip(msg)
if max_version and checker(have_version) >= checker(max_version):
pytest.skip(msg)
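# Hedged usage sketch: guarding a test on an optional dependency; the package
# name and version bound are illustrative assumptions.
def _example_skip_if_no_package():  # pragma: no cover
    skip_if_no_package('numpy', min_version='1.7', app='this example')
    # reaching this point means numpy >= 1.7 is importable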
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, *args, **kwargs)"""
@wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and callable(args[0])
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
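# Hedged usage sketch: a decorator built with optional_args that works both
# bare and with keyword arguments; the `_example_repeat` name and the `times`
# parameter are illustrative assumptions, not pandas API.
@optional_args
def _example_repeat(f, times=2):  # pragma: no cover
    @wraps(f)
    def wrapper(*args, **kwargs):
        result = None
        for _ in range(times):
            result = f(*args, **kwargs)
        return result
    return wrapper
#
# @_example_repeat               # bare: decorates the function directly
# def test_once(): ...
#
# @_example_repeat(times=5)      # with kwargs: returns a decorator
# def test_five_times(): ...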
# skip tests on exceptions with this message
_network_error_messages = (
# 'urlopen error timed out',
# 'timeout: timed out',
# 'socket.timeout: timed out',
'timed out',
'Server Hangup',
'HTTP Error 503: Service Unavailable',
'502: Proxy Error',
'HTTP Error 502: internal error',
'HTTP Error 502',
'HTTP Error 503',
'HTTP Error 403',
'HTTP Error 400',
'Temporary failure in name resolution',
'Name or service not known',
'Connection refused',
'certificate verify',
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
101, # Network is unreachable
111, # Connection refused
110, # Connection timed out
104, # Connection reset Error
54, # Connection reset by peer
60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flaky
# servers.
# and conditionally raise on these exception types
_network_error_classes = (IOError, httplib.HTTPException)
if sys.version_info >= (3, 3):
_network_error_classes += (TimeoutError,) # noqa
def can_connect(url, error_classes=_network_error_classes):
"""Try to connect to the given url. True if succeeds, False if IOError
raised
Parameters
----------
url : basestring
The URL to try to connect to
Returns
-------
connectable : bool
Return True if no IOError (unable to connect) or URLError (bad url) was
raised
"""
try:
with urlopen(url):
pass
except error_classes:
return False
else:
return True
@optional_args
def network(t, url="http://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
error_classes=_network_error_classes,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
"""
Label a test as requiring network connection and, if an error is
encountered, only raise if it does not find a network connection.
In comparison to ``network``, this assumes an added contract to your test:
you must assert that, under normal conditions, your test will ONLY fail if
it does not have network connectivity.
You can call this in 3 ways: as a standard decorator, with keyword
arguments, or with a positional argument that is the url to check.
Parameters
----------
t : callable
The test requiring network connectivity.
url : path
The url to test via ``pandas.io.common.urlopen`` to check
for connectivity. Defaults to 'http://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
If True, checks connectivity before running the test case.
error_classes : tuple or Exception
error classes to ignore. If not in ``error_classes``, raises the error.
defaults to IOError. Be careful about changing the error classes here.
skip_errnos : iterable of int
        Any exception that has .errno or .reason.errno set to one
of these values will be skipped with an appropriate
message.
_skip_on_messages: iterable of string
any exception e for which one of the strings is
a substring of str(e) will be skipped with an appropriate
        message. Intended to suppress errors where an errno isn't available.
Notes
-----
    * ``raise_on_error`` supersedes ``check_before_test``
Returns
-------
t : callable
The decorated test ``t``, with checks for connectivity errors.
Example
-------
Tests decorated with @network will fail if it's possible to make a network
connection to another URL (defaults to google.com)::
>>> from pandas.util.testing import network
>>> from pandas.io.common import urlopen
>>> @network
... def test_network():
... with urlopen("rabbit://bonanza.com"):
... pass
Traceback
...
    URLError: <urlopen error unknown url type: rabbit>
You can specify alternative URLs::
>>> @network("http://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
Traceback (most recent call last):
...
IOError: Failure Message
If you set check_before_test, it will check the url first and not run the
test on failure::
>>> @network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
>>> test_something()
Traceback (most recent call last):
...
Errors not related to networking will always be raised.
"""
from pytest import skip
t.network = True
@wraps(t)
def wrapper(*args, **kwargs):
if check_before_test and not raise_on_error:
if not can_connect(url, error_classes):
skip()
try:
return t(*args, **kwargs)
except Exception as e:
errno = getattr(e, 'errno', None)
            if not errno and hasattr(e, "reason"):
                errno = getattr(e.reason, 'errno', None)
if errno in skip_errnos:
skip("Skipping test due to known errno"
" and error %s" % e)
try:
e_str = traceback.format_exc(e)
            except Exception:
e_str = str(e)
if any([m.lower() in e_str.lower() for m in _skip_on_messages]):
skip("Skipping test because exception "
"message is known and error %s" % e)
if not isinstance(e, error_classes):
raise
if raise_on_error or can_connect(url, error_classes):
raise
else:
skip("Skipping test due to lack of connectivity"
" and error %s" % e)
return wrapper
with_connectivity_check = network
class SimpleMock(object):
"""
Poor man's mocking object
Note: only works for new-style classes, assumes __getattribute__ exists.
>>> a = type("Duck",(),{})
>>> a.attr1,a.attr2 ="fizz","buzz"
>>> b = SimpleMock(a,"attr1","bar")
>>> b.attr1 == "bar" and b.attr2 == "buzz"
True
>>> a.attr1 == "fizz" and a.attr2 == "buzz"
True
"""
def __init__(self, obj, *args, **kwds):
assert(len(args) % 2 == 0)
attrs = kwds.get("attrs", {})
for k, v in zip(args[::2], args[1::2]):
# dict comprehensions break 2.6
attrs[k] = v
self.attrs = attrs
self.obj = obj
def __getattribute__(self, name):
attrs = object.__getattribute__(self, "attrs")
obj = object.__getattribute__(self, "obj")
return attrs.get(name, type(obj).__getattribute__(obj, name))
@contextmanager
def stdin_encoding(encoding=None):
"""
Context manager for running bits of code while emulating an arbitrary
stdin encoding.
>>> import sys
>>> _encoding = sys.stdin.encoding
>>> with stdin_encoding('AES'): sys.stdin.encoding
'AES'
>>> sys.stdin.encoding==_encoding
True
"""
import sys
_stdin = sys.stdin
sys.stdin = SimpleMock(sys.stdin, "encoding", encoding)
yield
sys.stdin = _stdin
def assert_raises_regex(_exception, _regexp, _callable=None,
*args, **kwargs):
"""
Check that the specified Exception is raised and that the error message
matches a given regular expression pattern. This may be a regular
expression object or a string containing a regular expression suitable
for use by `re.search()`.
This is a port of the `assertRaisesRegexp` function from unittest in
Python 2.7. However, with our migration to `pytest`, please refrain
from using this. Instead, use the following paradigm:
with pytest.raises(_exception) as exc_info:
func(*args, **kwargs)
exc_info.matches(reg_exp)
Examples
--------
>>> assert_raises_regex(ValueError, 'invalid literal for.*XYZ', int, 'XYZ')
>>> import re
>>> assert_raises_regex(ValueError, re.compile('literal'), int, 'XYZ')
If an exception of a different type is raised, it bubbles up.
>>> assert_raises_regex(TypeError, 'literal', int, 'XYZ')
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'XYZ'
>>> dct = dict()
>>> assert_raises_regex(KeyError, 'pear', dct.__getitem__, 'apple')
Traceback (most recent call last):
...
AssertionError: "pear" does not match "'apple'"
You can also use this in a with statement.
>>> with assert_raises_regex(TypeError, 'unsupported operand type\(s\)'):
... 1 + {}
>>> with assert_raises_regex(TypeError, 'banana'):
... 'apple'[0] = 'b'
Traceback (most recent call last):
...
AssertionError: "banana" does not match "'str' object does not support \
item assignment"
"""
manager = _AssertRaisesContextmanager(exception=_exception, regexp=_regexp)
if _callable is not None:
with manager:
_callable(*args, **kwargs)
else:
return manager
class _AssertRaisesContextmanager(object):
"""
Context manager behind `assert_raises_regex`.
"""
def __init__(self, exception, regexp=None):
"""
Initialize an _AssertRaisesContextManager instance.
Parameters
----------
exception : class
The expected Exception class.
regexp : str, default None
The regex to compare against the Exception message.
"""
self.exception = exception
if regexp is not None and not hasattr(regexp, "search"):
regexp = re.compile(regexp, re.DOTALL)
self.regexp = regexp
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, trace_back):
expected = self.exception
if not exc_type:
exp_name = getattr(expected, "__name__", str(expected))
raise AssertionError("{0} not raised.".format(exp_name))
return self.exception_matches(exc_type, exc_value, trace_back)
def exception_matches(self, exc_type, exc_value, trace_back):
"""
Check that the Exception raised matches the expected Exception
and expected error message regular expression.
Parameters
----------
exc_type : class
The type of Exception raised.
exc_value : Exception
The instance of `exc_type` raised.
trace_back : stack trace object
The traceback object associated with `exc_value`.
Returns
-------
is_matched : bool
Whether or not the Exception raised matches the expected
Exception class and expected error message regular expression.
Raises
------
AssertionError : The error message provided does not match
the expected error message regular expression.
"""
if issubclass(exc_type, self.exception):
if self.regexp is not None:
val = str(exc_value)
if not self.regexp.search(val):
e = AssertionError('"%s" does not match "%s"' %
(self.regexp.pattern, str(val)))
raise_with_traceback(e, trace_back)
return True
else:
# Failed, so allow Exception to bubble up.
return False
@contextmanager
def assert_produces_warning(expected_warning=Warning, filter_level="always",
clear=None, check_stacklevel=True):
"""
Context manager for running code that expects to raise (or not raise)
warnings. Checks that code raises the expected warning and only the
expected warning. Pass ``False`` or ``None`` to check that it does *not*
    raise a warning. Defaults to ``Warning``, the base class of all
    warnings (basically a wrapper around ``warnings.catch_warnings``).
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
    .. warning:: This is *not* thread-safe.
"""
with warnings.catch_warnings(record=True) as w:
if clear is not None:
            # make sure that we are clearing these warnings
# if they have happened before
# to guarantee that we will catch them
if not is_list_like(clear):
clear = [clear]
for m in clear:
try:
m.__warningregistry__.clear()
                except Exception:
pass
saw_warning = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if (expected_warning and issubclass(actual_warning.category,
expected_warning)):
saw_warning = True
if check_stacklevel and issubclass(actual_warning.category,
(FutureWarning,
DeprecationWarning)):
from inspect import getframeinfo, stack
caller = getframeinfo(stack()[2][0])
msg = ("Warning not set with correct stacklevel. "
"File where warning is raised: {0} != {1}. "
"Warning message: {2}".format(
actual_warning.filename, caller.filename,
actual_warning.message))
assert actual_warning.filename == caller.filename, msg
else:
extra_warnings.append(actual_warning.category.__name__)
if expected_warning:
assert saw_warning, ("Did not see expected warning of class %r."
% expected_warning.__name__)
assert not extra_warnings, ("Caused unexpected warning(s): %r."
% extra_warnings)
class RNGContext(object):
"""
    Context manager to set the numpy random number generator seed. Returns
to the original value upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
@contextmanager
def use_numexpr(use, min_elements=expr._MIN_ELEMENTS):
olduse = expr._USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
expr.set_use_numexpr(use)
expr._MIN_ELEMENTS = min_elements
yield
expr._MIN_ELEMENTS = oldmin
expr.set_use_numexpr(olduse)
def test_parallel(num_threads=2, kwargs_list=None):
"""Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args,
kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
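# Hedged usage sketch: running the same function concurrently from several
# threads to smoke-test thread safety; the shared list and thread count are
# illustrative assumptions.
def _example_test_parallel():  # pragma: no cover
    results = []

    @test_parallel(num_threads=4)
    def append_timestamp():
        results.append(pd.Timestamp('2000-01-01'))

    append_timestamp()  # runs in 4 threads and blocks until all have joined
    assert len(results) == 4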
class SubclassedSeries(Series):
_metadata = ['testattr', 'name']
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ['testattr']
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedSparseSeries(pd.SparseSeries):
_metadata = ['testattr']
@property
def _constructor(self):
return SubclassedSparseSeries
@property
def _constructor_expanddim(self):
return SubclassedSparseDataFrame
class SubclassedSparseDataFrame(pd.SparseDataFrame):
_metadata = ['testattr']
@property
def _constructor(self):
return SubclassedSparseDataFrame
@property
def _constructor_sliced(self):
return SubclassedSparseSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
@contextmanager
def patch(ob, attr, value):
"""Temporarily patch an attribute of an object.
Parameters
----------
ob : any
The object to patch. This must support attribute assignment for `attr`.
attr : str
The name of the attribute to patch.
value : any
The temporary attribute to assign.
Examples
--------
>>> class C(object):
... attribute = 'original'
...
>>> C.attribute
'original'
>>> with patch(C, 'attribute', 'patched'):
... in_context = C.attribute
...
>>> in_context
'patched'
    >>> C.attribute # the value is reset when the context manager exits
'original'
Correctly replaces attribute when the manager exits with an exception.
>>> with patch(C, 'attribute', 'patched'):
... in_context = C.attribute
... raise ValueError()
Traceback (most recent call last):
...
ValueError
>>> in_context
'patched'
>>> C.attribute
'original'
"""
noattr = object() # mark that the attribute never existed
old = getattr(ob, attr, noattr)
setattr(ob, attr, value)
try:
yield
finally:
if old is noattr:
delattr(ob, attr)
else:
setattr(ob, attr, old)
@contextmanager
def set_timezone(tz):
"""Context manager for temporarily setting a timezone.
Parameters
----------
tz : str
A string representing a valid timezone.
Examples
--------
>>> from datetime import datetime
>>> from dateutil.tz import tzlocal
>>> tzlocal().tzname(datetime.now())
'IST'
>>> with set_timezone('US/Eastern'):
... tzlocal().tzname(datetime.now())
...
'EDT'
"""
if is_platform_windows():
import pytest
pytest.skip("timezone setting not supported on windows")
import os
import time
def setTZ(tz):
if tz is None:
try:
del os.environ['TZ']
            except KeyError:
pass
else:
os.environ['TZ'] = tz
time.tzset()
orig_tz = os.environ.get('TZ')
setTZ(tz)
try:
yield
finally:
setTZ(orig_tz)
| mit |
ephes/scikit-learn | sklearn/__init__.py | 154 | 3014 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
                        module=r'^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.17.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve',
'linear_model', 'manifold', 'metrics', 'mixture', 'multiclass',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
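    # Example (informal): a particular test run can be made reproducible by
    # exporting the seed before invoking the test runner, e.g.
    #   SKLEARN_SEED=42 nosetests sklearn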
| bsd-3-clause |
faroit/loudness | python/tests/test_OME.py | 1 | 2084 | import numpy as np
import matplotlib.pyplot as plt
import loudness as ln
def plotResponse(freqPoints, dataPoints,
freqsInterp, responseInterp,
ylim=(-40, 10), title = ""):
if np.any(dataPoints):
plt.semilogx(freqPoints, dataPoints, 'o')
plt.semilogx(freqsInterp, responseInterp)
plt.xlim(20, 20e3)
plt.ylim(ylim)
plt.xlabel("Frequency, Hz")
plt.ylabel("Response, dB")
plt.title(title)
plt.show()
def plotMiddleEar(filterType, ylim=(-40, 0)):
freqs = np.arange(20, 20000, 2)
ome = ln.OME(filterType, ln.OME.NONE)
ome.interpolateResponse(freqs)
response = ome.getResponse()
freqPoints = ome.getMiddleEarFreqPoints()
dataPoints = ome.getMiddleEardB()
plotResponse(freqPoints, dataPoints,
freqs, response, ylim)
def plotOuterEar(filterType, ylim=(-40, 0)):
freqs = np.arange(20, 20000, 2)
ome = ln.OME(ln.OME.NONE, filterType)
ome.interpolateResponse(freqs)
response = ome.getResponse()
freqPoints = ome.getOuterEarFreqPoints()
dataPoints = ome.getOuterEardB()
plotResponse(freqPoints, dataPoints,
freqs, response, ylim)
def plotCombined(middleFilterType, outerFilterType, ylim=(-40, 10)):
freqs = np.arange(20, 20000, 2)
ome = ln.OME(middleFilterType, outerFilterType)
ome.interpolateResponse(freqs)
response = ome.getResponse()
plotResponse(None, None,
freqs, response, ylim)
plt.figure(1)
plotMiddleEar(ln.OME.ANSIS342007_MIDDLE_EAR, (-40, 0))
plt.figure(2)
plotMiddleEar(ln.OME.CHGM2011_MIDDLE_EAR, (-40, 10))
plt.figure(2)
plotMiddleEar(ln.OME.ANSIS342007_MIDDLE_EAR_HPF, (-40, 0))
plt.figure(3)
plotOuterEar(ln.OME.ANSIS342007_FREEFIELD, (-5, 20))
plt.figure(4)
plotOuterEar(ln.OME.ANSIS342007_DIFFUSEFIELD, (-5, 20))
plt.figure(5)
plotOuterEar(ln.OME.BD_DT990, (-10, 10))
plt.figure(6)
plotCombined(ln.OME.ANSIS342007_MIDDLE_EAR,
ln.OME.ANSIS342007_FREEFIELD, (-40, 10))
plt.figure(7)
plotCombined(ln.OME.ANSIS342007_MIDDLE_EAR, ln.OME.BD_DT990, (-40, 10))
| gpl-3.0 |
xyguo/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 55 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coef_, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
| bsd-3-clause |
tomzw11/Pydrone | route.py | 1 | 2000 | import matplotlib.pyplot as plt
import matplotlib.patches as patches
def route(root):
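    # route() maps a waypoint [x, y, altitude] to the four child waypoints one
    # level down: each child flies at half the parent altitude and is offset
    # horizontally by fixed fractions (0.42 and -0.15) of the parent altitude,
    # which presumably reflect the camera footprint at that height.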
root_height = root[2]
coordinates = [\
[0.42*root_height+root[0],0.42*root_height+root[1],root_height/2],\
[-0.42*root_height+root[0],0.42*root_height+root[1],root_height/2],\
[-0.42*root_height+root[0],-0.15*root_height+root[1],root_height/2],\
[0.42*root_height+root[0],-0.15*root_height+root[1],root_height/2]]
return coordinates
if __name__ == "__main__":
meter_to_feet = 3.28
root = [0,0,16*1]
    print('root', root, '\n')
    level1 = route(root)
    print('level 1 \n')
    print(level1[0], '\n')
    print(level1[1], '\n')
    print(level1[2], '\n')
    print(level1[3], '\n')
    print('level 2 \n')
    level2 = [[0]*3]*4
    for x in range(4):
        level2[x] = route(level1[x])
        for y in range(4):
            print('level2 point[', x+1, y+1, ']', level2[x][y], '\n')
fig, ax = plt.subplots()
ball, = plt.plot(6.72+1.52,6.72+1.52,'mo')
plt.plot(0,0,'bo')
plt.plot([level1[0][0],level1[1][0],level1[2][0],level1[3][0]],[level1[0][1],level1[1][1],level1[2][1],level1[3][1]],'ro')
rect_blue = patches.Rectangle((-13.44,-4.8),13.44*2,9.12*2,linewidth=1,edgecolor='b',facecolor='b',alpha = 0.1)
ax.add_patch(rect_blue)
rect_red = patches.Rectangle((0,4.23),13.44,9.12,linewidth=1,edgecolor='r',facecolor='r',alpha = 0.3)
ax.add_patch(rect_red)
plt.plot([level2[0][0][0],level2[0][1][0],level2[0][2][0],level2[0][3][0]],[level2[0][0][1],level2[0][1][1],level2[0][2][1],level2[0][3][1]],'go')
rect_green = patches.Rectangle((6.72,6.72+4.23/2),13.44/2,9.12/2,linewidth=1,edgecolor='g',facecolor='g',alpha = 0.5)
ax.add_patch(rect_green)
linear_s = [12,12]
plt.plot(12,12,'yo')
rect_yellow = patches.Rectangle((10,11),13.44/4,9.12/4,linewidth=1,edgecolor='y',facecolor='y',alpha = 0.5)
ax.add_patch(rect_yellow)
ax.legend([ball,rect_blue,rect_red,rect_green,rect_yellow],['Ball','Root View','Level 1 - 4 anchors','Level 2 - 16 anchors','Linear Search - 64 anchors'])
plt.axis([-13.44, 13.44, -4.8, 13.44])
plt.show()
| mit |
bmazin/SDR | Projects/ChannelizerSim/legacy/bin_width_1st_stage.py | 1 | 1524 |
import matplotlib.pyplot as plt
import scipy.signal
import numpy as np
import math
import random
from matplotlib.backends.backend_pdf import PdfPages
samples = 51200
L = samples//512
fs = 512e6
dt = 1/fs
time = [i*dt for i in range(samples)]
def pfb_fir(x):
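    # Informal description of this polyphase filter-bank (PFB) FIR front end:
    # a prototype low-pass filter of length T*L (a sinc widened by
    # 'bin_width_scale', windowed with a Hanning window) is applied to the
    # input; each output sample n sums T input samples spaced L apart,
    # weighted by the matching polyphase branch of the prototype filter.
    # Taking an L-point (512) FFT of the filtered output, as done below for
    # bin 10, then yields the channelizer bins.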
N = len(x)
T = 4
L = 512
bin_width_scale = 2.5
dx = T*math.pi/L/T
X = np.array([n*dx-T*math.pi/2 for n in range(T*L)])
coeff = np.sinc(bin_width_scale*X/math.pi)*np.hanning(T*L)
y = np.array([0+0j]*(N-T*L))
for n in range((T-1)*L, N):
m = n%L
coeff_sub = coeff[L*T-m::-L]
y[n-T*L] = (x[n-(T-1)*L:n+L:L]*coeff_sub).sum()
return y
R = 100//5
#freqs = [i*1e5 + 6.0e6 for i in range(R)]
freqs = [i*5e4 + 6.0e6 for i in range(R*8)]
bin = []
bin_pfb = []
for f in freqs:
    print(f)
signal = np.array([complex(math.cos(2*math.pi*f*t), math.sin(2*math.pi*f*t)) for t in time])
y = pfb_fir(signal)
bin_pfb.append(np.fft.fft(y[0:512])[10])
bin = np.array(bin)
bin_pfb = np.array(bin_pfb)
freqs = np.array(freqs)/1e6
b = scipy.signal.firwin(20, cutoff=0.125, window="hanning")
w,h = scipy.signal.freqz(b,1, 4*R, whole=1)
h = np.array(h[2*R:4*R].tolist()+h[0:2*R].tolist())
#h = np.array(h[20:40].tolist()+h[0:20].tolist())
fig = plt.figure()
ax0 = fig.add_subplot(111)
#ax0.plot(freqs, abs(fir9), '.', freqs, abs(fir10), '.', freqs, abs(fir11), '.')
ax0.plot(freqs, 10*np.log10(abs(bin_pfb)/512), 'k-')
ax0.set_xlabel('Frequency (MHz)')
ax0.set_ylabel('Gain (dB)')
ax0.set_ylim((-50,0))
plt.show()
#ax0.axvline(x = 10, linewidth=1, color='k')
| gpl-2.0 |
SKIRT/PTS | magic/plot/imagegrid.py | 1 | 106384 | # -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.magic.plot.imagegrid Contains the ImageGridPlotter classes.
# -----------------------------------------------------------------
# Ensure Python 3 compatibility
from __future__ import absolute_import, division, print_function
# Import standard modules
import aplpy
from abc import ABCMeta, abstractproperty
import matplotlib.pyplot as plt
from matplotlib import cm
from collections import OrderedDict, defaultdict
# Import the relevant PTS classes and modules
from ..tools.plotting import get_vmin_vmax
from ...core.tools import filesystem as fs
from ..core.frame import Frame
from ...core.basics.log import log
from ...core.basics.configurable import Configurable
from ...core.tools.utils import lazyproperty, memoize_method
from ...core.tools import sequences
from ..core.image import Image
from ...core.basics.distribution import Distribution
from ...core.basics.plot import MPLFigure
from ...core.basics.composite import SimplePropertyComposite
from ...core.basics.plot import normal_colormaps
from ..core.list import uniformize
from ...core.tools import numbers
from ...core.tools import types
# ------------------------------------------------------------------------------
light_theme = "light"
dark_theme = "dark"
themes = [light_theme, dark_theme]
# ------------------------------------------------------------------------------
default_cmap = "inferno"
default_residual_cmap = 'RdBu'
default_absolute_residual_cmap = "OrRd"
# ------------------------------------------------------------------------------
# Initialize dictionary for light theme settings
light_theme_settings = OrderedDict()
# Set parameters
light_theme_settings['axes.facecolor'] = 'white'
light_theme_settings['savefig.facecolor'] = 'white'
light_theme_settings['axes.edgecolor'] = 'black'
light_theme_settings['xtick.color'] = 'black'
light_theme_settings['ytick.color'] = 'black'
light_theme_settings["axes.labelcolor"] = 'black'
light_theme_settings["text.color"] = 'black'
# light_theme_settings["axes.titlecolor"]='black'
# ------------------------------------------------------------------------------
# Initialize dictionary for dark theme settings
dark_theme_settings = OrderedDict()
# Set parameters
dark_theme_settings['axes.facecolor'] = 'black'
dark_theme_settings['savefig.facecolor'] = 'black'
dark_theme_settings['axes.edgecolor'] = 'white'
dark_theme_settings['xtick.color'] = 'white'
dark_theme_settings['ytick.color'] = 'white'
dark_theme_settings["axes.labelcolor"] ='white'
dark_theme_settings["text.color"] = 'white'
#plt.rcParams["axes.titlecolor"] = 'white'
# ------------------------------------------------------------------------------
class ImagePlotSettings(SimplePropertyComposite):
"""
This class ...
"""
__metaclass__ = ABCMeta
# ------------------------------------------------------------------------------
def __init__(self, **kwargs):
"""
The constructor ...
"""
# Call the constructor of the base class
super(ImagePlotSettings, self).__init__()
# Define properties
self.add_property("label", "string", "label for the image", None)
self.add_property("vmin", "real", "plotting minimum")
self.add_property("vmax", "real", "plotting maximum")
self.add_boolean_property("soft_vmin", "soft vmin", False) #, None) # use None as default to use plotter config if not defined
self.add_boolean_property("soft_vmax", "soft vmax", False) #, None) # use None as default to use plotter config if not defined
self.add_property("cmap", "string", "colormap", choices=normal_colormaps)
# ------------------------------------------------------------------------------
class ImageGridPlotter(Configurable):
"""
This class ...
"""
__metaclass__ = ABCMeta
# -----------------------------------------------------------------
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param args:
:param kwargs:
"""
# Call the constructor of the base class
super(ImageGridPlotter, self).__init__(*args, **kwargs)
# The figure
self.figure = None
# The grid
self.grid = None
# The plots
self.plots = None
# The settings
self.settings = defaultdict(self.image_settings_class)
# -----------------------------------------------------------------
@abstractproperty
def image_settings_class(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
@abstractproperty
def names(self):
"""
This function ...
:return:
"""
pass
# ------------------------------------------------------------------------------
@property
def light(self):
return self.config.theme == light_theme
# -----------------------------------------------------------------
@property
def dark(self):
return self.config.theme == dark_theme
# -----------------------------------------------------------------
@lazyproperty
def text_color(self):
"""
This function ...
:return:
"""
# Set light theme
if self.light: return "black"
# Dark theme
elif self.dark: return "white"
# Invalid
else: raise ValueError("Invalid theme")
# -----------------------------------------------------------------
@lazyproperty
def frame_color(self):
"""
This function ...
:return:
"""
# Set light theme
if self.light: return "black"
# Dark theme
elif self.dark: return "white"
# Invalid
else: raise ValueError("Invalid theme")
# -----------------------------------------------------------------
@lazyproperty
def background_color(self):
"""
This function ...
:return:
"""
# Set light theme
if self.light: return "white"
# Dark theme
elif self.dark: return "black"
# Invalid
else: raise ValueError("Invalid theme")
# -----------------------------------------------------------------
@abstractproperty
def first_frame(self):
"""
This function ...
:return:
"""
pass
# -----------------------------------------------------------------
@lazyproperty
def center(self):
"""
This function ...
:return:
"""
# Center coordinate is defined
if self.config.center is not None: return self.config.center
# Not defined?
return self.first_frame.center_sky
# -----------------------------------------------------------------
@property
def ra_center(self):
return self.center.ra
# ------------------------------------------------------------------------------
@property
def dec_center(self):
return self.center.dec
# ------------------------------------------------------------------------------
@lazyproperty
def ra_center_deg(self):
return self.ra_center.to("deg").value
# ------------------------------------------------------------------------------
@lazyproperty
def dec_center_deg(self):
return self.dec_center.to("deg").value
# ------------------------------------------------------------------------------
@lazyproperty
def spacing_deg(self):
return self.config.spacing.to("deg").value
# ------------------------------------------------------------------------------
@lazyproperty
def radius_deg(self):
return self.config.radius.to("deg").value
# ------------------------------------------------------------------------------
@lazyproperty
def colormap(self):
return cm.get_cmap(self.config.cmap)
# -----------------------------------------------------------------
@lazyproperty
def nan_color(self):
if self.config.nan_color is not None: return self.config.nan_color
else: return self.colormap(0)
# -----------------------------------------------------------------
@lazyproperty
def theme_settings(self):
if self.light: return light_theme_settings
elif self.dark: return dark_theme_settings
else: raise ValueError("Invalid theme")
# -----------------------------------------------------------------
def setup(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Call the setup function of the base class
super(ImageGridPlotter, self).setup(**kwargs)
# plt.rcParams.update({'font.size':20})
plt.rcParams["axes.labelsize"] = self.config.axes_label_size # 16 #default 20
plt.rcParams["xtick.labelsize"] = self.config.ticks_label_size # 10 #default 16
plt.rcParams["ytick.labelsize"] = self.config.ticks_label_size # 10 #default 16
plt.rcParams["legend.fontsize"] = self.config.legend_fontsize # 10 #default 14
plt.rcParams["legend.markerscale"] = self.config.legend_markers_cale
plt.rcParams["lines.markersize"] = self.config.lines_marker_size # 4 #default 4
plt.rcParams["axes.linewidth"] = self.config.linewidth
# Set theme-specific settings
for label in self.theme_settings: plt.rcParams[label] = self.theme_settings[label]
# plt.rcParams['xtick.major.size'] = 5
# plt.rcParams['xtick.major.width'] = 2
# plt.rcParams['ytick.major.size'] = 5
# plt.rcParams['ytick.major.width'] = 2
# ------------------------------------------------------------------------------
def plot_images(images, **kwargs):
"""
This function ...
:param images:
:param kwargs:
:return:
"""
# Create the plotter
plotter = StandardImageGridPlotter(**kwargs)
# Run the plotter
plotter.run(images=images)
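# Example (informal sketch): plotting a few frames loaded from FITS files;
# the file names and image labels below are hypothetical.
#
#   from pts.magic.core.frame import Frame
#   images = {"GALEX FUV": Frame.from_file("fuv.fits"),
#             "GALEX NUV": Frame.from_file("nuv.fits")}
#   plot_images(images)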
# -----------------------------------------------------------------
class StandardImagePlotSettings(ImagePlotSettings):
"""
This class ...
"""
def __init__(self, **kwargs):
"""
This function ...
:param kwargs:
"""
# Call the constructor of the base class
super(StandardImagePlotSettings, self).__init__(**kwargs)
# Set properties
self.set_properties(kwargs)
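# Example (sketch): settings for a single image can be built from keyword
# properties, or passed to a plotter as a plain dictionary; the values and the
# 'plotter'/'frame' objects below are hypothetical.
#
#   settings = StandardImagePlotSettings(label="GALEX FUV", vmin=0.0, cmap="inferno")
#   plotter.add_image("GALEX FUV", frame, settings=dict(vmin=0.0, cmap="inferno"))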
# -----------------------------------------------------------------
class StandardImageGridPlotter(ImageGridPlotter):
"""
This class ...
"""
def __init__(self, *args, **kwargs):
"""
This function ...
:param args:
:param kwargs:
"""
# Call the constructor of the base class
super(StandardImageGridPlotter, self).__init__(*args, **kwargs)
# The image frames
self.frames = OrderedDict()
# The error frames
self.errors = OrderedDict()
# The masks
self.masks = OrderedDict()
# The regions
self.regions = OrderedDict()
# ------------------------------------------------------------------------------
@property
def image_settings_class(self):
"""
This function ...
:return:
"""
return StandardImagePlotSettings
# ------------------------------------------------------------------------------
def _run(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Show stuff
if self.config.show: self.show()
# Write
self.write()
# Plot
self.plot()
# ------------------------------------------------------------------------------
@property
def names(self):
"""
This function ...
:return:
"""
return self.frames.keys()
# ------------------------------------------------------------------------------
def add_image(self, name, image, errors=None, mask=None, regions=None, replace=False, settings=None):
"""
This function ...
:param name:
:param image:
:param errors:
:param mask:
:param regions:
:param replace:
:param settings:
:return:
"""
# Check if name already exists
if not replace and name in self.names: raise ValueError("Already an image with name '" + name + "' added")
# Image is passed
if isinstance(image, Image):
# Get the frame
frame = image.primary
# Get errors?
# Get mask?
# Get regions?
# Frame is passed
elif isinstance(image, Frame): frame = image
# Invalid
else: raise ValueError("Invalid value for 'image': must be Frame or Image")
# Add frame
self.frames[name] = frame
# Add errors
if errors is not None: self.errors[name] = errors
# Add regions
if regions is not None: self.regions[name] = regions
# Add mask
if mask is not None: self.masks[name] = mask
# Set settings
if settings is not None: self.settings[name].set_properties(settings)
# ------------------------------------------------------------------------------
def show(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Showing ...")
# ------------------------------------------------------------------------------
def write(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing ...")
# Images
if self.config.write_images: self.write_images()
# Frames
if self.config.write_frames: self.write_frames()
# Masks
if self.config.write_masks: self.write_masks()
# Regions
if self.config.write_regions: self.write_regions()
# ------------------------------------------------------------------------------
def write_images(self):
"""
This function ...
:return:
"""
# ------------------------------------------------------------------------------
def write_frames(self):
"""
This function ...
:return:
"""
# ------------------------------------------------------------------------------
def write_masks(self):
"""
This function ...
:return:
"""
# ------------------------------------------------------------------------------
def write_regions(self):
"""
This function ...
:return:
"""
# ------------------------------------------------------------------------------
def plot(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting ...")
# ------------------------------------------------------------------------------
images_name = "images"
observations_name = "observations"
models_name = "models"
errors_name = "errors"
model_errors_name = "model_errors"
residuals_name = "residuals"
distributions_name = "distributions"
settings_name = "settings"
# ------------------------------------------------------------------------------
observation_name = "observation"
model_name = "model"
observation_or_model = [observation_name, model_name]
# ------------------------------------------------------------------------------
horizontal_mode, vertical_mode = "horizontal", "vertical"
default_direction = vertical_mode
directions = [horizontal_mode, vertical_mode]
# ------------------------------------------------------------------------------
class ResidualImagePlotSettings(ImagePlotSettings):
"""
This class ...
"""
def __init__(self, **kwargs):
"""
The constructor ...
"""
# Call the constructor of the base class
super(ResidualImagePlotSettings, self).__init__()
# Define properties
self.add_property("residual_amplitude", "percentage", "amplitude of the residual plots")
self.add_boolean_property("soft_residual_amplitude", "soft residual amplitude", False) #, None) # use None as default to use plotter config if not defined
self.add_property("residual_cmap", "string", "colormap for the residual plots") # no choices because can be absolute or not
# Set properties
self.set_properties(kwargs)
# ------------------------------------------------------------------------------
def plot_residuals(observations, models, **kwargs):
"""
This function ...
:param observations:
:param models:
:param kwargs:
:return:
"""
# Create the plotter
plotter = ResidualImageGridPlotter(**kwargs)
# Run the plotter
plotter.run(observations=observations, models=models)
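# Example (informal sketch): comparing observed and simulated frames per band;
# the dictionaries and file names are hypothetical but follow the
# 'observations' and 'models' arguments expected above.
#
#   from pts.magic.core.frame import Frame
#   observations = {"GALEX FUV": Frame.from_file("obs_fuv.fits")}
#   models = {"GALEX FUV": Frame.from_file("mock_fuv.fits")}
#   plot_residuals(observations, models)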
# -----------------------------------------------------------------
class ResidualImageGridPlotter(ImageGridPlotter):
"""
This class ...
"""
def __init__(self, *args, **kwargs):
"""
The constructor ...
:param args:
:param kwargs:
"""
# Call the constructor of the base class
super(ResidualImageGridPlotter, self).__init__(*args, **kwargs)
# The image frames
self.observations = OrderedDict()
self.errors = OrderedDict()
self.models = OrderedDict()
self.model_errors = OrderedDict()
self.residuals = OrderedDict()
# The residual distributions
self.distributions = OrderedDict()
# ------------------------------------------------------------------------------
@property
def image_settings_class(self):
return ResidualImagePlotSettings
# ------------------------------------------------------------------------------
def _run(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Create the residual frames
self.create_residuals()
# Create the residual distributions
self.create_distributions()
# Show stuff
if self.config.show: self.show()
# Write
self.write()
# Plot
self.plot()
# ------------------------------------------------------------------------------
def setup(self, **kwargs):
"""
This function ...
:param kwargs:
:return:
"""
# Call the setup function of the base class
super(ResidualImageGridPlotter, self).setup(**kwargs)
# Load the images
if kwargs.get(images_name, None) is not None: self.add_images(kwargs.pop(images_name))
if kwargs.get(observations_name, None) is not None: self.add_observations(kwargs.pop(observations_name))
if kwargs.get(models_name, None) is not None: self.add_models(kwargs.pop(models_name))
if kwargs.get(errors_name, None) is not None: self.add_error_maps(kwargs.pop(errors_name))
if kwargs.get(residuals_name, None) is not None: self.add_residual_maps(kwargs.pop(residuals_name))
# Nothing added
if self.config.from_directory is not None: self.load_from_directory(self.config.from_directory)
elif not self.has_images: self.load_from_directory(self.config.path)
# Initialize the figure
self.initialize_figure()
# ------------------------------------------------------------------------------
@property
def figsize(self):
return (15,10)
# ------------------------------------------------------------------------------
@property
def horizontal(self):
return self.config.direction == horizontal_mode
# ------------------------------------------------------------------------------
@property
def vertical(self):
return self.config.direction == vertical_mode
# ------------------------------------------------------------------------------
@lazyproperty
def npanels(self):
if self.config.distributions: return 4 # observation, model, residual, distribution
else: return 3 # observation, model, residual
# ------------------------------------------------------------------------------
@lazyproperty
def nrows(self):
if self.horizontal: return self.npanels
elif self.vertical: return self.nimages
else: raise ValueError("Invalid direction")
# ------------------------------------------------------------------------------
@lazyproperty
def ncolumns(self):
if self.horizontal: return self.nimages
elif self.vertical: return self.npanels
else: raise ValueError("Invalid direction")
# ------------------------------------------------------------------------------
@property
def share_x(self):
return True
# ------------------------------------------------------------------------------
@property
def share_y(self):
return True
# ------------------------------------------------------------------------------
def initialize_figure(self):
"""
This function ...
:return:
"""
# Debugging
log.debug("Initializing the figure with size " + str(self.figsize) + " ...")
# Create the plot
self.figure = MPLFigure(size=self.figsize)
# Create plots
#self.plots = self.figure.create_grid(self.nrows, self.ncolumns, sharex=self.share_x, sharey=self.share_y)
# Create grid
self.grid = self.figure.create_gridspec(self.nrows, self.ncolumns, hspace=0.0, wspace=0.0)
# Initialize structure to contain the plots
#print("NCOLUMNS", self.ncolumns)
#print("NROWS", self.nrows)
self.plots = [[None for i in range(self.ncolumns)] for j in range(self.nrows)]
# ------------------------------------------------------------------------------
@property
def all_names(self):
return sequences.combine_unique(self.observation_names, self.model_names, self.errors_names, self.residuals_names)
# ------------------------------------------------------------------------------
@property
def observation_names(self):
return self.observations.keys()
# ------------------------------------------------------------------------------
def has_observation(self, name):
"""
This function ...
:param name:
:return:
"""
return name in self.observation_names
# ------------------------------------------------------------------------------
@property
def model_names(self):
return self.models.keys()
# ------------------------------------------------------------------------------
def has_model(self, name):
"""
This function ...
:param name:
:return:
"""
return name in self.model_names
# ------------------------------------------------------------------------------
@property
def errors_names(self):
return self.errors.keys()
# ------------------------------------------------------------------------------
def has_errors(self, name):
"""
This function ...
:param name:
:return:
"""
return name in self.errors
# ------------------------------------------------------------------------------
@property
def model_errors_names(self):
return self.model_errors.keys()
# ------------------------------------------------------------------------------
def has_model_errors(self, name):
"""
This function ...
:param name:
:return:
"""
return name in self.model_errors
# ------------------------------------------------------------------------------
@property
def residuals_names(self):
return self.residuals.keys()
# ------------------------------------------------------------------------------
def has_residuals(self, name):
"""
This function ...
:param name:
:return:
"""
return name in self.residuals
# ------------------------------------------------------------------------------
@property
def distribution_names(self):
return self.distributions.keys()
# ------------------------------------------------------------------------------
def has_distribution(self, name):
"""
This function ...
:param name:
:return:
"""
return name in self.distributions
# ------------------------------------------------------------------------------
@property
def settings_names(self):
return self.settings.keys()
# ------------------------------------------------------------------------------
def has_settings(self, name):
"""
This function ...
:param name:
:return:
"""
return name in self.settings_names
# ------------------------------------------------------------------------------
@property
def names(self):
return self.observation_names
# ------------------------------------------------------------------------------
@property
def first_name(self):
return self.names[0]
# ------------------------------------------------------------------------------
@property
def first_observation(self):
return self.get_observation(self.first_name)
# ------------------------------------------------------------------------------
@property
def first_frame(self):
return self.first_observation
# ------------------------------------------------------------------------------
@property
def nimages(self):
return len(self.names)
# ------------------------------------------------------------------------------
@property
def has_images(self):
return self.nimages > 0
# ------------------------------------------------------------------------------
def add_image(self, name, observation, model=None, errors=None, model_errors=None, residuals=None, replace=False,
settings=None):
"""
This function ...
:param name:
:param observation:
:param model:
:param errors:
:param model_errors:
:param residuals:
:param replace:
:param settings:
:return:
"""
# Check if name already exists
if not replace and name in self.names: raise ValueError("Already an image with name '" + name + "' added")
        # Check type of the image
        if isinstance(observation, Image):
            # Keep a reference to the image before replacing 'observation' by a single frame
            image = observation
            # Get observation frame
            if observation_name in image.frame_names: observation = image.frames[observation_name]
            else: observation = image.primary
            # Get model frame
            if model_name in image.frame_names:
                if model is not None: raise ValueError("Cannot pass model frame if image contains model frame")
                model = image.frames[model_name]
            # Get errors frame
            if errors_name in image.frame_names:
                if errors is not None: raise ValueError("Cannot pass error map if image contains error map")
                errors = image.frames[errors_name]
            # Get model errors frame
            if model_errors_name in image.frame_names:
                if model_errors is not None: raise ValueError("Cannot pass model error map if image contains model error map")
                model_errors = image.frames[model_errors_name]
            # Get residuals frame
            if residuals_name in image.frame_names:
                if residuals is not None: raise ValueError("Cannot pass residual map if image contains residual map")
                residuals = image.frames[residuals_name]
        # Check the type of the model image
        if model is not None and isinstance(model, Image):
            # Keep a reference to the model image before replacing 'model' by a single frame
            model_image = model
            # Get the model frame
            if model_name in model_image.frame_names: model = model_image.frames[model_name]
            else: model = model_image.primary
            # Get the model errors frame
            if model_errors_name in model_image.frame_names:
                if errors_name in model_image.frame_names: raise ValueError("Model image contains both 'errors' and 'model_errors' frame")
                if model_errors is not None: raise ValueError("Cannot pass model error map if model image contains model error map")
                model_errors = model_image.frames[model_errors_name]
            elif errors_name in model_image.frame_names:
                if model_errors is not None: raise ValueError("Cannot pass model error map if model image contains error map")
                model_errors = model_image.frames[errors_name]
# Add observation
self.observations[name] = observation
# Add model
if model is not None: self.models[name] = model
# Add errors
if errors is not None: self.errors[name] = errors
# Add model errors
if model_errors is not None: self.model_errors[name] = model_errors
# Add residuals
if residuals is not None: self.residuals[name] = residuals
# Set settings
if settings is not None: self.settings[name].set_properties(settings)
# ------------------------------------------------------------------------------
def add_observation(self, name, frame, errors=None):
"""
This function ...
:param name:
:param frame:
:param errors:
:return:
"""
        # Check the type of the image
        if isinstance(frame, Image):
            # Keep a reference to the image before replacing 'frame' by a single frame
            image = frame
            # Get observation frame
            if observation_name in image.frame_names: frame = image.frames[observation_name]
            else: frame = image.primary
            # Get error map
            if errors_name in image.frame_names:
                if errors is not None: raise ValueError("Cannot pass error map if image contains error map")
                errors = image.frames[errors_name]
            # Check whether there are no other frames
            if sequences.contains_more(image.frame_names, ["primary", observation_name, errors_name]): raise ValueError("Observation image contains too many frames")
# Add observation frame
self.observations[name] = frame
# Add error map
if errors is not None: self.errors[name] = errors
# ------------------------------------------------------------------------------
def add_model(self, name, frame, errors=None):
"""
This function ...
:param name:
:param frame:
:param errors:
:return:
"""
        # Check the type of the image
        if isinstance(frame, Image):
            # Keep a reference to the image before replacing 'frame' by a single frame
            image = frame
            # Get model frame
            if model_name in image.frame_names: frame = image.frames[model_name]
            else: frame = image.primary
            # Get error map
            if errors_name in image.frame_names:
                if model_errors_name in image.frame_names: raise ValueError("Model image contains both 'errors' and 'model_errors' frame")
                if errors is not None: raise ValueError("Cannot pass error map if image contains error map")
                errors = image.frames[errors_name]
            elif model_errors_name in image.frame_names:
                if errors is not None: raise ValueError("Cannot pass error map if image contains error map")
                errors = image.frames[model_errors_name]
            # Check whether there are no other frames
            if sequences.contains_more(image.frame_names, ["primary", model_name, errors_name, model_errors_name]): raise ValueError("Model image contains too many frames")
# Add model frame
self.models[name] = frame
# Add error map
if errors is not None: self.model_errors[name] = errors
# ------------------------------------------------------------------------------
def add_errors(self, name, frame):
"""
This function ...
:param name:
:param frame:
:return:
"""
# Add
self.errors[name] = frame
# ------------------------------------------------------------------------------
def add_model_errors(self, name, frame):
"""
        This function ...
:param name:
:param frame:
:return:
"""
# Add
self.model_errors[name] = frame
# ------------------------------------------------------------------------------
def add_residuals(self, name, frame):
"""
This function ...
:param name:
:param frame:
:return:
"""
# Add
self.residuals[name] = frame
# ------------------------------------------------------------------------------
def add_distribution(self, name, distribution):
"""
This function ...
:param name:
:param distribution:
:return:
"""
# Add
self.distributions[name] = distribution
# -----------------------------------------------------------------
def add_settings(self, name, **settings):
"""
This function ...
:param name:
:param settings:
:return:
"""
# Set settings
self.settings[name].set_properties(settings)
# ------------------------------------------------------------------------------
def set_settings(self, name, settings):
"""
This function ...
:param name:
:param settings:
:return:
"""
# Set settings
self.settings[name] = settings
# ------------------------------------------------------------------------------
def set_setting(self, name, setting_name, value):
"""
This function ...
:param name:
:param setting_name:
:param value:
:return:
"""
# Set
self.settings[name][setting_name] = value
# ------------------------------------------------------------------------------
def add_images(self, images):
"""
This function ...
:param images:
:return:
"""
# Debugging
log.debug("Adding images ...")
# Loop over the images
for name in images:
# Get the image
image = images[name]
# Add
self.add_image(name, image)
# ------------------------------------------------------------------------------
def add_observations(self, frames):
"""
This function ...
:param frames:
:return:
"""
# Debugging
log.debug("Adding observations ...")
# Loop over the frames
for name in frames:
# Get the frames
frame = frames[name]
# Add
self.add_observation(name, frame)
# ------------------------------------------------------------------------------
def add_models(self, frames):
"""
This function ...
:param frames:
:return:
"""
# Debugging
log.debug("Adding models ...")
# Loop over the frames
for name in frames:
# Get the frames
frame = frames[name]
# Add
self.add_model(name, frame)
# ------------------------------------------------------------------------------
def add_error_maps(self, frames):
"""
This function ...
:param frames:
:return:
"""
# Debugging
log.debug("Adding error maps ...")
# Loop over the frames
for name in frames:
# Get the frame
frame = frames[name]
# Add
self.add_errors(name, frame)
# ------------------------------------------------------------------------------
def add_model_error_maps(self, frames):
"""
This function ...
:param frames:
:return:
"""
# Debugging
log.debug("Adding model error maps ...")
# Loop over the frames
for name in frames:
# Get the frame
frame = frames[name]
# Add
self.add_model_errors(name, frame)
# ------------------------------------------------------------------------------
def add_residual_maps(self, frames):
"""
This function ...
:param frames:
:return:
"""
# Debugging
log.debug("Adding residual maps ...")
# Loop over the frames
for name in frames:
# Get the frame
frame = frames[name]
# Add
self.add_residuals(name, frame)
# ------------------------------------------------------------------------------
def load_from_directory(self, path):
"""
This function ...
:param path:
:return:
"""
# Are there FITS files in the directory?
if fs.has_files_in_path(path, extension="fits"): self.load_images_from_directory(path)
# Are there subdirectories?
elif fs.has_directories_in_path(path):
# Determine paths
images_path = fs.join(path, images_name)
observations_path = fs.join(path, observations_name)
models_path = fs.join(path, models_name)
residuals_path = fs.join(path, residuals_name)
settings_path = fs.join(path, settings_name)
# Load observations
            if fs.is_directory(images_path): self.load_images_from_directory(images_path)
            if fs.is_directory(observations_path): self.load_observations_from_directory(observations_path)
            if fs.is_directory(models_path): self.load_models_from_directory(models_path)
            if fs.is_directory(residuals_path): self.load_residuals_from_directory(residuals_path)
            if fs.is_directory(settings_path): self.load_settings_from_directory(settings_path)
# No FITS files nor subdirectories
else: raise IOError("No image files nor subdirectories found in '" + path + "'")
# ------------------------------------------------------------------------------
def load_images_from_directory(self, path):
"""
This function ...
:param path:
:return:
"""
# Debugging
log.debug("Loading image files from '" + path + "' ...")
# Loop over the FITS files
for name, filepath in fs.files_in_path(path, extension="fits", returns=["name", "path"]):
# Debugging
log.debug("Loading '" + name + "' image ...")
# Load the image
image = Image.from_file(filepath, always_call_first_primary=False)
# Add the image
self.add_image(name, image)
# ------------------------------------------------------------------------------
def load_observations_from_directory(self, path):
"""
This function ...
:param path:
:return:
"""
# Debugging
log.debug("Loading observed image frames from '" + path + "' ...")
# Loop over the FITS files
for name, filepath in fs.files_in_path(path, extension="fits", returns=["name", "path"]):
# Debugging
log.debug("Loading the '" + name + "' observed image ...")
# Get header
#header = get_header(filepath)
# Get the filter
#fltr = get_filter(name, header=header)
# Check whether the filter is in the list of filters to be plotted
#if fltr not in config.filters: continue
# Get the index for this filter
#index = config.filters.index(fltr)
# Load the image
#frame = Frame.from_file(filepath)
image = Image.from_file(filepath, always_call_first_primary=False)
# Replace zeroes and negatives
image.primary.replace_zeroes_by_nans()
image.primary.replace_negatives_by_nans()
# Add the image
self.add_observation(name, image)
# ------------------------------------------------------------------------------
def load_models_from_directory(self, path):
"""
This function ...
:param path:
:return:
"""
# Debugging
log.debug("Loading model image frames from '" + path + "' ...")
# Loop over the FITS files
        for name, filepath in fs.files_in_path(path, extension="fits", returns=["name", "path"]):
# Debugging
log.debug("Loading the '" + name + "' model image ...")
# Load the image
image = Image.from_file(filepath, always_call_first_primary=False)
# Replace zeroes and negatives
image.primary.replace_zeroes_by_nans()
image.primary.replace_negatives_by_nans()
# Add the image
self.add_model(name, image)
# ------------------------------------------------------------------------------
def load_residuals_from_directory(self, path):
"""
This function ...
:param path:
:return:
"""
# Debugging
log.debug("Loading residual image frames from '" + path + "' ...")
# Loop over the FITS files
for name, filepath in fs.files_in_path(path, extension="fits", returns=["name", "path"]):
# Debugging
log.debug("Loading the '" + name + "' residual map ...")
# Load the frame
frame = Frame.from_file(filepath)
# Add the map
self.add_residuals(name, frame)
# ------------------------------------------------------------------------------
def load_settings_from_directory(self, path):
"""
This function ...
:param path:
:return:
"""
# Debugging
log.debug("Loading plotting settings from '" + path + "' ...")
# Loop over the dat files
for name, filepath in fs.files_in_path(path, extension="dat", returns=["name", "path"]):
# Debugging
log.debug("Loading the '" + name + "' settings ...")
# Load the settings
settings = ImagePlotSettings.from_file(filepath)
# Set the settings
self.set_settings(name, settings)
# ------------------------------------------------------------------------------
def get_observation_or_model(self, name):
"""
This function ...
:param name:
:return:
"""
if self.has_observation(name): return self.get_observation(name)
elif self.has_model(name): return self.get_model(name)
else: raise ValueError("Doesn't have observation or model for name '" + name + "'")
# ------------------------------------------------------------------------------
def get_filter(self, name):
"""
This function ...
:param name:
:return:
"""
return self.get_observation_or_model(name).filter
# ------------------------------------------------------------------------------
def get_wcs(self, name):
"""
        This function ...
:param name:
:return:
"""
return self.get_observation_or_model(name).wcs
# ------------------------------------------------------------------------------
def calculate_residuals(self, name):
"""
This function ...
:param name:
:return:
"""
# Get the frames
#observation = self.observations[name]
#model = self.models[name]
# Uniformize
observation, model = uniformize(self.observations[name], self.models[name])
# Error-weighed residuals
if self.config.weighed:
if self.config.weighing_reference == observation_name:
if not self.has_errors(name): raise ValueError("No errors for the '" + name + "' image")
errors = self.get_errors(name)
elif self.config.weighing_reference == model_name:
if not self.has_model_errors(name): raise ValueError("No model errors for the '" + name + "' image")
errors = self.get_model_errors(name)
else: raise ValueError("Invalid value for 'weighing_reference'")
# Calculate
res = Frame((model - observation) / errors, wcs=observation.wcs)
# Relative residuals
elif self.config.relative: res = Frame((model - observation) / observation, wcs=observation.wcs)
# Absolute residuals
else: res = Frame(model - observation, wcs=observation.wcs)
# Take absolute values?
if self.config.absolute: res = res.absolute
# Return the residual
return res
# ------------------------------------------------------------------------------
def create_residuals(self):
"""
This function ...
:param self:
:return:
"""
# Inform the user
log.info("Creating the residual frames ...")
# Loop over the observed images
for name in self.names:
# Checks
if not self.has_model(name): continue
if self.has_residuals(name): continue
# Debugging
log.debug("Creating residual frame for the '" + name + "' image ...")
# Create
res = self.calculate_residuals(name)
# Add the residuals frame
self.residuals[name] = res
# ------------------------------------------------------------------------------
def create_distributions(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Creating the residual distributions ...")
# Loop over the residual maps
for name in self.residuals_names:
# Checks
if self.has_distribution(name): continue
# Debugging
log.debug("Creating distribution for the '" + name + "' residuals ...")
# Get the residual map
residuals = self.get_residuals(name)
# Create the distribution
distribution = Distribution.from_data("Residual", residuals, sigma_clip=self.config.sigma_clip_distributions, sigma_level=self.config.sigma_clip_level)
# Add the distribution
self.distributions[name] = distribution
# ------------------------------------------------------------------------------
def get_observation(self, name):
"""
This function ...
:param name:
:return:
"""
return self.observations[name]
# ------------------------------------------------------------------------------
@memoize_method
def get_observation_image(self, name):
"""
This function ...
:param name:
:return:
"""
# Create image
image = Image(name=name)
# Add observation frame
image.add_frame(self.get_observation(name), observation_name)
# Add error map
if self.has_errors(name): image.add_frame(self.get_errors(name), errors_name)
# Return the image
return image
# ------------------------------------------------------------------------------
def get_model(self, name):
"""
This function ...
:param name:
:return:
"""
return self.models[name]
# ------------------------------------------------------------------------------
@memoize_method
def get_model_image(self, name):
"""
This function ...
:param name:
:return:
"""
# Create image
image = Image(name=name)
# Add model frame
image.add_frame(self.get_model(name), model_name)
# Add error map
if self.has_model_errors(name): image.add_frame(self.get_model_errors(name), errors_name)
# Return the image
return image
# ------------------------------------------------------------------------------
def get_errors(self, name):
"""
This function ...
:param name:
:return:
"""
return self.errors[name]
# ------------------------------------------------------------------------------
def get_model_errors(self, name):
"""
This function ...
:param name:
:return:
"""
return self.model_errors[name]
# ------------------------------------------------------------------------------
def get_residuals(self, name):
"""
This function ...
:param name:
:return:
"""
return self.residuals[name]
# ------------------------------------------------------------------------------
def get_distribution(self, name):
"""
This function ...
:param name:
:return:
"""
return self.distributions[name]
# ------------------------------------------------------------------------------
@memoize_method
def get_image(self, name):
"""
This function ...
:param name:
:return:
"""
# Create the image
image = Image(name=name)
# Add the observation
if self.has_observation(name): image.add_frame(self.get_observation(name), observation_name)
# Add the model
if self.has_model(name): image.add_frame(self.get_model(name), model_name)
# Add the errors
if self.has_errors(name): image.add_frame(self.get_errors(name), errors_name)
# Add the model errors
if self.has_model_errors(name): image.add_frame(self.get_model_errors(name), model_errors_name)
# Add the residuals
if self.has_residuals(name): image.add_frame(self.get_residuals(name), residuals_name)
# Return the image
return image
# ------------------------------------------------------------------------------
def get_settings(self, name):
"""
This function ...
:param name:
:return:
"""
return self.settings[name]
# ------------------------------------------------------------------------------
def show(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Showing ...")
# ------------------------------------------------------------------------------
def write(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing ...")
# Write observations
if self.config.write_observations: self.write_observations()
# Write models
if self.config.write_models: self.write_models()
# Write residual frames
if self.config.write_residuals: self.write_residuals()
# Write the images
if self.config.write_images: self.write_images()
# Write the distributions
if self.config.write_distributions: self.write_distributions()
# Write the settings
if self.config.write_settings: self.write_settings()
# ------------------------------------------------------------------------------
@lazyproperty
def images_path(self):
return self.output_path_directory(images_name)
# ------------------------------------------------------------------------------
@lazyproperty
def observations_path(self):
return self.output_path_directory(observations_name)
# ------------------------------------------------------------------------------
@lazyproperty
def models_path(self):
return self.output_path_directory(models_name)
# ------------------------------------------------------------------------------
@lazyproperty
def residuals_path(self):
return self.output_path_directory(residuals_name)
# ------------------------------------------------------------------------------
@lazyproperty
def distributions_path(self):
return self.output_path_directory(distributions_name)
# ------------------------------------------------------------------------------
@lazyproperty
def settings_path(self):
return self.output_path_directory(settings_name)
# ------------------------------------------------------------------------------
def write_images(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the images ...")
# Loop over all images
for name in self.all_names:
# Determine path
path = fs.join(self.images_path, name + ".fits")
# Debugging
log.debug("Writing the '" + name + "' image ...")
# Get image
image = self.get_image(name)
# Save the image
image.saveto(path)
# ------------------------------------------------------------------------------
def write_observations(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the observed frames ...")
# Loop over the observed images
for name in self.observation_names:
# Determine the path
path = fs.join(self.observations_path, name + ".fits")
# Debugging
log.debug("Writing the '" + name + "' observed image ...")
# Get the frame
frame = self.get_observation_image(name)
# Save the frame
frame.saveto(path)
# ------------------------------------------------------------------------------
def write_models(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the model frames ...")
# Loop over the model images
for name in self.model_names:
# Determine the path
path = fs.join(self.models_path, name + ".fits")
# Debugging
log.debug("Writing the '" + name + "' model image ...")
# Get the frame
frame = self.get_model_image(name)
# Save the frame
frame.saveto(path)
# ------------------------------------------------------------------------------
def write_residuals(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the residual frames ...")
# Loop over the residual maps
for name in self.residuals_names:
# Determine the path
path = fs.join(self.residuals_path, name + ".fits")
# Debugging
log.debug("Writing the '" + name + "' residual frame ...")
# Get the residual map
frame = self.get_residuals(name)
# Save the frame
frame.saveto(path)
# ------------------------------------------------------------------------------
def write_distributions(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the residual distributions ...")
# Loop over the distributions
for name in self.distribution_names:
# Determine the path
path = fs.join(self.distributions_path, name + ".fits")
# Debugging
log.debug("Writing the '" + name + "' residual distribution ...")
# Get the distribution
distribution = self.get_distribution(name)
# Save
distribution.saveto(path)
# ------------------------------------------------------------------------------
def write_settings(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Writing the plotting settings ...")
# Loop over the settings
for name in self.settings_names:
# Determine the path
path = fs.join(self.settings_path, name + ".dat")
# Debugging
log.debug("Writing the '" + name + "' plotting settings ...")
# Get the settings
settings = self.get_settings(name)
# Save
settings.saveto(path)
# ------------------------------------------------------------------------------
def plot(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting ...")
# Plot observations
self.plot_observations()
# Plot models
self.plot_models()
# Plot residuals
self.plot_residuals()
# Plot distributions
if self.config.distributions: self.plot_distributions()
# Finish the plot
self.finish()
# ------------------------------------------------------------------------------
def get_label(self, name):
"""
This function ...
:param name:
:return:
"""
# No settings?
if not self.has_settings(name): return name
# Get the settings
settings = self.get_settings(name)
# Return
if settings.label is not None: return settings.label
else: return name
# ------------------------------------------------------------------------------
def get_colormap(self, name):
"""
This function ...
:param name:
:return:
"""
# No settings?
if not self.has_settings(name): return self.config.cmap
# Get the settings
settings = self.get_settings(name)
# Return
if settings.cmap is not None: return settings.cmap
else: return self.config.cmap
# ------------------------------------------------------------------------------
@property
def config_residual_cmap(self):
"""
This function ...
:return:
"""
if self.config.absolute: return self.config.absolute_residual_cmap
else: return self.config.residual_cmap
# ------------------------------------------------------------------------------
def get_residual_colormap(self, name):
"""
This function ...
:param name:
:return:
"""
# No settings
if not self.has_settings(name): return self.config_residual_cmap
# Get the settings
settings = self.get_settings(name)
# Return
if settings.residual_cmap is not None: return settings.residual_cmap
else: return self.config_residual_cmap
# ------------------------------------------------------------------------------
def get_limits(self, name):
"""
This function ...
:param name:
:return:
"""
# No settings
if not self.has_settings(name): return self.config.vmin, self.config.vmax, False, False
# Get the settings
settings = self.get_settings(name)
# Get limits
vmin = settings.vmin if settings.vmin is not None else self.config.vmin
vmax = settings.vmax if settings.vmax is not None else self.config.vmax
# Get flags
soft_vmin = settings.soft_vmin if settings.vmin is not None else False # don't use True flag if vmin is not set in settings
soft_vmax = settings.soft_vmax if settings.vmax is not None else False # don't use True flag if vmax is not set in settings
# Return
return vmin, vmax, soft_vmin, soft_vmax
# ------------------------------------------------------------------------------
def get_residual_amplitude(self, name):
"""
This function ...
:param name:
:return:
"""
# No settings
if not self.has_settings(name): return self.config.residual_amplitude, False
# Get the settings
settings = self.get_settings(name)
# Get amplitude
amplitude = settings.residual_amplitude if settings.residual_amplitude is not None else self.config.residual_amplitude
# Get flag
soft_amplitude = settings.soft_residual_amplitude if settings.residual_amplitude is not None else False # don't use True flag if amplitude is not set in settings
# Return
return amplitude, soft_amplitude
# ------------------------------------------------------------------------------
def set_limits(self, name, vmin, vmax, soft_vmin=None, soft_vmax=None):
"""
This function ...
:param name:
:param vmin:
:param vmax:
:param soft_vmin:
:param soft_vmax:
:return:
"""
# Set vmin and vmax
self.add_settings(name, vmin=vmin, vmax=vmax)
# Set flags
if soft_vmin is not None: self.set_setting(name, "soft_vmin", soft_vmin)
if soft_vmax is not None: self.set_setting(name, "soft_vmax", soft_vmax)
# ------------------------------------------------------------------------------
def get_vmin_vmax(self, frame, vmin=None, vmax=None, soft_vmin=False, soft_vmax=False):
"""
This function ...
:param frame:
:param vmin:
:param vmax:
:param soft_vmin:
:param soft_vmax:
:return:
"""
# Defined?
has_vmin = vmin is not None
has_vmax = vmax is not None
# Vmin and vmax don't have to be calculated
if has_vmin and has_vmax and (not soft_vmin) and (not soft_vmax): return vmin, vmax
# Calculate vmin and or vmax
return get_vmin_vmax(frame.data, interval=self.config.interval, zmin=vmin, zmax=vmax, soft_zmin=soft_vmin, soft_zmax=soft_vmax)
# ------------------------------------------------------------------------------
def get_residual_vmin_vmax(self, frame, amplitude=None, soft_amplitude=False):
"""
This function ...
:param frame:
:param amplitude:
:param soft_amplitude:
:return:
"""
# Defined?
if amplitude is not None and not soft_amplitude:
if self.config.absolute: return 0., amplitude
else: return -amplitude, amplitude
# Calculate vmin and or vmax
if self.config.absolute: return get_vmin_vmax(frame.data, interval=self.config.residual_interval, zmin=0, zmax=amplitude, soft_zmin=False, soft_zmax=soft_amplitude)
else:
zmin = -amplitude if amplitude is not None else None
zmax = amplitude
return get_vmin_vmax(frame.data, interval=self.config.residual_interval, zmin=zmin, zmax=zmax, soft_zmin=soft_amplitude, soft_zmax=soft_amplitude, around_zero=True, symmetric=True)
# ------------------------------------------------------------------------------
def get_observation_row_col(self, index):
"""
This function ...
:param index:
:return:
"""
# Horizontal
#if self.horizontal: return index, 0
if self.horizontal: return 0, index
# Vertical
#elif self.vertical: return 0, index
elif self.vertical: return index, 0
# Invalid
else: raise ValueError("Invalid direction")
# ------------------------------------------------------------------------------
def get_model_row_col(self, index):
"""
This function ...
:param index:
:return:
"""
# Horizontal
#if self.horizontal: return index, 1
if self.horizontal: return 1, index
# Vertical
#elif self.vertical: return 1, index
elif self.vertical: return index, 1
# Invalid
else: raise ValueError("Invalid direction")
# ------------------------------------------------------------------------------
def get_residuals_row_col(self, index):
"""
This function ...
:param index:
:return:
"""
# Horizontal
#if self.horizontal: return index, 2
if self.horizontal: return 2, index
# Vertical
#elif self.vertical: return 2, index
elif self.vertical: return index, 2
# Invalid
else: raise ValueError("Invalid direction")
# ------------------------------------------------------------------------------
def get_distribution_row_col(self, index):
"""
This function ...
:param index:
:return:
"""
# Horizontal
#if self.horizontal: return index, 3
if self.horizontal: return 3, index
# Vertical
#elif self.vertical: return 3, index
elif self.vertical: return index, 3
# Invalid
else: raise ValueError("Invalid direction")
# ------------------------------------------------------------------------------
def get_observation_spec(self, index, return_row_col=False):
"""
This function ...
:param index:
:param return_row_col:
:return:
"""
# Get row and col
row, col = self.get_observation_row_col(index)
# Return the grid spec; indexing the grid as [row, col] is correct here
if return_row_col: return self.grid[row, col], row, col
else: return self.grid[row, col]
# ------------------------------------------------------------------------------
def get_model_spec(self, index, return_row_col=False):
"""
This function ...
:param index:
:param return_row_col:
:return:
"""
# Get row and col
row, col = self.get_model_row_col(index)
# Return the grid spec
if return_row_col: return self.grid[row, col], row, col
else: return self.grid[row, col]
# ------------------------------------------------------------------------------
def get_residuals_spec(self, index, return_row_col=False):
"""
This function ...
:param index:
:param return_row_col:
:return:
"""
# Get row and col
row, col = self.get_residuals_row_col(index)
# Return the grid spec
if return_row_col: return self.grid[row, col], row, col
else: return self.grid[row, col]
# ------------------------------------------------------------------------------
def get_distribution_spec(self, index, return_row_col=False):
"""
This function ...
:param index:
:param return_row_col:
:return:
"""
# Get row and col
row, col = self.get_distribution_row_col(index)
# Return the grid spec
if return_row_col: return self.grid[row, col], row, col
else: return self.grid[row, col]
# ------------------------------------------------------------------------------
def create_observation_plot(self, index, frame):
"""
This function ...
:param index:
:param frame:
:return:
"""
# Get the subplot spec
spec, row, col = self.get_observation_spec(index, return_row_col=True)
#print(spec)
#print("ROW", row, "COL", col)
# Get coordinates of the subplot
#points = spec.get_position(self.figure.figure).get_points()
bbox = spec.get_position(self.figure.figure)
coordinates = [bbox.x0, bbox.y0, bbox.width, bbox.height]
# Create the plot
# needs [xmin, ymin, dx, dy]
plot = aplpy.FITSFigure(frame.to_hdu(), figure=self.figure.figure, subplot=coordinates)
# Add the plot
self.plots[row][col] = plot
# Return the plot
return plot
# ------------------------------------------------------------------------------
def create_model_plot(self, index, frame):
"""
This function ...
:param index:
:param frame:
:return:
"""
# Get the subplot spec
spec, row, col = self.get_model_spec(index, return_row_col=True)
bbox = spec.get_position(self.figure.figure)
coordinates = [bbox.x0, bbox.y0, bbox.width, bbox.height]
# Create the plot
plot = aplpy.FITSFigure(frame.to_hdu(), figure=self.figure.figure, subplot=coordinates)
# Add the plot
self.plots[row][col] = plot
# Return the plot
return plot
# ------------------------------------------------------------------------------
def create_residuals_plot(self, index, frame):
"""
This function ...
:param index:
:param frame:
:return:
"""
# Get the subplot spec
spec, row, col = self.get_residuals_spec(index, return_row_col=True)
bbox = spec.get_position(self.figure.figure)
coordinates = [bbox.x0, bbox.y0, bbox.width, bbox.height]
# Create the plot
plot = aplpy.FITSFigure(frame.to_hdu(), figure=self.figure.figure, subplot=coordinates)
# Add the plot
self.plots[row][col] = plot
# Return the plot
return plot
# ------------------------------------------------------------------------------
def _plot_observation(self, index, frame, cmap, label=None, vmin=None, vmax=None, soft_vmin=False, soft_vmax=False):
"""
This function ...
:param index:
:param frame:
:param cmap:
:param label:
:param vmin:
:param vmax:
:param soft_vmin:
:param soft_vmax:
:return:
"""
# Create the plot
plot = self.create_observation_plot(index, frame)
# Get vmin and vmax
vmin, vmax = self.get_vmin_vmax(frame, vmin=vmin, vmax=vmax, soft_vmin=soft_vmin, soft_vmax=soft_vmax)
# Set colorscale
plot.show_colorscale(vmin=vmin, vmax=vmax, cmap=cmap, stretch=self.config.scale)
# Set tick label font
plot.tick_labels.set_font(size='small')
# Set center, radius and spacing
plot.recenter(self.ra_center_deg, self.dec_center_deg, radius=self.radius_deg)
plot.ticks.set_xspacing(self.spacing_deg)
# Set color or frame
plot.frame.set_color(self.frame_color)
# FOR FIRST
#f1._ax1.tick_params(direction='in', which='major', length=7, top=True, right=True, bottom=True, left=True)
#f1._ax1.tick_params(direction='in', which='minor', length=4, top=True, right=True, bottom=True, left=True)
# Tick settings
plot._ax2.tick_params(direction='in', which='major', length=self.config.major_tick_length, top=True, right=True, bottom=True, left=True)
plot._ax2.tick_params(direction='in', which='minor', length=self.config.minor_tick_length, top=True, right=True, bottom=True, left=True)
# Set image background color
plot.set_nan_color(self.nan_color)
# FOR FIRST
#f1._ax1.scatter(ra, dec, marker='.', label='Observation')
# FOR FIRST
#legend1 = f1._ax1.legend(loc='upper right', fontsize=12, fancybox=True, framealpha=0, numpoints=None)
#plt.setp(legend1.get_texts(), color=config.text_color_in)
# Set title
if label is not None: plot._ax1.set_title(label, fontsize=self.config.label_fontsize)
# Return the vmin and vmax
return vmin, vmax
# ------------------------------------------------------------------------------
def _plot_model(self, index, frame, cmap, vmin=None, vmax=None, soft_vmin=None, soft_vmax=None):
"""
This function ...
:param index:
:param frame:
:param vmin:
:param vmax:
:param soft_vmin:
:param soft_vmax:
:return:
"""
# Create the plot
plot = self.create_model_plot(index, frame)
# Get vmin and vmax
vmin, vmax = self.get_vmin_vmax(frame, vmin=vmin, vmax=vmax, soft_vmin=soft_vmin, soft_vmax=soft_vmax)
# Set colorscale
plot.show_colorscale(vmin=vmin, vmax=vmax, cmap=cmap, stretch=self.config.scale)
# Set tick label font
plot.tick_labels.set_font(size='small')
# Set center, radius and spacing
plot.recenter(self.ra_center_deg, self.dec_center_deg, radius=self.radius_deg)
plot.ticks.set_xspacing(self.spacing_deg)
# Set color for frame
plot.frame.set_color(self.frame_color)
# Set ticks
plot._ax1.tick_params(direction='in', which='major', length=self.config.major_tick_length, top=True, right=True, bottom=True, left=True)
plot._ax1.tick_params(direction='in', which='minor', length=self.config.minor_tick_length, top=True, right=True, bottom=True, left=True)
# FOR FIRST
#f6._ax1.scatter(ra, dec, marker='.', label='Model')
#legend6 = f6._ax1.legend(loc='upper right', fontsize=12, fancybox=False, framealpha=0, numpoints=None)
#plt.setp(legend6.get_texts(), color=config.text_color_in)
# Set image background color
plot.set_nan_color(self.nan_color)
# ------------------------------------------------------------------------------
def _plot_residuals(self, index, frame, cmap, amplitude=None, soft_amplitude=False):
"""
This function ...
:param index:
:param frame:
:param cmap:
:param amplitude:
:param soft_amplitude:
:return:
"""
# Create the plot
plot = self.create_residuals_plot(index, frame)
# Get vmin and vmax
vmin, vmax = self.get_residual_vmin_vmax(frame, amplitude=amplitude, soft_amplitude=soft_amplitude)
# Set colorscale
plot.show_colorscale(vmin=vmin, vmax=vmax, cmap=cmap)
# Set tick label font
plot.tick_labels.set_font(size='small')
# Set center, radius and spacing
plot.recenter(self.ra_center_deg, self.dec_center_deg, radius=self.radius_deg)
plot.ticks.set_xspacing(self.spacing_deg)
# Set color for frame
plot.frame.set_color(self.frame_color)
# Set ticks
plot._ax1.tick_params(direction='in', which='major', length=self.config.major_tick_length, top=True, right=True, bottom=True, left=True)
plot._ax1.tick_params(direction='in', which='minor', length=self.config.minor_tick_length, top=True, right=True, bottom=True, left=True)
# FOR FIRST
# f11._ax1.scatter(ra, dec, marker='.', label='Relative \nResidual')
# FOR FIRST
# Set legend
#legend11 = f11._ax1.legend(loc='lower right', fontsize=12, fancybox=False, framealpha=0, numpoints=None)
#plt.setp(legend11.get_texts(), color=config.text_color_in)
# Set background color
plot.set_nan_color(self.background_color)
# ------------------------------------------------------------------------------
def _plot_distribution(self, index, distribution):
"""
This function ...
:param index:
:param distribution:
:return:
"""
pass
# ------------------------------------------------------------------------------
def plot_observations(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting the observed image frames ...")
# Loop over the names
#print(self.names)
#print(self.nimages)
#print(len(self.names))
for index, name in enumerate(self.names):
# Debugging
log.debug("Plotting the observed frame of the '" + name + "' image (panel " + str(index+1) + " of " + str(self.nimages) + ") ...")
# Get the observation
frame = self.get_observation(name)
# Get the label for this image
label = self.get_label(name)
# Get the colormap for this image
cmap = self.get_colormap(name)
# Get the limits
vmin, vmax, soft_vmin, soft_vmax = self.get_limits(name)
# Plot
vmin, vmax = self._plot_observation(index, frame, cmap, label=label, vmin=vmin, vmax=vmax, soft_vmin=soft_vmin, soft_vmax=soft_vmax)
# Set new vmin and vmax (for corresponding model)
self.set_limits(name, vmin, vmax, soft_vmin=False, soft_vmax=False)
# ------------------------------------------------------------------------------
def plot_models(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting the model image frames ...")
# Loop over the names
for index, name in enumerate(self.names):
# Check
if not self.has_model(name): continue
# Debugging
log.debug("Plotting the model frame of the '" + name + "' image (panel " + str(index+1) + " of " + str(self.nimages) + ") ...")
# Get the model
frame = self.get_model(name)
# Get the colormap for this image
cmap = self.get_colormap(name)
# Get the limits
vmin, vmax, soft_vmin, soft_vmax = self.get_limits(name)
# Plot
self._plot_model(index, frame, cmap, vmin=vmin, vmax=vmax, soft_vmin=soft_vmin, soft_vmax=soft_vmax)
# ------------------------------------------------------------------------------
def plot_residuals(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting the residual image frames ...")
# Loop over the names
for index, name in enumerate(self.names):
# Check
if not self.has_residuals(name): continue
# Debugging
log.debug("Plotting the residuals frame of the '" + name + "' image (panel " + str(index+1) + " of " + str(self.nimages) + ") ...")
# Get the residuals
frame = self.get_residuals(name)
# Get the colormap for this residual map
cmap = self.get_residual_colormap(name)
# Get the amplitude
amplitude, soft_amplitude = self.get_residual_amplitude(name)
# Plot
# index, frame, cmap, amplitude=None, soft_amplitude=False
self._plot_residuals(index, frame, cmap, amplitude=amplitude, soft_amplitude=soft_amplitude)
# ------------------------------------------------------------------------------
def plot_distributions(self):
"""
This function ...
:return:
"""
# Inform the user
log.info("Plotting the residual distributions ...")
# Loop over the names
for index, name in enumerate(self.names):
# Check
if not self.has_distribution(name): continue
# Debugging
log.debug("Plotting the residual distribution of the '" + name + "' image (panel " + str(index+1) + " of " + str(self.nimages) + " ) ...")
# Get the distribution
distribution = self.get_distribution(name)
# Plot (note: _plot_distribution is currently a stub)
self._plot_distribution(index, distribution)
# ------------------------------------------------------------------------------
def finish(self):
"""
Finish the plot: draw the figure, save it to the configured path or show it, and close.
:return:
"""
# Draw
self.figure.draw()
# Save to file
if self.config.path is not None: self.figure.figure.savefig(self.config.path, dpi=self.config.dpi)
# Show
else: plt.show()
# Close
#plt.close(fig)
plt.close()
# ------------------------------------------------------------------------------
def plot_images_aplpy(frames, filepath=None, center=None, radius=None, xy_ratio=None, dark=False, scale="log",
colormap="inferno", nrows=None, ncols=None, orientation="horizontal", plotsize=3., distance=None,
share_scale=None, descriptions=None, minmax_scaling=0.5):
"""
Plot a grid of image frames with APLpy, one panel per frame.
:param frames:
:param filepath:
:param center:
:param radius:
:param xy_ratio:
:param dark:
:param scale:
:param colormap:
:param nrows:
:param ncols:
:param orientation:
:param plotsize:
:param distance:
:param share_scale:
:param descriptions:
:param minmax_scaling: scaling factor for the determined vmin/vmax interval (default 0.5); a single value, a sequence, or a dictionary keyed on image label
:return:
"""
import matplotlib.gridspec as gridspec
#from matplotlib.colorbar import ColorbarBase
#from matplotlib.colors import LinearSegmentedColormap
#from matplotlib.colors import Normalize
from pts.magic.tools import plotting
# Set
set_theme(dark=dark)
nimages = len(frames)
xsize = plotsize
#if xy_ratio is None: ysize = 3.5
#else: ysize = xsize / xy_ratio
if xy_ratio is None: xy_ratio = 0.85
ysize = xsize / xy_ratio
#print("plotsize", xsize, ysize)
# Determine the number of columns and rows
if nrows is None and ncols is None:
if orientation == "horizontal": ncols, nrows = nimages, 1
elif orientation == "vertical": ncols, nrows = 1, nimages
else: raise ValueError("Invalid orientation: '" + orientation + "'")
# Nrows is none but ncols is not
elif nrows is None: nrows = numbers.round_up_to_int(nimages/ncols)
# Ncols is none but nrows is not
elif ncols is None: ncols = numbers.round_up_to_int(nimages/nrows)
# Set figure size
figxsize = xsize * ncols
figysize = ysize * nrows
#print("figsize", figxsize, figysize)
# Create figure with appropriate size
fig = plt.figure(figsize=(figxsize, figysize))
# Create grid
gs1 = gridspec.GridSpec(nrows, ncols) # nimages ROWS, 4 COLUMNS
# gs1.update(wspace=0.01, hspace=0.3)
gs1.update(wspace=0., hspace=0.)
plot_idx = 0
# Get frame labels
if types.is_dictionary(frames):
labels = frames.keys()
frames = frames.values()
else: labels = [frame.filter_name for frame in frames]
# Set scale for each image
scales = dict()
if types.is_string_type(scale):
for label in labels: scales[label] = scale
elif types.is_sequence(scale):
for label, scalei in zip(labels, scale): scales[label] = scalei
elif types.is_dictionary(scale): scales = scale
else: raise ValueError("Invalid type for 'scale'")
# Initialize dict for intervals
intervals = dict()
# Set descriptions
if descriptions is None:
descriptions = dict()
for label in labels: descriptions[label] = None
elif types.is_sequence(descriptions):
descrpts = descriptions
descriptions = dict()
for label, descr in zip(labels, descrpts): descriptions[label] = descr
elif types.is_dictionary(descriptions): pass # OK
else: raise ValueError("Invalid type for 'descriptions'")
# Set minmax scaling
if types.is_real_type(minmax_scaling):
factor = minmax_scaling
minmax_scaling = dict()
for label in labels: minmax_scaling[label] = factor
elif types.is_dictionary(minmax_scaling):
minmax_scaling_orig = minmax_scaling
minmax_scaling = dict()
for label in labels:
if label in minmax_scaling_orig: minmax_scaling[label] = minmax_scaling_orig[label]
else: minmax_scaling[label] = 0.5
elif types.is_sequence(minmax_scaling):
minmax_scaling_orig = minmax_scaling
minmax_scaling = dict()
for label, factor in zip(labels, minmax_scaling_orig): minmax_scaling[label] = factor
else: raise ValueError("Invalid type for 'minmax_scaling'")
# Loop over the frames
for label, frame, index in zip(labels, frames, range(nimages)):
rowi = index // ncols
coli = index % ncols
is_first_row = rowi == 0
is_last_row = rowi == nrows - 1
is_first_col = coli == 0
is_last_col = coli == ncols - 1
#print("row", rowi)
#print("col", coli)
# IS FIRST OR LAST IMAGE?
is_first = index == 0
is_last = index == nimages - 1
# Debugging
log.debug("Plotting the '" + label + "' image ...")
# Get HDU
hdu = frame.to_hdu()
# Get interval
if share_scale is not None and label in share_scale:
share_with = share_scale[label]
vmin, vmax = intervals[share_with]
scalei = scales[share_with]
else:
# Get scale
scalei = scales[label]
is_logscale = scalei == "log"
#print(label, minmax_scaling[label])
vmin, vmax = plotting.get_vmin_vmax(frame.data, logscale=is_logscale, minmax_scaling=minmax_scaling[label])
# Set interval
intervals[label] = (vmin, vmax,)
# Set title
if descriptions[label] is not None: title = descriptions[label]
else: title = label.replace("_", r"\_").replace("um", r"$\mu$m")
# Has sky coordinate system?
has_wcs = frame.has_wcs and frame.wcs.is_sky
# OBSERVATION
figi = aplpy.FITSFigure(hdu, figure=fig, subplot=list(gs1[plot_idx].get_position(fig).bounds))
setup_map_plot(figi, colormap, vmin=vmin, vmax=vmax, label=r'' + str(title), center=center, radius=radius, scale=scalei, has_wcs=has_wcs)
set_ticks(figi, is_first_row, is_last_row)
# FIRST COLUMN
if is_first_col:
figi.tick_labels.show_y()
figi.axis_labels.show_y()
# LAST ROW
if is_last_row:
figi.tick_labels.show_x()
figi.axis_labels.show_x()
# Increment
plot_idx += 1
# Save the figure
if filepath is not None: plt.savefig(filepath, bbox_inches='tight', dpi=300)
else: plt.show()
# Close
plt.close()
# Reset
reset_theme()
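# (Added example, not part of the original module) A typical call might look like the sketch
# below, assuming 'frames' is a dictionary mapping labels to Frame objects and 'center' and
# 'radius' are a sky coordinate and an angular extent as expected by setup_map_plot:
#   plot_images_aplpy(frames, filepath="images.pdf", center=center, radius=radius,
#                     ncols=3, scale="log", colormap="inferno", dark=False)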
# ------------------------------------------------------------------------------
def plot_one_residual_aplpy(observation, model, residual=None, path=None, scale="log", plotsize=3., dark=False,
center=None, radius=None, xy_ratio=None, first_label="Observation", second_label="Model",
residual_label="Residual", filter_label=True):
"""
Plot a single observation, its model and the (relative) residual side by side with APLpy.
:param observation:
:param model:
:param residual:
:param path:
:param scale:
:param plotsize:
:param dark:
:param center:
:param radius:
:param xy_ratio:
:param first_label:
:param second_label:
:param residual_label:
:param filter_label:
:return:
"""
# Make residual?
if residual is None: residual = (model - observation) / observation
# Colormaps
colormap = "inferno"
residual_colormap = "RdBu"
import matplotlib.gridspec as gridspec
from pts.magic.tools import plotting
# Set theme
set_theme(dark=dark)
nrows = 1
ncols = 3
xsize = plotsize
if xy_ratio is None: xy_ratio = 0.85
ysize = xsize / xy_ratio
# Set figure size
figxsize = xsize * ncols
figysize = ysize * nrows
# Create figure with appropriate size
#fig = plt.figure(figsize=(figxsize, figysize))
figure = MPLFigure(size=(figxsize,figysize))
# Create grid
gs1 = gridspec.GridSpec(nrows, ncols) # 1 row, 3 columns: observation, model, residual
gs1.update(wspace=0., hspace=0.)
plot_idx = 0
# Percentual residuals
residual = residual * 100.
# Set title
if filter_label and observation.has_filter: title = str(observation.filter).replace("um", " $\mu$m")
else: title = first_label
# Create HDU's for Aplpy
observation_hdu = observation.to_hdu()
model_hdu = model.to_hdu()
residual_hdu = residual.to_hdu()
# Get interval
vmin, vmax = plotting.get_vmin_vmax(observation.data, logscale=scale=="log")
# OBSERVATION
fig1 = aplpy.FITSFigure(observation_hdu, figure=figure.figure, subplot=list(gs1[plot_idx].get_position(figure.figure).bounds))
setup_map_plot(fig1, colormap, vmin=vmin, vmax=vmax, label=r'' + str(title), center=center, radius=radius, scale=scale, has_wcs=observation.has_celestial_wcs)
set_ticks(fig1, True, True)
# Enable y ticks and axis labels BECAUSE OBSERVATION IS THE FIRST COLUMN
fig1.tick_labels.show_y()
fig1.axis_labels.show_y()
# SHOW THE X TICK LABELS AND AXIS LABELS ONLY IF LAST ROW
fig1.tick_labels.show_x()
fig1.axis_labels.show_x()
# Increment
plot_idx += 1
# MODEL
fig2 = aplpy.FITSFigure(model_hdu, figure=figure.figure, subplot=list(gs1[plot_idx].get_position(figure.figure).bounds))
setup_map_plot(fig2, colormap, vmin=vmin, vmax=vmax, label=second_label, center=center, radius=radius, scale=scale, has_wcs=model.has_celestial_wcs)
set_ticks(fig2, True, True)
# SHOW THE X TICK LABELS AND AXIS LABELS ONLY IF LAST ROW
fig2.tick_labels.show_x()
fig2.axis_labels.show_x()
# Increment
plot_idx += 1
# RESIDUAL
fig3 = aplpy.FITSFigure(residual_hdu, figure=figure.figure, subplot=list(gs1[plot_idx].get_position(figure.figure).bounds))
setup_map_plot(fig3, residual_colormap, vmin=-100, vmax=100, label=residual_label + ' (\%)', center=center, radius=radius, has_wcs=residual.has_celestial_wcs)
set_ticks(fig3, True, True)
# SHOW THE X TICK LABELS AND AXIS LABELS ONLY IF LAST ROW
fig3.tick_labels.show_x()
fig3.axis_labels.show_x()
# Show or save
if path is None: figure.show()
else: figure.saveto(path)
# Reset theme
reset_theme()
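# (Added example, hedged) Given an observed and a simulated Frame on the same pixel grid,
# a call could look like:
#   plot_one_residual_aplpy(observation, model, path="residual.pdf",
#                           center=center, radius=radius, scale="log")
# When no residual frame is passed, the relative residual (model - observation) / observation
# is computed internally and shown in percent.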
# ------------------------------------------------------------------------------
def plot_residuals_aplpy(observations, models, residuals, filepath=None, center=None, radius=None, xy_ratio=None,
dark=False, scale="log", plotsize=3., distance=None, mask_simulated=False, masks=None):
"""
Plot a grid of observations, models, relative residuals and residual distributions (KDE) with APLpy, one row per image.
:param observations:
:param models:
:param residuals:
:param filepath:
:param center:
:param radius:
:param xy_ratio:
:param dark:
:param scale:
:param plotsize:
:param distance:
:param mask_simulated:
:param masks: if passed, both observations, models and residuals are masked
:return:
"""
import numpy as np
import matplotlib.gridspec as gridspec
from matplotlib.colorbar import ColorbarBase
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.colors import Normalize
import seaborn as sns
# Set theme
set_theme(dark=dark)
nimages = len(observations)
ncols = 4
nrows = nimages
# Colormaps
colormap = "inferno"
residual_colormap = "RdBu"
# Set individual map plot size
xsize = plotsize
#if xy_ratio is None: ysize = 3.5
#else: ysize = xsize / xy_ratio
#print("individual size", xsize, ysize)
if xy_ratio is None: xy_ratio = 0.85
ysize = xsize / xy_ratio
# Set figure size
figxsize = xsize * ncols
figysize = ysize * nrows
#print("figure size", figxsize, figysize)
# Create figure with appropriate size
fig = plt.figure(figsize=(figxsize, figysize))
# Create grid
gs1 = gridspec.GridSpec(nimages, 4) # nimages ROWS, 4 COLUMNS
#gs1.update(wspace=0.01, hspace=0.3)
gs1.update(wspace=0., hspace=0.)
plot_idx = 0
# Loop over the filters
if masks is None: masks = [None] * nimages
for observation, model, residual, mask, index in zip(observations, models, residuals, masks, range(nimages)):
#print("units:")
#print(observation.unit)
#print(model.unit)
observation.convert_to("mJy/sr", distance=distance)
model.convert_to("mJy/sr", distance=distance)
# MASK MODEL
if mask_simulated:
model.rebin(observation.wcs)
model.apply_mask_nans(observation.nans)
# MASK ALL?
if mask is not None:
observation.apply_mask_nans(mask)
model.apply_mask_nans(mask)
residual.apply_mask_nans(mask)
# IS FIRST OR LAST IMAGE?
is_first = index == 0
is_last = index == nimages - 1
# Debugging
log.debug("Plotting the observation, model and residuals for the " + str(observation.filter) + " filter ...")
# Percentual residuals
residual = residual * 100.
# Set title
title = str(observation.filter).replace("um", " $\mu$m")
# Create HDU's for Aplpy
observation_hdu = observation.to_hdu()
model_hdu = model.to_hdu()
residual_hdu = residual.to_hdu()
from pts.magic.tools import plotting
vmin, vmax = plotting.get_vmin_vmax(observation.data, logscale=scale=="log")
#vmax = 0.7 * vmax
#print("VMIN", vmin)
#print("VMAX", vmax)
# ------------------------------------------------------------------------------
# Plot obs, model and residual
# ------------------------------------------------------------------------------
# OBSERVATION
fig1 = aplpy.FITSFigure(observation_hdu, figure=fig, subplot=list(gs1[plot_idx].get_position(fig).bounds))
setup_map_plot(fig1, colormap, vmin=vmin, vmax=vmax, label=r'' + str(title), center=center, radius=radius, scale=scale)
set_ticks(fig1, is_first, is_last)
# Enable y ticks and axis labels BECAUSE OBSERVATION IS THE FIRST COLUMN
fig1.tick_labels.show_y()
fig1.axis_labels.show_y()
# SHOW THE X TICK LABELS AND AXIS LABELS ONLY IF LAST ROW
if is_last: fig1.tick_labels.show_x()
if is_last: fig1.axis_labels.show_x()
# Increment
plot_idx += 1
# ------------------------------------------------------------------------------
# MODEL
fig2 = aplpy.FITSFigure(model_hdu, figure=fig, subplot=list(gs1[plot_idx].get_position(fig).bounds))
setup_map_plot(fig2, colormap, vmin=vmin, vmax=vmax, label='Model', center=center, radius=radius, scale=scale)
set_ticks(fig2, is_first, is_last)
# SHOW THE X TICK LABELS AND AXIS LABELS ONLY IF LAST ROW
if is_last: fig2.tick_labels.show_x()
if is_last: fig2.axis_labels.show_x()
# Increment
plot_idx += 1
# ------------------------------------------------------------------------------
# RESIDUAL
fig3 = aplpy.FITSFigure(residual_hdu, figure=fig, subplot=list(gs1[plot_idx].get_position(fig).bounds))
setup_map_plot(fig3, residual_colormap, vmin=-100, vmax=100, label='Residual (\%)', center=center, radius=radius)
set_ticks(fig3, is_first, is_last)
# SHOW THE X TICK LABELS AND AXIS LABELS ONLY IF LAST ROW
if is_last: fig3.tick_labels.show_x()
if is_last: fig3.axis_labels.show_x()
# ------------------------------------------------------------------------------
# COLORBAR
colorbar_start_x = gs1[plot_idx].get_position(fig).bounds[0] + 0.025
colorbar_start_y = gs1[plot_idx].get_position(fig).bounds[1] + 0.085 / (nimages)
colorbar_x_width = gs1[plot_idx].get_position(fig).bounds[2] - 0.05
colorbar_y_height = gs1[plot_idx].get_position(fig).bounds[3]
cb_ax = fig.add_axes([colorbar_start_x, colorbar_start_y, colorbar_x_width, (0.02 + 0.002) / (nimages + 1)])
# Colourbar
cb = ColorbarBase(cb_ax, cmap=residual_colormap, norm=Normalize(vmin=-100, vmax=100), orientation='horizontal')
cb.ax.xaxis.set_ticks_position('bottom')
cb.ax.xaxis.set_label_position('bottom')
cb.ax.zorder = 99
cb.ax.xaxis.set_tick_params(color='white')
cb.outline.set_edgecolor('white')
plt.setp(plt.getp(cb.ax.axes, 'yticklabels'), color='white')
plt.setp(plt.getp(cb.ax.axes, 'xticklabels'), color='white')
cb.set_ticks([-100, -50, 0, 50, 100])
# Increment
plot_idx += 1
# ------------------------------------------------------------------------------
# KDE Plot of residuals
residual = residual_hdu.data
fig4 = plt.subplot(gs1[plot_idx])
residuals_to_kde = np.where((residual <= 200) & (residual >= -200))
if dark:
sns.kdeplot(residual[residuals_to_kde], bw='silverman', c='white', shade=True)
fig4.axes.set_facecolor("black")
else:
sns.kdeplot(residual[residuals_to_kde], bw='silverman', c='k', shade=True)
fig4.axes.set_facecolor("white")
fig4.tick_params(labelleft='off')
plt.xlim([-150, 150])
fig4.tick_params(direction='in', which='major', length=7, top=True, right=False, bottom=True, left=False)
fig4.tick_params(direction='in', which='minor', length=4, top=True, right=False, bottom=True, left=False)
# Hide tick labels except for the last (bottom) plot
if not is_last: fig4.tick_params(labelbottom=False)
if dark: plt.axvline(0, c='white', ls='--', lw=2)
else: plt.axvline(0, c='k', ls='--', lw=2)
# Label for kde
plt.xlabel('Residual (\%)')
# Increment
plot_idx += 1
# Save the figure
if filepath is not None: plt.savefig(filepath, bbox_inches='tight', dpi=300)
else: plt.show()
# Close
plt.close()
# Reset theme
reset_theme()
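# (Added example, hedged) With equal-length sequences of observed, modelled and residual frames,
# a call could look like:
#   plot_residuals_aplpy(observations, models, residuals, filepath="residual_grid.pdf",
#                        center=center, radius=radius, distance=distance)
# where 'distance' is the physical distance used to convert the frames to surface brightness
# units (mJy/sr) before plotting.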
# ------------------------------------------------------------------------------
def setup_map_plot(figure, colormap, vmin, vmax, label, smooth=None, text_x=0.05, text_y=0.95, center=None,
radius=None, scale="linear", has_wcs=True):
"""
Configure an APLpy FITSFigure: color scale, tick label formats, recentering, NaN color, frame spines and the panel label.
:param figure:
:param colormap:
:param vmin:
:param vmax:
:param label:
:param smooth:
:param text_x:
:param text_y:
:param center:
:param radius:
:param scale:
:param has_wcs:
:return:
"""
figure.show_colorscale(cmap=colormap, vmin=vmin, vmax=vmax, smooth=smooth, stretch=scale)
#figure.set_tick_labels_format(xformat='hh:mm:ss',yformat='dd:mm:ss')
if has_wcs:
figure.tick_labels.set_xformat('hh:mm:ss')
figure.tick_labels.set_yformat('dd:mm:ss')
figure._ax1.set_facecolor('black')
figure.set_nan_color('black')
# RECENTER
if center is not None:
if radius is None: raise ValueError("Cannot specify center without radius")
if has_wcs: figure.recenter(center.ra.to("deg").value, center.dec.to("deg").value, radius=radius.to("deg").value)
else: figure.recenter(center.x, center.y, radius=radius)
# Hide axes labels and tick labels by default (enable for y for first column and for x for last row)
figure.axis_labels.hide()
figure.tick_labels.hide()
# Axes spines
figure._ax1.spines['bottom'].set_color('white')
figure._ax1.spines['top'].set_color('white')
figure._ax1.spines["left"].set_color("white")
figure._ax1.spines["right"].set_color("white")
# TICKS
#figure._ax1.tick_params(direction='in', which='major', length=7, top=True, right=True, bottom=True, left=True)
#figure._ax1.tick_params(direction='in', which='minor', length=4, top=True, right=True, bottom=True, left=True)
#figure._ax2.tick_params(direction='in', which='major', length=7, top=True, right=True, bottom=True, left=True)
#figure._ax2.tick_params(direction='in', which='minor', length=4, top=True, right=True, bottom=True, left=True)
# SET LABEL
figure.add_label(text_x, text_y, r'' + str(label), relative=True, size=13, weight='bold', color='white',
horizontalalignment='left', verticalalignment='top',
bbox=dict(facecolor='black', edgecolor='none', alpha=0.5))
# ------------------------------------------------------------------------------
def set_ticks(figure, is_first_row, is_last_row):
"""
Set the tick parameters of an APLpy figure according to its row position (first, last, only or in-between) in the grid.
:param figure:
:param is_first_row:
:param is_last_row:
:return:
"""
# ONLY ROW?
is_only_row = is_first_row and is_last_row
# ONLY
if is_only_row:
# IN EVERYWHERE
figure._ax1.tick_params(direction='in', which='major', length=7, top=True, right=True, bottom=True, left=True)
figure._ax1.tick_params(direction='in', which='minor', length=4, top=True, right=True, bottom=True, left=True)
# FIRST
elif is_first_row:
# LEFT, RIGHT AND TOP
figure._ax1.tick_params(direction='in', which='major', length=7, top=True, right=True, bottom=False, left=True)
figure._ax1.tick_params(direction='in', which='minor', length=4, top=True, right=True, bottom=False, left=True)
# LAST
elif is_last_row:
# TOP
figure._ax1.tick_params(direction='inout', which='major', length=14, top=True, right=False, bottom=False, left=False)
figure._ax1.tick_params(direction='inout', which='minor', length=8, top=True, right=False, bottom=False, left=False)
#figure._ax1.tick_params(direction='out', which='major', length=7, top=True, right=False, bottom=False, left=False)
#figure._ax1.tick_params(direction='out', which='minor', length=4, top=True, right=False, bottom=False, left=False)
#figure._ax1.tick_params(direction='in', which='major', length=7, top=True, right=False, bottom=False, left=False)
#figure._ax1.tick_params(direction='in', which='minor', length=4, top=True, right=False, bottom=False, left=False)
# BOTTOM, LEFT AND RIGHT
figure._ax1.tick_params(direction='in', which='major', length=7, right=True, bottom=True, left=True)
figure._ax1.tick_params(direction='in', which='minor', length=4, right=True, bottom=True, left=True)
#figure._ax1.tick_params(direction='in', which='major', length=7, top=True, right=True, bottom=True, left=True)
#figure._ax1.tick_params(direction='in', which='minor', length=4, top=True, right=True, bottom=True, left=True)
# In between
else:
# TOP
figure._ax1.tick_params(direction='inout', which='major', length=14, top=True, right=False, bottom=False, left=False)
figure._ax1.tick_params(direction='inout', which='minor', length=8, top=True, right=False, bottom=False, left=False)
#figure._ax1.tick_params(direction='out', which='major', length=7, top=True, right=False, bottom=False, left=False)
#figure._ax1.tick_params(direction='out', which='minor', length=4, top=True, right=False, bottom=False, left=False)
#figure._ax1.tick_params(direction='in', which='major', length=7, top=True, right=False, bottom=False, left=False)
#figure._ax1.tick_params(direction='in', which='minor', length=4, top=True, right=False, bottom=False, left=False)
# LEFT AND RIGHT
figure._ax1.tick_params(direction='in', which='major', length=7, right=True, bottom=False, left=True)
figure._ax1.tick_params(direction='in', which='minor', length=4, right=True, bottom=False, left=True)
#figure._ax1.tick_params(direction='in', which='major', length=7, top=False, right=True, bottom=False, left=True)
#figure._ax1.tick_params(direction='in', which='minor', length=4, top=False, right=True, bottom=False, left=True)
# ------------------------------------------------------------------------------
def set_theme(dark=False):
"""
Set the matplotlib rcParams for the plots, optionally using a dark theme.
:param dark:
:return:
"""
# General settings
plt.rcParams["axes.labelsize"] = 14 # 16 #default 20
plt.rcParams["xtick.labelsize"] = 8 # 10 #default 16
plt.rcParams["ytick.labelsize"] = 8 # 10 #default 16
plt.rcParams["legend.fontsize"] = 14 # 10 #default 14
plt.rcParams["legend.markerscale"] = 0
plt.rcParams["lines.markersize"] = 2.5 # 4 #default 4
plt.rcParams["axes.linewidth"] = 1
# Colors
if dark:
plt.rcParams['axes.facecolor'] = 'black'
plt.rcParams['savefig.facecolor'] = 'black'
plt.rcParams['axes.edgecolor'] = 'white'
plt.rcParams['xtick.color'] = 'white'
plt.rcParams['ytick.color'] = 'white'
plt.rcParams["axes.labelcolor"] = 'white'
plt.rcParams["text.color"] = 'white'
else:
plt.rcParams['axes.facecolor'] = "white"
plt.rcParams['savefig.facecolor'] = 'white'
plt.rcParams['axes.edgecolor'] = 'black'
plt.rcParams['xtick.color'] = 'black'
plt.rcParams['ytick.color'] = 'black'
plt.rcParams["axes.labelcolor"] = 'black'
plt.rcParams["text.color"] = 'black'
# ------------------------------------------------------------------------------
def reset_theme():
"""
Reset the matplotlib rcParams to their defaults.
:return:
"""
# Back to original settings
plt.rcParams.update(plt.rcParamsDefault)
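# (Added note) The plotting functions above are meant to be used in a
#   set_theme(dark=...) ... plotting ... reset_theme()
# pattern, so that the rcParams changes do not leak into figures created afterwards.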
# ------------------------------------------------------------------------------
| agpl-3.0 |
hsuantien/scikit-learn | examples/covariance/plot_sparse_cov.py | 300 | 5078 | """
======================================
Sparse inverse covariance estimation
======================================
Using the GraphLasso estimator to learn a covariance and sparse precision
from a small number of samples.
To estimate a probabilistic model (e.g. a Gaussian model), estimating the
precision matrix, that is the inverse covariance matrix, is as important
as estimating the covariance matrix. Indeed a Gaussian model is
parametrized by the precision matrix.
To be in favorable recovery conditions, we sample the data from a model
with a sparse inverse covariance matrix. In addition, we ensure that the
data is not too highly correlated (limiting the largest coefficient of the
precision matrix) and that there are no small coefficients in the
precision matrix that cannot be recovered. In addition, with a small
number of observations, it is easier to recover a correlation matrix
rather than a covariance, thus we scale the time series.
Here, the number of samples is slightly larger than the number of
dimensions, thus the empirical covariance is still invertible. However,
as the observations are strongly correlated, the empirical covariance
matrix is ill-conditioned and as a result its inverse --the empirical
precision matrix-- is very far from the ground truth.
If we use l2 shrinkage, as with the Ledoit-Wolf estimator, as the number
of samples is small, we need to shrink a lot. As a result, the
Ledoit-Wolf precision is fairly close to the ground truth precision, that
is not far from being diagonal, but the off-diagonal structure is lost.
The l1-penalized estimator can recover part of this off-diagonal
structure. It learns a sparse precision. It is not able to
recover the exact sparsity pattern: it detects too many non-zero
coefficients. However, the highest non-zero coefficients of the l1
estimated correspond to the non-zero coefficients in the ground truth.
Finally, the coefficients of the l1 precision estimate are biased toward
zero: because of the penalty, they are all smaller than the corresponding
ground truth value, as can be seen on the figure.
Note that the color range of the precision matrices is tweaked to
improve readability of the figure. The full range of values of the
empirical precision is not displayed.
The alpha parameter of the GraphLasso setting the sparsity of the model is
set by internal cross-validation in the GraphLassoCV. As can be
seen on figure 2, the grid to compute the cross-validation score is
iteratively refined in the neighborhood of the maximum.
"""
print(__doc__)
# author: Gael Varoquaux <gael.varoquaux@inria.fr>
# License: BSD 3 clause
# Copyright: INRIA
import numpy as np
from scipy import linalg
from sklearn.datasets import make_sparse_spd_matrix
from sklearn.covariance import GraphLassoCV, ledoit_wolf
import matplotlib.pyplot as plt
##############################################################################
# Generate the data
n_samples = 60
n_features = 20
prng = np.random.RandomState(1)
prec = make_sparse_spd_matrix(n_features, alpha=.98,
smallest_coef=.4,
largest_coef=.7,
random_state=prng)
cov = linalg.inv(prec)
d = np.sqrt(np.diag(cov))
cov /= d
cov /= d[:, np.newaxis]
prec *= d
prec *= d[:, np.newaxis]
X = prng.multivariate_normal(np.zeros(n_features), cov, size=n_samples)
X -= X.mean(axis=0)
X /= X.std(axis=0)
##############################################################################
# Estimate the covariance
emp_cov = np.dot(X.T, X) / n_samples
model = GraphLassoCV()
model.fit(X)
cov_ = model.covariance_
prec_ = model.precision_
lw_cov_, _ = ledoit_wolf(X)
lw_prec_ = linalg.inv(lw_cov_)
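# (Added illustration, not part of the original example) One simple way to compare the recovered
# sparsity pattern with the ground truth, using an arbitrary small tolerance:
#   true_support = prec != 0
#   estimated_support = np.abs(prec_) > 1e-8
#   print("support agreement: %.2f" % np.mean(true_support == estimated_support))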
##############################################################################
# Plot the results
plt.figure(figsize=(10, 6))
plt.subplots_adjust(left=0.02, right=0.98)
# plot the covariances
covs = [('Empirical', emp_cov), ('Ledoit-Wolf', lw_cov_),
('GraphLasso', cov_), ('True', cov)]
vmax = cov_.max()
for i, (name, this_cov) in enumerate(covs):
plt.subplot(2, 4, i + 1)
plt.imshow(this_cov, interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s covariance' % name)
# plot the precisions
precs = [('Empirical', linalg.inv(emp_cov)), ('Ledoit-Wolf', lw_prec_),
('GraphLasso', prec_), ('True', prec)]
vmax = .9 * prec_.max()
for i, (name, this_prec) in enumerate(precs):
ax = plt.subplot(2, 4, i + 5)
plt.imshow(np.ma.masked_equal(this_prec, 0),
interpolation='nearest', vmin=-vmax, vmax=vmax,
cmap=plt.cm.RdBu_r)
plt.xticks(())
plt.yticks(())
plt.title('%s precision' % name)
ax.set_axis_bgcolor('.7')
# plot the model selection metric
plt.figure(figsize=(4, 3))
plt.axes([.2, .15, .75, .7])
plt.plot(model.cv_alphas_, np.mean(model.grid_scores, axis=1), 'o-')
plt.axvline(model.alpha_, color='.5')
plt.title('Model selection')
plt.ylabel('Cross-validation score')
plt.xlabel('alpha')
plt.show()
| bsd-3-clause |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/matplotlib/tests/test_collections.py | 2 | 21231 | """
Tests specific to the collections module.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import io
import numpy as np
from numpy.testing import (
assert_array_equal, assert_array_almost_equal, assert_equal)
import pytest
import matplotlib.pyplot as plt
import matplotlib.collections as mcollections
import matplotlib.transforms as mtransforms
from matplotlib.collections import Collection, EventCollection
from matplotlib.testing.decorators import image_comparison
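# (Added note) These tests are normally collected and run with pytest against the bundled image
# baselines, e.g. something along the lines of:
#   pytest lib/matplotlib/tests/test_collections.py -k EventCollection
# The exact path is an assumption and depends on the checkout layout.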
def generate_EventCollection_plot():
'''
generate the initial collection and plot it
'''
positions = np.array([0., 1., 2., 3., 5., 8., 13., 21.])
extra_positions = np.array([34., 55., 89.])
orientation = 'horizontal'
lineoffset = 1
linelength = .5
linewidth = 2
color = [1, 0, 0, 1]
linestyle = 'solid'
antialiased = True
coll = EventCollection(positions,
orientation=orientation,
lineoffset=lineoffset,
linelength=linelength,
linewidth=linewidth,
color=color,
linestyle=linestyle,
antialiased=antialiased
)
fig = plt.figure()
splt = fig.add_subplot(1, 1, 1)
splt.add_collection(coll)
splt.set_title('EventCollection: default')
props = {'positions': positions,
'extra_positions': extra_positions,
'orientation': orientation,
'lineoffset': lineoffset,
'linelength': linelength,
'linewidth': linewidth,
'color': color,
'linestyle': linestyle,
'antialiased': antialiased
}
splt.set_xlim(-1, 22)
splt.set_ylim(0, 2)
return splt, coll, props
@image_comparison(baseline_images=['EventCollection_plot__default'])
def test__EventCollection__get_segments():
'''
check to make sure the default segments have the correct coordinates
'''
_, coll, props = generate_EventCollection_plot()
check_segments(coll,
props['positions'],
props['linelength'],
props['lineoffset'],
props['orientation'])
def test__EventCollection__get_positions():
'''
check to make sure the default positions match the input positions
'''
_, coll, props = generate_EventCollection_plot()
np.testing.assert_array_equal(props['positions'], coll.get_positions())
def test__EventCollection__get_orientation():
'''
check to make sure the default orientation matches the input
orientation
'''
_, coll, props = generate_EventCollection_plot()
assert_equal(props['orientation'], coll.get_orientation())
def test__EventCollection__is_horizontal():
'''
check to make sure the default orientation matches the input
orientation
'''
_, coll, _ = generate_EventCollection_plot()
assert_equal(True, coll.is_horizontal())
def test__EventCollection__get_linelength():
'''
check to make sure the default linelength matches the input linelength
'''
_, coll, props = generate_EventCollection_plot()
assert_equal(props['linelength'], coll.get_linelength())
def test__EventCollection__get_lineoffset():
'''
check to make sure the default lineoffset matches the input lineoffset
'''
_, coll, props = generate_EventCollection_plot()
assert_equal(props['lineoffset'], coll.get_lineoffset())
def test__EventCollection__get_linestyle():
'''
check to make sure the default linestyle matches the input linestyle
'''
_, coll, _ = generate_EventCollection_plot()
assert_equal(coll.get_linestyle(), [(None, None)])
def test__EventCollection__get_color():
'''
check to make sure the default color matches the input color
'''
_, coll, props = generate_EventCollection_plot()
np.testing.assert_array_equal(props['color'], coll.get_color())
check_allprop_array(coll.get_colors(), props['color'])
@image_comparison(baseline_images=['EventCollection_plot__set_positions'])
def test__EventCollection__set_positions():
'''
check to make sure set_positions works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_positions = np.hstack([props['positions'], props['extra_positions']])
coll.set_positions(new_positions)
np.testing.assert_array_equal(new_positions, coll.get_positions())
check_segments(coll, new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: set_positions')
splt.set_xlim(-1, 90)
@image_comparison(baseline_images=['EventCollection_plot__add_positions'])
def test__EventCollection__add_positions():
'''
check to make sure add_positions works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_positions = np.hstack([props['positions'],
props['extra_positions'][0]])
coll.add_positions(props['extra_positions'][0])
np.testing.assert_array_equal(new_positions, coll.get_positions())
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: add_positions')
splt.set_xlim(-1, 35)
@image_comparison(baseline_images=['EventCollection_plot__append_positions'])
def test__EventCollection__append_positions():
'''
check to make sure append_positions works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_positions = np.hstack([props['positions'],
props['extra_positions'][2]])
coll.append_positions(props['extra_positions'][2])
np.testing.assert_array_equal(new_positions, coll.get_positions())
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: append_positions')
splt.set_xlim(-1, 90)
@image_comparison(baseline_images=['EventCollection_plot__extend_positions'])
def test__EventCollection__extend_positions():
'''
check to make sure extend_positions works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_positions = np.hstack([props['positions'],
props['extra_positions'][1:]])
coll.extend_positions(props['extra_positions'][1:])
np.testing.assert_array_equal(new_positions, coll.get_positions())
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: extend_positions')
splt.set_xlim(-1, 90)
@image_comparison(baseline_images=['EventCollection_plot__switch_orientation'])
def test__EventCollection__switch_orientation():
'''
check to make sure switch_orientation works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_orientation = 'vertical'
coll.switch_orientation()
assert_equal(new_orientation, coll.get_orientation())
assert_equal(False, coll.is_horizontal())
new_positions = coll.get_positions()
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'], new_orientation)
splt.set_title('EventCollection: switch_orientation')
splt.set_ylim(-1, 22)
splt.set_xlim(0, 2)
@image_comparison(
baseline_images=['EventCollection_plot__switch_orientation__2x'])
def test__EventCollection__switch_orientation_2x():
'''
check to make sure calling switch_orientation twice sets the
orientation back to the default
'''
splt, coll, props = generate_EventCollection_plot()
coll.switch_orientation()
coll.switch_orientation()
new_positions = coll.get_positions()
assert_equal(props['orientation'], coll.get_orientation())
assert_equal(True, coll.is_horizontal())
np.testing.assert_array_equal(props['positions'], new_positions)
check_segments(coll,
new_positions,
props['linelength'],
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: switch_orientation 2x')
@image_comparison(baseline_images=['EventCollection_plot__set_orientation'])
def test__EventCollection__set_orientation():
'''
check to make sure set_orientation works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_orientation = 'vertical'
coll.set_orientation(new_orientation)
assert_equal(new_orientation, coll.get_orientation())
assert_equal(False, coll.is_horizontal())
check_segments(coll,
props['positions'],
props['linelength'],
props['lineoffset'],
new_orientation)
splt.set_title('EventCollection: set_orientation')
splt.set_ylim(-1, 22)
splt.set_xlim(0, 2)
@image_comparison(baseline_images=['EventCollection_plot__set_linelength'])
def test__EventCollection__set_linelength():
'''
check to make sure set_linelength works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_linelength = 15
coll.set_linelength(new_linelength)
assert_equal(new_linelength, coll.get_linelength())
check_segments(coll,
props['positions'],
new_linelength,
props['lineoffset'],
props['orientation'])
splt.set_title('EventCollection: set_linelength')
splt.set_ylim(-20, 20)
@image_comparison(baseline_images=['EventCollection_plot__set_lineoffset'])
def test__EventCollection__set_lineoffset():
'''
check to make sure set_lineoffset works properly
'''
splt, coll, props = generate_EventCollection_plot()
new_lineoffset = -5.
coll.set_lineoffset(new_lineoffset)
assert_equal(new_lineoffset, coll.get_lineoffset())
check_segments(coll,
props['positions'],
props['linelength'],
new_lineoffset,
props['orientation'])
splt.set_title('EventCollection: set_lineoffset')
splt.set_ylim(-6, -4)
@image_comparison(baseline_images=['EventCollection_plot__set_linestyle'])
def test__EventCollection__set_linestyle():
'''
check to make sure set_linestyle works properly
'''
splt, coll, _ = generate_EventCollection_plot()
new_linestyle = 'dashed'
coll.set_linestyle(new_linestyle)
assert_equal(coll.get_linestyle(), [(0, (6.0, 6.0))])
splt.set_title('EventCollection: set_linestyle')
@image_comparison(baseline_images=['EventCollection_plot__set_ls_dash'],
remove_text=True)
def test__EventCollection__set_linestyle_single_dash():
'''
check to make sure set_linestyle accepts a single dash pattern
'''
splt, coll, _ = generate_EventCollection_plot()
new_linestyle = (0, (6., 6.))
coll.set_linestyle(new_linestyle)
assert_equal(coll.get_linestyle(), [(0, (6.0, 6.0))])
splt.set_title('EventCollection: set_linestyle')
@image_comparison(baseline_images=['EventCollection_plot__set_linewidth'])
def test__EventCollection__set_linewidth():
'''
    check to make sure set_linewidth works properly
'''
splt, coll, _ = generate_EventCollection_plot()
new_linewidth = 5
coll.set_linewidth(new_linewidth)
assert_equal(coll.get_linewidth(), new_linewidth)
splt.set_title('EventCollection: set_linewidth')
@image_comparison(baseline_images=['EventCollection_plot__set_color'])
def test__EventCollection__set_color():
'''
check to make sure set_color works properly
'''
splt, coll, _ = generate_EventCollection_plot()
new_color = np.array([0, 1, 1, 1])
coll.set_color(new_color)
np.testing.assert_array_equal(new_color, coll.get_color())
check_allprop_array(coll.get_colors(), new_color)
splt.set_title('EventCollection: set_color')
def check_segments(coll, positions, linelength, lineoffset, orientation):
'''
check to make sure all values in the segment are correct, given a
particular set of inputs
note: this is not a test, it is used by tests
'''
segments = coll.get_segments()
    if (orientation is None or orientation.lower() == 'none'
            or orientation.lower() == 'horizontal'):
        # if horizontal, the line extent is along the y-axis and the event
        # positions are along the x-axis
        pos1 = 1
        pos2 = 0
    elif orientation.lower() == 'vertical':
        # if vertical, the line extent is along the x-axis and the event
        # positions are along the y-axis
        pos1 = 0
        pos2 = 1
else:
raise ValueError("orientation must be 'horizontal' or 'vertical'")
# test to make sure each segment is correct
for i, segment in enumerate(segments):
assert_equal(segment[0, pos1], lineoffset + linelength / 2.)
assert_equal(segment[1, pos1], lineoffset - linelength / 2.)
assert_equal(segment[0, pos2], positions[i])
assert_equal(segment[1, pos2], positions[i])
def check_allprop_array(values, target):
'''
check to make sure all values match the given target if arrays
note: this is not a test, it is used by tests
'''
for value in values:
np.testing.assert_array_equal(value, target)
def test_null_collection_datalim():
col = mcollections.PathCollection([])
col_data_lim = col.get_datalim(mtransforms.IdentityTransform())
assert_array_equal(col_data_lim.get_points(),
mtransforms.Bbox.null().get_points())
def test_add_collection():
# Test if data limits are unchanged by adding an empty collection.
# Github issue #1490, pull #1497.
plt.figure()
ax = plt.axes()
coll = ax.scatter([0, 1], [0, 1])
ax.add_collection(coll)
bounds = ax.dataLim.bounds
coll = ax.scatter([], [])
assert_equal(ax.dataLim.bounds, bounds)
def test_quiver_limits():
ax = plt.axes()
x, y = np.arange(8), np.arange(10)
u = v = np.linspace(0, 10, 80).reshape(10, 8)
q = plt.quiver(x, y, u, v)
assert_equal(q.get_datalim(ax.transData).bounds, (0., 0., 7., 9.))
plt.figure()
ax = plt.axes()
x = np.linspace(-5, 10, 20)
y = np.linspace(-2, 4, 10)
y, x = np.meshgrid(y, x)
trans = mtransforms.Affine2D().translate(25, 32) + ax.transData
plt.quiver(x, y, np.sin(x), np.cos(y), transform=trans)
assert_equal(ax.dataLim.bounds, (20.0, 30.0, 15.0, 6.0))
def test_barb_limits():
ax = plt.axes()
x = np.linspace(-5, 10, 20)
y = np.linspace(-2, 4, 10)
y, x = np.meshgrid(y, x)
trans = mtransforms.Affine2D().translate(25, 32) + ax.transData
plt.barbs(x, y, np.sin(x), np.cos(y), transform=trans)
# The calculated bounds are approximately the bounds of the original data,
# this is because the entire path is taken into account when updating the
# datalim.
assert_array_almost_equal(ax.dataLim.bounds, (20, 30, 15, 6),
decimal=1)
@image_comparison(baseline_images=['EllipseCollection_test_image'],
extensions=['png'],
remove_text=True)
def test_EllipseCollection():
# Test basic functionality
fig, ax = plt.subplots()
x = np.arange(4)
y = np.arange(3)
X, Y = np.meshgrid(x, y)
XY = np.vstack((X.ravel(), Y.ravel())).T
ww = X/float(x[-1])
hh = Y/float(y[-1])
aa = np.ones_like(ww) * 20 # first axis is 20 degrees CCW from x axis
ec = mcollections.EllipseCollection(ww, hh, aa,
units='x',
offsets=XY,
transOffset=ax.transData,
facecolors='none')
ax.add_collection(ec)
ax.autoscale_view()
@image_comparison(baseline_images=['polycollection_close'],
extensions=['png'], remove_text=True)
def test_polycollection_close():
from mpl_toolkits.mplot3d import Axes3D
vertsQuad = [
[[0., 0.], [0., 1.], [1., 1.], [1., 0.]],
[[0., 1.], [2., 3.], [2., 2.], [1., 1.]],
[[2., 2.], [2., 3.], [4., 1.], [3., 1.]],
[[3., 0.], [3., 1.], [4., 1.], [4., 0.]]]
fig = plt.figure()
ax = Axes3D(fig)
colors = ['r', 'g', 'b', 'y', 'k']
zpos = list(range(5))
poly = mcollections.PolyCollection(
vertsQuad * len(zpos), linewidth=0.25)
poly.set_alpha(0.7)
# need to have a z-value for *each* polygon = element!
zs = []
cs = []
for z, c in zip(zpos, colors):
zs.extend([z] * len(vertsQuad))
cs.extend([c] * len(vertsQuad))
poly.set_color(cs)
ax.add_collection3d(poly, zs=zs, zdir='y')
# axis limit settings:
ax.set_xlim3d(0, 4)
ax.set_zlim3d(0, 3)
ax.set_ylim3d(0, 4)
@image_comparison(baseline_images=['regularpolycollection_rotate'],
extensions=['png'], remove_text=True)
def test_regularpolycollection_rotate():
xx, yy = np.mgrid[:10, :10]
xy_points = np.transpose([xx.flatten(), yy.flatten()])
rotations = np.linspace(0, 2*np.pi, len(xy_points))
fig, ax = plt.subplots()
for xy, alpha in zip(xy_points, rotations):
col = mcollections.RegularPolyCollection(
4, sizes=(100,), rotation=alpha,
offsets=[xy], transOffset=ax.transData)
ax.add_collection(col, autolim=True)
ax.autoscale_view()
@image_comparison(baseline_images=['regularpolycollection_scale'],
extensions=['png'], remove_text=True)
def test_regularpolycollection_scale():
# See issue #3860
class SquareCollection(mcollections.RegularPolyCollection):
def __init__(self, **kwargs):
super(SquareCollection, self).__init__(
4, rotation=np.pi/4., **kwargs)
def get_transform(self):
"""Return transform scaling circle areas to data space."""
ax = self.axes
pts2pixels = 72.0 / ax.figure.dpi
scale_x = pts2pixels * ax.bbox.width / ax.viewLim.width
scale_y = pts2pixels * ax.bbox.height / ax.viewLim.height
return mtransforms.Affine2D().scale(scale_x, scale_y)
fig, ax = plt.subplots()
xy = [(0, 0)]
# Unit square has a half-diagonal of `1 / sqrt(2)`, so `pi * r**2`
# equals...
circle_areas = [np.pi / 2]
squares = SquareCollection(sizes=circle_areas, offsets=xy,
transOffset=ax.transData)
ax.add_collection(squares, autolim=True)
ax.axis([-1, 1, -1, 1])
def test_picking():
fig, ax = plt.subplots()
col = ax.scatter([0], [0], [1000], picker=True)
fig.savefig(io.BytesIO(), dpi=fig.dpi)
class MouseEvent(object):
pass
event = MouseEvent()
event.x = 325
event.y = 240
found, indices = col.contains(event)
assert found
assert_array_equal(indices['ind'], [0])
def test_linestyle_single_dashes():
plt.scatter([0, 1, 2], [0, 1, 2], linestyle=(0., [2., 2.]))
plt.draw()
@image_comparison(baseline_images=['size_in_xy'], remove_text=True,
extensions=['png'])
def test_size_in_xy():
fig, ax = plt.subplots()
widths, heights, angles = (10, 10), 10, 0
widths = 10, 10
coords = [(10, 10), (15, 15)]
e = mcollections.EllipseCollection(
widths, heights, angles,
units='xy',
offsets=coords,
transOffset=ax.transData)
ax.add_collection(e)
ax.set_xlim(0, 30)
ax.set_ylim(0, 30)
def test_pandas_indexing():
pd = pytest.importorskip('pandas')
    # Should not fail when faced with a
    # non-zero indexed series
index = [11, 12, 13]
ec = fc = pd.Series(['red', 'blue', 'green'], index=index)
lw = pd.Series([1, 2, 3], index=index)
ls = pd.Series(['solid', 'dashed', 'dashdot'], index=index)
aa = pd.Series([True, False, True], index=index)
Collection(edgecolors=ec)
Collection(facecolors=fc)
Collection(linewidths=lw)
Collection(linestyles=ls)
Collection(antialiaseds=aa)
@pytest.mark.style('default')
def test_lslw_bcast():
col = mcollections.PathCollection([])
col.set_linestyles(['-', '-'])
col.set_linewidths([1, 2, 3])
assert_equal(col.get_linestyles(), [(None, None)] * 6)
assert_equal(col.get_linewidths(), [1, 2, 3] * 2)
col.set_linestyles(['-', '-', '-'])
assert_equal(col.get_linestyles(), [(None, None)] * 3)
assert_equal(col.get_linewidths(), [1, 2, 3])
@image_comparison(baseline_images=['scatter_post_alpha'],
extensions=['png'], remove_text=True,
style='default')
def test_scatter_post_alpha():
fig, ax = plt.subplots()
sc = ax.scatter(range(5), range(5), c=range(5))
# this needs to be here to update internal state
fig.canvas.draw()
sc.set_alpha(.1)
| mit |
sthyme/ZFSchizophrenia | BehaviorAnalysis/Alternative_Analyses/Correlation_between_genes/correlations_DISTANCE_betweengenes.py | 1 | 5605 | import matplotlib
matplotlib.use('Agg')
import matplotlib.pylab as plt
import matplotlib.colors as mat_col
from matplotlib.colors import LinearSegmentedColormap
import scipy
import scipy.cluster.hierarchy as sch
from scipy.cluster.hierarchy import set_link_color_palette
import numpy as np
import pandas as pd
import glob
from scipy.stats import pearsonr
from scipy.stats import spearmanr
from scipy.spatial import distance
#Dig=pd.read_csv("all_regions_sum_nPix_perk_red_channel_PaperData_thres50_newnames.csv")
#Dig=pd.read_csv("all_regions_sum_nPix_perk_green_channel_PaperData_thres50_newnames.csv")
#Dig=pd.read_csv("all_regions_sum_perk_red_channel_PaperData_thres50_newnames.csv")
#Dir=pd.read_csv("all_regions_sum_perk_red_channel_PaperData_newnames.csv")
#Db=pd.read_csv("MAYbehaviorfullset_transposed.csv")
Db=pd.read_csv("AUG16_12_dectest.csv")
#Db=pd.read_csv("AUGMAY18testingfinalfullgoodonesoct30nonoise_transposed.csv")
#Dig = Dig.applymap(np.log)
#Digl = Dig # use if skipping log10
#Digl = Dig.applymap(np.log10)
#print Dig
#Digl = Digl.replace([np.inf, -np.inf], 0)
#Digl = Digl.replace([np.inf, -np.inf], np.nan)
# use if not doing log10
#Digl = Digl.replace([0], np.nan)
#Dig = Dig.replace([0], np.nan)
#DignoNA = Dig.dropna()
#Db = Db.apply(lambda x: [y if 0 < y < 0.05 else np.nan for y in x])
#Db = Db.apply(lambda x: [y if -0.05 < y < 0 else np.nan for y in x])
#print Db["adamtsl3"]
#for binarizing
# DEC 2018, THIS BINARIZING WORKS, BUT NOT DOING IT
# only binarizing the "non-significant" data
Db = Db.apply(lambda x: [y if -0.05 < y < 0.05 else 1 for y in x])
# convert all non-significant values to large number
##Db = Db.apply(lambda x: [y if -0.05 < y < 0.05 else 5 for y in x])
#print Db["adamtsl3"]
# keep all positive values, everything negative (between 0 and -0.05) becomes -1
##Db = Db.apply(lambda x: [y if y > 0 else -1 for y in x])
#print Db["adamtsl3"]
##Db = Db.apply(lambda x: [y if y < 2 else 0 for y in x])
#print Db["adamtsl3"]
# everything that is negative or 0 stays the same, everything else (between 0 and 0.05) becomes 1
##Db = Db.apply(lambda x: [y if y <= 0 else 1 for y in x])
#print Db["adamtsl3"]
#Db = Db.apply(lambda x: [y if y == np.nan else 1 for y in x])
#Db = Db.apply(lambda x: [y if y != np.nan else 0 for y in x])
# TRYING LOG ON P-VALUES, NOT SURE IF GOOD IDEA
#Db = Db.applymap(np.log10)
###Db = Db.apply(lambda x: [y if -0.1 < y < 0.1 else np.nan for y in x])
#print Db
#exit()
corrlist = []
dfdict = {}
dfdictdist = {}
collist = []
for column1 in Db:
for column2 in Db:
corr = Db[column1].corr(Db[column2], min_periods=6)
# dist = np.square(Db[column1] - Db[column2])
# print dist
dist = distance.euclidean(Db[column1], Db[column2])
# print dist
#corr = Db[column1].corr(Dig[column2], method='spearman', min_periods=7)
# if corr > 0.6 or corr < -0.6:
#corrlist.append( (corr, column1, column2))
#newdf = pd.concat([Dig[column2], Digl[column2], Db[column1]], axis=1)
newdf = pd.concat([Db[column2], Db[column1]], axis=1)
# newdf = newdf.dropna()
corrlist.append( (corr, newdf, column1, column2, dist))
if column1 in dfdict.keys():
dfdict[column1].append(corr)
dfdictdist[column1].append(dist)
else:
dfdict[column1] = []
dfdictdist[column1] = []
dfdict[column1].append(corr)
dfdictdist[column1].append(dist)
if column2 not in collist:
collist.append(column2)
#corrlist.append( (corr, column1, column2, newdf))
#newdf = Dig[column2].copy()
#newdf2 = newdf.concat(Db[column1])
#newdf[column1] = Db[column1]
#print newdf.dropna()
#exit()
# break
#break
#print dfdict
#print dfdictdist
#print collist
dfcor = pd.DataFrame.from_dict(dfdict, orient='index')
dfcor.columns = collist
dfdist = pd.DataFrame.from_dict(dfdictdist, orient='index')
dfdist.columns = collist
dfcor = dfcor.sort_index()
dfdist = dfdist.sort_index()
dfcor.to_csv("dec_correlation_sort1.csv")
dfdist.to_csv("dec_distance_sort1.csv")
#print dfcor
#corrlist.sort(key=lambda tup: tup[0])
#old way of just printing before generate the DF
##for i in range(0, len(corrlist)):
## print corrlist[i][0], corrlist[i][4], corrlist[i][2], corrlist[i][3]
#print corrlist[i][1]
#print corrlist[i][2]
#Db=pd.read_csv("MAY2018fullheatmapsetfinal_0.csv")
#Db = Db.transpose()
#Dig = Dig.values
#Dir = Dir.values
#Db = Db.values
#print "test1"
#with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# print Dig
#print "test2"
#with pd.option_context('display.max_rows', None, 'display.max_columns', None):
# print Db
#Digb = Dig[:,1:]
#Dirb = Dir[:,1:]
#Digb = np.delete(Dig, 0, axis=1)
#Dbb = Db[:,1:]
#Dbb = np.delete(Db, 0, axis=1)
#Digb = np.log(Digb)
#Digb = Dig.values
#Dbb = Db.values
#print "test1"
#print Dbb
#print "test2"
#print Digb
#print np.shape(Dbb)
#print np.shape(Digb)
#for row in range(Digb.shape[0]):
#print str(pearsonr(Dbb[row,:], Digb[row,:]))
#print str(pearsonr(Dbb[:,row], Digb[:,row]))
#spearlist = []
#print "green correlation"
#for column1 in Digb.T:
# for column2 in Dbb.T:
# spearlist.append(str(spearmanr(column1, column2, nan_policy='omit')))
#spearlist.sort()
#for s in spearlist:
# print s
#print "red correlation"
#for column3 in Dirb.T:
# for column4 in Dbb.T:
# print str(pearsonr(column3, column4))
#for column1 in Dig:
# for column2 in Db:
# print column1.corr
#print "green correlation"
#with pd.option_context('display.max_rows', None, 'display.max_columns', None):
#print Dig.corrwith(Db.set_axis(Dig.columns, axis='columns', inplace=False))
#print Dig.corrwith(Db)
#print "red correlation"
#Dir.corrwith(Db)
| mit |
sillvan/hyperspy | hyperspy/drawing/_markers/horizontal_line_segment.py | 1 | 3320 | # -*- coding: utf-8 -*-
# Copyright 2007-2011 The Hyperspy developers
#
# This file is part of Hyperspy.
#
# Hyperspy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hyperspy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hyperspy. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
from hyperspy.drawing.marker import MarkerBase
class HorizontalLineSegment(MarkerBase):
"""Horizontal line segment marker that can be added to the signal figure
Parameters
    ----------
x1: array or float
The position of the start of the line segment in x.
If float, the marker is fixed.
        If array, the marker will be updated when navigating. The array should
        have the same dimensions in the navigation axes.
x2: array or float
The position of the end of the line segment in x.
see x1 arguments
y: array or float
The position of line segment in y.
see x1 arguments
kwargs:
        Keyword arguments of axvline valid properties (i.e. recognized by
        mpl.plot).
Example
-------
>>> import numpy as np
>>> im = signals.Image(np.zeros((100, 100)))
>>> m = utils.plot.markers.horizontal_line_segment(
>>> x1=20, x2=70, y=70, linewidth=4, color='red', linestyle='dotted')
>>> im.add_marker(m)
"""
def __init__(self, x1, x2, y, **kwargs):
MarkerBase.__init__(self)
lp = {}
lp['color'] = 'black'
lp['linewidth'] = 1
self.marker_properties = lp
self.set_data(x1=x1, x2=x2, y1=y)
self.set_marker_properties(**kwargs)
def update(self):
if self.auto_update is False:
return
self._update_segment()
def plot(self):
if self.ax is None:
raise AttributeError(
"To use this method the marker needs to be first add to a " +
"figure using `s._plot.signal_plot.add_marker(m)` or " +
"`s._plot.navigator_plot.add_marker(m)`")
self.marker = self.ax.vlines(0, 0, 1, **self.marker_properties)
self._update_segment()
self.marker.set_animated(True)
try:
self.ax.hspy_fig._draw_animated()
except:
pass
def _update_segment(self):
segments = self.marker.get_segments()
segments[0][0, 1] = self.get_data_position('y1')
segments[0][1, 1] = segments[0][0, 1]
if self.get_data_position('x1') is None:
segments[0][0, 0] = plt.getp(self.marker.axes, 'xlim')[0]
else:
segments[0][0, 0] = self.get_data_position('x1')
if self.get_data_position('x2') is None:
segments[0][1, 0] = plt.getp(self.marker.axes, 'xlim')[1]
else:
segments[0][1, 0] = self.get_data_position('x2')
self.marker.set_segments(segments)
| gpl-3.0 |
giacomov/lclike | lclike/duration_computation.py | 1 | 12141 | __author__ = 'giacomov'
# !/usr/bin/env python
# add |^| to the top line to run the script without needing 'python' to run it at cmd
# importing modules
import numpy as np
# cant use 'show' inside the farm
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib import gridspec
import os
import argparse
import decayLikelihood
import warnings
####################################################################
mycmd = argparse.ArgumentParser() # this is a class
mycmd.add_argument('triggername', help="The name of the GRB in YYMMDDXXX format (ex. bn080916009)")
mycmd.add_argument('redshift', help="Redshift for object.")
mycmd.add_argument('function', help="Function to model. (ex. crystalball2, band)")
mycmd.add_argument('directory', help="Directory containing the file produced by gtburst")
if __name__ == "__main__":
args = mycmd.parse_args()
os.chdir(args.directory)
##############################################################################
textfile = os.path.join(args.directory, '%s_res.txt' % (args.triggername))
tbin = np.recfromtxt(textfile, names=True)
textfile = os.path.join(args.directory, '%s_MCsamples_%s.txt' % (args.triggername, args.function))
samples = np.recfromtxt(textfile, names=True)
# function for returning 1 and 2 sigma errors from sample median
def getErr(sampleArr):
# compute sample percentiles for 1 and 2 sigma
m, c, p = np.percentile(sampleArr, [16, 50, 84])
# print("%.3f -%.3f +%.3f" %(c,m-c,p-c)) median, minus, plus
m2, c2, p2 = np.percentile(sampleArr, [3, 50, 97])
return m, c, p, m2, c2, p2
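    # Example (sketch): for an array of MCMC samples of one parameter,
    #     m, c, p, m2, c2, p2 = getErr(samples_of_parameter)
    # returns the 16/50/84 percentiles (~1 sigma) and the 3/50/97 percentiles
    # (~2 sigma), so the 1-sigma interval can be reported as
    #     print("%.3f -%.3f +%.3f" % (c, c - m, p - c))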
# prepare for plotting and LOOP
t = np.logspace(0, 4, 100)
t = np.append(t, np.linspace(0, 1, 10))
t.sort()
t = np.unique(t)
print('NUMBER OF times to iterate: %s' % (len(t)))
x = decayLikelihood.DecayLikelihood()
if args.function == 'crystalball2':
        crystal = decayLikelihood.CrystalBall2()  # declaring the CrystalBall2 decay function used by DecayLikelihood
x.setDecayFunction(crystal)
# CrystalBall DiffFlux####################################################
Peak = np.zeros(samples.shape[0])
ePeak = np.zeros(samples.shape[0])
tPeak = np.zeros(samples.shape[0])
tePeak = np.zeros(samples.shape[0])
print('ENTERING samples LOOP')
# mu,sigma,decayIndex, and N
for i, parameters in enumerate(samples):
x.decayFunction.setParameters(*parameters)
# NORMALIZATION IS THE FLUX AT THE PEAK
pB = parameters[3] # decay time is independent of scale # (y*.001) # scale =0.001, for all xml files
fBe = pB / np.e
# t = (fBe/N)**(-1/a) defined to be 1
mu = parameters[0]
tP = mu
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
teP = mu + (fBe / parameters[3]) ** (
-1 / parameters[2]) # sometimes 'RuntimeWarning: overflow encountered in double_scalars'
except Warning:
print('RuntimeWarning Raised! mu,sigma,decayIndex,and N:', parameters)
teP = parameters[0] + (fBe / parameters[3]) ** (-1 / parameters[2])
Peak[i] = pB
ePeak[i] = fBe
# redshift correcting t/(1+z)
tPeak[i] = tP / (1 + float(args.redshift)) ################################
tePeak[i] = teP / (1 + float(args.redshift)) ################################
elif args.function == 'band':
        band = decayLikelihood.DecayBand()  # declaring the DecayBand decay function used by DecayLikelihood
x.setDecayFunction(band)
Peak = np.zeros(samples.shape[0])
        ePeak = np.zeros(samples.shape[0])  # fractional brightness used in calculating char-time, but not needed otherwise
tPeak = np.zeros(samples.shape[0])
tePeak = np.zeros(samples.shape[0]) # characteristic time
T05 = np.zeros(samples.shape[0])
T90 = np.zeros(samples.shape[0])
T95 = np.zeros(samples.shape[0])
T25 = np.zeros(samples.shape[0])
T50 = np.zeros(samples.shape[0])
T75 = np.zeros(samples.shape[0])
print('ENTERING samples LOOP')
# mu,sigma,decayIndex, and N
for i, parameters in enumerate(samples):
x.decayFunction.setParameters(*parameters)
tc = band.getCharacteristicTime() # get the characteristic time.
# T50/T90 TAKING TOO LONG (1/4)
# t90, t05, t95 = band.getTsomething( 90 ) # if the argument is 90, returns the T90 as well as the T05 and the T95. If the argument is 50, returns the T50 as well as the T25 and T75, and so on.
# t50, t25, t75 = band.getTsomething( 50 )
tp, fp = band.getPeakTimeAndFlux() # returns the time of the peak, as well as the peak flux
tePeak[i] = tc / (1 + float(args.redshift)) ################################
tPeak[i] = tp / (1 + float(args.redshift))
Peak[i] = fp
# T50/T90 TAKING TOO LONG (2/4)
# T05[i] = t05/(1+float(args.redshift))
# T90[i] = t90/(1+float(args.redshift))
# T95[i] = t95/(1+float(args.redshift))
# T50/T90 TAKING TOO LONG (3/4)
# T25[i] = t25/(1+float(args.redshift))
# T50[i] = t50/(1+float(args.redshift))
# T75[i] = t75/(1+float(args.redshift))
# Defining sigma bands
print('ENTERING Percentile LOOP')
upper = np.zeros(t.shape[0])
lower = np.zeros(t.shape[0])
upper2 = np.zeros(t.shape[0])
lower2 = np.zeros(t.shape[0])
meas = np.zeros(t.shape[0])
fluxMatrix = np.zeros([samples.shape[0], t.shape[0]])
for i, s in enumerate(samples):
x.decayFunction.setParameters(*s)
fluxes = map(x.decayFunction.getDifferentialFlux, t)
fluxMatrix[i, :] = np.array(fluxes)
for i, tt in enumerate(t):
allFluxes = fluxMatrix[:, i]
m, p = np.percentile(allFluxes, [16, 84])
lower[i] = m
upper[i] = p
m2, p2 = np.percentile(allFluxes, [2.5, 97.5])
lower2[i] = m2
upper2[i] = p2
wdir = '%s' % (args.directory)
# save TXT files instead of .npy
placeFile = os.path.join(wdir, "%s_tBrightness_%s" % (args.triggername, args.function))
with open(placeFile, 'w+') as f:
f.write("Peak tPeak ePeak tePeak\n")
for i, s in enumerate(Peak):
f.write("%s %s %s %s\n" % (Peak[i], tPeak[i], ePeak[i], tePeak[i]))
# CALCULATING T50/T90 TAKES TOO LONG
# T50/T90 TAKING TOO LONG (4/4)
# if args.function == 'band':
# #compute percentiles for 1 sigma
# m90,c90,p90 = np.percentile(T90,[16,50,84])
# m50,c50,p50 = np.percentile(T50,[16,50,84])
# #compute percentiles for 1 and 2 sigma
# #90m,90c,90p,90m2,90c2,90p2 = getErr(T90)
# #50m,50c,50p,50m2,50c2,50p2 = getErr(T50)
# #print("%.3f -%.3f +%.3f" %(c,m-c,p-c)) median, minus, plus
#
# placeFile=os.path.join(wdir,"%s_t90_t50_%s" % (args.triggername, args.function) )
# with open(placeFile,'w+') as f:
# f.write("t90 90minus 90plus t50 50minus 50plus\n")
# for i,s in enumerate(T90):
# f.write("%s %s %s %s %s %s\n" % (m90,m90-c90,p90-c90,c50,m50-c50,p50-c50)) #c,m-c,p-c
#
# placeFile=os.path.join(wdir,"%s_samplesT90_%s" % (args.triggername, args.function) )
# with open(placeFile,'w+') as f:
# f.write("t90 t05 t95\n")
# for i,s in enumerate(T90):
# f.write("%s %s %s\n" % (T90[i],T05[i],T95[i]))
# placeFile=os.path.join(wdir,"%s_samplesT50_%s" % (args.triggername, args.function) )
# with open(placeFile,'w+') as f:
# f.write("t50 t25 t25\n")
# for i,s in enumerate(T50):
# f.write("%s %s %s\n" % (T50[i],T25[i],T75[i]))
# compute char-time percentiles for 1 and 2 sigma
m, c, p, m2, c2, p2 = getErr(tePeak)
# saves txt file
wkdir = '%s' % (args.directory)
fileDir = os.path.join(wkdir, '%s_timeRes_%s' % (args.triggername, args.function))
with open(fileDir, 'w+') as f:
f.write('%s %s %s\n' % ('median', 'minus', 'plus'))
f.write('%s %s %s\n' % (c, m - c, p - c))
# PLOTTING BINS AND SIGMA BAND
print("PLOTTING...")
fig = plt.figure()
# median is your "x"
# Y is your "y"
# DY is the array containing the errors
# DY==0 filters only the zero error
data = tbin
# redshift correction /(1+args.redshif)
median = (data["tstart"] + data["tstop"]) / 2 / (1 + float(args.redshift))
start = data['tstart'] / (1 + float(args.redshift)) ##
stop = data['tstop'] / (1 + float(args.redshift)) ##
y = data["photonFlux"]
Dy = data["photonFluxError"]
try:
y = np.core.defchararray.replace(y, "<", "", count=None) # runs through array and removes strings
except:
print('No Upper-Limits Found in %s.' % (args.triggername))
try:
Dy = np.core.defchararray.replace(Dy, "n.a.", "0",
count=None) ## 0 error is nonphysical, and will be checked for in plotting
except:
print('No 0-Error Found in %s.' % (args.triggername))
bar = 0.5
color = "blue"
Y = np.empty(0, dtype=float) # makes empty 1-D array for float values
for i in y:
Y = np.append(Y, float(i))
DY = np.empty(0, dtype=float)
for i in Dy:
DY = np.append(DY, float(i))
plt.clf()
if (DY > 0).sum() > 0: # if sum() gives a non-zero value then there are error values
plt.errorbar(median[DY > 0], Y[DY > 0],
xerr=[median[DY > 0] - start[DY > 0], stop[DY > 0] - median[DY > 0]],
yerr=DY[DY > 0], ls='None', marker='o', mfc=color, mec=color, ecolor=color, lw=2, label=None)
if (DY == 0).sum() > 0:
plt.errorbar(median[DY == 0], Y[DY == 0],
xerr=[median[DY == 0] - start[DY == 0], stop[DY == 0] - median[DY == 0]],
yerr=[bar * Y[DY == 0], 0.0 * Y[DY == 0]], lolims=True, ls='None', marker='', mfc=color, mec=color,
ecolor=color, lw=2, label=None)
plt.suptitle('%s photonFlux per Time' % (args.triggername))
plt.xlabel('Rest Frame Time(s)')
plt.ylabel('Photon Flux')
plt.xscale('symlog')
plt.yscale('log')
plt.grid(True)
if args.function == 'crystalball2':
SCALE = 0.001
elif args.function == 'band':
SCALE = 1.0 # 0.1 # shouldn't need a scale anymore for Band function
ylo = 1e-7 # min(lower2*SCALE)*1e-1 # CANT GET THIS TO WORK YET DYNAMICALLY
yup = max(upper2 * SCALE) * 10
plt.ylim([ylo, yup])
# correcting for redshift t/(1+args.redshift)
plt.fill_between(t / (1 + float(args.redshift)), lower * SCALE, upper * SCALE, alpha=0.5, color='blue')
plt.fill_between(t / (1 + float(args.redshift)), lower2 * SCALE, upper2 * SCALE, alpha=0.3, color='green')
# y = map(x.decayFunction.getDifferentialFlux, t) # maps infinitesimal values of flux at time t to y
# raw_input("Press ENTER")
# PowerLaw
# plt.plot(t,,'o')
# saves plots
wdir = '%s' % (args.directory)
imsave = os.path.join(wdir, '%s_objFit_%s' % (args.triggername, args.function))
plt.savefig(imsave + '.png')
# histograms of 1/e and save
print("Making histograms")
fig = plt.figure(figsize=(10, 6))
gs = gridspec.GridSpec(1, 2, width_ratios=[1, 1])
bins = np.linspace(min(tePeak), np.max(tePeak), 100)
ax0 = plt.subplot(gs[0])
ax0.hist(tePeak, bins, normed=True)
plt.title('1/e (min to medx2)')
plt.xlabel('1/e time (s)')
plt.xlim([min(tePeak), np.median(tePeak) * 2])
ax1 = plt.subplot(gs[1])
ax1.hist(tePeak, bins, normed=True)
plt.title('1/e (min to max)')
plt.xlabel('time (s)')
plt.tight_layout()
imsave = os.path.join(wdir, '%s_hist_%s' % (args.triggername, args.function))
plt.savefig(imsave + '.png')
print("Finished Potting/Saving!")
| bsd-3-clause |
numenta/nupic.vision | src/nupic/vision/data/OCR/characters/parseJPG.py | 3 | 7772 | #!/usr/bin/python2
'''
This script parses JPEG images of text documents to isolate and save images
of individual characters. The size of these output images in pixels is
specified by the parameters desired_height and desired_width.
The JPEG images are converted to grey scale using a parameter called
luminance_threshold to distinguish between light and dark pixels. Lines of
text are found by searching for rows that contain dark pixels, and
characters are found by searching for columns that contain dark pixels. Once
a character is found it is padded with blank rows and columns to obtain the
desired size. The images are saved using the filenames given in the XML file.
'''
# Set desired output image height and width in pixels
desired_height = 32
desired_width = 32
DEBUG = False
import matplotlib.pyplot as plot
import numpy as np
import operator
import sys
import re
import os
from PIL import Image
from xml.dom import minidom
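# Hedged illustration (not used by the script below): the line-finding idea
# described in the header can be written as a single vectorized helper,
# assuming `pixels` is the 2-D numpy array of grey-scale luminance values
# built later in this script.
def rowsContainingText(pixels, luminance_threshold=100):
  """Return indices of rows that contain at least one dark (text) pixel."""
  # a row belongs to a line of text if its darkest pixel is below threshold
  return np.where(pixels.min(axis=1) < luminance_threshold)[0]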
jpg_list = [ 'characters-0.jpg', 'characters-1.jpg', 'characters-2.jpg',
'characters-3.jpg', 'characters-4.jpg', 'characters-5.jpg',
'characters-6.jpg', 'characters-7.jpg', 'characters-8.jpg',
'characters-9.jpg', 'characters-10.jpg', 'characters-11.jpg',
'characters-12.jpg', 'characters-13.jpg', 'characters-14.jpg',
'characters-15.jpg', 'characters-16.jpg', 'characters-17.jpg',
'characters-18.jpg', 'characters-19.jpg' ]
#jpg_list = [ 'debug_doc.jpg' ]
# Parse XML file for filenames to use when saving each character image
xmldoc = minidom.parse('characters.xml')
#xmldoc = minidom.parse('debug_doc.xml')
filelist = xmldoc.getElementsByTagName('image')
print len(filelist)
#for i in range(145):
#print filelist[62*i].attributes['file'].value
# this counter gets used to select file names from an xml file
output_files_saved = 0
for jpg in jpg_list:
print jpg
im = Image.open(jpg)
width, length = im.size
if DEBUG:
print "image size: ", im.size
print "image mode: ", im.mode
print im.size[1],im.size[0]
# read pixel data from image into a numpy array
if im.mode == 'L':
pixels = np.array(list(im.getdata())).reshape(im.size[1],im.size[0])
elif im.mode == 'RGB':
pixels = np.array(list(im.convert('L').getdata())).reshape(im.size[1],
im.size[0])
#im.show()
##############################################################################
# Removed all logic for determining the value to use to distinguish between
# light and dark pixels because this is a non-trivial challenge of its own and
# I want to get to generating a data set for OCR which I can do much faster by
# choosing the threshold manually.
##############################################################################
luminance_threshold = 100
##############################################################################
# parse document for lines of text
##############################################################################
row = 0
while row < length:
    # Find the first row of pixels in the next line of text by skipping blank
    # rows, i.e. rows whose minimum luminance stays above the threshold
    # (all pixels are light)
#row_data = pixels[row * width : row * width + width]
while (row < length and pixels[row,:].min() > luminance_threshold):
row += 1
first_row = row
if DEBUG:
print "the first row of pixels in the line of text is ", first_row
    # Find the last row of pixels in this line of text by counting rows with
    # dark pixels, i.e. rows whose minimum luminance falls below the threshold
while (row < length and pixels[row:row + 2,:].min() < luminance_threshold):
row += 1
last_row = row
#if row < length:
#last_row = row + 2 # this is a hack for Cochin font Q
#row += 5 # this is a hack for Cochin font Q
if DEBUG:
print "the last row of pixels in the line of text is ", last_row
##############################################################################
# parse line of text for characters
##############################################################################
if first_row < last_row:
col = 0
while col < width:
# find first column of pixels in the next character by ignoring blank
# cols of pixels
while col < width and pixels[first_row:last_row,col].min() > luminance_threshold:
col += 1
first_col = col
# find last column of pixels in the next character by counting columns
# with dark pixels
while col < width and \
pixels[first_row:last_row,col:col + 5].min() < luminance_threshold:
col += 1
last_col = col
##############################################################################
# remove blank rows from the top and bottom of characters
##############################################################################
if first_col < last_col:
# remove blank rows from the top of the character
r = first_row;
while pixels[r,first_col:last_col].min() > luminance_threshold:
r = r + 1;
char_first_row = r;
# remove blank rows from the bottom of the character
r = last_row;
while pixels[r,first_col:last_col].min() > luminance_threshold:
r = r - 1;
char_last_row = r + 1;
if DEBUG:
# isolate an image of this character
character = im.crop([first_col, char_first_row, last_col,
char_last_row])
print "Character size after whitespace removal", character.size
print first_col, first_row, last_col, last_row
#character.show()
# pad character width out to desired_width
char_width = last_col - first_col
if char_width > desired_width:
print "Character is wider than ", desired_width
else:
# add the same number of blank columns to the left and right
first_col = first_col - (desired_width - char_width) / 2
last_col = last_col + (desired_width - char_width) / 2
# if the difference was odd we'll be short one column
char_width = last_col - first_col
if char_width < desired_width:
last_col = last_col + 1
# pad character height out to desired_height
char_height = char_last_row - char_first_row
if char_height > desired_height:
print "Character is taller than ", desired_height
else:
# add the same number of blank rows to the left and right
char_first_row = char_first_row - (desired_height - char_height) / 2
char_last_row = char_last_row + (desired_height - char_height) / 2
# if the difference was odd we'll be short one row
char_height = char_last_row - char_first_row
if char_height < desired_height:
char_last_row = char_last_row + 1
character = im.crop([first_col, char_first_row, last_col,
char_last_row])
if DEBUG:
print "Character size after padding", character.size
print first_col, char_first_row, last_col, char_last_row
#character.show()
#garbage = raw_input()
# save image to filename specified in ground truth file
filename = filelist[output_files_saved].attributes['file'].value
directory = filename.split('/')[0]
if not os.path.exists(directory):
os.makedirs(directory)
character.save(filename, "JPEG", quality=80)
output_files_saved = output_files_saved + 1
print output_files_saved
| agpl-3.0 |
jaeilepp/eggie | mne/viz/_3d.py | 1 | 24122 | """Functions to make 3D plots with M/EEG data
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
# Eric Larson <larson.eric.d@gmail.com>
# Mainak Jas <mainak@neuro.hut.fi>
#
# License: Simplified BSD
from ..externals.six import string_types, advance_iterator
from distutils.version import LooseVersion
import os
import inspect
import warnings
from itertools import cycle
import numpy as np
from scipy import linalg
from ..io.pick import pick_types
from ..surface import get_head_surf, get_meg_helmet_surf, read_surface
from ..transforms import read_trans, _find_trans, apply_trans
from ..utils import get_subjects_dir, logger, _check_subject
from .utils import mne_analyze_colormap, _prepare_trellis, COLORS
def plot_evoked_field(evoked, surf_maps, time=None, time_label='t = %0.0f ms',
n_jobs=1):
"""Plot MEG/EEG fields on head surface and helmet in 3D
Parameters
----------
evoked : instance of mne.Evoked
The evoked object.
surf_maps : list
The surface mapping information obtained with make_field_map.
time : float | None
The time point at which the field map shall be displayed. If None,
the average peak latency (across sensor types) is used.
time_label : str
How to print info about the time instant visualized.
n_jobs : int
        Number of jobs to run in parallel.
Returns
-------
fig : instance of mlab.Figure
The mayavi figure.
"""
types = [t for t in ['eeg', 'grad', 'mag'] if t in evoked]
time_idx = None
if time is None:
time = np.mean([evoked.get_peak(ch_type=t)[1] for t in types])
if not evoked.times[0] <= time <= evoked.times[-1]:
raise ValueError('`time` (%0.3f) must be inside `evoked.times`' % time)
time_idx = np.argmin(np.abs(evoked.times - time))
types = [sm['kind'] for sm in surf_maps]
# Plot them
from mayavi import mlab
alphas = [1.0, 0.5]
colors = [(0.6, 0.6, 0.6), (1.0, 1.0, 1.0)]
colormap = mne_analyze_colormap(format='mayavi')
colormap_lines = np.concatenate([np.tile([0., 0., 255., 255.], (127, 1)),
np.tile([0., 0., 0., 255.], (2, 1)),
np.tile([255., 0., 0., 255.], (127, 1))])
fig = mlab.figure(bgcolor=(0.0, 0.0, 0.0), size=(600, 600))
for ii, this_map in enumerate(surf_maps):
surf = this_map['surf']
map_data = this_map['data']
map_type = this_map['kind']
map_ch_names = this_map['ch_names']
if map_type == 'eeg':
pick = pick_types(evoked.info, meg=False, eeg=True)
else:
pick = pick_types(evoked.info, meg=True, eeg=False, ref_meg=False)
ch_names = [evoked.ch_names[k] for k in pick]
set_ch_names = set(ch_names)
set_map_ch_names = set(map_ch_names)
if set_ch_names != set_map_ch_names:
message = ['Channels in map and data do not match.']
diff = set_map_ch_names - set_ch_names
if len(diff):
message += ['%s not in data file. ' % list(diff)]
diff = set_ch_names - set_map_ch_names
if len(diff):
message += ['%s not in map file.' % list(diff)]
raise RuntimeError(' '.join(message))
data = np.dot(map_data, evoked.data[pick, time_idx])
x, y, z = surf['rr'].T
nn = surf['nn']
# make absolutely sure these are normalized for Mayavi
nn = nn / np.sum(nn * nn, axis=1)[:, np.newaxis]
# Make a solid surface
vlim = np.max(np.abs(data))
alpha = alphas[ii]
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'])
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
mlab.pipeline.surface(mesh, color=colors[ii], opacity=alpha)
# Now show our field pattern
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'],
scalars=data)
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
with warnings.catch_warnings(record=True): # traits
fsurf = mlab.pipeline.surface(mesh, vmin=-vlim, vmax=vlim)
fsurf.module_manager.scalar_lut_manager.lut.table = colormap
# And the field lines on top
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'],
scalars=data)
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
with warnings.catch_warnings(record=True): # traits
cont = mlab.pipeline.contour_surface(mesh, contours=21,
line_width=1.0,
vmin=-vlim, vmax=vlim,
opacity=alpha)
cont.module_manager.scalar_lut_manager.lut.table = colormap_lines
if '%' in time_label:
time_label %= (1e3 * evoked.times[time_idx])
mlab.text(0.01, 0.01, time_label, width=0.4)
mlab.view(10, 60)
return fig
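# Example (hedged sketch, not executed): assuming `evoked` is an Evoked
# instance and `maps` was obtained with make_field_map as noted in the
# docstring above, a field map at 100 ms could be rendered with:
#
#     fig = plot_evoked_field(evoked, maps, time=0.1)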
def _plot_mri_contours(mri_fname, surf_fnames, orientation='coronal',
slices=None, show=True):
"""Plot BEM contours on anatomical slices.
Parameters
----------
mri_fname : str
The name of the file containing anatomical data.
surf_fnames : list of str
The filenames for the BEM surfaces in the format
['inner_skull.surf', 'outer_skull.surf', 'outer_skin.surf'].
orientation : str
        'coronal' or 'axial' or 'sagittal'
slices : list of int
Slice indices.
show : bool
Call pyplot.show() at the end.
Returns
-------
fig : Instance of matplotlib.figure.Figure
The figure.
"""
import matplotlib.pyplot as plt
import nibabel as nib
if orientation not in ['coronal', 'axial', 'sagittal']:
raise ValueError("Orientation must be 'coronal', 'axial' or "
"'sagittal'. Got %s." % orientation)
# Load the T1 data
nim = nib.load(mri_fname)
data = nim.get_data()
affine = nim.get_affine()
n_sag, n_axi, n_cor = data.shape
orientation_name2axis = dict(sagittal=0, axial=1, coronal=2)
orientation_axis = orientation_name2axis[orientation]
if slices is None:
n_slices = data.shape[orientation_axis]
slices = np.linspace(0, n_slices, 12, endpoint=False).astype(np.int)
# create of list of surfaces
surfs = list()
trans = linalg.inv(affine)
# XXX : next line is a hack don't ask why
trans[:3, -1] = [n_sag // 2, n_axi // 2, n_cor // 2]
for surf_fname in surf_fnames:
surf = dict()
surf['rr'], surf['tris'] = read_surface(surf_fname)
# move back surface to MRI coordinate system
surf['rr'] = nib.affines.apply_affine(trans, surf['rr'])
surfs.append(surf)
fig, axs = _prepare_trellis(len(slices), 4)
for ax, sl in zip(axs, slices):
# adjust the orientations for good view
if orientation == 'coronal':
dat = data[:, :, sl].transpose()
elif orientation == 'axial':
dat = data[:, sl, :]
elif orientation == 'sagittal':
dat = data[sl, :, :]
# First plot the anatomical data
ax.imshow(dat, cmap=plt.cm.gray)
ax.axis('off')
# and then plot the contours on top
for surf in surfs:
if orientation == 'coronal':
ax.tricontour(surf['rr'][:, 0], surf['rr'][:, 1],
surf['tris'], surf['rr'][:, 2],
levels=[sl], colors='yellow', linewidths=2.0)
elif orientation == 'axial':
ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 0],
surf['tris'], surf['rr'][:, 1],
levels=[sl], colors='yellow', linewidths=2.0)
elif orientation == 'sagittal':
ax.tricontour(surf['rr'][:, 2], surf['rr'][:, 1],
surf['tris'], surf['rr'][:, 0],
levels=[sl], colors='yellow', linewidths=2.0)
if show:
plt.subplots_adjust(left=0., bottom=0., right=1., top=1., wspace=0.,
hspace=0.)
plt.show()
return fig
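# Example (hedged sketch, not executed): the file names below are hypothetical
# FreeSurfer outputs for a single subject.
#
#     fig = _plot_mri_contours('T1.mgz',
#                              ['inner_skull.surf', 'outer_skull.surf',
#                               'outer_skin.surf'],
#                              orientation='axial')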
def plot_trans(info, trans_fname='auto', subject=None, subjects_dir=None,
ch_type=None, source='bem'):
"""Plot MEG/EEG head surface and helmet in 3D.
Parameters
----------
info : dict
The measurement info.
trans_fname : str | 'auto'
The full path to the `*-trans.fif` file produced during
coregistration.
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT.
subjects_dir : str
The path to the freesurfer subjects reconstructions.
It corresponds to Freesurfer environment variable SUBJECTS_DIR.
ch_type : None | 'eeg' | 'meg'
If None, both the MEG helmet and EEG electrodes will be shown.
If 'meg', only the MEG helmet will be shown. If 'eeg', only the
EEG electrodes will be shown.
source : str
Type to load. Common choices would be `'bem'` or `'head'`. We first
try loading `'$SUBJECTS_DIR/$SUBJECT/bem/$SUBJECT-$SOURCE.fif'`, and
then look for `'$SUBJECT*$SOURCE.fif'` in the same directory. Defaults
to 'bem'. Note. For single layer bems it is recommended to use 'head'.
Returns
-------
fig : instance of mlab.Figure
The mayavi figure.
"""
if ch_type not in [None, 'eeg', 'meg']:
raise ValueError('Argument ch_type must be None | eeg | meg. Got %s.'
% ch_type)
if trans_fname == 'auto':
# let's try to do this in MRI coordinates so they're easy to plot
trans_fname = _find_trans(subject, subjects_dir)
trans = read_trans(trans_fname)
surfs = [get_head_surf(subject, source=source, subjects_dir=subjects_dir)]
if ch_type is None or ch_type == 'meg':
surfs.append(get_meg_helmet_surf(info, trans))
# Plot them
from mayavi import mlab
alphas = [1.0, 0.5]
colors = [(0.6, 0.6, 0.6), (0.0, 0.0, 0.6)]
fig = mlab.figure(bgcolor=(0.0, 0.0, 0.0), size=(600, 600))
for ii, surf in enumerate(surfs):
x, y, z = surf['rr'].T
nn = surf['nn']
# make absolutely sure these are normalized for Mayavi
nn = nn / np.sum(nn * nn, axis=1)[:, np.newaxis]
# Make a solid surface
alpha = alphas[ii]
with warnings.catch_warnings(record=True): # traits
mesh = mlab.pipeline.triangular_mesh_source(x, y, z, surf['tris'])
mesh.data.point_data.normals = nn
mesh.data.cell_data.normals = None
mlab.pipeline.surface(mesh, color=colors[ii], opacity=alpha)
if ch_type is None or ch_type == 'eeg':
eeg_locs = [l['eeg_loc'][:, 0] for l in info['chs']
if l['eeg_loc'] is not None]
if len(eeg_locs) > 0:
eeg_loc = np.array(eeg_locs)
# Transform EEG electrodes to MRI coordinates
eeg_loc = apply_trans(trans['trans'], eeg_loc)
with warnings.catch_warnings(record=True): # traits
mlab.points3d(eeg_loc[:, 0], eeg_loc[:, 1], eeg_loc[:, 2],
color=(1.0, 0.0, 0.0), scale_factor=0.005)
else:
warnings.warn('EEG electrode locations not found. '
'Cannot plot EEG electrodes.')
mlab.view(90, 90)
return fig
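# Example (hedged sketch, not executed): assuming `raw.info` holds the
# measurement info and a *-trans.fif file can be found automatically for
# subject 'sample', the coregistration could be inspected with:
#
#     fig = plot_trans(raw.info, trans_fname='auto', subject='sample',
#                      subjects_dir=subjects_dir, ch_type='meg')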
def plot_source_estimates(stc, subject=None, surface='inflated', hemi='lh',
colormap='hot', time_label='time=%0.2f ms',
smoothing_steps=10, fmin=5., fmid=10., fmax=15.,
transparent=True, alpha=1.0, time_viewer=False,
config_opts={}, subjects_dir=None, figure=None,
views='lat', colorbar=True):
"""Plot SourceEstimates with PySurfer
Note: PySurfer currently needs the SUBJECTS_DIR environment variable,
which will automatically be set by this function. Plotting multiple
SourceEstimates with different values for subjects_dir will cause
PySurfer to use the wrong FreeSurfer surfaces when using methods of
the returned Brain object. It is therefore recommended to set the
SUBJECTS_DIR environment variable or always use the same value for
subjects_dir (within the same Python session).
Parameters
----------
stc : SourceEstimates
The source estimates to plot.
subject : str | None
The subject name corresponding to FreeSurfer environment
variable SUBJECT. If None stc.subject will be used. If that
is None, the environment will be used.
surface : str
The type of surface (inflated, white etc.).
hemi : str, 'lh' | 'rh' | 'split' | 'both'
The hemisphere to display. Using 'both' or 'split' requires
PySurfer version 0.4 or above.
colormap : str
The type of colormap to use.
time_label : str
How to print info about the time instant visualized.
smoothing_steps : int
The amount of smoothing
fmin : float
The minimum value to display.
fmid : float
The middle value on the colormap.
fmax : float
The maximum value for the colormap.
transparent : bool
If True, use a linear transparency between fmin and fmid.
alpha : float
Alpha value to apply globally to the overlay.
time_viewer : bool
Display time viewer GUI.
config_opts : dict
Keyword arguments for Brain initialization.
See pysurfer.viz.Brain.
subjects_dir : str
The path to the freesurfer subjects reconstructions.
It corresponds to Freesurfer environment variable SUBJECTS_DIR.
figure : instance of mayavi.core.scene.Scene | list | int | None
If None, a new figure will be created. If multiple views or a
split view is requested, this must be a list of the appropriate
length. If int is provided it will be used to identify the Mayavi
figure by it's id or create a new figure with the given id.
views : str | list
View to use. See surfer.Brain().
colorbar : bool
If True, display colorbar on scene.
Returns
-------
brain : Brain
A instance of surfer.viz.Brain from PySurfer.
"""
import surfer
from surfer import Brain, TimeViewer
if hemi in ['split', 'both'] and LooseVersion(surfer.__version__) < '0.4':
raise NotImplementedError('hemi type "%s" not supported with your '
'version of pysurfer. Please upgrade to '
'version 0.4 or higher.' % hemi)
try:
import mayavi
from mayavi import mlab
except ImportError:
from enthought import mayavi
from enthought.mayavi import mlab
# import here to avoid circular import problem
from ..source_estimate import SourceEstimate
if not isinstance(stc, SourceEstimate):
raise ValueError('stc has to be a surface source estimate')
if hemi not in ['lh', 'rh', 'split', 'both']:
raise ValueError('hemi has to be either "lh", "rh", "split", '
'or "both"')
n_split = 2 if hemi == 'split' else 1
n_views = 1 if isinstance(views, string_types) else len(views)
if figure is not None:
# use figure with specified id or create new figure
if isinstance(figure, int):
figure = mlab.figure(figure, size=(600, 600))
# make sure it is of the correct type
if not isinstance(figure, list):
figure = [figure]
if not all([isinstance(f, mayavi.core.scene.Scene) for f in figure]):
raise TypeError('figure must be a mayavi scene or list of scenes')
# make sure we have the right number of figures
n_fig = len(figure)
if not n_fig == n_split * n_views:
            raise RuntimeError('`figure` must be a list with the same '
                               'number of elements as PySurfer plots that '
                               'will be created (%s)' % (n_split * n_views))
subjects_dir = get_subjects_dir(subjects_dir=subjects_dir)
subject = _check_subject(stc.subject, subject, False)
if subject is None:
if 'SUBJECT' in os.environ:
subject = os.environ['SUBJECT']
else:
raise ValueError('SUBJECT environment variable not set')
if hemi in ['both', 'split']:
hemis = ['lh', 'rh']
else:
hemis = [hemi]
title = subject if len(hemis) > 1 else '%s - %s' % (subject, hemis[0])
args = inspect.getargspec(Brain.__init__)[0]
kwargs = dict(title=title, figure=figure, config_opts=config_opts,
subjects_dir=subjects_dir)
if 'views' in args:
kwargs['views'] = views
else:
logger.info('PySurfer does not support "views" argument, please '
'consider updating to a newer version (0.4 or later)')
with warnings.catch_warnings(record=True): # traits warnings
brain = Brain(subject, hemi, surface, **kwargs)
for hemi in hemis:
hemi_idx = 0 if hemi == 'lh' else 1
if hemi_idx == 0:
data = stc.data[:len(stc.vertno[0])]
else:
data = stc.data[len(stc.vertno[0]):]
vertices = stc.vertno[hemi_idx]
time = 1e3 * stc.times
with warnings.catch_warnings(record=True): # traits warnings
brain.add_data(data, colormap=colormap, vertices=vertices,
smoothing_steps=smoothing_steps, time=time,
time_label=time_label, alpha=alpha, hemi=hemi,
colorbar=colorbar)
# scale colormap and set time (index) to display
brain.scale_data_colormap(fmin=fmin, fmid=fmid, fmax=fmax,
transparent=transparent)
if time_viewer:
TimeViewer(brain)
return brain
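# Example (hedged sketch, not executed): assuming `stc` is a surface
# SourceEstimate for subject 'sample', it could be rendered on the inflated
# surface of both hemispheres with:
#
#     brain = plot_source_estimates(stc, subject='sample', surface='inflated',
#                                   hemi='both', time_viewer=True)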
def plot_sparse_source_estimates(src, stcs, colors=None, linewidth=2,
fontsize=18, bgcolor=(.05, 0, .1),
opacity=0.2, brain_color=(0.7,) * 3,
show=True, high_resolution=False,
fig_name=None, fig_number=None, labels=None,
modes=['cone', 'sphere'],
scale_factors=[1, 0.6],
verbose=None, **kwargs):
"""Plot source estimates obtained with sparse solver
Active dipoles are represented in a "Glass" brain.
If the same source is active in multiple source estimates it is
displayed with a sphere otherwise with a cone in 3D.
Parameters
----------
src : dict
The source space.
stcs : instance of SourceEstimate or list of instances of SourceEstimate
The source estimates (up to 3).
colors : list
List of colors
linewidth : int
Line width in 2D plot.
fontsize : int
Font size.
bgcolor : tuple of length 3
Background color in 3D.
opacity : float in [0, 1]
Opacity of brain mesh.
brain_color : tuple of length 3
Brain color.
show : bool
Show figures if True.
fig_name :
Mayavi figure name.
fig_number :
Matplotlib figure number.
labels : ndarray or list of ndarrays
Labels to show sources in clusters. Sources with the same
label and the waveforms within each cluster are presented in
the same color. labels should be a list of ndarrays when
stcs is a list ie. one label for each stc.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
kwargs : kwargs
Keyword arguments to pass to mlab.triangular_mesh.
"""
if not isinstance(stcs, list):
stcs = [stcs]
if labels is not None and not isinstance(labels, list):
labels = [labels]
if colors is None:
colors = COLORS
linestyles = ['-', '--', ':']
# Show 3D
lh_points = src[0]['rr']
rh_points = src[1]['rr']
points = np.r_[lh_points, rh_points]
lh_normals = src[0]['nn']
rh_normals = src[1]['nn']
normals = np.r_[lh_normals, rh_normals]
if high_resolution:
use_lh_faces = src[0]['tris']
use_rh_faces = src[1]['tris']
else:
use_lh_faces = src[0]['use_tris']
use_rh_faces = src[1]['use_tris']
use_faces = np.r_[use_lh_faces, lh_points.shape[0] + use_rh_faces]
points *= 170
vertnos = [np.r_[stc.lh_vertno, lh_points.shape[0] + stc.rh_vertno]
for stc in stcs]
unique_vertnos = np.unique(np.concatenate(vertnos).ravel())
try:
from mayavi import mlab
except ImportError:
from enthought.mayavi import mlab
from matplotlib.colors import ColorConverter
color_converter = ColorConverter()
f = mlab.figure(figure=fig_name, bgcolor=bgcolor, size=(600, 600))
mlab.clf()
if mlab.options.backend != 'test':
f.scene.disable_render = True
with warnings.catch_warnings(record=True): # traits warnings
surface = mlab.triangular_mesh(points[:, 0], points[:, 1],
points[:, 2], use_faces,
color=brain_color,
opacity=opacity, **kwargs)
import matplotlib.pyplot as plt
# Show time courses
plt.figure(fig_number)
plt.clf()
colors = cycle(colors)
logger.info("Total number of active sources: %d" % len(unique_vertnos))
if labels is not None:
colors = [advance_iterator(colors) for _ in
range(np.unique(np.concatenate(labels).ravel()).size)]
for idx, v in enumerate(unique_vertnos):
# get indices of stcs it belongs to
ind = [k for k, vertno in enumerate(vertnos) if v in vertno]
is_common = len(ind) > 1
if labels is None:
c = advance_iterator(colors)
else:
# if vertex is in different stcs than take label from first one
c = colors[labels[ind[0]][vertnos[ind[0]] == v]]
mode = modes[1] if is_common else modes[0]
scale_factor = scale_factors[1] if is_common else scale_factors[0]
if (isinstance(scale_factor, (np.ndarray, list, tuple))
and len(unique_vertnos) == len(scale_factor)):
scale_factor = scale_factor[idx]
x, y, z = points[v]
nx, ny, nz = normals[v]
with warnings.catch_warnings(record=True): # traits
mlab.quiver3d(x, y, z, nx, ny, nz, color=color_converter.to_rgb(c),
mode=mode, scale_factor=scale_factor)
for k in ind:
vertno = vertnos[k]
mask = (vertno == v)
assert np.sum(mask) == 1
linestyle = linestyles[k]
            plt.plot(1e3 * stcs[k].times, 1e9 * stcs[k].data[mask].ravel(), c=c,
linewidth=linewidth, linestyle=linestyle)
plt.xlabel('Time (ms)', fontsize=18)
plt.ylabel('Source amplitude (nAm)', fontsize=18)
if fig_name is not None:
plt.title(fig_name)
if show:
plt.show()
surface.actor.property.backface_culling = True
surface.actor.property.shading = True
return surface
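# Example (hedged sketch, not executed): assuming `src` is the source space
# used by a sparse solver and `stcs` the resulting source estimates, the
# active dipoles and their time courses could be shown with:
#
#     surface = plot_sparse_source_estimates(src, stcs, opacity=0.1,
#                                            fig_name='Sparse estimates')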
| bsd-2-clause |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/nltk/probability.py | 12 | 81647 | # -*- coding: utf-8 -*-
# Natural Language Toolkit: Probability and Statistics
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au> (additions)
# Trevor Cohn <tacohn@cs.mu.oz.au> (additions)
# Peter Ljunglöf <peter.ljunglof@heatherleaf.se> (additions)
# Liang Dong <ldong@clemson.edu> (additions)
# Geoffrey Sampson <sampson@cantab.net> (additions)
#
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
Classes for representing and processing probabilistic information.
The ``FreqDist`` class is used to encode "frequency distributions",
which count the number of times that each outcome of an experiment
occurs.
The ``ProbDistI`` class defines a standard interface for "probability
distributions", which encode the probability of each outcome for an
experiment. There are two types of probability distribution:
- "derived probability distributions" are created from frequency
distributions. They attempt to model the probability distribution
that generated the frequency distribution.
- "analytic probability distributions" are created directly from
parameters (such as variance).
The ``ConditionalFreqDist`` class and ``ConditionalProbDistI`` interface
are used to encode conditional distributions. Conditional probability
distributions can be derived or analytic; but currently the only
implementation of the ``ConditionalProbDistI`` interface is
``ConditionalProbDist``, a derived distribution.
"""
_NINF = float('-1e300')
import math
import random
import warnings
from operator import itemgetter
from itertools import imap, islice
from collections import defaultdict
##//////////////////////////////////////////////////////
## Frequency Distributions
##//////////////////////////////////////////////////////
# [SB] inherit from defaultdict?
# [SB] for NLTK 3.0, inherit from collections.Counter?
class FreqDist(dict):
"""
A frequency distribution for the outcomes of an experiment. A
frequency distribution records the number of times each outcome of
an experiment has occurred. For example, a frequency distribution
could be used to record the frequency of each word type in a
document. Formally, a frequency distribution can be defined as a
function mapping from each sample to the number of times that
sample occurred as an outcome.
Frequency distributions are generally constructed by running a
number of experiments, and incrementing the count for a sample
every time it is an outcome of an experiment. For example, the
following code will produce a frequency distribution that encodes
how often each word occurs in a text:
>>> from nltk.tokenize import word_tokenize
>>> from nltk.probability import FreqDist
>>> sent = 'This is an example sentence'
>>> fdist = FreqDist()
>>> for word in word_tokenize(sent):
... fdist.inc(word.lower())
An equivalent way to do this is with the initializer:
>>> fdist = FreqDist(word.lower() for word in word_tokenize(sent))
"""
def __init__(self, samples=None):
"""
Construct a new frequency distribution. If ``samples`` is
given, then the frequency distribution will be initialized
with the count of each object in ``samples``; otherwise, it
will be initialized to be empty.
In particular, ``FreqDist()`` returns an empty frequency
distribution; and ``FreqDist(samples)`` first creates an empty
frequency distribution, and then calls ``update`` with the
list ``samples``.
:param samples: The samples to initialize the frequency
distribution with.
:type samples: Sequence
"""
dict.__init__(self)
self._N = 0
self._reset_caches()
if samples:
self.update(samples)
def inc(self, sample, count=1):
"""
Increment this FreqDist's count for the given sample.
:param sample: The sample whose count should be incremented.
:type sample: any
:param count: The amount to increment the sample's count by.
:type count: int
:rtype: None
:raise NotImplementedError: If ``sample`` is not a
supported sample type.
"""
if count == 0: return
self[sample] = self.get(sample,0) + count
def __setitem__(self, sample, value):
"""
Set this FreqDist's count for the given sample.
:param sample: The sample whose count should be incremented.
:type sample: any hashable object
:param count: The new value for the sample's count
:type count: int
:rtype: None
:raise TypeError: If ``sample`` is not a supported sample type.
"""
self._N += (value - self.get(sample, 0))
dict.__setitem__(self, sample, value)
# Invalidate the caches
self._reset_caches()
def N(self):
"""
Return the total number of sample outcomes that have been
recorded by this FreqDist. For the number of unique
sample values (or bins) with counts greater than zero, use
``FreqDist.B()``.
:rtype: int
"""
return self._N
def B(self):
"""
Return the total number of sample values (or "bins") that
have counts greater than zero. For the total
number of sample outcomes recorded, use ``FreqDist.N()``.
(FreqDist.B() is the same as len(FreqDist).)
:rtype: int
"""
return len(self)
def samples(self):
"""
Return a list of all samples that have been recorded as
outcomes by this frequency distribution. Use ``fd[sample]``
to determine the count for each sample.
:rtype: list
"""
return self.keys()
def hapaxes(self):
"""
Return a list of all samples that occur once (hapax legomena)
:rtype: list
"""
return [item for item in self if self[item] == 1]
def Nr(self, r, bins=None):
"""
Return the number of samples with count r.
:type r: int
:param r: A sample count.
:type bins: int
:param bins: The number of possible sample outcomes. ``bins``
is used to calculate Nr(0). In particular, Nr(0) is
``bins-self.B()``. If ``bins`` is not specified, it
defaults to ``self.B()`` (so Nr(0) will be 0).
:rtype: int
"""
if r < 0: raise IndexError, 'FreqDist.Nr(): r must be non-negative'
# Special case for Nr(0):
if r == 0:
if bins is None: return 0
else: return bins-self.B()
# We have to search the entire distribution to find Nr. Since
# this is an expensive operation, and is likely to be used
# repeatedly, cache the results.
if self._Nr_cache is None:
self._cache_Nr_values()
if r >= len(self._Nr_cache): return 0
return self._Nr_cache[r]
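# For example (illustrative counts): if the recorded counts are
# {'a': 1, 'b': 1, 'c': 3}, then Nr(1) == 2, Nr(2) == 0, Nr(3) == 1,
# and Nr(0, bins=5) == 5 - 3 == 2.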
def _cache_Nr_values(self):
Nr = [0]
for sample in self:
c = self.get(sample, 0)
if c >= len(Nr):
Nr += [0]*(c+1-len(Nr))
Nr[c] += 1
self._Nr_cache = Nr
def _cumulative_frequencies(self, samples=None):
"""
Return the cumulative frequencies of the specified samples.
If no samples are specified, all counts are returned, starting
with the largest.
:param samples: the samples whose frequencies should be returned.
:type sample: any
:rtype: list(float)
"""
cf = 0.0
if not samples:
samples = self.keys()
for sample in samples:
cf += self[sample]
yield cf
# slightly odd nomenclature freq() if FreqDist does counts and ProbDist does probs,
# here, freq() does probs
def freq(self, sample):
"""
Return the frequency of a given sample. The frequency of a
sample is defined as the count of that sample divided by the
total number of sample outcomes that have been recorded by
this FreqDist. The count of a sample is defined as the
number of times that sample outcome was recorded by this
FreqDist. Frequencies are always real numbers in the range
[0, 1].
:param sample: the sample whose frequency
should be returned.
:type sample: any
:rtype: float
"""
if self._N == 0:
return 0
return float(self[sample]) / self._N
def max(self):
"""
Return the sample with the greatest number of outcomes in this
frequency distribution. If two or more samples have the same
number of outcomes, return one of them; which sample is
returned is undefined. If no outcomes have occurred in this
frequency distribution, a ValueError is raised.
:return: The sample with the maximum number of outcomes in this
frequency distribution.
:rtype: any or None
"""
if self._max_cache is None:
if len(self) == 0:
raise ValueError('A FreqDist must have at least one sample before max is defined.')
self._max_cache = max([(a,b) for (b,a) in self.items()])[1]
return self._max_cache
def plot(self, *args, **kwargs):
"""
Plot samples from the frequency distribution
displaying the most frequent sample first. If an integer
parameter is supplied, stop after this many samples have been
plotted. If two integer parameters m, n are supplied, plot a
subset of the samples, beginning with m and stopping at n-1.
For a cumulative plot, specify cumulative=True.
(Requires Matplotlib to be installed.)
:param title: The title for the graph
:type title: str
:param cumulative: A flag to specify whether the plot is cumulative (default = False)
:type cumulative: bool
"""
try:
import pylab
except ImportError:
raise ValueError('The plot function requires the matplotlib package (aka pylab). '
'See http://matplotlib.sourceforge.net/')
if len(args) == 0:
args = [len(self)]
samples = list(islice(self, *args))
cumulative = _get_kwarg(kwargs, 'cumulative', False)
if cumulative:
freqs = list(self._cumulative_frequencies(samples))
ylabel = "Cumulative Counts"
else:
freqs = [self[sample] for sample in samples]
ylabel = "Counts"
# percents = [f * 100 for f in freqs] only in ProbDist?
pylab.grid(True, color="silver")
if not "linewidth" in kwargs:
kwargs["linewidth"] = 2
if "title" in kwargs:
pylab.title(kwargs["title"])
del kwargs["title"]
pylab.plot(freqs, **kwargs)
pylab.xticks(range(len(samples)), [unicode(s) for s in samples], rotation=90)
pylab.xlabel("Samples")
pylab.ylabel(ylabel)
pylab.show()
def tabulate(self, *args, **kwargs):
"""
Tabulate the given samples from the frequency distribution,
displaying the most frequent sample first. If an integer
parameter is supplied, stop after this many samples have been
tabulated. If two integer parameters m, n are supplied, tabulate a
subset of the samples, beginning with m and stopping at n-1.
For cumulative counts, specify cumulative=True.
:param samples: The samples to tabulate (default is all samples)
:type samples: list
"""
if len(args) == 0:
args = [len(self)]
samples = list(islice(self, *args))
cumulative = _get_kwarg(kwargs, 'cumulative', False)
if cumulative:
freqs = list(self._cumulative_frequencies(samples))
else:
freqs = [self[sample] for sample in samples]
# percents = [f * 100 for f in freqs] only in ProbDist?
for i in range(len(samples)):
print "%4s" % str(samples[i]),
print
for i in range(len(samples)):
print "%4d" % freqs[i],
print
def _sort_keys_by_value(self):
if not self._item_cache:
self._item_cache = sorted(dict.items(self), key=lambda x:(-x[1], x[0]))
def keys(self):
"""
Return the samples sorted in decreasing order of frequency.
:rtype: list(any)
"""
self._sort_keys_by_value()
return map(itemgetter(0), self._item_cache)
def values(self):
"""
Return the sample frequencies (counts), sorted in decreasing order.
:rtype: list(any)
"""
self._sort_keys_by_value()
return map(itemgetter(1), self._item_cache)
def items(self):
"""
Return the items sorted in decreasing order of frequency.
:rtype: list(tuple)
"""
self._sort_keys_by_value()
return self._item_cache[:]
def __iter__(self):
"""
Return the samples sorted in decreasing order of frequency.
:rtype: iter
"""
return iter(self.keys())
def iterkeys(self):
"""
Return the samples sorted in decreasing order of frequency.
:rtype: iter
"""
return iter(self.keys())
def itervalues(self):
"""
Return the values sorted in decreasing order.
:rtype: iter
"""
return iter(self.values())
def iteritems(self):
"""
Return the items sorted in decreasing order of frequency.
:rtype: iter of any
"""
self._sort_keys_by_value()
return iter(self._item_cache)
def copy(self):
"""
Create a copy of this frequency distribution.
:rtype: FreqDist
"""
return self.__class__(self)
def update(self, samples):
"""
Update the frequency distribution with the provided list of samples.
This is a faster way to add multiple samples to the distribution.
:param samples: The samples to add.
:type samples: list
"""
try:
sample_iter = samples.iteritems()
except AttributeError:
sample_iter = imap(lambda x: (x,1), samples)
for sample, count in sample_iter:
self.inc(sample, count=count)
def pop(self, sample):
self._N -= self.get(sample, 0)
self._reset_caches()
return dict.pop(self, sample)
def popitem(self):
sample, count = dict.popitem(self)
self._N -= count
self._reset_caches()
return (sample, count)
def clear(self):
self._N = 0
self._reset_caches()
dict.clear(self)
def _reset_caches(self):
self._Nr_cache = None
self._max_cache = None
self._item_cache = None
def __add__(self, other):
clone = self.copy()
clone.update(other)
return clone
def __le__(self, other):
if not isinstance(other, FreqDist): return False
return set(self).issubset(other) and all(self[key] <= other[key] for key in self)
def __lt__(self, other):
if not isinstance(other, FreqDist): return False
return self <= other and self != other
def __ge__(self, other):
if not isinstance(other, FreqDist): return False
return other <= self
def __gt__(self, other):
if not isinstance(other, FreqDist): return False
return other < self
def __repr__(self):
"""
Return a string representation of this FreqDist.
:rtype: string
"""
return '<FreqDist with %d samples and %d outcomes>' % (len(self), self.N())
def __str__(self):
"""
Return a string representation of this FreqDist.
:rtype: string
"""
items = ['%r: %r' % (s, self[s]) for s in self.keys()[:10]]
if len(self) > 10:
items.append('...')
return '<FreqDist: %s>' % ', '.join(items)
def __getitem__(self, sample):
return self.get(sample, 0)
##//////////////////////////////////////////////////////
## Probability Distributions
##//////////////////////////////////////////////////////
class ProbDistI(object):
"""
A probability distribution for the outcomes of an experiment. A
probability distribution specifies how likely it is that an
experiment will have any given outcome. For example, a
probability distribution could be used to predict the probability
that a token in a document will have a given type. Formally, a
probability distribution can be defined as a function mapping from
samples to nonnegative real numbers, such that the sum of every
number in the function's range is 1.0. A ``ProbDist`` is often
used to model the probability distribution of the experiment used
to generate a frequency distribution.
"""
SUM_TO_ONE = True
"""True if the probabilities of the samples in this probability
distribution will always sum to one."""
def __init__(self):
if self.__class__ == ProbDistI:
raise NotImplementedError("Interfaces can't be instantiated")
def prob(self, sample):
"""
Return the probability for a given sample. Probabilities
are always real numbers in the range [0, 1].
:param sample: The sample whose probability
should be returned.
:type sample: any
:rtype: float
"""
raise NotImplementedError()
def logprob(self, sample):
"""
Return the base 2 logarithm of the probability for a given sample.
:param sample: The sample whose probability
should be returned.
:type sample: any
:rtype: float
"""
# Default definition, in terms of prob()
p = self.prob(sample)
if p == 0:
# Use some approximation to infinity. What this does
# depends on your system's float implementation.
return _NINF
else:
return math.log(p, 2)
def max(self):
"""
Return the sample with the greatest probability. If two or
more samples have the same probability, return one of them;
which sample is returned is undefined.
:rtype: any
"""
raise NotImplementedError()
def samples(self):
"""
Return a list of all samples that have nonzero probabilities.
Use ``prob`` to find the probability of each sample.
:rtype: list
"""
raise NotImplementedError()
# cf self.SUM_TO_ONE
def discount(self):
"""
Return the ratio by which counts are discounted on average: c*/c
:rtype: float
"""
return 0.0
# Subclasses should define more efficient implementations of this,
# where possible.
def generate(self):
"""
Return a randomly selected sample from this probability distribution.
The probability of returning each sample ``samp`` is equal to
``self.prob(samp)``.
"""
p = random.random()
for sample in self.samples():
p -= self.prob(sample)
if p <= 0: return sample
# allow for some rounding error:
if p < .0001:
return sample
# we *should* never get here
if self.SUM_TO_ONE:
warnings.warn("Probability distribution %r sums to %r; generate()"
" is returning an arbitrary sample." % (self, 1-p))
return random.choice(list(self.samples()))
class UniformProbDist(ProbDistI):
"""
A probability distribution that assigns equal probability to each
sample in a given set; and a zero probability to all other
samples.
"""
def __init__(self, samples):
"""
Construct a new uniform probability distribution, that assigns
equal probability to each sample in ``samples``.
:param samples: The samples that should be given uniform
probability.
:type samples: list
:raise ValueError: If ``samples`` is empty.
"""
if len(samples) == 0:
raise ValueError('A Uniform probability distribution must '+
'have at least one sample.')
self._sampleset = set(samples)
self._prob = 1.0/len(self._sampleset)
self._samples = list(self._sampleset)
def prob(self, sample):
if sample in self._sampleset: return self._prob
else: return 0
def max(self): return self._samples[0]
def samples(self): return self._samples
def __repr__(self):
return '<UniformProbDist with %d samples>' % len(self._sampleset)
class DictionaryProbDist(ProbDistI):
"""
A probability distribution whose probabilities are directly
specified by a given dictionary. The given dictionary maps
samples to probabilities.
"""
def __init__(self, prob_dict=None, log=False, normalize=False):
"""
Construct a new probability distribution from the given
dictionary, which maps values to probabilities (or to log
probabilities, if ``log`` is true). If ``normalize`` is
true, then the probability values are scaled by a constant
factor such that they sum to 1.
If called without arguments, the resulting probability
distribution assigns zero probability to all values.
"""
if prob_dict is None:
self._prob_dict = {}
else:
self._prob_dict = prob_dict.copy()
self._log = log
# Normalize the distribution, if requested.
if normalize:
if log:
value_sum = sum_logs(self._prob_dict.values())
if value_sum <= _NINF:
logp = math.log(1.0/len(prob_dict), 2)
for x in prob_dict:
self._prob_dict[x] = logp
else:
for (x, p) in self._prob_dict.items():
self._prob_dict[x] -= value_sum
else:
value_sum = sum(self._prob_dict.values())
if value_sum == 0:
p = 1.0/len(prob_dict)
for x in prob_dict:
self._prob_dict[x] = p
else:
norm_factor = 1.0/value_sum
for (x, p) in self._prob_dict.items():
self._prob_dict[x] *= norm_factor
def prob(self, sample):
if self._log:
if sample not in self._prob_dict: return 0
else: return 2**(self._prob_dict[sample])
else:
return self._prob_dict.get(sample, 0)
def logprob(self, sample):
if self._log:
return self._prob_dict.get(sample, _NINF)
else:
if sample not in self._prob_dict: return _NINF
elif self._prob_dict[sample] == 0: return _NINF
else: return math.log(self._prob_dict[sample], 2)
def max(self):
if not hasattr(self, '_max'):
self._max = max((p,v) for (v,p) in self._prob_dict.items())[1]
return self._max
def samples(self):
return self._prob_dict.keys()
def __repr__(self):
return '<ProbDist with %d samples>' % len(self._prob_dict)
class MLEProbDist(ProbDistI):
"""
The maximum likelihood estimate for the probability distribution
of the experiment used to generate a frequency distribution. The
"maximum likelihood estimate" approximates the probability of
each sample as the frequency of that sample in the frequency
distribution.
"""
def __init__(self, freqdist, bins=None):
"""
Use the maximum likelihood estimate to create a probability
distribution for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
"""
self._freqdist = freqdist
def freqdist(self):
"""
Return the frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._freqdist
def prob(self, sample):
return self._freqdist.freq(sample)
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
return '<MLEProbDist based on %d samples>' % self._freqdist.N()
class LidstoneProbDist(ProbDistI):
"""
The Lidstone estimate for the probability distribution of the
experiment used to generate a frequency distribution. The
"Lidstone estimate" is paramaterized by a real number *gamma*,
which typically ranges from 0 to 1. The Lidstone estimate
approximates the probability of a sample with count *c* from an
experiment with *N* outcomes and *B* bins as
``(c+gamma)/(N+B*gamma)``. This is equivalent to adding
*gamma* to the count for each bin, and taking the maximum
likelihood estimate of the resulting frequency distribution.
"""
SUM_TO_ONE = False
def __init__(self, freqdist, gamma, bins=None):
"""
Use the Lidstone estimate to create a probability distribution
for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type gamma: float
:param gamma: A real number used to paramaterize the
estimate. The Lidstone estimate is equivalent to adding
*gamma* to the count for each bin, and taking the
maximum likelihood estimate of the resulting frequency
distribution.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
if (bins == 0) or (bins is None and freqdist.N() == 0):
name = self.__class__.__name__[:-8]
raise ValueError('A %s probability distribution ' % name +
'must have at least one bin.')
if (bins is not None) and (bins < freqdist.B()):
name = self.__class__.__name__[:-8]
raise ValueError('\nThe number of bins in a %s distribution ' % name +
'(%d) must be greater than or equal to\n' % bins +
'the number of bins in the FreqDist used ' +
'to create it (%d).' % freqdist.B())
self._freqdist = freqdist
self._gamma = float(gamma)
self._N = self._freqdist.N()
if bins is None: bins = freqdist.B()
self._bins = bins
self._divisor = self._N + bins * gamma
if self._divisor == 0.0:
# In extreme cases we force the probability to be 0,
# which it will be, since the count will be 0:
self._gamma = 0
self._divisor = 1
def freqdist(self):
"""
Return the frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._freqdist
def prob(self, sample):
c = self._freqdist[sample]
return (c + self._gamma) / self._divisor
def max(self):
# For Lidstone distributions, probability is monotonic with
# frequency, so the most probable sample is the one that
# occurs most frequently.
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def discount(self):
gb = self._gamma * self._bins
return gb / (self._N + gb)
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<LidstoneProbDist based on %d samples>' % self._freqdist.N()
class LaplaceProbDist(LidstoneProbDist):
"""
The Laplace estimate for the probability distribution of the
experiment used to generate a frequency distribution. The
"Laplace estimate" approximates the probability of a sample with
count *c* from an experiment with *N* outcomes and *B* bins as
*(c+1)/(N+B)*. This is equivalent to adding one to the count for
each bin, and taking the maximum likelihood estimate of the
resulting frequency distribution.
"""
def __init__(self, freqdist, bins=None):
"""
Use the Laplace estimate to create a probability distribution
for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
LidstoneProbDist.__init__(self, freqdist, 1, bins)
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
return '<LaplaceProbDist based on %d samples>' % self._freqdist.N()
class ELEProbDist(LidstoneProbDist):
"""
The expected likelihood estimate for the probability distribution
of the experiment used to generate a frequency distribution. The
"expected likelihood estimate" approximates the probability of a
sample with count *c* from an experiment with *N* outcomes and
*B* bins as *(c+0.5)/(N+B/2)*. This is equivalent to adding 0.5
to the count for each bin, and taking the maximum likelihood
estimate of the resulting frequency distribution.
"""
def __init__(self, freqdist, bins=None):
"""
Use the expected likelihood estimate to create a probability
distribution for the experiment used to generate ``freqdist``.
:type freqdist: FreqDist
:param freqdist: The frequency distribution that the
probability estimates should be based on.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
LidstoneProbDist.__init__(self, freqdist, 0.5, bins)
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<ELEProbDist based on %d samples>' % self._freqdist.N()
class HeldoutProbDist(ProbDistI):
"""
The heldout estimate for the probability distribution of the
experiment used to generate two frequency distributions. These
two frequency distributions are called the "heldout frequency
distribution" and the "base frequency distribution." The
"heldout estimate" uses uses the "heldout frequency
distribution" to predict the probability of each sample, given its
frequency in the "base frequency distribution".
In particular, the heldout estimate approximates the probability
for a sample that occurs *r* times in the base distribution as
the average frequency in the heldout distribution of all samples
that occur *r* times in the base distribution.
This average frequency is *Tr[r]/(Nr[r].N)*, where:
- *Tr[r]* is the total count in the heldout distribution for
all samples that occur *r* times in the base distribution.
- *Nr[r]* is the number of samples that occur *r* times in
the base distribution.
- *N* is the number of outcomes recorded by the heldout
frequency distribution.
In order to increase the efficiency of the ``prob`` member
function, *Tr[r]/(Nr[r].N)* is precomputed for each value of *r*
when the ``HeldoutProbDist`` is created.
:type _estimate: list(float)
:ivar _estimate: A list mapping from *r*, the number of
times that a sample occurs in the base distribution, to the
probability estimate for that sample. ``_estimate[r]`` is
calculated by finding the average frequency in the heldout
distribution of all samples that occur *r* times in the base
distribution. In particular, ``_estimate[r]`` =
*Tr[r]/(Nr[r].N)*.
:type _max_r: int
:ivar _max_r: The maximum number of times that any sample occurs
in the base distribution. ``_max_r`` is used to decide how
large ``_estimate`` must be.
"""
SUM_TO_ONE = False
def __init__(self, base_fdist, heldout_fdist, bins=None):
"""
Use the heldout estimate to create a probability distribution
for the experiment used to generate ``base_fdist`` and
``heldout_fdist``.
:type base_fdist: FreqDist
:param base_fdist: The base frequency distribution.
:type heldout_fdist: FreqDist
:param heldout_fdist: The heldout frequency distribution.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
self._base_fdist = base_fdist
self._heldout_fdist = heldout_fdist
# The max number of times any sample occurs in base_fdist.
self._max_r = base_fdist[base_fdist.max()]
# Calculate Tr, Nr, and N.
Tr = self._calculate_Tr()
Nr = [base_fdist.Nr(r, bins) for r in range(self._max_r+1)]
N = heldout_fdist.N()
# Use Tr, Nr, and N to compute the probability estimate for
# each value of r.
self._estimate = self._calculate_estimate(Tr, Nr, N)
def _calculate_Tr(self):
"""
Return the list *Tr*, where *Tr[r]* is the total count in
``heldout_fdist`` for all samples that occur *r*
times in ``base_fdist``.
:rtype: list(float)
"""
Tr = [0.0] * (self._max_r+1)
for sample in self._heldout_fdist:
r = self._base_fdist[sample]
Tr[r] += self._heldout_fdist[sample]
return Tr
def _calculate_estimate(self, Tr, Nr, N):
"""
Return the list *estimate*, where *estimate[r]* is the probability
estimate for any sample that occurs *r* times in the base frequency
distribution. In particular, *estimate[r]* is *Tr[r]/(Nr[r].N)*.
In the special case that *Nr[r]=0*, *estimate[r]* will never be used;
so we define *estimate[r]=None* for those cases.
:rtype: list(float)
:type Tr: list(float)
:param Tr: the list *Tr*, where *Tr[r]* is the total count in
the heldout distribution for all samples that occur *r*
times in base distribution.
:type Nr: list(float)
:param Nr: The list *Nr*, where *Nr[r]* is the number of
samples that occur *r* times in the base distribution.
:type N: int
:param N: The total number of outcomes recorded by the heldout
frequency distribution.
"""
estimate = []
for r in range(self._max_r+1):
if Nr[r] == 0: estimate.append(None)
else: estimate.append(Tr[r]/(Nr[r]*N))
return estimate
def base_fdist(self):
"""
Return the base frequency distribution that this probability
distribution is based on.
:rtype: FreqDist
"""
return self._base_fdist
def heldout_fdist(self):
"""
Return the heldout frequency distribution that this
probability distribution is based on.
:rtype: FreqDist
"""
return self._heldout_fdist
def samples(self):
return self._base_fdist.keys()
def prob(self, sample):
# Use our precomputed probability estimate.
r = self._base_fdist[sample]
return self._estimate[r]
def max(self):
# Note: the Heldout estimation is *not* necessarily monotonic;
# so this implementation is currently broken. However, it
# should give the right answer *most* of the time. :)
return self._base_fdist.max()
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
:rtype: str
:return: A string representation of this ``ProbDist``.
"""
s = '<HeldoutProbDist: %d base samples; %d heldout samples>'
return s % (self._base_fdist.N(), self._heldout_fdist.N())
class CrossValidationProbDist(ProbDistI):
"""
The cross-validation estimate for the probability distribution of
the experiment used to generate a set of frequency distribution.
The "cross-validation estimate" for the probability of a sample
is found by averaging the held-out estimates for the sample in
each pair of frequency distributions.
"""
SUM_TO_ONE = False
def __init__(self, freqdists, bins):
"""
Use the cross-validation estimate to create a probability
distribution for the experiment used to generate
``freqdists``.
:type freqdists: list(FreqDist)
:param freqdists: A list of the frequency distributions
generated by the experiment.
:type bins: int
:param bins: The number of sample values that can be generated
by the experiment that is described by the probability
distribution. This value must be correctly set for the
probabilities of the sample values to sum to one. If
``bins`` is not specified, it defaults to ``freqdist.B()``.
"""
self._freqdists = freqdists
# Create a heldout probability distribution for each pair of
# frequency distributions in freqdists.
self._heldout_probdists = []
for fdist1 in freqdists:
for fdist2 in freqdists:
if fdist1 is not fdist2:
probdist = HeldoutProbDist(fdist1, fdist2, bins)
self._heldout_probdists.append(probdist)
def freqdists(self):
"""
Return the list of frequency distributions that this ``ProbDist`` is based on.
:rtype: list(FreqDist)
"""
return self._freqdists
def samples(self):
# [xx] nb: this is not too efficient
return set(sum([fd.keys() for fd in self._freqdists], []))
def prob(self, sample):
# Find the average probability estimate returned by each
# heldout distribution.
prob = 0.0
for heldout_probdist in self._heldout_probdists:
prob += heldout_probdist.prob(sample)
return prob/len(self._heldout_probdists)
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<CrossValidationProbDist: %d-way>' % len(self._freqdists)
class WittenBellProbDist(ProbDistI):
"""
The Witten-Bell estimate of a probability distribution. This distribution
allocates uniform probability mass to as yet unseen events by using the
number of events that have only been seen once. The probability mass
reserved for unseen events is equal to *T / (N + T)*
where *T* is the number of observed event types and *N* is the total
number of observed events. This equates to the maximum likelihood estimate
of a new type event occurring. The remaining probability mass is discounted
such that all probability estimates sum to one, yielding:
- *p = T / (Z * (N + T))*, if count = 0
- *p = c / (N + T)*, otherwise
"""
def __init__(self, freqdist, bins=None):
"""
Creates a distribution of Witten-Bell probability estimates. This
distribution allocates uniform probability mass to as yet unseen
events by using the number of events that have only been seen once. The
probability mass reserved for unseen events is equal to *T / (N + T)*
where *T* is the number of observed event types and *N* is the total
number of observed events. This equates to the maximum likelihood
estimate of a new type event occurring. The remaining probability mass
is discounted such that all probability estimates sum to one,
yielding:
- *p = T / (Z * (N + T))*, if count = 0
- *p = c / (N + T)*, otherwise
The parameters *T* and *N* are taken from the ``freqdist`` parameter
(the ``B()`` and ``N()`` values). The normalising factor *Z* is
calculated using these values along with the ``bins`` parameter.
:param freqdist: The frequency counts upon which to base the
estimation.
:type freqdist: FreqDist
:param bins: The number of possible event types. This must be at least
as large as the number of bins in the ``freqdist``. If None, then
it's assumed to be equal to that of the ``freqdist``
:type bins: int
"""
assert bins is None or bins >= freqdist.B(),\
'Bins parameter must not be less than freqdist.B()'
if bins is None:
bins = freqdist.B()
self._freqdist = freqdist
self._T = self._freqdist.B()
self._Z = bins - self._freqdist.B()
self._N = self._freqdist.N()
# self._P0 is P(0), precalculated for efficiency:
if self._N==0:
# if freqdist is empty, we approximate P(0) by a UniformProbDist:
self._P0 = 1.0 / self._Z
else:
self._P0 = self._T / float(self._Z * (self._N + self._T))
def prob(self, sample):
# inherit docs from ProbDistI
c = self._freqdist[sample]
if c == 0:
return self._P0
else:
return c / float(self._N + self._T)
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def freqdist(self):
return self._freqdist
def discount(self):
raise NotImplementedError()
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<WittenBellProbDist based on %d samples>' % self._freqdist.N()
##//////////////////////////////////////////////////////
## Good-Turing Probablity Distributions
##//////////////////////////////////////////////////////
# Good-Turing frequency estimation was contributed by Alan Turing and
# his statistical assistant I.J. Good, during their collaboration in
# the WWII. It is a statistical technique for predicting the
# probability of occurrence of objects belonging to an unknown number
# of species, given past observations of such objects and their
# species. (In drawing balls from an urn, the 'objects' would be balls
# and the 'species' would be the distinct colors of the balls (finite
# but unknown in number).
#
# The situation of frequency zero is quite common in the original
# Good-Turing estimation. Bill Gale and Geoffrey Sampson present a
# simple and effective approach, Simple Good-Turing. As a smoothing
# curve they simply use a power curve:
#
# Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
# relationship)
#
# They estimate a and b by simple linear regression technique on the
# logarithmic form of the equation:
#
# log Nr = a + b*log(r)
#
# However, they suggest that such a simple curve is probably only
# appropriate for high values of r. For low values of r, they use the
# measured Nr directly. (see M&S, p.213)
#
# Gale and Sampson propose to use r while the difference between r and
# r* is more than 1.96 times the standard deviation, and to switch to r*
# if it is less than or equal to it:
#
# |r - r*| > 1.96 * sqrt((r + 1)^2 (Nr+1 / Nr^2) (1 + Nr+1 / Nr))
#
# The 1.96 coefficient corresponds to a 0.05 significance criterion,
# some implementations can use a coefficient of 1.65 for a 0.1
# significance criterion.
#
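# For instance (illustrative numbers): with r = 1, Nr = 120 and Nr+1 = 40,
# the empirical Turing estimate is r* = (r+1) * Nr+1 / Nr = 2 * 40 / 120 ~ 0.67
# and 1.96 * sqrt((r+1)^2 * (Nr+1 / Nr^2) * (1 + Nr+1 / Nr)) ~ 0.24, so the
# empirical r* is kept as long as it differs from the smoothed estimate by
# more than about 0.24.
#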
class GoodTuringProbDist(ProbDistI):
"""
The Good-Turing estimate of a probability distribution. This method
calculates the probability mass to assign to events with zero or low
counts based on the number of events with higher counts. It does so by
using the smoothed count *c\**:
- *c\* = (c + 1) N(c + 1) / N(c)* for c >= 1
- *things with frequency zero in training* = N(1) for c == 0
where *c* is the original count, *N(i)* is the number of event types
observed with count *i*. We can think of the count of unseen events as the
count of frequency one (see Jurafsky & Martin, 2nd Edition, p101).
"""
def __init__(self, freqdist, bins=None):
"""
:param freqdist: The frequency counts upon which to base the
estimation.
:type freqdist: FreqDist
:param bins: The number of possible event types. This must be at least
as large as the number of bins in the ``freqdist``. If None, then
it's assumed to be equal to that of the ``freqdist``
:type bins: int
"""
assert bins is None or bins >= freqdist.B(),\
'Bins parameter must not be less than freqdist.B()'
if bins is None:
bins = freqdist.B()
self._freqdist = freqdist
self._bins = bins
def prob(self, sample):
count = self._freqdist[sample]
# unseen sample's frequency (count zero) uses frequency one's
if count == 0 and self._freqdist.N() != 0:
p0 = 1.0 * self._freqdist.Nr(1) / self._freqdist.N()
if self._bins == self._freqdist.B():
p0 = 0.0
else:
p0 = p0 / (1.0 * self._bins - self._freqdist.B())
return p0
nc = self._freqdist.Nr(count)
ncn = self._freqdist.Nr(count + 1)
# avoid divide-by-zero errors for sparse datasets
if nc == 0 or self._freqdist.N() == 0:
return 0
return 1.0 * (count + 1) * ncn / (nc * self._freqdist.N())
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def discount(self):
"""
:return: The probability mass transferred from the
seen samples to the unseen samples.
:rtype: float
"""
return 1.0 * self._freqdist.Nr(1) / self._freqdist.N()
def freqdist(self):
return self._freqdist
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<GoodTuringProbDist based on %d samples>' % self._freqdist.N()
##//////////////////////////////////////////////////////
## Simple Good-Turing Probablity Distributions
##//////////////////////////////////////////////////////
class SimpleGoodTuringProbDist(ProbDistI):
"""
SimpleGoodTuring ProbDist approximates the relationship between a
frequency and its frequency of frequencies by a straight line in log
space, fitted by linear regression.
Details of the Simple Good-Turing algorithm can be found in:
- "Good-Turing smoothing without tears" (Gale & Sampson 1995),
Journal of Quantitative Linguistics, vol. 2 pp. 217-237.
- "Speech and Language Processing (Jurafsky & Martin),
2nd Edition, Chapter 4.5 p103 (log(Nc) = a + b*log(c))
- http://www.grsampson.net/RGoodTur.html
Given a set of pairs (xi, yi), where xi denotes the frequency and
yi denotes the frequency of frequency, we want to minimize their
squared variation. E(x) and E(y) represent the means of xi and yi.
- slope: b = sigma ((xi-E(x))(yi-E(y))) / sigma ((xi-E(x))(xi-E(x)))
- intercept: a = E(y) - b*E(x)
"""
def __init__(self, freqdist, bins=None):
"""
:param freqdist: The frequency counts upon which to base the
estimation.
:type freqdist: FreqDist
:param bins: The number of possible event types. This must be
larger than the number of bins in the ``freqdist``. If None,
then it's assumed to be equal to ``freqdist``.B() + 1
:type bins: int
"""
assert bins is None or bins > freqdist.B(),\
'Bins parameter must not be less than freqdist.B() + 1'
if bins is None:
bins = freqdist.B() + 1
self._freqdist = freqdist
self._bins = bins
r, nr = self._r_Nr()
self.find_best_fit(r, nr)
self._switch(r, nr)
self._renormalize(r, nr)
def _r_Nr(self):
"""
Split the frequency distribution into two lists (r, Nr), where Nr(r) > 0
"""
r, nr = [], []
b, i = 0, 0
while b != self._freqdist.B():
nr_i = self._freqdist.Nr(i)
if nr_i > 0:
b += nr_i
r.append(i)
nr.append(nr_i)
i += 1
return (r, nr)
def find_best_fit(self, r, nr):
"""
Use simple linear regression to tune parameters self._slope and
self._intercept in the log-log space based on count and Nr(count)
(Work in log space to avoid floating point underflow.)
"""
# For higher sample frequencies the data points become horizontal
# along line Nr=1. To create a more evident linear model in log-log
# space, we average positive Nr values with the surrounding zero
# values. (Church and Gale, 1991)
if not r or not nr:
# Empty r or nr?
return
zr = []
for j in range(len(r)):
if j > 0:
i = r[j-1]
else:
i = 0
if j != len(r) - 1:
k = r[j+1]
else:
k = 2 * r[j] - i
zr_ = 2.0 * nr[j] / (k - i)
zr.append(zr_)
log_r = [math.log(i) for i in r]
log_zr = [math.log(i) for i in zr]
xy_cov = x_var = 0.0
x_mean = 1.0 * sum(log_r) / len(log_r)
y_mean = 1.0 * sum(log_zr) / len(log_zr)
for (x, y) in zip(log_r, log_zr):
xy_cov += (x - x_mean) * (y - y_mean)
x_var += (x - x_mean)**2
if x_var != 0:
self._slope = xy_cov / x_var
else:
self._slope = 0.0
self._intercept = y_mean - self._slope * x_mean
def _switch(self, r, nr):
"""
Calculate the r frontier where we must switch from Nr to Sr
when estimating E[Nr].
"""
for i, r_ in enumerate(r):
if len(r) == i + 1 or r[i+1] != r_ + 1:
# We are at the end of r, or there is a gap in r
self._switch_at = r_
break
Sr = self.smoothedNr
smooth_r_star = (r_ + 1) * Sr(r_+1) / Sr(r_)
unsmooth_r_star = 1.0 * (r_ + 1) * nr[i+1] / nr[i]
std = math.sqrt(self._variance(r_, nr[i], nr[i+1]))
if abs(unsmooth_r_star-smooth_r_star) <= 1.96 * std:
self._switch_at = r_
break
def _variance(self, r, nr, nr_1):
r = float(r)
nr = float(nr)
nr_1 = float(nr_1)
return (r + 1.0)**2 * (nr_1 / nr**2) * (1.0 + nr_1 / nr)
def _renormalize(self, r, nr):
"""
It is necessary to renormalize all the probability estimates to
ensure a proper probability distribution results. This can be done
by keeping the estimate of the probability mass for unseen items as
N(1)/N and renormalizing all the estimates for previously seen items
(as Gale and Sampson (1995) propose). (See M&S P.213, 1999)
"""
prob_cov = 0.0
for r_, nr_ in zip(r, nr):
prob_cov += nr_ * self._prob_measure(r_)
if prob_cov:
self._renormal = (1 - self._prob_measure(0)) / prob_cov
def smoothedNr(self, r):
"""
Return the smoothed estimate of the number of samples with count r.
:param r: The frequency count.
:type r: int
:rtype: float
"""
# Nr = a*r^b (with b < -1 to give the appropriate hyperbolic
# relationship)
# Estimate a and b by simple linear regression technique on
# the logarithmic form of the equation: log Nr = a + b*log(r)
return math.exp(self._intercept + self._slope * math.log(r))
def prob(self, sample):
"""
Return the sample's probability.
:param sample: sample of the event
:type sample: str
:rtype: float
"""
count = self._freqdist[sample]
p = self._prob_measure(count)
if count == 0:
if self._bins == self._freqdist.B():
p = 0.0
else:
p = p / (1.0 * self._bins - self._freqdist.B())
else:
p = p * self._renormal
return p
def _prob_measure(self, count):
if count == 0 and self._freqdist.N() == 0 :
return 1.0
elif count == 0 and self._freqdist.N() != 0:
return 1.0 * self._freqdist.Nr(1) / self._freqdist.N()
if self._switch_at > count:
Er_1 = 1.0 * self._freqdist.Nr(count+1)
Er = 1.0 * self._freqdist.Nr(count)
else:
Er_1 = self.smoothedNr(count+1)
Er = self.smoothedNr(count)
r_star = (count + 1) * Er_1 / Er
return r_star / self._freqdist.N()
def check(self):
prob_sum = self._prob_measure(0)  # mass reserved for unseen samples
r, nr = self._r_Nr()
for r_, nr_ in zip(r, nr):
prob_sum += nr_ * self._prob_measure(r_) * self._renormal
print "Probability Sum:", prob_sum
# assert abs(prob_sum - 1.0) < 1e-6, "probability sum should be one!"
def discount(self):
"""
This function returns the total mass of probability transfers from the
seen samples to the unseen samples.
"""
return 1.0 * self.smoothedNr(1) / self._freqdist.N()
def max(self):
return self._freqdist.max()
def samples(self):
return self._freqdist.keys()
def freqdist(self):
return self._freqdist
def __repr__(self):
"""
Return a string representation of this ``ProbDist``.
:rtype: str
"""
return '<SimpleGoodTuringProbDist based on %d samples>'\
% self._freqdist.N()
class MutableProbDist(ProbDistI):
"""
A mutable probdist where the probabilities may be easily modified. This
simply copies an existing probdist, storing the probability values in a
mutable dictionary and providing an update method.
"""
def __init__(self, prob_dist, samples, store_logs=True):
"""
Creates the mutable probdist based on the given prob_dist and using
the list of samples given. These values are stored as log
probabilities if the store_logs flag is set.
:param prob_dist: the distribution from which to garner the
probabilities
:type prob_dist: ProbDist
:param samples: the complete set of samples
:type samples: sequence of any
:param store_logs: whether to store the probabilities as logarithms
:type store_logs: bool
"""
try:
import numpy
except ImportError:
print "Error: Please install numpy; for instructions see http://www.nltk.org/"
exit()
self._samples = samples
self._sample_dict = dict((samples[i], i) for i in range(len(samples)))
self._data = numpy.zeros(len(samples), numpy.float64)
for i in range(len(samples)):
if store_logs:
self._data[i] = prob_dist.logprob(samples[i])
else:
self._data[i] = prob_dist.prob(samples[i])
self._logs = store_logs
def samples(self):
# inherit documentation
return self._samples
def prob(self, sample):
# inherit documentation
i = self._sample_dict.get(sample)
if i is not None:
if self._logs:
return 2**(self._data[i])
else:
return self._data[i]
else:
return 0.0
def logprob(self, sample):
# inherit documentation
i = self._sample_dict.get(sample)
if i is not None:
if self._logs:
return self._data[i]
else:
return math.log(self._data[i], 2)
else:
return float('-inf')
def update(self, sample, prob, log=True):
"""
Update the probability for the given sample. This may cause the object
to stop being a valid probability distribution; the user must
ensure that they update the sample probabilities such that all samples
have probabilities between 0 and 1 and that all probabilities sum to
one.
:param sample: the sample for which to update the probability
:type sample: any
:param prob: the new probability
:type prob: float
:param log: whether the probability is already given as a log probability
:type log: bool
"""
i = self._sample_dict.get(sample)
assert i is not None
if self._logs:
if log: self._data[i] = prob
else: self._data[i] = math.log(prob, 2)
else:
if log: self._data[i] = 2**(prob)
else: self._data[i] = prob
##//////////////////////////////////////////////////////
## Probability Distribution Operations
##//////////////////////////////////////////////////////
def log_likelihood(test_pdist, actual_pdist):
if (not isinstance(test_pdist, ProbDistI) or
not isinstance(actual_pdist, ProbDistI)):
raise ValueError('expected a ProbDist.')
# Is this right?
return sum(actual_pdist.prob(s) * math.log(test_pdist.prob(s), 2)
               for s in actual_pdist.samples())
def entropy(pdist):
probs = [pdist.prob(s) for s in pdist.samples()]
return -sum([p * math.log(p,2) for p in probs])
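# For example, a uniform distribution over four samples has entropy log2(4):
#     entropy(UniformProbDist(['a', 'b', 'c', 'd']))   # -> 2.0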
##//////////////////////////////////////////////////////
## Conditional Distributions
##//////////////////////////////////////////////////////
class ConditionalFreqDist(defaultdict):
"""
A collection of frequency distributions for a single experiment
run under different conditions. Conditional frequency
distributions are used to record the number of times each sample
occurred, given the condition under which the experiment was run.
For example, a conditional frequency distribution could be used to
record the frequency of each word (type) in a document, given its
length. Formally, a conditional frequency distribution can be
defined as a function that maps from each condition to the
FreqDist for the experiment under that condition.
Conditional frequency distributions are typically constructed by
repeatedly running an experiment under a variety of conditions,
and incrementing the sample outcome counts for the appropriate
conditions. For example, the following code will produce a
conditional frequency distribution that encodes how often each
word type occurs, given the length of that word type:
>>> from nltk.probability import ConditionalFreqDist
>>> from nltk.tokenize import word_tokenize
>>> sent = "the the the dog dog some other words that we do not care about"
>>> cfdist = ConditionalFreqDist()
>>> for word in word_tokenize(sent):
... condition = len(word)
... cfdist[condition].inc(word)
An equivalent way to do this is with the initializer:
>>> cfdist = ConditionalFreqDist((len(word), word) for word in word_tokenize(sent))
The frequency distribution for each condition is accessed using
the indexing operator:
>>> cfdist[3]
<FreqDist with 3 samples and 6 outcomes>
>>> cfdist[3].freq('the')
0.5
>>> cfdist[3]['dog']
2
When the indexing operator is used to access the frequency
distribution for a condition that has not been accessed before,
``ConditionalFreqDist`` creates a new empty FreqDist for that
condition.
"""
def __init__(self, cond_samples=None):
"""
Construct a new empty conditional frequency distribution. In
particular, the count for every sample, under every condition,
is zero.
:param cond_samples: The samples to initialize the conditional
frequency distribution with
:type cond_samples: Sequence of (condition, sample) tuples
"""
defaultdict.__init__(self, FreqDist)
if cond_samples:
for (cond, sample) in cond_samples:
self[cond].inc(sample)
def conditions(self):
"""
Return a list of the conditions that have been accessed for
this ``ConditionalFreqDist``. Use the indexing operator to
access the frequency distribution for a given condition.
Note that the frequency distributions for some conditions
may contain zero sample outcomes.
:rtype: list
"""
return sorted(self.keys())
def N(self):
"""
Return the total number of sample outcomes that have been
recorded by this ``ConditionalFreqDist``.
:rtype: int
"""
return sum(fdist.N() for fdist in self.itervalues())
def plot(self, *args, **kwargs):
"""
Plot the given samples from the conditional frequency distribution.
For a cumulative plot, specify cumulative=True.
(Requires Matplotlib to be installed.)
:param samples: The samples to plot
:type samples: list
:param title: The title for the graph
:type title: str
:param conditions: The conditions to plot (default is all)
:type conditions: list
"""
try:
import pylab
except ImportError:
raise ValueError('The plot function requires the matplotlib package (aka pylab).'
'See http://matplotlib.sourceforge.net/')
cumulative = _get_kwarg(kwargs, 'cumulative', False)
conditions = _get_kwarg(kwargs, 'conditions', self.conditions())
title = _get_kwarg(kwargs, 'title', '')
samples = _get_kwarg(kwargs, 'samples',
sorted(set(v for c in conditions for v in self[c]))) # this computation could be wasted
if not "linewidth" in kwargs:
kwargs["linewidth"] = 2
for condition in conditions:
if cumulative:
freqs = list(self[condition]._cumulative_frequencies(samples))
ylabel = "Cumulative Counts"
legend_loc = 'lower right'
else:
freqs = [self[condition][sample] for sample in samples]
ylabel = "Counts"
legend_loc = 'upper right'
# percents = [f * 100 for f in freqs] only in ConditionalProbDist?
kwargs['label'] = str(condition)
pylab.plot(freqs, *args, **kwargs)
pylab.legend(loc=legend_loc)
pylab.grid(True, color="silver")
pylab.xticks(range(len(samples)), [unicode(s) for s in samples], rotation=90)
if title:
pylab.title(title)
pylab.xlabel("Samples")
pylab.ylabel(ylabel)
pylab.show()
def tabulate(self, *args, **kwargs):
"""
Tabulate the given samples from the conditional frequency distribution.
:param samples: The samples to tabulate
:type samples: list
:param cumulative: A flag to specify whether the counts are cumulative (default = False)
:type cumulative: bool
:param conditions: The conditions to tabulate (default is all)
:type conditions: list
"""
cumulative = _get_kwarg(kwargs, 'cumulative', False)
conditions = _get_kwarg(kwargs, 'conditions', self.conditions())
samples = _get_kwarg(kwargs, 'samples',
sorted(set(v for c in conditions for v in self[c]))) # this computation could be wasted
condition_size = max(len(str(c)) for c in conditions)
print ' ' * condition_size,
for s in samples:
print "%4s" % str(s),
print
for c in conditions:
print "%*s" % (condition_size, str(c)),
if cumulative:
freqs = list(self[c]._cumulative_frequencies(samples))
else:
freqs = [self[c][sample] for sample in samples]
for f in freqs:
print "%4d" % f,
print
def __le__(self, other):
if not isinstance(other, ConditionalFreqDist): return False
return set(self.conditions()).issubset(other.conditions()) \
and all(self[c] <= other[c] for c in self.conditions())
def __lt__(self, other):
if not isinstance(other, ConditionalFreqDist): return False
return self <= other and self != other
def __ge__(self, other):
if not isinstance(other, ConditionalFreqDist): return False
return other <= self
def __gt__(self, other):
if not isinstance(other, ConditionalFreqDist): return False
return other < self
def __repr__(self):
"""
Return a string representation of this ``ConditionalFreqDist``.
:rtype: str
"""
return '<ConditionalFreqDist with %d conditions>' % len(self)
class ConditionalProbDistI(defaultdict):
"""
A collection of probability distributions for a single experiment
run under different conditions. Conditional probability
distributions are used to estimate the likelihood of each sample,
given the condition under which the experiment was run. For
example, a conditional probability distribution could be used to
estimate the probability of each word type in a document, given
the length of the word type. Formally, a conditional probability
distribution can be defined as a function that maps from each
condition to the ``ProbDist`` for the experiment under that
condition.
"""
def __init__(self):
raise NotImplementedError("Interfaces can't be instantiated")
def conditions(self):
"""
Return a list of the conditions that are represented by
this ``ConditionalProbDist``. Use the indexing operator to
access the probability distribution for a given condition.
:rtype: list
"""
return self.keys()
def __repr__(self):
"""
Return a string representation of this ``ConditionalProbDist``.
:rtype: str
"""
return '<%s with %d conditions>' % (type(self).__name__, len(self))
class ConditionalProbDist(ConditionalProbDistI):
"""
A conditional probability distribution modelling the experiments
that were used to generate a conditional frequency distribution.
A ConditionalProbDist is constructed from a
``ConditionalFreqDist`` and a ``ProbDist`` factory:
- The ``ConditionalFreqDist`` specifies the frequency
distribution for each condition.
- The ``ProbDist`` factory is a function that takes a
condition's frequency distribution, and returns its
probability distribution. A ``ProbDist`` class's name (such as
``MLEProbDist`` or ``HeldoutProbDist``) can be used to specify
that class's constructor.
The first argument to the ``ProbDist`` factory is the frequency
distribution that it should model; and the remaining arguments are
specified by the ``factory_args`` parameter to the
``ConditionalProbDist`` constructor. For example, the following
code constructs a ``ConditionalProbDist``, where the probability
distribution for each condition is an ``ELEProbDist`` with 10 bins:
>>> from nltk.probability import ConditionalProbDist, ELEProbDist
>>> cpdist = ConditionalProbDist(cfdist, ELEProbDist, 10)
>>> print cpdist['run'].max()
'NN'
>>> print cpdist['run'].prob('NN')
0.0813
"""
def __init__(self, cfdist, probdist_factory,
*factory_args, **factory_kw_args):
"""
Construct a new conditional probability distribution, based on
the given conditional frequency distribution and ``ProbDist``
factory.
:type cfdist: ConditionalFreqDist
:param cfdist: The ``ConditionalFreqDist`` specifying the
frequency distribution for each condition.
:type probdist_factory: class or function
:param probdist_factory: The function or class that maps
a condition's frequency distribution to its probability
distribution. The function is called with the frequency
distribution as its first argument,
``factory_args`` as its remaining arguments, and
``factory_kw_args`` as keyword arguments.
:type factory_args: (any)
:param factory_args: Extra arguments for ``probdist_factory``.
These arguments are usually used to specify extra
properties for the probability distributions of individual
conditions, such as the number of bins they contain.
:type factory_kw_args: (any)
:param factory_kw_args: Extra keyword arguments for ``probdist_factory``.
"""
# self._probdist_factory = probdist_factory
# self._cfdist = cfdist
# self._factory_args = factory_args
# self._factory_kw_args = factory_kw_args
factory = lambda: probdist_factory(FreqDist(),
*factory_args, **factory_kw_args)
defaultdict.__init__(self, factory)
for condition in cfdist:
self[condition] = probdist_factory(cfdist[condition],
*factory_args, **factory_kw_args)
class DictionaryConditionalProbDist(ConditionalProbDistI):
"""
An alternative ConditionalProbDist that simply wraps a dictionary of
ProbDists rather than creating these from FreqDists.
"""
def __init__(self, probdist_dict):
"""
:param probdist_dict: a dictionary containing the probdists indexed
by the conditions
:type probdist_dict: dict any -> probdist
"""
defaultdict.__init__(self, DictionaryProbDist)
self.update(probdist_dict)
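# Illustrative usage sketch (added for exposition; not part of the original
# module). It assumes DictionaryProbDist, defined elsewhere in this module,
# maps samples directly to probabilities:
# >>> cpdist = DictionaryConditionalProbDist(
# ...     {'heavy': DictionaryProbDist({'rain': 0.7, 'snow': 0.3}),
# ...      'light': DictionaryProbDist({'rain': 0.4, 'snow': 0.6})})
# >>> cpdist['heavy'].prob('rain')
# 0.7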
##//////////////////////////////////////////////////////
## Adding in log-space.
##//////////////////////////////////////////////////////
# If the difference is bigger than this, then just take the bigger one:
_ADD_LOGS_MAX_DIFF = math.log(1e-30, 2)
def add_logs(logx, logy):
"""
Given two numbers ``logx`` = *log(x)* and ``logy`` = *log(y)*, return
*log(x+y)*. Conceptually, this is the same as returning
``log(2**(logx)+2**(logy))``, but the actual implementation
avoids overflow errors that could result from direct computation.
"""
if (logx < logy + _ADD_LOGS_MAX_DIFF):
return logy
if (logy < logx + _ADD_LOGS_MAX_DIFF):
return logx
base = min(logx, logy)
return base + math.log(2**(logx-base) + 2**(logy-base), 2)
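# Worked example (added; illustrative only): two outcomes of probability 0.25,
# stored as base-2 logs (-2.0 each), sum to probability 0.5, i.e. a log of -1.0.
# >>> add_logs(math.log(0.25, 2), math.log(0.25, 2))
# -1.0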
def sum_logs(logs):
if len(logs) == 0:
# Use some approximation to infinity. What this does
# depends on your system's float implementation.
return _NINF
else:
return reduce(add_logs, logs[1:], logs[0])
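# Illustrative only (added): sum_logs folds a list of log probabilities with
# add_logs, so three outcomes of probability 0.25 sum to 0.75 (log2(0.75)).
# >>> round(sum_logs([-2.0, -2.0, -2.0]), 6)
# -0.415037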
##//////////////////////////////////////////////////////
## Probabilistic Mix-in
##//////////////////////////////////////////////////////
class ProbabilisticMixIn(object):
"""
A mix-in class to associate probabilities with other classes
(trees, rules, etc.). To use the ``ProbabilisticMixIn`` class,
define a new class that derives from an existing class and from
ProbabilisticMixIn. You will need to define a new constructor for
the new class, which explicitly calls the constructors of both its
parent classes. For example:
>>> from nltk.probability import ProbabilisticMixIn
>>> class A:
... def __init__(self, x, y): self.data = (x,y)
...
>>> class ProbabilisticA(A, ProbabilisticMixIn):
... def __init__(self, x, y, **prob_kwarg):
... A.__init__(self, x, y)
... ProbabilisticMixIn.__init__(self, **prob_kwarg)
See the documentation for the ProbabilisticMixIn
``constructor<__init__>`` for information about the arguments it
expects.
You should generally also redefine the string representation
methods, the comparison methods, and the hashing method.
"""
def __init__(self, **kwargs):
"""
Initialize this object's probability. This initializer should
be called by subclass constructors. ``prob`` should generally be
the first argument for those constructors.
:param prob: The probability associated with the object.
:type prob: float
:param logprob: The log of the probability associated with
the object.
:type logprob: float
"""
if 'prob' in kwargs:
if 'logprob' in kwargs:
raise TypeError('Must specify either prob or logprob '
'(not both)')
else:
ProbabilisticMixIn.set_prob(self, kwargs['prob'])
elif 'logprob' in kwargs:
ProbabilisticMixIn.set_logprob(self, kwargs['logprob'])
else:
self.__prob = self.__logprob = None
def set_prob(self, prob):
"""
Set the probability associated with this object to ``prob``.
:param prob: The new probability
:type prob: float
"""
self.__prob = prob
self.__logprob = None
def set_logprob(self, logprob):
"""
Set the log probability associated with this object to
``logprob``. I.e., set the probability associated with this
object to ``2**(logprob)``.
:param logprob: The new log probability
:type logprob: float
"""
self.__logprob = logprob
self.__prob = None
def prob(self):
"""
Return the probability associated with this object.
:rtype: float
"""
if self.__prob is None:
if self.__logprob is None: return None
self.__prob = 2**(self.__logprob)
return self.__prob
def logprob(self):
"""
Return ``log(p)``, where ``p`` is the probability associated
with this object.
:rtype: float
"""
if self.__logprob is None:
if self.__prob is None: return None
self.__logprob = math.log(self.__prob, 2)
return self.__logprob
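# Illustrative example (added): the probability and its base-2 log are kept in
# sync lazily, so setting one makes the other available on demand.
# >>> p = ProbabilisticMixIn(logprob=-2.0)
# >>> p.prob()
# 0.25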
class ImmutableProbabilisticMixIn(ProbabilisticMixIn):
def set_prob(self, prob):
raise ValueError, '%s is immutable' % self.__class__.__name__
def set_logprob(self, prob):
raise ValueError, '%s is immutable' % self.__class__.__name__
## Helper function for processing keyword arguments
def _get_kwarg(kwargs, key, default):
if key in kwargs:
arg = kwargs[key]
del kwargs[key]
else:
arg = default
return arg
##//////////////////////////////////////////////////////
## Demonstration
##//////////////////////////////////////////////////////
def _create_rand_fdist(numsamples, numoutcomes):
"""
Create a new frequency distribution, with random samples. The
samples are numbers from 1 to ``numsamples``, and are generated by
summing two numbers, each of which has a uniform distribution.
"""
import random
fdist = FreqDist()
for x in range(numoutcomes):
y = (random.randint(1, (1+numsamples)/2) +
random.randint(0, numsamples/2))
fdist.inc(y)
return fdist
def _create_sum_pdist(numsamples):
"""
Return the true probability distribution for the experiment
``_create_rand_fdist(numsamples, x)``.
"""
fdist = FreqDist()
for x in range(1, (1+numsamples)/2+1):
for y in range(0, numsamples/2+1):
fdist.inc(x+y)
return MLEProbDist(fdist)
def demo(numsamples=6, numoutcomes=500):
"""
A demonstration of frequency distributions and probability
    distributions. This demonstration creates three frequency
    distributions by sampling a random process with ``numsamples``
    possible samples; each frequency distribution is built from
    ``numoutcomes`` outcomes. These three frequency distributions are
    then used to build several probability distributions. Finally, the
probability estimates of these distributions are compared to the
actual probability of each sample.
:type numsamples: int
:param numsamples: The number of samples to use in each demo
        frequency distribution.
:type numoutcomes: int
:param numoutcomes: The total number of outcomes for each
demo frequency distribution. These outcomes are divided into
``numsamples`` bins.
:rtype: None
"""
# Randomly sample a stochastic process three times.
fdist1 = _create_rand_fdist(numsamples, numoutcomes)
fdist2 = _create_rand_fdist(numsamples, numoutcomes)
fdist3 = _create_rand_fdist(numsamples, numoutcomes)
# Use our samples to create probability distributions.
pdists = [
MLEProbDist(fdist1),
LidstoneProbDist(fdist1, 0.5, numsamples),
HeldoutProbDist(fdist1, fdist2, numsamples),
HeldoutProbDist(fdist2, fdist1, numsamples),
CrossValidationProbDist([fdist1, fdist2, fdist3], numsamples),
GoodTuringProbDist(fdist1),
SimpleGoodTuringProbDist(fdist1),
SimpleGoodTuringProbDist(fdist1, 7),
_create_sum_pdist(numsamples),
]
# Find the probability of each sample.
vals = []
for n in range(1,numsamples+1):
vals.append(tuple([n, fdist1.freq(n)] +
[pdist.prob(n) for pdist in pdists]))
# Print the results in a formatted table.
print ('%d samples (1-%d); %d outcomes were sampled for each FreqDist' %
(numsamples, numsamples, numoutcomes))
print '='*9*(len(pdists)+2)
FORMATSTR = ' FreqDist '+ '%8s '*(len(pdists)-1) + '| Actual'
print FORMATSTR % tuple(`pdist`[1:9] for pdist in pdists[:-1])
print '-'*9*(len(pdists)+2)
FORMATSTR = '%3d %8.6f ' + '%8.6f '*(len(pdists)-1) + '| %8.6f'
for val in vals:
print FORMATSTR % val
# Print the totals for each column (should all be 1.0)
zvals = zip(*vals)
def sum(lst): return reduce(lambda x,y:x+y, lst, 0)
sums = [sum(val) for val in zvals[1:]]
print '-'*9*(len(pdists)+2)
FORMATSTR = 'Total ' + '%8.6f '*(len(pdists)) + '| %8.6f'
print FORMATSTR % tuple(sums)
print '='*9*(len(pdists)+2)
# Display the distributions themselves, if they're short enough.
if len(`str(fdist1)`) < 70:
print ' fdist1:', str(fdist1)
print ' fdist2:', str(fdist2)
print ' fdist3:', str(fdist3)
print
print 'Generating:'
for pdist in pdists:
fdist = FreqDist(pdist.generate() for i in range(5000))
print '%20s %s' % (pdist.__class__.__name__[:20], str(fdist)[:55])
print
def gt_demo():
from nltk import corpus
emma_words = corpus.gutenberg.words('austen-emma.txt')
fd = FreqDist(emma_words)
gt = GoodTuringProbDist(fd)
sgt = SimpleGoodTuringProbDist(fd)
katz = SimpleGoodTuringProbDist(fd, 7)
print '%18s %8s %12s %14s %12s' \
% ("word", "freqency", "GoodTuring", "SimpleGoodTuring", "Katz-cutoff" )
for key in fd:
print '%18s %8d %12e %14e %12e' \
% (key, fd[key], gt.prob(key), sgt.prob(key), katz.prob(key))
if __name__ == '__main__':
demo(6, 10)
demo(5, 5000)
gt_demo()
__all__ = ['ConditionalFreqDist', 'ConditionalProbDist',
'ConditionalProbDistI', 'CrossValidationProbDist',
'DictionaryConditionalProbDist', 'DictionaryProbDist', 'ELEProbDist',
'FreqDist', 'GoodTuringProbDist', 'SimpleGoodTuringProbDist', 'HeldoutProbDist',
'ImmutableProbabilisticMixIn', 'LaplaceProbDist', 'LidstoneProbDist',
'MLEProbDist', 'MutableProbDist', 'ProbDistI', 'ProbabilisticMixIn',
'UniformProbDist', 'WittenBellProbDist', 'add_logs',
'log_likelihood', 'sum_logs', 'entropy']
| agpl-3.0 |
nvoron23/statsmodels | statsmodels/graphics/mosaicplot.py | 6 | 26886 | """Create a mosaic plot from a contingency table.
It allows one to visualize multivariate categorical data in a rigorous
and informative way.
See the docstring of the ``mosaic`` function for more information.
"""
# Author: Enrico Giampieri - 21 Jan 2013
from __future__ import division
from statsmodels.compat.python import (iteritems, iterkeys, lrange, string_types, lzip,
itervalues, zip, range)
import numpy as np
from statsmodels.compat.collections import OrderedDict
from itertools import product
from numpy import iterable, r_, cumsum, array
from statsmodels.graphics import utils
from pandas import DataFrame
__all__ = ["mosaic"]
def _normalize_split(proportion):
"""
    Return a list of proportions of the available space given the division.
    If only a number is given, it will assume a split into two pieces.
"""
if not iterable(proportion):
if proportion == 0:
proportion = array([0.0, 1.0])
elif proportion >= 1:
proportion = array([1.0, 0.0])
elif proportion < 0:
raise ValueError("proportions should be positive,"
"given value: {}".format(proportion))
else:
proportion = array([proportion, 1.0 - proportion])
proportion = np.asarray(proportion, dtype=float)
if np.any(proportion < 0):
raise ValueError("proportions should be positive,"
"given value: {}".format(proportion))
if np.allclose(proportion, 0):
raise ValueError("at least one proportion should be"
"greater than zero".format(proportion))
# ok, data are meaningful, so go on
if len(proportion) < 2:
return array([0.0, 1.0])
left = r_[0, cumsum(proportion)]
left /= left[-1] * 1.0
return left
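# Illustrative examples (added): a single number is read as a two-way split,
# while a list of weights becomes cumulative break points in [0, 1].
# >>> list(_normalize_split(0.5))
# [0.0, 0.5, 1.0]
# >>> list(_normalize_split([1, 2, 1]))
# [0.0, 0.25, 0.75, 1.0]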
def _split_rect(x, y, width, height, proportion, horizontal=True, gap=0.05):
"""
    Split the given rectangle into n segments whose proportions are specified
    along the given axis. If a gap is inserted, the segments will be separated
    by a certain amount of space, retaining their relative proportions.
    A gap of 1 corresponds to a plot that is half void, with the remaining half
    of the space divided proportionally among the pieces.
"""
x, y, w, h = float(x), float(y), float(width), float(height)
if (w < 0) or (h < 0):
raise ValueError("dimension of the square less than"
"zero w={} h=()".format(w, h))
proportions = _normalize_split(proportion)
# extract the starting point and the dimension of each subdivision
# in respect to the unit square
starting = proportions[:-1]
amplitude = proportions[1:] - starting
# how much each extrema is going to be displaced due to gaps
starting += gap * np.arange(len(proportions) - 1)
# how much the squares plus the gaps are extended
extension = starting[-1] + amplitude[-1] - starting[0]
# normalize everything for fit again in the original dimension
starting /= extension
amplitude /= extension
# bring everything to the original square
starting = (x if horizontal else y) + starting * (w if horizontal else h)
amplitude = amplitude * (w if horizontal else h)
# create each 4-tuple for each new block
results = [(s, y, a, h) if horizontal else (x, s, w, a)
for s, a in zip(starting, amplitude)]
return results
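# Illustrative example (added): with no gap, splitting the unit square into two
# equal parts along the horizontal axis gives two (x, y, width, height) tuples.
# >>> _split_rect(0, 0, 1, 1, [1, 1], horizontal=True, gap=0)
# [(0.0, 0.0, 0.5, 1.0), (0.5, 0.0, 0.5, 1.0)]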
def _reduce_dict(count_dict, partial_key):
"""
    Make a partial sum on a counter dict.
    Given a match for the beginning of the key, it will sum the matching values.
"""
L = len(partial_key)
count = sum(v for k, v in iteritems(count_dict) if k[:L] == partial_key)
return count
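# Illustrative example (added): sum every count whose key starts with ('a',).
# >>> counts = {('a', 'x'): 1, ('a', 'y'): 2, ('b', 'x'): 3}
# >>> _reduce_dict(counts, ('a',))
# 3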
def _key_splitting(rect_dict, keys, values, key_subset, horizontal, gap):
"""
    Given a dictionary where each entry is a rectangle, and a list of keys and
    values (counts of elements in each category), it splits each rect accordingly,
    as long as the key starts with the tuple ``key_subset``. The other keys are
    returned without modification.
"""
result = OrderedDict()
L = len(key_subset)
for name, (x, y, w, h) in iteritems(rect_dict):
if key_subset == name[:L]:
# split base on the values given
divisions = _split_rect(x, y, w, h, values, horizontal, gap)
for key, rect in zip(keys, divisions):
result[name + (key,)] = rect
else:
result[name] = (x, y, w, h)
return result
def _tuplify(obj):
"""convert an object in a tuple of strings (even if it is not iterable,
like a single integer number, but keep the string healthy)
"""
if np.iterable(obj) and not isinstance(obj, string_types):
res = tuple(str(o) for o in obj)
else:
res = (str(obj),)
return res
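# Illustrative examples (added): scalars become one-element tuples of strings,
# iterables (other than strings) are converted element by element.
# >>> _tuplify(1)
# ('1',)
# >>> _tuplify(['a', 2])
# ('a', '2')
# >>> _tuplify('ab')
# ('ab',)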
def _categories_level(keys):
"""use the Ordered dict to implement a simple ordered set
return each level of each category
[[key_1_level_1,key_2_level_1],[key_1_level_2,key_2_level_2]]
"""
res = []
for i in zip(*(keys)):
tuplefied = _tuplify(i)
res.append(list(OrderedDict([(j, None) for j in tuplefied])))
return res
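# Illustrative example (added): the distinct values of each key position,
# in order of first appearance.
# >>> _categories_level([('a', 'x'), ('b', 'x'), ('a', 'y')])
# [['a', 'b'], ['x', 'y']]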
def _hierarchical_split(count_dict, horizontal=True, gap=0.05):
"""
Split a square in a hierarchical way given a contingency table.
Hierarchically split the unit square in alternate directions
in proportion to the subdivision contained in the contingency table
    count_dict. This is the function that actually performs the tiling
    for the creation of the mosaic plot. If the gap array has been specified,
    it will insert a corresponding amount of space (proportional to the
    unit length), while retaining the proportionality of the tiles.
Parameters
----------
count_dict : dict
Dictionary containing the contingency table.
Each category should contain a non-negative number
        with a tuple as index. It expects all the combinations
        of keys to be represented; if that is not true, the missing
        values are automatically considered as 0
horizontal : bool
The starting direction of the split (by default along
the horizontal axis)
gap : float or array of floats
The list of gaps to be applied on each subdivision.
        If the length of the given array is less than the number
        of subcategories (or if it's a single number), it will be extended
        with exponentially decreasing gaps
Returns
----------
base_rect : dict
A dictionary containing the result of the split.
To each key is associated a 4-tuple of coordinates
that are required to create the corresponding rectangle:
0 - x position of the lower left corner
1 - y position of the lower left corner
2 - width of the rectangle
3 - height of the rectangle
"""
# this is the unit square that we are going to divide
base_rect = OrderedDict([(tuple(), (0, 0, 1, 1))])
# get the list of each possible value for each level
categories_levels = _categories_level(list(iterkeys(count_dict)))
L = len(categories_levels)
# recreate the gaps vector starting from an int
if not np.iterable(gap):
gap = [gap / 1.5 ** idx for idx in range(L)]
# extend if it's too short
if len(gap) < L:
last = gap[-1]
        gap = list(gap) + [last / 1.5 ** idx for idx in range(L)]
# trim if it's too long
gap = gap[:L]
    # put the count dictionary in order for the keys
# this will allow some code simplification
count_ordered = OrderedDict([(k, count_dict[k])
for k in list(product(*categories_levels))])
for cat_idx, cat_enum in enumerate(categories_levels):
# get the partial key up to the actual level
base_keys = list(product(*categories_levels[:cat_idx]))
for key in base_keys:
# for each partial and each value calculate how many
# observation we have in the counting dictionary
part_count = [_reduce_dict(count_ordered, key + (partial,))
for partial in cat_enum]
# reduce the gap for subsequents levels
new_gap = gap[cat_idx]
# split the given subkeys in the rectangle dictionary
base_rect = _key_splitting(base_rect, cat_enum, part_count, key,
horizontal, new_gap)
horizontal = not horizontal
return base_rect
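# Illustrative example (added): two categories with equal counts and no gap tile
# the unit square into two side-by-side rectangles (x, y, width, height). An
# OrderedDict is used here only to make the category order deterministic.
# >>> rects = _hierarchical_split(OrderedDict([(('a',), 1), (('b',), 1)]), gap=0)
# >>> sorted(rects.items())
# [(('a',), (0.0, 0.0, 0.5, 1.0)), (('b',), (0.5, 0.0, 0.5, 1.0))]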
def _single_hsv_to_rgb(hsv):
"""Transform a color from the hsv space to the rgb."""
from matplotlib.colors import hsv_to_rgb
return hsv_to_rgb(array(hsv).reshape(1, 1, 3)).reshape(3)
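# Illustrative example (added): hue 0 at full saturation and value is pure red.
# >>> list(_single_hsv_to_rgb([0.0, 1.0, 1.0]))
# [1.0, 0.0, 0.0]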
def _create_default_properties(data):
""""Create the default properties of the mosaic given the data
first it will varies the color hue (first category) then the color
saturation (second category) and then the color value
(third category). If a fourth category is found, it will put
decoration on the rectangle. Doesn't manage more than four
level of categories
"""
categories_levels = _categories_level(list(iterkeys(data)))
Nlevels = len(categories_levels)
# first level, the hue
L = len(categories_levels[0])
# hue = np.linspace(1.0, 0.0, L+1)[:-1]
hue = np.linspace(0.0, 1.0, L + 2)[:-2]
# second level, the saturation
L = len(categories_levels[1]) if Nlevels > 1 else 1
saturation = np.linspace(0.5, 1.0, L + 1)[:-1]
# third level, the value
L = len(categories_levels[2]) if Nlevels > 2 else 1
value = np.linspace(0.5, 1.0, L + 1)[:-1]
# fourth level, the hatch
L = len(categories_levels[3]) if Nlevels > 3 else 1
hatch = ['', '/', '-', '|', '+'][:L + 1]
# convert in list and merge with the levels
hue = lzip(list(hue), categories_levels[0])
saturation = lzip(list(saturation),
categories_levels[1] if Nlevels > 1 else [''])
value = lzip(list(value),
categories_levels[2] if Nlevels > 2 else [''])
hatch = lzip(list(hatch),
categories_levels[3] if Nlevels > 3 else [''])
# create the properties dictionary
properties = {}
for h, s, v, t in product(hue, saturation, value, hatch):
hv, hn = h
sv, sn = s
vv, vn = v
tv, tn = t
level = (hn,) + ((sn,) if sn else tuple())
level = level + ((vn,) if vn else tuple())
level = level + ((tn,) if tn else tuple())
hsv = array([hv, sv, vv])
prop = {'color': _single_hsv_to_rgb(hsv), 'hatch': tv, 'lw': 0}
properties[level] = prop
return properties
def _normalize_data(data, index):
"""normalize the data to a dict with tuples of strings as keys
right now it works with:
0 - dictionary (or equivalent mappable)
1 - pandas.Series with simple or hierarchical indexes
2 - numpy.ndarrays
3 - everything that can be converted to a numpy array
4 - pandas.DataFrame (via the _normalize_dataframe function)
"""
# if data is a dataframe we need to take a completely new road
# before coming back here. Use the hasattr to avoid importing
# pandas explicitly
if hasattr(data, 'pivot') and hasattr(data, 'groupby'):
data = _normalize_dataframe(data, index)
index = None
# can it be used as a dictionary?
try:
items = list(iteritems(data))
except AttributeError:
# ok, I cannot use the data as a dictionary
# Try to convert it to a numpy array, or die trying
data = np.asarray(data)
temp = OrderedDict()
for idx in np.ndindex(data.shape):
name = tuple(i for i in idx)
temp[name] = data[idx]
data = temp
items = list(iteritems(data))
# make all the keys a tuple, even if simple numbers
data = OrderedDict([_tuplify(k), v] for k, v in items)
categories_levels = _categories_level(list(iterkeys(data)))
# fill the void in the counting dictionary
indexes = product(*categories_levels)
contingency = OrderedDict([(k, data.get(k, 0)) for k in indexes])
data = contingency
# reorder the keys order according to the one specified by the user
# or if the index is None convert it into a simple list
# right now it doesn't do any check, but can be modified in the future
index = lrange(len(categories_levels)) if index is None else index
contingency = OrderedDict()
for key, value in iteritems(data):
new_key = tuple(key[i] for i in index)
contingency[new_key] = value
data = contingency
return data
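# Illustrative example (added): simple keys are tuplified and the ordering of
# insertion is preserved; missing key combinations would be filled with zeros.
# >>> _normalize_data(OrderedDict([('a', 1), ('b', 2)]), None)
# OrderedDict([(('a',), 1), (('b',), 2)])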
def _normalize_dataframe(dataframe, index):
"""Take a pandas DataFrame and count the element present in the
given columns, return a hierarchical index on those columns
"""
#groupby the given keys, extract the same columns and count the element
# then collapse them with a mean
data = dataframe[index].dropna()
grouped = data.groupby(index, sort=False)
counted = grouped[index].count()
averaged = counted.mean(axis=1)
return averaged
def _statistical_coloring(data):
"""evaluate colors from the indipendence properties of the matrix
It will encounter problem if one category has all zeros
"""
data = _normalize_data(data, None)
categories_levels = _categories_level(list(iterkeys(data)))
Nlevels = len(categories_levels)
total = 1.0 * sum(v for v in itervalues(data))
# count the proportion of observation
# for each level that has the given name
# at each level
levels_count = []
for level_idx in range(Nlevels):
proportion = {}
for level in categories_levels[level_idx]:
proportion[level] = 0.0
for key, value in iteritems(data):
if level == key[level_idx]:
proportion[level] += value
proportion[level] /= total
levels_count.append(proportion)
# for each key I obtain the expected value
    # and its standard deviation from a binomial distribution
    # under the hypothesis of independence
expected = {}
for key, value in iteritems(data):
base = 1.0
for i, k in enumerate(key):
base *= levels_count[i][k]
expected[key] = base * total, np.sqrt(total * base * (1.0 - base))
# now we have the standard deviation of distance from the
# expected value for each tile. We create the colors from this
sigmas = dict((k, (data[k] - m) / s) for k, (m, s) in iteritems(expected))
props = {}
for key, dev in iteritems(sigmas):
red = 0.0 if dev < 0 else (dev / (1 + dev))
blue = 0.0 if dev > 0 else (dev / (-1 + dev))
green = (1.0 - red - blue) / 2.0
hatch = 'x' if dev > 2 else 'o' if dev < -2 else ''
props[key] = {'color': [red, green, blue], 'hatch': hatch}
return props
def _create_labels(rects, horizontal, ax, rotation):
"""find the position of the label for each value of each category
right now it supports only up to the four categories
ax: the axis on which the label should be applied
rotation: the rotation list for each side
"""
categories = _categories_level(list(iterkeys(rects)))
if len(categories) > 4:
msg = ("maximum of 4 level supported for axes labeling..and 4"
"is alreay a lot of level, are you sure you need them all?")
raise NotImplementedError(msg)
labels = {}
#keep it fixed as will be used a lot of times
items = list(iteritems(rects))
vertical = not horizontal
#get the axis ticks and labels locator to put the correct values!
ax2 = ax.twinx()
ax3 = ax.twiny()
#this is the order of execution for horizontal disposition
ticks_pos = [ax.set_xticks, ax.set_yticks, ax3.set_xticks, ax2.set_yticks]
ticks_lab = [ax.set_xticklabels, ax.set_yticklabels,
ax3.set_xticklabels, ax2.set_yticklabels]
#for the vertical one, rotate it by one
if vertical:
ticks_pos = ticks_pos[1:] + ticks_pos[:1]
ticks_lab = ticks_lab[1:] + ticks_lab[:1]
#clean them
for pos, lab in zip(ticks_pos, ticks_lab):
pos([])
lab([])
#for each level, for each value in the level, take the mean of all
#the sublevel that correspond to that partial key
for level_idx, level in enumerate(categories):
#this dictionary keep the labels only for this level
level_ticks = dict()
for value in level:
#to which level it should refer to get the preceding
#values of labels? it's rather a tricky question...
            #this is dependent on the side. It's a very crude approach,
            #but I couldn't think of a more general way...
if horizontal:
if level_idx == 3:
index_select = [-1, -1, -1]
else:
index_select = [+0, -1, -1]
else:
if level_idx == 3:
index_select = [+0, -1, +0]
else:
index_select = [-1, -1, -1]
#now I create the base key name and append the current value
#It will search on all the rects to find the corresponding one
#and use them to evaluate the mean position
basekey = tuple(categories[i][index_select[i]]
for i in range(level_idx))
basekey = basekey + (value,)
subset = dict((k, v) for k, v in items
if basekey == k[:level_idx + 1])
#now I extract the center of all the tiles and make a weighted
#mean of all these center on the area of the tile
#this should give me the (more or less) correct position
#of the center of the category
vals = list(itervalues(subset))
W = sum(w * h for (x, y, w, h) in vals)
x_lab = sum((x + w / 2.0) * w * h / W for (x, y, w, h) in vals)
y_lab = sum((y + h / 2.0) * w * h / W for (x, y, w, h) in vals)
            #now, based on the ordering, select which position to keep
            #needs to be written in a more general form - or are 4 levels enough?
#should give also the horizontal and vertical alignment
side = (level_idx + vertical) % 4
level_ticks[value] = y_lab if side % 2 else x_lab
#now we add the labels of this level to the correct axis
ticks_pos[level_idx](list(itervalues(level_ticks)))
ticks_lab[level_idx](list(iterkeys(level_ticks)),
rotation=rotation[level_idx])
return labels
def mosaic(data, index=None, ax=None, horizontal=True, gap=0.005,
properties=lambda key: None, labelizer=None,
title='', statistic=False, axes_label=True,
label_rotation=0.0):
"""Create a mosaic plot from a contingency table.
    It allows one to visualize multivariate categorical data in a rigorous
and informative way.
Parameters
----------
data : dict, pandas.Series, np.ndarray, pandas.DataFrame
The contingency table that contains the data.
Each category should contain a non-negative number
        with a tuple as index. It expects all the combinations
        of keys to be represented; if that is not true, the missing
        values are automatically considered as 0. The order
        of the keys will be the same as the one of insertion. If a
        dict or a Series (or any other dict-like object)
        is used, it will take the keys as labels. If a
        np.ndarray is provided, it will generate simple
        numerical labels.
index: list, optional
Gives the preferred order for the category ordering. If not specified
will default to the given order. It doesn't support named indexes
for hierarchical Series. If a DataFrame is provided, it expects
a list with the name of the columns.
ax : matplotlib.Axes, optional
        The axes on which to display the mosaic. If not given, a new
        figure will be created
horizontal : bool, optional (default True)
The starting direction of the split (by default along
the horizontal axis)
gap : float or array of floats
The list of gaps to be applied on each subdivision.
        If the length of the given array is less than the number
        of subcategories (or if it's a single number), it will be extended
        with exponentially decreasing gaps
labelizer : function (key) -> string, optional
        A function that generates the text to display at the center of
        each tile based on the key of that tile
properties : function (key) -> dict, optional
        A function that, for each tile in the mosaic, takes the key
        of the tile and returns the dictionary of properties
        of the generated Rectangle, like color, hatch or similar.
        A default properties set will be provided for the keys whose
        color has not been defined, and will use color variation to help
        visually separate the various categories. It should return None
to indicate that it should use the default property for the tile.
A dictionary of the properties for each key can be passed,
and it will be internally converted to the correct function
statistic: bool, optional (default False)
        If true, a crude statistical model is used to give colors to the plot.
        If the tile has a content that is more than 2 standard deviations
        from the expected value under the independence hypothesis, it will
        go from green to red (for positive deviations, blue otherwise) and
        will acquire a hatching when it crosses the 3 sigma level.
title: string, optional
The title of the axis
axes_label: boolean, optional
Show the name of each value of each category
on the axis (default) or hide them.
label_rotation: float or list of float
the rotation of the axis label (if present). If a list is given
each axis can have a different rotation
Returns
----------
fig : matplotlib.Figure
        The generated figure
rects : dict
A dictionary that has the same keys of the original
dataset, that holds a reference to the coordinates of the
tile and the Rectangle that represent it
See Also
----------
A Brief History of the Mosaic Display
Michael Friendly, York University, Psychology Department
Journal of Computational and Graphical Statistics, 2001
Mosaic Displays for Loglinear Models.
Michael Friendly, York University, Psychology Department
Proceedings of the Statistical Graphics Section, 1992, 61-68.
    Mosaic displays for multi-way contingency tables.
Michael Friendly, York University, Psychology Department
    Journal of the American Statistical Association
March 1994, Vol. 89, No. 425, Theory and Methods
Examples
----------
The most simple use case is to take a dictionary and plot the result
>>> data = {'a': 10, 'b': 15, 'c': 16}
>>> mosaic(data, title='basic dictionary')
>>> pylab.show()
A more useful example is given by a dictionary with multiple indices.
    In this case we use a wider gap for a better visual separation of the
resulting plot
>>> data = {('a', 'b'): 1, ('a', 'c'): 2, ('d', 'b'): 3, ('d', 'c'): 4}
>>> mosaic(data, gap=0.05, title='complete dictionary')
>>> pylab.show()
The same data can be given as a simple or hierarchical indexed Series
>>> rand = np.random.random
>>> from itertools import product
>>>
>>> tuples = list(product(['bar', 'baz', 'foo', 'qux'], ['one', 'two']))
>>> index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])
>>> data = pd.Series(rand(8), index=index)
>>> mosaic(data, title='hierarchical index series')
>>> pylab.show()
    The third accepted data structure is the np array, for which a
very simple index will be created.
>>> rand = np.random.random
>>> data = 1+rand((2,2))
>>> mosaic(data, title='random non-labeled array')
>>> pylab.show()
If you need to modify the labeling and the coloring you can give
    a function to create the labels and one for the graphical properties,
starting from the key tuple
>>> data = {'a': 10, 'b': 15, 'c': 16}
>>> props = lambda key: {'color': 'r' if 'a' in key else 'gray'}
>>> labelizer = lambda k: {('a',): 'first', ('b',): 'second',
('c',): 'third'}[k]
>>> mosaic(data, title='colored dictionary',
properties=props, labelizer=labelizer)
>>> pylab.show()
Using a DataFrame as source, specifying the name of the columns of interest
>>> gender = ['male', 'male', 'male', 'female', 'female', 'female']
>>> pet = ['cat', 'dog', 'dog', 'cat', 'dog', 'cat']
>>> data = pandas.DataFrame({'gender': gender, 'pet': pet})
>>> mosaic(data, ['pet', 'gender'])
>>> pylab.show()
"""
if isinstance(data, DataFrame) and index is None:
raise ValueError("You must pass an index if data is a DataFrame."
" See examples.")
from pylab import Rectangle
fig, ax = utils.create_mpl_ax(ax)
# normalize the data to a dict with tuple of strings as keys
data = _normalize_data(data, index)
# split the graph into different areas
rects = _hierarchical_split(data, horizontal=horizontal, gap=gap)
# if there is no specified way to create the labels
# create a default one
if labelizer is None:
labelizer = lambda k: "\n".join(k)
if statistic:
default_props = _statistical_coloring(data)
else:
default_props = _create_default_properties(data)
if isinstance(properties, dict):
color_dict = properties
properties = lambda key: color_dict.get(key, None)
for k, v in iteritems(rects):
# create each rectangle and put a label on it
x, y, w, h = v
conf = properties(k)
props = conf if conf else default_props[k]
text = labelizer(k)
Rect = Rectangle((x, y), w, h, label=text, **props)
ax.add_patch(Rect)
ax.text(x + w / 2, y + h / 2, text, ha='center',
va='center', size='smaller')
#creating the labels on the axis
    #or clearing them
if axes_label:
if np.iterable(label_rotation):
rotation = label_rotation
else:
rotation = [label_rotation] * 4
labels = _create_labels(rects, horizontal, ax, rotation)
else:
ax.set_xticks([])
ax.set_xticklabels([])
ax.set_yticks([])
ax.set_yticklabels([])
ax.set_title(title)
return fig, rects
| bsd-3-clause |
LAIRLAB/qr_trees | src/python/run_ilqr_diffdrive.py | 1 | 2328 | #!/usr/bin/env python
#
# Arun Venkatraman (arunvenk@cs.cmu.edu)
# December 2016
#
# If we are not running from the build directory, then add lib to path from
# build assuming we are running from the python folder
import os
full_path = os.path.realpath(__file__)
if full_path.count("src/python") > 0:
import sys
to_add = os.path.abspath(os.path.join(os.path.split(full_path)[0], "../../build/"))
sys.path.append(to_add)
from IPython import embed
import lib.ilqr_diffdrive as ilqr
import visualize_circle_world as vis
import numpy as np
import matplotlib.pyplot as plt
if __name__ == "__main__":
obs_prior = [0.5, 0.5]
world_dims = [-30, 30, -30, 30]
w1 = ilqr.CircleWorld(world_dims)
w2 = ilqr.CircleWorld(world_dims)
obs_pos_1 = [-2, 0.0]
obs_pos_2 = [2, 0.0]
obs_radius = 10.0
obstacle_1 = ilqr.Circle(obs_radius, obs_pos_1);
obstacle_2 = ilqr.Circle(obs_radius, obs_pos_2);
# add obstacle to world 1
w1.add_obstacle(obstacle_1);
# add obstacle to world 2
w2.add_obstacle(obstacle_2);
cost, states_true_1, obs_fname_1 = ilqr.control_diffdrive(ilqr.TRUE_ILQR,
w1, w2, obs_prior, "true1", "true1")
cost, states_true_2, obs_fname_2 = ilqr.control_diffdrive(ilqr.TRUE_ILQR,
w2, w1, obs_prior, "true2", "true2")
cost, states_weighted_1, obs_fname_3 =\
ilqr.control_diffdrive(ilqr.PROB_WEIGHTED_CONTROL,
w1, w2, obs_prior, "weight3", "weight3")
cost, states_weighted_2, obs_fname_4 =\
ilqr.control_diffdrive(ilqr.PROB_WEIGHTED_CONTROL,
w2, w1, obs_prior, "weight4", "weight4")
cost, states_hind_1, obs_fname_5 =\
ilqr.control_diffdrive(ilqr.HINDSIGHT,
w1, w2, obs_prior, "hind3", "hind3")
cost, states_hind_2, obs_fname_6 =\
ilqr.control_diffdrive(ilqr.HINDSIGHT,
w2, w1, obs_prior, "hind4", "hind4")
print("Drawing world 1")
ax1 = vis.parse_draw_files([states_true_1, states_weighted_1, states_hind_1], obs_fname_1,
show=False)
plt.title('World 1')
print("Drawing world 2")
ax2 = vis.parse_draw_files([states_true_2, states_weighted_2, states_hind_2],
obs_fname_2, show=False)
plt.title('World 2')
plt.show()
embed()
| bsd-3-clause |
mcvidomi/poim2motif | run_svm_real.py | 1 | 1483 | '''
Created on 08.06.2015
@author: marinavidovic
'''
import os
import pdb
import utils_svm
import pickle
import numpy as np
import copy
import genQ
import makePOIM
import view
import matplotlib
matplotlib.use('Agg')
if __name__ == '__main__':
read_data = 1
datapath = "/home/mvidovic/POIMall/data/real/human_acceptor_splice_data.txt"
savepath = "/home/mvidovic/POIMall/data/real/human_acceptor_splice_data0.pkl"
lines=1000
if read_data:
x,y=utils_svm.extractRealData(datapath,savepath,lines)
else:
fobj=open(savepath,'rb')
x,y=pickle.load(fobj)
fobj.close()
num_pos = 100
num_neg = 4*num_pos
print "reduce samples"
x_red,y_red = utils_svm.reduce_samples(copy.deepcopy(x),copy.deepcopy(y),num_pos,num_neg)
nploci_letters,nploci_positions = utils_svm.non_polymorphic_loci(x_red)
#read data
experiment_name = "real1"
if not os.path.exists(experiment_name):
os.makedirs(experiment_name)
poimpath=experiment_name+"/poim.pkl"
tally=30
positives=25
sequenceno=100
mutation_prob=0.0
motif="ATTTT"
mu=13
x,y = makePOIM.gensequences(tally,positives,sequenceno,mutation_prob,motif,mu)
#compute POIM
poim_degree = 6
kernel_degree = 8
print "start poim computation"
poims = makePOIM.computePOIM(x,y,poim_degree,kernel_degree,poimpath)
Q2 = poims[0][1]
#view.test()
view.figurepoimsimple(Q2, "poim_pic", 0)
| mit |
xfaxca/pygaero | example/tmax_peakfind_example.py | 1 | 4986 | # tmax_peakfind_example.py
"""
Demonstration of some of the primary functions in pygaero, including Tmax finding and elemental analysis.
"""
# Module import
from pygaero import pio
from pygaero import therm
from pygaero import gen_chem
import os
import matplotlib.pyplot as plt
def example1():
# ------------------------------- File I/O and Data Cleaning Example -------------------------------- #
indir = "" # input directory (same folder as script by default)
infiles = ['desorb1.csv', 'desorb2.csv'] # input files as a list of strings
# Read in list of csvs with figaero desorptions
df_desorbs_ls = pio.read_files(fdir=indir, flist=infiles)
print('# of files imported: ', len(df_desorbs_ls))
# Clean ion names from default A_CxHyOzI_Avg format (strip underscores '_' and remove iodide
for df in df_desorbs_ls:
print("Example of ion names before clean: ", df.columns.values[0:3])
df.columns = gen_chem.cln_molec_names(idx_names=df.columns.values, delim="_") # remove underscores
df.columns = gen_chem.replace_group(molec_ls=df.columns.values, old_groups=["I"], new_group="") # remove I
print('Example of ion names after clean: ', df.columns.values[0:3])
# Alternatively, one can just assign a single thermogram by df_example = pd.DataFrame.from_csv(indir+infile)
# Adjust thermogram signals for 4.0 LPM figaero flow rate relative to nominal 2.0 LPM sample rate
# print('Before flow rate adjust:', df_desorbs_ls[0].values[0:3, 5])
therm.flow_correction(thermograms=df_desorbs_ls, aero_samp_rates=[4.0, 4.0])
# print('After flow rate adjust:', df_desorbs_ls[0].values[0:3, 5])
# ---------------------------------- Elemental Stats Example --------------------------------------- #
# A. Calculate elemental statistics for species in each desorb CSV that was read in. Then append the DataFrames
# containing these statistics into a list. Note, Iodide has been stripped from the names at this point, so
# the parameter cluster_group=None
ele_stats_ls = []
for df in df_desorbs_ls:
df_ele_temp = gen_chem.ele_stats(molec_ls=df.columns.values, ion_state=-1, cluster_group=None,
clst_group_mw=0.0, xtra_elements=["Cl", "F"])
ele_stats_ls.append(df_ele_temp)
# -------------------------------- Peak Finding (TMax) Example --------------------------------------#
# A. Smooth time series as step prior to Tmax (helps prevent mis-identification of TMax in noisy signals)
for df in df_desorbs_ls:
for series in df.columns.values:
# print('series: ', series)
df.ix[:, series] = therm.smooth(x=df.ix[:, series].values, window='hamming', window_len=15)
plt.show()
# B. Find TMax for all loaded thermograms. Returns a pandas DataFrame with ion names as index values and columns:
# TMax1, MaxSig1, TMax2, MaxSig2, DubFlag (double peak flag - binary; -1 for no peaks found). Depending on the
# specific data set, the [pk_thresh] and [pk_win] parameters may need to be optimized. See documentation for
# function peakfind_df_ls in module therm.py for more details. Results are drastically improved by first
# smoothing the time series, so that small fluctuations in signal are not mistaken for a peak.
df_tmax_ls = therm.peakfind_df_ls(df_ls=df_desorbs_ls, pk_thresh=0.05, pk_win=20,
min_temp=40.0, max_temp=190.0)
# C. Quick plot to visualize Tmax values for 15 example ions
# therm.plot_tmax(df=df_desorbs_ls[0], ions=df_tmax_ls[0].index.values[15:29],
# tmax_temps=df_tmax_ls[0].ix[15:29, 'TMax1'], tmax_vals=df_tmax_ls[0].ix[15:29, 'MaxSig1'])
therm.plot_tmax_double(df=df_desorbs_ls[0], ions=df_tmax_ls[0].index.values[15:29],
tmax_temps=df_tmax_ls[0].ix[15:29, 'TMax1'],
tmax_temps2=df_tmax_ls[0].ix[15:29, 'TMax2'],
tmax_vals=df_tmax_ls[0].ix[15:29, 'MaxSig1'],
tmax_vals2=df_tmax_ls[0].ix[15:29, 'MaxSig2'])
# ----------------------------------- Saving Results Example -------------------------------------- #
# Uncomment the following lines to save the example output
# outdir = 'testout'
# if outdir[-1] != '/':
# outdir += '/'
# if not os.path.exists(outdir):
# os.makedirs(outdir)
# # A. Save TMax data
# for df, fname in zip(df_tmax_ls, ["desorb1_tmax", "desorb2_tmax"]):
# df.to_csv(outdir+fname+".csv")
# # B. Save smoothed desorption thermogram time series
# for df, fname in zip(df_desorbs_ls, ["desorb1_smth", "desorb2_smth"]):
# df.to_csv(outdir+fname+".csv")
# # C. Save elemental stats for each desorption
# for df, fname in zip(ele_stats_ls, ["desorb1_ele", "desorb2_ele"]):
# df.to_csv(outdir+fname+".csv")
return 0
if __name__ == '__main__':
example1()
| gpl-3.0 |
FrankWang33/cuda-convnet2 | shownet.py | 180 | 18206 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from tarfile import TarFile, TarInfo
from matplotlib import pylab as pl
import numpy as n
import getopt as opt
from python_util.util import *
from math import sqrt, ceil, floor
from python_util.gpumodel import IGPUModel
import random as r
import numpy.random as nr
from convnet import ConvNet
from python_util.options import *
from PIL import Image
from time import sleep
class ShowNetError(Exception):
pass
class ShowConvNet(ConvNet):
def __init__(self, op, load_dic):
ConvNet.__init__(self, op, load_dic)
def init_data_providers(self):
self.need_gpu = self.op.get_value('show_preds')
class Dummy:
def advance_batch(self):
pass
if self.need_gpu:
ConvNet.init_data_providers(self)
else:
self.train_data_provider = self.test_data_provider = Dummy()
def import_model(self):
if self.need_gpu:
ConvNet.import_model(self)
def init_model_state(self):
if self.op.get_value('show_preds'):
self.softmax_name = self.op.get_value('show_preds')
def init_model_lib(self):
if self.need_gpu:
ConvNet.init_model_lib(self)
def plot_cost(self):
if self.show_cost not in self.train_outputs[0][0]:
raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
# print self.test_outputs
train_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.train_outputs]
test_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.test_outputs]
if self.smooth_test_errors:
test_errors = [sum(test_errors[max(0,i-len(self.test_batch_range)):i])/(i-max(0,i-len(self.test_batch_range))) for i in xrange(1,len(test_errors)+1)]
numbatches = len(self.train_batch_range)
test_errors = n.row_stack(test_errors)
test_errors = n.tile(test_errors, (1, self.testing_freq))
test_errors = list(test_errors.flatten())
test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
test_errors = test_errors[:len(train_errors)]
numepochs = len(train_errors) / float(numbatches)
pl.figure(1)
x = range(0, len(train_errors))
pl.plot(x, train_errors, 'k-', label='Training set')
pl.plot(x, test_errors, 'r-', label='Test set')
pl.legend()
ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
epoch_label_gran = int(ceil(numepochs / 20.))
epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) if numepochs >= 10 else epoch_label_gran
ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs))
pl.xticks(ticklocs, ticklabels)
pl.xlabel('Epoch')
# pl.ylabel(self.show_cost)
pl.title('%s[%d]' % (self.show_cost, self.cost_idx))
# print "plotted cost"
def make_filter_fig(self, filters, filter_start, fignum, _title, num_filters, combine_chans, FILTERS_PER_ROW=16):
MAX_ROWS = 24
MAX_FILTERS = FILTERS_PER_ROW * MAX_ROWS
num_colors = filters.shape[0]
f_per_row = int(ceil(FILTERS_PER_ROW / float(1 if combine_chans else num_colors)))
filter_end = min(filter_start+MAX_FILTERS, num_filters)
filter_rows = int(ceil(float(filter_end - filter_start) / f_per_row))
filter_pixels = filters.shape[1]
filter_size = int(sqrt(filters.shape[1]))
fig = pl.figure(fignum)
fig.text(.5, .95, '%s %dx%d filters %d-%d' % (_title, filter_size, filter_size, filter_start, filter_end-1), horizontalalignment='center')
num_filters = filter_end - filter_start
if not combine_chans:
bigpic = n.zeros((filter_size * filter_rows + filter_rows + 1, filter_size*num_colors * f_per_row + f_per_row + 1), dtype=n.single)
else:
bigpic = n.zeros((3, filter_size * filter_rows + filter_rows + 1, filter_size * f_per_row + f_per_row + 1), dtype=n.single)
for m in xrange(filter_start,filter_end ):
filter = filters[:,:,m]
y, x = (m - filter_start) / f_per_row, (m - filter_start) % f_per_row
if not combine_chans:
for c in xrange(num_colors):
filter_pic = filter[c,:].reshape((filter_size,filter_size))
bigpic[1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size*num_colors) * x + filter_size*c:1 + (1 + filter_size*num_colors) * x + filter_size*(c+1)] = filter_pic
else:
filter_pic = filter.reshape((3, filter_size,filter_size))
bigpic[:,
1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
1 + (1 + filter_size) * x:1 + (1 + filter_size) * x + filter_size] = filter_pic
pl.xticks([])
pl.yticks([])
if not combine_chans:
pl.imshow(bigpic, cmap=pl.cm.gray, interpolation='nearest')
else:
bigpic = bigpic.swapaxes(0,2).swapaxes(0,1)
pl.imshow(bigpic, interpolation='nearest')
def plot_filters(self):
FILTERS_PER_ROW = 16
filter_start = 0 # First filter to show
if self.show_filters not in self.layers:
raise ShowNetError("Layer with name '%s' not defined by given convnet." % self.show_filters)
layer = self.layers[self.show_filters]
filters = layer['weights'][self.input_idx]
# filters = filters - filters.min()
# filters = filters / filters.max()
if layer['type'] == 'fc': # Fully-connected layer
num_filters = layer['outputs']
channels = self.channels
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
elif layer['type'] in ('conv', 'local'): # Conv layer
num_filters = layer['filters']
channels = layer['filterChannels'][self.input_idx]
if layer['type'] == 'local':
filters = filters.reshape((layer['modules'], channels, layer['filterPixels'][self.input_idx], num_filters))
filters = filters[:, :, :, self.local_plane] # first map for now (modules, channels, pixels)
filters = filters.swapaxes(0,2).swapaxes(0,1)
num_filters = layer['modules']
# filters = filters.swapaxes(0,1).reshape(channels * layer['filterPixels'][self.input_idx], num_filters * layer['modules'])
# num_filters *= layer['modules']
FILTERS_PER_ROW = layer['modulesX']
else:
filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
# Convert YUV filters to RGB
if self.yuv_to_rgb and channels == 3:
R = filters[0,:,:] + 1.28033 * filters[2,:,:]
G = filters[0,:,:] + -0.21482 * filters[1,:,:] + -0.38059 * filters[2,:,:]
B = filters[0,:,:] + 2.12798 * filters[1,:,:]
filters[0,:,:], filters[1,:,:], filters[2,:,:] = R, G, B
combine_chans = not self.no_rgb and channels == 3
# Make sure you don't modify the backing array itself here -- so no -= or /=
if self.norm_filters:
#print filters.shape
filters = filters - n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).mean(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1))
filters = filters / n.sqrt(n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).var(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1)))
#filters = filters - n.tile(filters.min(axis=0).min(axis=0), (3, filters.shape[1], 1))
#filters = filters / n.tile(filters.max(axis=0).max(axis=0), (3, filters.shape[1], 1))
#else:
filters = filters - filters.min()
filters = filters / filters.max()
self.make_filter_fig(filters, filter_start, 2, 'Layer %s' % self.show_filters, num_filters, combine_chans, FILTERS_PER_ROW=FILTERS_PER_ROW)
def plot_predictions(self):
epoch, batch, data = self.get_next_batch(train=False) # get a test batch
num_classes = self.test_data_provider.get_num_classes()
NUM_ROWS = 2
NUM_COLS = 4
NUM_IMGS = NUM_ROWS * NUM_COLS if not self.save_preds else data[0].shape[1]
NUM_TOP_CLASSES = min(num_classes, 5) # show this many top labels
NUM_OUTPUTS = self.model_state['layers'][self.softmax_name]['outputs']
PRED_IDX = 1
label_names = [lab.split(',')[0] for lab in self.test_data_provider.batch_meta['label_names']]
if self.only_errors:
preds = n.zeros((data[0].shape[1], NUM_OUTPUTS), dtype=n.single)
else:
preds = n.zeros((NUM_IMGS, NUM_OUTPUTS), dtype=n.single)
#rand_idx = nr.permutation(n.r_[n.arange(1), n.where(data[1] == 552)[1], n.where(data[1] == 795)[1], n.where(data[1] == 449)[1], n.where(data[1] == 274)[1]])[:NUM_IMGS]
rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
if NUM_IMGS < data[0].shape[1]:
data = [n.require(d[:,rand_idx], requirements='C') for d in data]
# data += [preds]
# Run the model
print [d.shape for d in data], preds.shape
self.libmodel.startFeatureWriter(data, [preds], [self.softmax_name])
IGPUModel.finish_batch(self)
print preds
data[0] = self.test_data_provider.get_plottable_data(data[0])
if self.save_preds:
if not gfile.Exists(self.save_preds):
gfile.MakeDirs(self.save_preds)
preds_thresh = preds > 0.5 # Binarize predictions
data[0] = data[0] * 255.0
data[0][data[0]<0] = 0
data[0][data[0]>255] = 255
data[0] = n.require(data[0], dtype=n.uint8)
dir_name = '%s_predictions_batch_%d' % (os.path.basename(self.save_file), batch)
tar_name = os.path.join(self.save_preds, '%s.tar' % dir_name)
tfo = gfile.GFile(tar_name, "w")
tf = TarFile(fileobj=tfo, mode='w')
for img_idx in xrange(NUM_IMGS):
img = data[0][img_idx,:,:,:]
imsave = Image.fromarray(img)
prefix = "CORRECT" if data[1][0,img_idx] == preds_thresh[img_idx,PRED_IDX] else "FALSE_POS" if preds_thresh[img_idx,PRED_IDX] == 1 else "FALSE_NEG"
file_name = "%s_%.2f_%d_%05d_%d.png" % (prefix, preds[img_idx,PRED_IDX], batch, img_idx, data[1][0,img_idx])
# gf = gfile.GFile(file_name, "w")
file_string = StringIO()
imsave.save(file_string, "PNG")
tarinf = TarInfo(os.path.join(dir_name, file_name))
tarinf.size = file_string.tell()
file_string.seek(0)
tf.addfile(tarinf, file_string)
tf.close()
tfo.close()
# gf.close()
print "Wrote %d prediction PNGs to %s" % (preds.shape[0], tar_name)
else:
fig = pl.figure(3, figsize=(12,9))
fig.text(.4, .95, '%s test samples' % ('Mistaken' if self.only_errors else 'Random'))
if self.only_errors:
# what the net got wrong
if NUM_OUTPUTS > 1:
err_idx = [i for i,p in enumerate(preds.argmax(axis=1)) if p not in n.where(data[2][:,i] > 0)[0]]
else:
err_idx = n.where(data[1][0,:] != preds[:,0].T)[0]
print err_idx
err_idx = r.sample(err_idx, min(len(err_idx), NUM_IMGS))
data[0], data[1], preds = data[0][:,err_idx], data[1][:,err_idx], preds[err_idx,:]
import matplotlib.gridspec as gridspec
import matplotlib.colors as colors
cconv = colors.ColorConverter()
gs = gridspec.GridSpec(NUM_ROWS*2, NUM_COLS,
width_ratios=[1]*NUM_COLS, height_ratios=[2,1]*NUM_ROWS )
#print data[1]
for row in xrange(NUM_ROWS):
for col in xrange(NUM_COLS):
img_idx = row * NUM_COLS + col
if data[0].shape[0] <= img_idx:
break
pl.subplot(gs[(row * 2) * NUM_COLS + col])
#pl.subplot(NUM_ROWS*2, NUM_COLS, row * 2 * NUM_COLS + col + 1)
pl.xticks([])
pl.yticks([])
img = data[0][img_idx,:,:,:]
pl.imshow(img, interpolation='lanczos')
show_title = data[1].shape[0] == 1
true_label = [int(data[1][0,img_idx])] if show_title else n.where(data[1][:,img_idx]==1)[0]
#print true_label
#print preds[img_idx,:].shape
#print preds[img_idx,:].max()
true_label_names = [label_names[i] for i in true_label]
img_labels = sorted(zip(preds[img_idx,:], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
#print img_labels
axes = pl.subplot(gs[(row * 2 + 1) * NUM_COLS + col])
height = 0.5
ylocs = n.array(range(NUM_TOP_CLASSES))*height
pl.barh(ylocs, [l[0] for l in img_labels], height=height, \
color=['#ffaaaa' if l[1] in true_label_names else '#aaaaff' for l in img_labels])
#pl.title(", ".join(true_labels))
if show_title:
pl.title(", ".join(true_label_names), fontsize=15, fontweight='bold')
else:
print true_label_names
pl.yticks(ylocs + height/2, [l[1] for l in img_labels], x=1, backgroundcolor=cconv.to_rgba('0.65', alpha=0.5), weight='bold')
for line in enumerate(axes.get_yticklines()):
line[1].set_visible(False)
#pl.xticks([width], [''])
#pl.yticks([])
pl.xticks([])
pl.ylim(0, ylocs[-1] + height)
pl.xlim(0, 1)
def start(self):
self.op.print_values()
# print self.show_cost
if self.show_cost:
self.plot_cost()
if self.show_filters:
self.plot_filters()
if self.show_preds:
self.plot_predictions()
if pl:
pl.show()
sys.exit(0)
@classmethod
def get_options_parser(cls):
op = ConvNet.get_options_parser()
for option in list(op.options):
if option not in ('gpu', 'load_file', 'inner_size', 'train_batch_range', 'test_batch_range', 'multiview_test', 'data_path', 'pca_noise', 'scalar_mean'):
op.delete_option(option)
op.add_option("show-cost", "show_cost", StringOptionParser, "Show specified objective function", default="")
op.add_option("show-filters", "show_filters", StringOptionParser, "Show learned filters in specified layer", default="")
op.add_option("norm-filters", "norm_filters", BooleanOptionParser, "Individually normalize filters shown with --show-filters", default=0)
op.add_option("input-idx", "input_idx", IntegerOptionParser, "Input index for layer given to --show-filters", default=0)
op.add_option("cost-idx", "cost_idx", IntegerOptionParser, "Cost function return value index for --show-cost", default=0)
op.add_option("no-rgb", "no_rgb", BooleanOptionParser, "Don't combine filter channels into RGB in layer given to --show-filters", default=False)
op.add_option("yuv-to-rgb", "yuv_to_rgb", BooleanOptionParser, "Convert RGB filters to YUV in layer given to --show-filters", default=False)
op.add_option("channels", "channels", IntegerOptionParser, "Number of channels in layer given to --show-filters (fully-connected layers only)", default=0)
op.add_option("show-preds", "show_preds", StringOptionParser, "Show predictions made by given softmax on test set", default="")
op.add_option("save-preds", "save_preds", StringOptionParser, "Save predictions to given path instead of showing them", default="")
op.add_option("only-errors", "only_errors", BooleanOptionParser, "Show only mistaken predictions (to be used with --show-preds)", default=False, requires=['show_preds'])
op.add_option("local-plane", "local_plane", IntegerOptionParser, "Local plane to show", default=0)
op.add_option("smooth-test-errors", "smooth_test_errors", BooleanOptionParser, "Use running average for test error plot?", default=1)
op.options['load_file'].default = None
return op
if __name__ == "__main__":
#nr.seed(6)
try:
op = ShowConvNet.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ShowConvNet(op, load_dic)
model.start()
except (UnpickleError, ShowNetError, opt.GetoptError), e:
print "----------------"
print "Error:"
print e
| apache-2.0 |
aabadie/scikit-learn | examples/mixture/plot_gmm_selection.py | 95 | 3310 | """
================================
Gaussian Mixture Model Selection
================================
This example shows that model selection can be performed with
Gaussian Mixture Models using information-theoretic criteria (BIC).
Model selection concerns both the covariance type
and the number of components in the model.
In that case, AIC also provides the right result (not shown to save time),
but BIC is better suited if the problem is to identify the right model.
Unlike Bayesian procedures, such inferences are prior-free.
In that case, the model with 2 components and full covariance
(which corresponds to the true generative model) is selected.
"""
import numpy as np
import itertools
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
print(__doc__)
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
lowest_bic = np.infty
bic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
for n_components in n_components_range:
# Fit a Gaussian mixture with EM
gmm = mixture.GaussianMixture(n_components=n_components,
covariance_type=cv_type)
gmm.fit(X)
bic.append(gmm.bic(X))
if bic[-1] < lowest_bic:
lowest_bic = bic[-1]
best_gmm = gmm
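# Hedged aside (added, not part of the original example): the AIC criterion
# mentioned in the docstring follows the same pattern inside the loop above,
# e.g. aic.append(gmm.aic(X)); GaussianMixture exposes both .bic(X) and .aic(X).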
bic = np.array(bic)
color_iter = itertools.cycle(['navy', 'turquoise', 'cornflowerblue',
'darkorange'])
clf = best_gmm
bars = []
# Plot the BIC scores
spl = plt.subplot(2, 1, 1)
for i, (cv_type, color) in enumerate(zip(cv_types, color_iter)):
xpos = np.array(n_components_range) + .2 * (i - 2)
bars.append(plt.bar(xpos, bic[i * len(n_components_range):
(i + 1) * len(n_components_range)],
width=.2, color=color))
plt.xticks(n_components_range)
plt.ylim([bic.min() * 1.01 - .01 * bic.max(), bic.max()])
plt.title('BIC score per model')
xpos = np.mod(bic.argmin(), len(n_components_range)) + .65 +\
.2 * np.floor(bic.argmin() / len(n_components_range))
plt.text(xpos, bic.min() * 0.97 + .03 * bic.max(), '*', fontsize=14)
spl.set_xlabel('Number of components')
spl.legend([b[0] for b in bars], cv_types)
# Plot the winner
splot = plt.subplot(2, 1, 2)
Y_ = clf.predict(X)
for i, (mean, cov, color) in enumerate(zip(clf.means_, clf.covariances_,
color_iter)):
v, w = linalg.eigh(cov)
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
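    # v holds the eigenvalues of the covariance (the variances along the
    # component's principal axes) and w its eigenvectors; the ellipse axes
    # below are drawn at 2 * sqrt(2) standard deviations.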
angle = np.arctan2(w[0][1], w[0][0])
angle = 180. * angle / np.pi # convert to degrees
v = 2. * np.sqrt(2.) * np.sqrt(v)
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180. + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(.5)
splot.add_artist(ell)
plt.xticks(())
plt.yticks(())
plt.title('Selected GMM: full model, 2 components')
plt.subplots_adjust(hspace=.35, bottom=.02)
plt.show()
| bsd-3-clause |
equialgo/scikit-learn | examples/cluster/plot_color_quantization.py | 61 | 3444 | # -*- coding: utf-8 -*-
"""
==================================
Color Quantization using K-Means
==================================
Performs a pixel-wise Vector Quantization (VQ) of an image of the Summer Palace
(China), reducing the number of colors required to show the image from 96,615
unique colors to 64, while preserving the overall appearance quality.
In this example, pixels are represented in a 3D-space and K-means is used to
find 64 color clusters. In the image processing literature, the codebook
obtained from K-means (the cluster centers) is called the color palette. Using
a single byte, up to 256 colors can be addressed, whereas an RGB encoding
requires 3 bytes per pixel. The GIF file format, for example, uses such a
palette.
For comparison, a quantized image using a random codebook (colors picked up
randomly) is also shown.
"""
# Authors: Robert Layton <robertlayton@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances_argmin
from sklearn.datasets import load_sample_image
from sklearn.utils import shuffle
from time import time
n_colors = 64
# Load the Summer Palace photo
china = load_sample_image("china.jpg")
# Convert to floats instead of the default 8-bit integer coding. Dividing by
# 255 is important so that plt.imshow works well on float data (it expects
# values in the range [0-1]).
china = np.array(china, dtype=np.float64) / 255
# Reshape the image into a 2D numpy array with one row per pixel.
w, h, d = original_shape = tuple(china.shape)
assert d == 3
image_array = np.reshape(china, (w * h, d))
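# --- Optional, illustrative aside (not part of the original example) --------
# Back-of-the-envelope storage comparison for the claim in the docstring:
# a raw RGB encoding needs 3 bytes per pixel, whereas the quantized image
# needs a 1-byte palette index per pixel plus the 64-entry palette itself.
# Real file formats add headers and further compression, so this is only a
# rough sketch.
raw_bytes = w * h * 3                       # 3 bytes per RGB pixel
palette_bytes = w * h * 1 + n_colors * 3    # 1-byte index per pixel + palette
print("approx. raw size:        %d bytes" % raw_bytes)
print("approx. palettized size: %d bytes (%.1fx smaller)"
      % (palette_bytes, raw_bytes / palette_bytes))
# ----------------------------------------------------------------------------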
print("Fitting model on a small sub-sample of the data")
t0 = time()
image_array_sample = shuffle(image_array, random_state=0)[:1000]
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array_sample)
print("done in %0.3fs." % (time() - t0))
# Get labels for all points
print("Predicting color indices on the full image (k-means)")
t0 = time()
labels = kmeans.predict(image_array)
print("done in %0.3fs." % (time() - t0))
# Build a baseline codebook of n_colors pixels picked at random
codebook_random = shuffle(image_array, random_state=0)[:n_colors]
print("Predicting color indices on the full image (random)")
t0 = time()
labels_random = pairwise_distances_argmin(codebook_random,
image_array,
axis=0)
print("done in %0.3fs." % (time() - t0))
def recreate_image(codebook, labels, w, h):
"""Recreate the (compressed) image from the code book & labels"""
d = codebook.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = codebook[labels[label_idx]]
label_idx += 1
return image
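# Illustrative aside (not part of the original example): an equivalent,
# vectorized reconstruction using NumPy fancy indexing instead of the
# explicit double loop above.
def recreate_image_vectorized(codebook, labels, w, h):
    """Vectorized equivalent of recreate_image (illustrative sketch)."""
    return codebook[labels].reshape(w, h, -1)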
# Display all results, alongside original image
plt.figure(1)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Original image (96,615 colors)')
plt.imshow(china)
plt.figure(2)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, K-Means)')
plt.imshow(recreate_image(kmeans.cluster_centers_, labels, w, h))
plt.figure(3)
plt.clf()
ax = plt.axes([0, 0, 1, 1])
plt.axis('off')
plt.title('Quantized image (64 colors, Random)')
plt.imshow(recreate_image(codebook_random, labels_random, w, h))
plt.show()
| bsd-3-clause |